Posted to commits@lucene.apache.org by is...@apache.org on 2021/02/12 19:37:36 UTC

[lucene-solr] branch jira/solr15138 updated (9c62745 -> 2e98e65)

This is an automated email from the ASF dual-hosted git repository.

ishan pushed a change to branch jira/solr15138
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git.


    from 9c62745  Merging master to this branch
     new 2a9b688  Revert "Merging master to this branch"
     new 2e98e65  SOLR-15138: Collection creation for PerReplicaStates does not scale to large collections as well as regular collections

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
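
For readers who want to inspect these revisions locally before the per-commit emails arrive, one straightforward way (assuming a standard git client; the remote URL, branch name, and commit hashes are the ones quoted above) is:

    # clone the repository and fetch the feature branch
    git clone https://gitbox.apache.org/repos/asf/lucene-solr.git
    cd lucene-solr
    git fetch origin jira/solr15138

    # list the two commits added on top of the previous branch head
    git log --oneline 9c62745..2e98e65

    # show the full diff of an individual commit
    git show 2e98e65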


Summary of changes:
 .../apache/lucene/missingdoclet/MissingDoclet.java |  13 +-
 gradle/ant-compat/folder-layout.gradle             |  20 -
 gradle/documentation/render-javadoc.gradle         |   2 +
 gradle/testing/randomization/policies/tests.policy |   6 +-
 gradle/validation/forbidden-apis.gradle            |  17 -
 gradle/validation/rat-sources.gradle               |  10 +-
 lucene/CHANGES.txt                                 |   8 +-
 lucene/analysis/common/build.gradle                |  15 -
 .../analysis/hunspell/CheckCompoundPattern.java    |   4 +-
 .../lucene/analysis/hunspell/CompoundRule.java     |  24 +-
 .../lucene/analysis/hunspell/Dictionary.java       | 328 +++++++--------
 .../lucene/analysis/hunspell/FlagEnumerator.java   |  86 ----
 .../analysis/hunspell/GeneratingSuggester.java     | 438 ---------------------
 .../analysis/hunspell/ModifyingSuggester.java      |  86 +---
 .../org/apache/lucene/analysis/hunspell/Root.java  |  53 ---
 .../hunspell/{Hunspell.java => SpellChecker.java}  | 219 ++++-------
 .../apache/lucene/analysis/hunspell/Stemmer.java   | 211 +++++-----
 .../lucene/analysis/hunspell/package-info.java     |   6 +-
 ...estSpellChecking.java => SpellCheckerTest.java} |  45 +--
 .../analysis/hunspell/TestAllDictionaries.java     | 191 ++-------
 .../lucene/analysis/hunspell/TestDictionary.java   |  27 +-
 .../lucene/analysis/hunspell/TestDutchIJ.java      |   1 +
 .../hunspell/TestHunspellRepositoryTestCases.java  |  28 +-
 .../lucene/analysis/hunspell/TestPerformance.java  |  30 +-
 .../apache/lucene/analysis/hunspell/allcaps.sug    |   3 -
 .../apache/lucene/analysis/hunspell/base_utf.sug   |  13 -
 .../lucene/analysis/hunspell/checksharps.sug       |   1 -
 .../lucene/analysis/hunspell/forbiddenword.aff     |  11 -
 .../lucene/analysis/hunspell/forbiddenword.dic     |  11 -
 .../lucene/analysis/hunspell/forbiddenword.good    |   3 -
 .../lucene/analysis/hunspell/forbiddenword.wrong   |   4 -
 .../apache/lucene/analysis/hunspell/forceucase.sug |   2 -
 .../lucene/analysis/hunspell/forgivable-errors.aff |   8 +-
 .../lucene/analysis/hunspell/forgivable-errors.dic |   2 -
 .../org/apache/lucene/analysis/hunspell/i58202.aff |   4 -
 .../org/apache/lucene/analysis/hunspell/i58202.dic |   5 -
 .../apache/lucene/analysis/hunspell/i58202.good    |  10 -
 .../org/apache/lucene/analysis/hunspell/i58202.sug |  13 -
 .../apache/lucene/analysis/hunspell/i58202.wrong   |  13 -
 .../apache/lucene/analysis/hunspell/keepcase.sug   |   8 -
 .../org/apache/lucene/analysis/hunspell/map.aff    |   9 -
 .../org/apache/lucene/analysis/hunspell/map.dic    |   4 -
 .../org/apache/lucene/analysis/hunspell/map.sug    |   3 -
 .../org/apache/lucene/analysis/hunspell/map.wrong  |   3 -
 .../apache/lucene/analysis/hunspell/nosuggest.aff  |   5 -
 .../apache/lucene/analysis/hunspell/nosuggest.dic  |   3 -
 .../apache/lucene/analysis/hunspell/nosuggest.good |   3 -
 .../apache/lucene/analysis/hunspell/nosuggest.sug  |   0
 .../lucene/analysis/hunspell/nosuggest.wrong       |   3 -
 .../org/apache/lucene/analysis/hunspell/oconv.aff  |  20 -
 .../org/apache/lucene/analysis/hunspell/oconv.dic  |   4 -
 .../org/apache/lucene/analysis/hunspell/oconv.good |   2 -
 .../org/apache/lucene/analysis/hunspell/oconv.sug  |   3 -
 .../apache/lucene/analysis/hunspell/oconv.wrong    |   3 -
 .../analysis/hunspell/opentaal_forbiddenword1.aff  |   9 -
 .../analysis/hunspell/opentaal_forbiddenword1.dic  |   5 -
 .../analysis/hunspell/opentaal_forbiddenword1.good |   3 -
 .../analysis/hunspell/opentaal_forbiddenword1.sug  |   1 -
 .../hunspell/opentaal_forbiddenword1.wrong         |   5 -
 .../analysis/hunspell/opentaal_forbiddenword2.aff  |   7 -
 .../analysis/hunspell/opentaal_forbiddenword2.dic  |   5 -
 .../analysis/hunspell/opentaal_forbiddenword2.good |   4 -
 .../analysis/hunspell/opentaal_forbiddenword2.sug  |   1 -
 .../hunspell/opentaal_forbiddenword2.wrong         |   5 -
 .../apache/lucene/analysis/hunspell/wordpair.aff   |   4 -
 .../apache/lucene/analysis/hunspell/wordpair.dic   |   4 -
 .../apache/lucene/analysis/hunspell/wordpair.good  |   3 -
 .../apache/lucene/analysis/hunspell/wordpair.wrong |   1 -
 .../analysis/standard/GenerateJflexTLDMacros.java  |  16 +-
 .../analysis/icu/GenerateUTR30DataFiles.java       |  84 ++--
 .../lucene/analysis/icu/RBBIRuleCompiler.java      |  51 ++-
 .../lucene50/Lucene50CompoundFormat.java           |  88 -----
 .../backward_codecs/lucene70/Lucene70Codec.java    |   4 +-
 .../backward_codecs/lucene80/Lucene80Codec.java    |   2 +-
 .../backward_codecs/lucene84/Lucene84Codec.java    |   4 +-
 .../backward_codecs/lucene86/Lucene86Codec.java    |   4 +-
 .../backward_codecs/lucene87/Lucene87Codec.java    |   4 +-
 .../backward_codecs/Lucene87/Lucene87RWCodec.java  |  30 --
 .../lucene50/TestLucene50CompoundFormat.java       |  30 --
 .../backward_codecs/lucene70/Lucene70RWCodec.java  |   7 -
 .../backward_codecs/lucene84/Lucene84RWCodec.java  |   7 -
 .../backward_codecs/lucene86/Lucene86RWCodec.java  |   7 -
 .../codecs/lucene50/Lucene50CompoundFormat.java}   |   6 +-
 .../codecs}/lucene50/Lucene50CompoundReader.java   |   2 +-
 .../codecs/lucene86/Lucene86PointsFormat.java      |   5 +-
 .../lucene/codecs/lucene90/Lucene90Codec.java      |   3 +-
 .../codecs/lucene90/Lucene90CompoundFormat.java    | 140 -------
 .../codecs/lucene90/Lucene90CompoundReader.java    | 214 ----------
 .../codecs/lucene90/Lucene90FieldInfosFormat.java  |   8 +-
 .../codecs/lucene90/Lucene90VectorWriter.java      |  15 +-
 .../lucene/codecs/lucene90/package-info.java       |   2 +-
 .../java/org/apache/lucene/document/FieldType.java |   3 +-
 .../java/org/apache/lucene/index/CodecReader.java  |  15 -
 .../org/apache/lucene/index/ReadersAndUpdates.java |   4 +-
 .../org/apache/lucene/util/RamUsageEstimator.java  |  17 +-
 .../TestLucene50CompoundFormat.java}               |   4 +-
 .../lucene90/TestLucene90FieldInfosFormat.java     |  28 --
 .../codecs/lucene90/TestLucene90VectorFormat.java  |  29 --
 .../org/apache/lucene/index/TestVectorValues.java} | 150 ++++---
 .../apache/lucene/queries/intervals/Intervals.java |  10 -
 .../MinimumShouldMatchIntervalsSource.java         |   1 -
 .../queries/intervals/NoMatchIntervalsSource.java  |  75 ----
 .../lucene/queries/intervals/TestIntervals.java    |  21 -
 .../queries/intervals/TestSimplifications.java     |   9 -
 .../lucene/index/BaseFieldInfoFormatTestCase.java  |  55 +--
 .../org/apache/lucene/util/RamUsageTester.java     |  22 +-
 solr/CHANGES.txt                                   |   3 +-
 .../solr/cloud/api/collections/BackupCmd.java      |  48 +--
 .../cloud/api/collections/DeleteBackupCmd.java     |  14 +-
 .../OverseerCollectionMessageHandler.java          |  51 ++-
 .../java/org/apache/solr/core/CoreContainer.java   |   4 +-
 .../repository/LocalFileSystemRepository.java      |  22 +-
 .../java/org/apache/solr/handler/CatStream.java    |  11 +-
 .../java/org/apache/solr/handler/ClusterAPI.java   |  14 +-
 .../org/apache/solr/handler/CollectionsAPI.java    | 177 +--------
 .../apache/solr/handler/admin/BackupCoreOp.java    |   2 +-
 .../solr/handler/admin/CollectionsHandler.java     |   1 +
 .../java/org/apache/solr/response/JSONWriter.java  |   8 +-
 .../org/apache/solr/search/MaxScoreCollector.java  |   7 +-
 .../org/apache/solr/servlet/QueryRateLimiter.java  |   8 +-
 .../conf/solrconfig-cache-enable-disable.xml       |  15 +-
 .../conf/solrconfig-memory-circuitbreaker.xml      |   8 +-
 .../LocalFSCloudIncrementalBackupTest.java         |  13 +-
 .../cloud/api/collections/TestCollectionAPI.java   |   7 +-
 .../apache/solr/core/DirectoryFactoriesTest.java   |   4 +-
 .../src/test/org/apache/solr/core/TestConfig.java  |  18 +-
 .../solr/handler/TestIncrementalCoreBackup.java    |  74 ++--
 .../solr/handler/TestStressIncrementalBackup.java  |  22 +-
 .../solr/handler/TestStressThreadBackup.java       |   5 +-
 .../solr/handler/admin/AdminHandlersProxyTest.java |  33 ++
 .../solr/handler/admin/TestCollectionAPIs.java     |   4 +-
 .../handler/admin/V2CollectionsAPIMappingTest.java | 293 --------------
 solr/solr-ref-guide/src/_layouts/default.html      |   2 +-
 solr/solr-ref-guide/src/_layouts/home.html         |   2 +-
 .../src/major-changes-in-solr-9.adoc               |   9 -
 .../solr-ref-guide/src/parallel-sql-interface.adoc |   2 +-
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc    |  10 -
 .../solrj/request/CollectionAdminRequest.java      |  51 ++-
 .../client/solrj/request/CollectionApiMapping.java |  74 +++-
 .../request/beans/BackupCollectionPayload.java     |  54 ---
 ...lusterPropPayload.java => ClusterPropInfo.java} |  10 +-
 .../solrj/request/beans/CreateAliasPayload.java    |  79 ----
 ...ateConfigPayload.java => CreateConfigInfo.java} |   2 +-
 .../client/solrj/request/beans/CreatePayload.java  |  70 ----
 .../solrj/request/beans/DeleteAliasPayload.java    |  28 --
 ...ateLimiterPayload.java => RateLimiterMeta.java} |  10 +-
 .../request/beans/RestoreCollectionPayload.java    |  53 ---
 .../request/beans/SetAliasPropertyPayload.java     |  33 --
 .../client/solrj/request/beans/V2ApiConstants.java |  55 ---
 .../org/apache/solr/common/cloud/ZkNodeProps.java  |  20 +-
 .../solr/common/params/CollectionAdminParams.java  |   5 -
 .../resources/apispec/collections.Commands.json    | 298 ++++++++++++++
 .../solrj/io/stream/StreamExpressionTest.java      |  38 +-
 .../client/solrj/request/TestV1toV2ApiMapper.java  |  26 +-
 .../apache/solr/common/util/JsonValidatorTest.java |  52 ++-
 155 files changed, 1358 insertions(+), 3768 deletions(-)
 delete mode 100644 lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/FlagEnumerator.java
 delete mode 100644 lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java
 delete mode 100644 lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Root.java
 rename lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/{Hunspell.java => SpellChecker.java} (69%)
 rename lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/{TestSpellChecking.java => SpellCheckerTest.java} (85%)
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/base_utf.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/checksharps.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.aff
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.good
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.wrong
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forceucase.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.aff
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.good
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.wrong
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/keepcase.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.aff
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.wrong
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.aff
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.good
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.wrong
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.aff
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.good
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.wrong
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.aff
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.good
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.wrong
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.aff
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.good
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.sug
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.wrong
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.aff
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.dic
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.good
 delete mode 100644 lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.wrong
 delete mode 100644 lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundFormat.java
 delete mode 100644 lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/Lucene87/Lucene87RWCodec.java
 delete mode 100644 lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50CompoundFormat.java
 rename lucene/{backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50RWCompoundFormat.java => core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java} (97%)
 rename lucene/{backward-codecs/src/java/org/apache/lucene/backward_codecs => core/src/java/org/apache/lucene/codecs}/lucene50/Lucene50CompoundReader.java (99%)
 delete mode 100644 lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundFormat.java
 delete mode 100644 lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundReader.java
 rename lucene/core/src/test/org/apache/lucene/codecs/{lucene90/TestLucene90CompoundFormat.java => lucene50/TestLucene50CompoundFormat.java} (90%)
 delete mode 100644 lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90FieldInfosFormat.java
 delete mode 100644 lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90VectorFormat.java
 rename lucene/{test-framework/src/java/org/apache/lucene/index/BaseVectorFormatTestCase.java => core/src/test/org/apache/lucene/index/TestVectorValues.java} (87%)
 delete mode 100644 lucene/queries/src/java/org/apache/lucene/queries/intervals/NoMatchIntervalsSource.java
 delete mode 100644 solr/core/src/test/org/apache/solr/handler/admin/V2CollectionsAPIMappingTest.java
 delete mode 100644 solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/BackupCollectionPayload.java
 rename solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/{ClusterPropPayload.java => ClusterPropInfo.java} (86%)
 delete mode 100644 solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateAliasPayload.java
 rename solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/{CreateConfigPayload.java => CreateConfigInfo.java} (94%)
 delete mode 100644 solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreatePayload.java
 delete mode 100644 solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/DeleteAliasPayload.java
 rename solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/{RateLimiterPayload.java => RateLimiterMeta.java} (89%)
 delete mode 100644 solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RestoreCollectionPayload.java
 delete mode 100644 solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/SetAliasPropertyPayload.java
 delete mode 100644 solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/V2ApiConstants.java
 create mode 100644 solr/solrj/src/resources/apispec/collections.Commands.json


[lucene-solr] 02/02: SOLR-15138: Collection creation for PerReplicaStates does not scale to large collections as well as regular collections

Posted by is...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ishan pushed a commit to branch jira/solr15138
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 2e98e6584b69df1206455a33291ad512feae52e7
Author: Ishan Chattopadhyaya <is...@apache.org>
AuthorDate: Sat Feb 13 01:07:12 2021 +0530

    SOLR-15138: Collection creation for PerReplicaStates does not scale to large collections as well as regular collections
---
 solr/CHANGES.txt | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index a0d436d..378cb54 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -199,6 +199,8 @@ Other Changes
 * SOLR-14067: StatelessScriptUpdateProcessorFactory moved to it's own /contrib/scripting/ package instead
  of shipping as part of Solr due to security concerns.  Renamed to ScriptUpdateProcessorFactory for simpler name. (Eric Pugh)
 
+* SOLR-15118: Switch /v2/collections APIs over to the now-preferred annotated-POJO implementation approach (Jason Gerlowski)
+
 Bug Fixes
 ---------------------
 * SOLR-14546: Fix for a relatively hard to hit issue in OverseerTaskProcessor that could lead to out of order execution
@@ -221,7 +223,7 @@ Improvements
 * SOLR-14234: Unhelpful message in RemoteExecutionException. (ab)
 
 * SOLR-13608: Backups are now done incrementally by default.  Multiple backups can be stored at the same location, and each
-  backup will only upload those files that are new since the last backup. (Jason Gerlowski, Shalin , Cao Manh Dat)
+  backup will only upload those files that are new since the last backup. (Jason Gerlowski, shalin , Cao Manh Dat)
 
 * SOLR-15123: Revamp SolrCLI tool's help descriptions for all commands for consistency and clarity. (Eric Pugh)
 
@@ -238,10 +240,24 @@ Bug Fixes
 
 Other Changes
 ---------------------
-(No changes)
+* SOLR-15118: Deprecate CollectionAdminRequest.getV2Request(). (Jason Gerlowski)
+
+==================  8.8.1 ==================
+
+Bug Fixes
+---------------------
+
+* SOLR-15145: System property to control whether base_url is stored in state.json to enable back-compat with older SolrJ versions.
+  (Timothy Potter)
+
+* SOLR-15114: Fix bug that caused WAND optimization to be disabled in cases where the max score is requested (such as
+  multi-shard requests in SolrCloud) (Naoto Minami via Tomás Fernández Löbbe)
 
 * SOLR-15136: Reduce excessive logging introduced with Per Replica States feature (Ishan Chattopadhyaya)
 
+* SOLR-15138: Collection creation for PerReplicaStates does not scale to large collections as well as regular collections
+  (Mike Drob, Ilan Ginzburg, noble, Ishan Chattopadhyaya)
+
 ==================  8.8.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.


[lucene-solr] 01/02: Revert "Merging master to this branch"

Posted by is...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ishan pushed a commit to branch jira/solr15138
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 2a9b6889fa5a7f20ce510c23f2ee752aa0269907
Author: Ishan Chattopadhyaya <is...@apache.org>
AuthorDate: Sat Feb 13 01:01:43 2021 +0530

    Revert "Merging master to this branch"
    
    This reverts commit 9c627459e860eccfa7f9ba04367abf6222cba159.
---
 .../apache/lucene/missingdoclet/MissingDoclet.java |  13 +-
 gradle/ant-compat/folder-layout.gradle             |  20 -
 gradle/documentation/render-javadoc.gradle         |   2 +
 gradle/testing/randomization/policies/tests.policy |   6 +-
 gradle/validation/forbidden-apis.gradle            |  17 -
 gradle/validation/rat-sources.gradle               |  10 +-
 lucene/CHANGES.txt                                 |   8 +-
 lucene/analysis/common/build.gradle                |  15 -
 .../analysis/hunspell/CheckCompoundPattern.java    |   4 +-
 .../lucene/analysis/hunspell/CompoundRule.java     |  24 +-
 .../lucene/analysis/hunspell/Dictionary.java       | 328 +++++++--------
 .../lucene/analysis/hunspell/FlagEnumerator.java   |  86 ----
 .../analysis/hunspell/GeneratingSuggester.java     | 438 ---------------------
 .../analysis/hunspell/ModifyingSuggester.java      |  86 +---
 .../org/apache/lucene/analysis/hunspell/Root.java  |  53 ---
 .../hunspell/{Hunspell.java => SpellChecker.java}  | 219 ++++-------
 .../apache/lucene/analysis/hunspell/Stemmer.java   | 211 +++++-----
 .../lucene/analysis/hunspell/package-info.java     |   6 +-
 ...estSpellChecking.java => SpellCheckerTest.java} |  45 +--
 .../analysis/hunspell/TestAllDictionaries.java     | 191 ++-------
 .../lucene/analysis/hunspell/TestDictionary.java   |  27 +-
 .../lucene/analysis/hunspell/TestDutchIJ.java      |   1 +
 .../hunspell/TestHunspellRepositoryTestCases.java  |  28 +-
 .../lucene/analysis/hunspell/TestPerformance.java  |  30 +-
 .../apache/lucene/analysis/hunspell/allcaps.sug    |   3 -
 .../apache/lucene/analysis/hunspell/base_utf.sug   |  13 -
 .../lucene/analysis/hunspell/checksharps.sug       |   1 -
 .../lucene/analysis/hunspell/forbiddenword.aff     |  11 -
 .../lucene/analysis/hunspell/forbiddenword.dic     |  11 -
 .../lucene/analysis/hunspell/forbiddenword.good    |   3 -
 .../lucene/analysis/hunspell/forbiddenword.wrong   |   4 -
 .../apache/lucene/analysis/hunspell/forceucase.sug |   2 -
 .../lucene/analysis/hunspell/forgivable-errors.aff |   8 +-
 .../lucene/analysis/hunspell/forgivable-errors.dic |   2 -
 .../org/apache/lucene/analysis/hunspell/i58202.aff |   4 -
 .../org/apache/lucene/analysis/hunspell/i58202.dic |   5 -
 .../apache/lucene/analysis/hunspell/i58202.good    |  10 -
 .../org/apache/lucene/analysis/hunspell/i58202.sug |  13 -
 .../apache/lucene/analysis/hunspell/i58202.wrong   |  13 -
 .../apache/lucene/analysis/hunspell/keepcase.sug   |   8 -
 .../org/apache/lucene/analysis/hunspell/map.aff    |   9 -
 .../org/apache/lucene/analysis/hunspell/map.dic    |   4 -
 .../org/apache/lucene/analysis/hunspell/map.sug    |   3 -
 .../org/apache/lucene/analysis/hunspell/map.wrong  |   3 -
 .../apache/lucene/analysis/hunspell/nosuggest.aff  |   5 -
 .../apache/lucene/analysis/hunspell/nosuggest.dic  |   3 -
 .../apache/lucene/analysis/hunspell/nosuggest.good |   3 -
 .../apache/lucene/analysis/hunspell/nosuggest.sug  |   0
 .../lucene/analysis/hunspell/nosuggest.wrong       |   3 -
 .../org/apache/lucene/analysis/hunspell/oconv.aff  |  20 -
 .../org/apache/lucene/analysis/hunspell/oconv.dic  |   4 -
 .../org/apache/lucene/analysis/hunspell/oconv.good |   2 -
 .../org/apache/lucene/analysis/hunspell/oconv.sug  |   3 -
 .../apache/lucene/analysis/hunspell/oconv.wrong    |   3 -
 .../analysis/hunspell/opentaal_forbiddenword1.aff  |   9 -
 .../analysis/hunspell/opentaal_forbiddenword1.dic  |   5 -
 .../analysis/hunspell/opentaal_forbiddenword1.good |   3 -
 .../analysis/hunspell/opentaal_forbiddenword1.sug  |   1 -
 .../hunspell/opentaal_forbiddenword1.wrong         |   5 -
 .../analysis/hunspell/opentaal_forbiddenword2.aff  |   7 -
 .../analysis/hunspell/opentaal_forbiddenword2.dic  |   5 -
 .../analysis/hunspell/opentaal_forbiddenword2.good |   4 -
 .../analysis/hunspell/opentaal_forbiddenword2.sug  |   1 -
 .../hunspell/opentaal_forbiddenword2.wrong         |   5 -
 .../apache/lucene/analysis/hunspell/wordpair.aff   |   4 -
 .../apache/lucene/analysis/hunspell/wordpair.dic   |   4 -
 .../apache/lucene/analysis/hunspell/wordpair.good  |   3 -
 .../apache/lucene/analysis/hunspell/wordpair.wrong |   1 -
 .../analysis/standard/GenerateJflexTLDMacros.java  |  16 +-
 .../analysis/icu/GenerateUTR30DataFiles.java       |  84 ++--
 .../lucene/analysis/icu/RBBIRuleCompiler.java      |  51 ++-
 .../lucene50/Lucene50CompoundFormat.java           |  88 -----
 .../backward_codecs/lucene70/Lucene70Codec.java    |   4 +-
 .../backward_codecs/lucene80/Lucene80Codec.java    |   2 +-
 .../backward_codecs/lucene84/Lucene84Codec.java    |   4 +-
 .../backward_codecs/lucene86/Lucene86Codec.java    |   4 +-
 .../backward_codecs/lucene87/Lucene87Codec.java    |   4 +-
 .../backward_codecs/Lucene87/Lucene87RWCodec.java  |  30 --
 .../lucene50/TestLucene50CompoundFormat.java       |  30 --
 .../backward_codecs/lucene70/Lucene70RWCodec.java  |   7 -
 .../backward_codecs/lucene84/Lucene84RWCodec.java  |   7 -
 .../backward_codecs/lucene86/Lucene86RWCodec.java  |   7 -
 .../codecs/lucene50/Lucene50CompoundFormat.java}   |   6 +-
 .../codecs}/lucene50/Lucene50CompoundReader.java   |   2 +-
 .../codecs/lucene86/Lucene86PointsFormat.java      |   5 +-
 .../lucene/codecs/lucene90/Lucene90Codec.java      |   3 +-
 .../codecs/lucene90/Lucene90CompoundFormat.java    | 140 -------
 .../codecs/lucene90/Lucene90CompoundReader.java    | 214 ----------
 .../codecs/lucene90/Lucene90FieldInfosFormat.java  |   8 +-
 .../codecs/lucene90/Lucene90VectorWriter.java      |  15 +-
 .../lucene/codecs/lucene90/package-info.java       |   2 +-
 .../java/org/apache/lucene/document/FieldType.java |   3 +-
 .../java/org/apache/lucene/index/CodecReader.java  |  15 -
 .../org/apache/lucene/index/ReadersAndUpdates.java |   4 +-
 .../org/apache/lucene/util/RamUsageEstimator.java  |  17 +-
 .../TestLucene50CompoundFormat.java}               |   4 +-
 .../lucene90/TestLucene90FieldInfosFormat.java     |  28 --
 .../codecs/lucene90/TestLucene90VectorFormat.java  |  29 --
 .../org/apache/lucene/index/TestVectorValues.java} | 150 ++++---
 .../apache/lucene/queries/intervals/Intervals.java |  10 -
 .../MinimumShouldMatchIntervalsSource.java         |   1 -
 .../queries/intervals/NoMatchIntervalsSource.java  |  75 ----
 .../lucene/queries/intervals/TestIntervals.java    |  21 -
 .../queries/intervals/TestSimplifications.java     |   9 -
 .../lucene/index/BaseFieldInfoFormatTestCase.java  |  55 +--
 .../org/apache/lucene/util/RamUsageTester.java     |  22 +-
 solr/CHANGES.txt                                   |  19 +-
 .../solr/cloud/api/collections/BackupCmd.java      |  48 +--
 .../cloud/api/collections/DeleteBackupCmd.java     |  14 +-
 .../OverseerCollectionMessageHandler.java          |  51 ++-
 .../java/org/apache/solr/core/CoreContainer.java   |   4 +-
 .../repository/LocalFileSystemRepository.java      |  22 +-
 .../java/org/apache/solr/handler/CatStream.java    |  11 +-
 .../java/org/apache/solr/handler/ClusterAPI.java   |  14 +-
 .../org/apache/solr/handler/CollectionsAPI.java    | 177 +--------
 .../apache/solr/handler/admin/BackupCoreOp.java    |   2 +-
 .../solr/handler/admin/CollectionsHandler.java     |   1 +
 .../java/org/apache/solr/response/JSONWriter.java  |   8 +-
 .../org/apache/solr/search/MaxScoreCollector.java  |   7 +-
 .../org/apache/solr/servlet/QueryRateLimiter.java  |   8 +-
 .../conf/solrconfig-cache-enable-disable.xml       |  15 +-
 .../conf/solrconfig-memory-circuitbreaker.xml      |   8 +-
 .../LocalFSCloudIncrementalBackupTest.java         |  13 +-
 .../cloud/api/collections/TestCollectionAPI.java   |   7 +-
 .../apache/solr/core/DirectoryFactoriesTest.java   |   4 +-
 .../src/test/org/apache/solr/core/TestConfig.java  |  18 +-
 .../solr/handler/TestIncrementalCoreBackup.java    |  74 ++--
 .../solr/handler/TestStressIncrementalBackup.java  |  22 +-
 .../solr/handler/TestStressThreadBackup.java       |   5 +-
 .../solr/handler/admin/AdminHandlersProxyTest.java |  33 ++
 .../solr/handler/admin/TestCollectionAPIs.java     |   4 +-
 .../handler/admin/V2CollectionsAPIMappingTest.java | 293 --------------
 solr/solr-ref-guide/src/_layouts/default.html      |   2 +-
 solr/solr-ref-guide/src/_layouts/home.html         |   2 +-
 .../src/major-changes-in-solr-9.adoc               |   9 -
 .../solr-ref-guide/src/parallel-sql-interface.adoc |   2 +-
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc    |  10 -
 .../solrj/request/CollectionAdminRequest.java      |  51 ++-
 .../client/solrj/request/CollectionApiMapping.java |  74 +++-
 .../request/beans/BackupCollectionPayload.java     |  54 ---
 ...lusterPropPayload.java => ClusterPropInfo.java} |  10 +-
 .../solrj/request/beans/CreateAliasPayload.java    |  79 ----
 ...ateConfigPayload.java => CreateConfigInfo.java} |   2 +-
 .../client/solrj/request/beans/CreatePayload.java  |  70 ----
 .../solrj/request/beans/DeleteAliasPayload.java    |  28 --
 ...ateLimiterPayload.java => RateLimiterMeta.java} |  10 +-
 .../request/beans/RestoreCollectionPayload.java    |  53 ---
 .../request/beans/SetAliasPropertyPayload.java     |  33 --
 .../client/solrj/request/beans/V2ApiConstants.java |  55 ---
 .../org/apache/solr/common/cloud/ZkNodeProps.java  |  20 +-
 .../solr/common/params/CollectionAdminParams.java  |   5 -
 .../resources/apispec/collections.Commands.json    | 298 ++++++++++++++
 .../solrj/io/stream/StreamExpressionTest.java      |  38 +-
 .../client/solrj/request/TestV1toV2ApiMapper.java  |  26 +-
 .../apache/solr/common/util/JsonValidatorTest.java |  52 ++-
 155 files changed, 1358 insertions(+), 3784 deletions(-)

diff --git a/dev-tools/missing-doclet/src/main/java/org/apache/lucene/missingdoclet/MissingDoclet.java b/dev-tools/missing-doclet/src/main/java/org/apache/lucene/missingdoclet/MissingDoclet.java
index 53dc033..89c205f 100644
--- a/dev-tools/missing-doclet/src/main/java/org/apache/lucene/missingdoclet/MissingDoclet.java
+++ b/dev-tools/missing-doclet/src/main/java/org/apache/lucene/missingdoclet/MissingDoclet.java
@@ -406,7 +406,7 @@ public class MissingDoclet extends StandardDoclet {
   /** logs a new error for the particular element */
   private void error(Element element, String message) {
     var fullMessage = new StringBuilder();
-    switch (element.getKind()) {
+    switch(element.getKind()) {
       case MODULE:
       case PACKAGE:
         // for modules/packages, we don't have filename + line number, fully qualify
@@ -426,19 +426,10 @@ public class MissingDoclet extends StandardDoclet {
         fullMessage.append(element.getSimpleName());
         break;
     }
-
     fullMessage.append(" (");
     fullMessage.append(element.getKind().toString().toLowerCase(Locale.ROOT));
     fullMessage.append("): ");
     fullMessage.append(message);
-
-    if (Runtime.version().feature() == 11 && element.getKind() == ElementKind.PACKAGE) {
-      // Avoid JDK 11 bug:
-      // https://issues.apache.org/jira/browse/LUCENE-9747
-      // https://bugs.openjdk.java.net/browse/JDK-8224082
-      reporter.print(Diagnostic.Kind.ERROR, fullMessage.toString());
-    } else {
-      reporter.print(Diagnostic.Kind.ERROR, element, fullMessage.toString());
-    }
+    reporter.print(Diagnostic.Kind.ERROR, element, fullMessage.toString());
   }
 }
diff --git a/gradle/ant-compat/folder-layout.gradle b/gradle/ant-compat/folder-layout.gradle
index 0186fb7..286c8d1 100644
--- a/gradle/ant-compat/folder-layout.gradle
+++ b/gradle/ant-compat/folder-layout.gradle
@@ -41,23 +41,3 @@ configure(project(":solr:webapp")) {
     webAppDirName = "web"
   }
 }
-
-allprojects {
-  plugins.withType(JavaPlugin) {
-    // if 'src/tools' exists, add it as a separate sourceSet.
-    if (file('src/tools/java').exists()) {
-      sourceSets {
-        tools {
-          java {
-            srcDirs = ['src/tools/java']
-          }
-        }
-      }
-
-      configurations {
-        // Inherit any dependencies from the main source set.
-        toolsImplementation.extendsFrom implementation
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/gradle/documentation/render-javadoc.gradle b/gradle/documentation/render-javadoc.gradle
index 66d1539..55f904e 100644
--- a/gradle/documentation/render-javadoc.gradle
+++ b/gradle/documentation/render-javadoc.gradle
@@ -533,6 +533,8 @@ class RenderJavadocTask extends DefaultTask {
 
         ignoreExitValue true
       }
+
+      logger.lifecycle("Exec returned: ${result}")
     }
 
     if (result.getExitValue() != 0) {
diff --git a/gradle/testing/randomization/policies/tests.policy b/gradle/testing/randomization/policies/tests.policy
index 469892c..e17af8e 100644
--- a/gradle/testing/randomization/policies/tests.policy
+++ b/gradle/testing/randomization/policies/tests.policy
@@ -91,12 +91,10 @@ grant {
   // allows LuceneTestCase#runWithRestrictedPermissions to execute with lower (or no) permission
   permission java.security.SecurityPermission "createAccessControlContext";
 
-  // Hunspell regression and validation tests can read from external files
-  // specified in system properties.
+  // Some Hunspell tests may read from external files specified in system properties
   permission java.io.FilePermission "${hunspell.repo.path}${/}-", "read";
-  permission java.io.FilePermission "${hunspell.corpora}${/}-", "read";
-  permission java.io.FilePermission "${hunspell.dictionaries}", "read";
   permission java.io.FilePermission "${hunspell.dictionaries}${/}-", "read";
+  permission java.io.FilePermission "${hunspell.corpora}${/}-", "read";
 };
 
 // Permissions to support ant build
diff --git a/gradle/validation/forbidden-apis.gradle b/gradle/validation/forbidden-apis.gradle
index c23002a..c4fb27d 100644
--- a/gradle/validation/forbidden-apis.gradle
+++ b/gradle/validation/forbidden-apis.gradle
@@ -89,23 +89,6 @@ allprojects { prj ->
       ]
     }
 
-    // Configure defaults for sourceSets.tools (if present).
-    tasks.matching { it.name == "forbiddenApisTools" }.all {
-      bundledSignatures += [
-          'jdk-unsafe',
-          'jdk-deprecated',
-          'jdk-non-portable',
-          'jdk-reflection',
-      ]
-
-      suppressAnnotations += [
-          "**.SuppressForbidden"
-      ]
-
-      doFirst dynamicSignatures.curry(configurations.toolsCompileClasspath, "lucene")
-      inputs.dir(file(resources))
-    }
-
     // Disable sysout signatures for these projects.
     if (prj.path in [
         ":lucene:demo",
diff --git a/gradle/validation/rat-sources.gradle b/gradle/validation/rat-sources.gradle
index e13b052..5738d01 100644
--- a/gradle/validation/rat-sources.gradle
+++ b/gradle/validation/rat-sources.gradle
@@ -154,16 +154,10 @@ class RatTask extends DefaultTask {
             }
 
             if (project.plugins.findPlugin(JavaPlugin)) {
-                def checkSets = [
+                [
                     project.sourceSets.main.java.srcDirs,
                     project.sourceSets.test.java.srcDirs,
-                ]
-
-                project.sourceSets.matching { it.name == 'tools' }.all {
-                    checkSets += project.sourceSets.tools.java.srcDirs
-                }
-
-                checkSets.flatten().each { srcLocation ->
+                ].flatten().each { srcLocation ->
                     ant.fileset(dir: srcLocation, erroronmissingdir: false) {
                         srcExcludes.each { pattern -> ant.exclude(name: pattern) }
                     }
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 337f376..104d9b9 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -89,8 +89,8 @@ API Changes
 
 Improvements
 
-* LUCENE-9687: Hunspell support improvements: add API for spell-checking and suggestions, support compound words,
-  fix various behavior differences between Java and C++ implementations, improve performance (Peter Gromov, Dawid Weiss)
+* LUCENE-9687: Hunspell support improvements: add SpellChecker API, support default encoding and
+  BREAK/FORBIDDENWORD/COMPOUNDRULE affix rules, improve stemming of all-caps words (Peter Gromov)
 
 * LUCENE-9633: Improve match highlighter behavior for degenerate intervals (on non-existing positions).
   (Dawid Weiss)
@@ -288,9 +288,7 @@ Optimizations
 
 Bug Fixes
 ---------------------
-
-* LUCENE-9744: NPE on a degenerate query in MinimumShouldMatchIntervalsSource
-  $MinimumMatchesIterator.getSubMatches(). (Alan Woodward)
+(No changes)
 
 Other
 ---------------------
diff --git a/lucene/analysis/common/build.gradle b/lucene/analysis/common/build.gradle
index 24c949f..a44152c 100644
--- a/lucene/analysis/common/build.gradle
+++ b/lucene/analysis/common/build.gradle
@@ -23,18 +23,3 @@ dependencies {
   api project(':lucene:core')
   testImplementation project(':lucene:test-framework')
 }
-
-// Pass all hunspell-tests-specific project properties to tests as system properties.
-tasks.withType(Test) {
-  [
-      "hunspell.dictionaries",
-      "hunspell.corpora",
-      "hunspell.repo.path"
-  ].each {
-    def val = propertyOrDefault(it, null)
-    if (val != null) {
-      logger.lifecycle("Passing property: ${it}=${val}")
-      systemProperty it, val
-    }
-  }
-}
\ No newline at end of file
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CheckCompoundPattern.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CheckCompoundPattern.java
index b1c4b3d..3d70591 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CheckCompoundPattern.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CheckCompoundPattern.java
@@ -16,6 +16,7 @@
  */
 package org.apache.lucene.analysis.hunspell;
 
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 
@@ -26,6 +27,7 @@ class CheckCompoundPattern {
   private final char[] endFlags;
   private final char[] beginFlags;
   private final Dictionary dictionary;
+  private final BytesRef scratch = new BytesRef();
 
   CheckCompoundPattern(
       String unparsed, Dictionary.FlagParsingStrategy strategy, Dictionary dictionary) {
@@ -91,7 +93,7 @@ class CheckCompoundPattern {
 
   private boolean hasAllFlags(char[] flags, IntsRef forms) {
     for (char flag : flags) {
-      if (!dictionary.hasFlag(forms, flag)) {
+      if (!dictionary.hasFlag(forms, flag, scratch)) {
         return false;
       }
     }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java
index 726c1dc..0f89de8 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java
@@ -17,6 +17,7 @@
 package org.apache.lucene.analysis.hunspell;
 
 import java.util.List;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IntsRef;
 
 class CompoundRule {
@@ -50,15 +51,16 @@ class CompoundRule {
     data = parsedFlags.toString().toCharArray();
   }
 
-  boolean mayMatch(List<IntsRef> words) {
-    return match(words, 0, 0, false);
+  boolean mayMatch(List<IntsRef> words, BytesRef scratch) {
+    return match(words, 0, 0, scratch, false);
   }
 
-  boolean fullyMatches(List<IntsRef> words) {
-    return match(words, 0, 0, true);
+  boolean fullyMatches(List<IntsRef> words, BytesRef scratch) {
+    return match(words, 0, 0, scratch, true);
   }
 
-  private boolean match(List<IntsRef> words, int patternIndex, int wordIndex, boolean fully) {
+  private boolean match(
+      List<IntsRef> words, int patternIndex, int wordIndex, BytesRef scratch, boolean fully) {
     if (patternIndex >= data.length) {
       return wordIndex >= words.size();
     }
@@ -69,12 +71,12 @@ class CompoundRule {
     char flag = data[patternIndex];
     if (patternIndex < data.length - 1 && data[patternIndex + 1] == '*') {
       int startWI = wordIndex;
-      while (wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag)) {
+      while (wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag, scratch)) {
         wordIndex++;
       }
 
       while (wordIndex >= startWI) {
-        if (match(words, patternIndex + 2, wordIndex, fully)) {
+        if (match(words, patternIndex + 2, wordIndex, scratch, fully)) {
           return true;
         }
 
@@ -84,16 +86,16 @@ class CompoundRule {
     }
 
     boolean currentWordMatches =
-        wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag);
+        wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag, scratch);
 
     if (patternIndex < data.length - 1 && data[patternIndex + 1] == '?') {
-      if (currentWordMatches && match(words, patternIndex + 2, wordIndex + 1, fully)) {
+      if (currentWordMatches && match(words, patternIndex + 2, wordIndex + 1, scratch, fully)) {
         return true;
       }
-      return match(words, patternIndex + 2, wordIndex, fully);
+      return match(words, patternIndex + 2, wordIndex, scratch, fully);
     }
 
-    return currentWordMatches && match(words, patternIndex + 1, wordIndex + 1, fully);
+    return currentWordMatches && match(words, patternIndex + 1, wordIndex + 1, scratch, fully);
   }
 
   @Override
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
index 59536fe..ae1a3a1 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
@@ -18,11 +18,11 @@ package org.apache.lucene.analysis.hunspell;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedReader;
-import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.LineNumberReader;
+import java.io.OutputStream;
 import java.nio.charset.Charset;
 import java.nio.charset.CharsetDecoder;
 import java.nio.charset.CodingErrorAction;
@@ -52,6 +52,8 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.IntsRef;
@@ -70,19 +72,16 @@ import org.apache.lucene.util.fst.Util;
 
 /** In-memory structure for the dictionary (.dic) and affix (.aff) data of a hunspell dictionary. */
 public class Dictionary {
-  // Derived from woorm/LibreOffice dictionaries.
-  // See TestAllDictionaries.testMaxPrologueNeeded.
-  static final int MAX_PROLOGUE_SCAN_WINDOW = 30 * 1024;
 
   static final char[] NOFLAGS = new char[0];
 
   static final char FLAG_UNSET = (char) 0;
   private static final int DEFAULT_FLAGS = 65510;
-  static final char HIDDEN_FLAG = (char) 65511; // called 'ONLYUPCASEFLAG' in Hunspell
+  private static final char HIDDEN_FLAG = (char) 65511; // called 'ONLYUPCASEFLAG' in Hunspell
 
   // TODO: really for suffixes we should reverse the automaton and run them backwards
-  private static final String PREFIX_CONDITION_REGEX = "%s.*";
-  private static final String SUFFIX_CONDITION_REGEX = ".*%s";
+  private static final String PREFIX_CONDITION_REGEX_PATTERN = "%s.*";
+  private static final String SUFFIX_CONDITION_REGEX_PATTERN = ".*%s";
   private static final Pattern MORPH_KEY_PATTERN = Pattern.compile("\\s+(?=\\p{Alpha}{2}:)");
   static final Charset DEFAULT_CHARSET = StandardCharsets.ISO_8859_1;
   CharsetDecoder decoder = replacingDecoder(DEFAULT_CHARSET);
@@ -107,7 +106,7 @@ public class Dictionary {
    * The list of unique flagsets (wordforms). theoretically huge, but practically small (for Polish
    * this is 756), otherwise humans wouldn't be able to deal with it either.
    */
-  final FlagEnumerator.Lookup flagLookup;
+  BytesRefHash flagLookup = new BytesRefHash();
 
   // the list of unique strip affixes.
   char[] stripData;
@@ -122,7 +121,7 @@ public class Dictionary {
   // offsets in affixData
   static final int AFFIX_FLAG = 0;
   static final int AFFIX_STRIP_ORD = 1;
-  private static final int AFFIX_CONDITION = 2;
+  static final int AFFIX_CONDITION = 2;
   static final int AFFIX_APPEND = 3;
 
   // Default flag parsing strategy
@@ -171,11 +170,6 @@ public class Dictionary {
   String[] neighborKeyGroups = new String[0];
   boolean enableSplitSuggestions = true;
   List<RepEntry> repTable = new ArrayList<>();
-  List<List<String>> mapTable = new ArrayList<>();
-  int maxDiff = 5;
-  int maxNGramSuggestions = Integer.MAX_VALUE;
-  boolean onlyMaxDiff;
-  char noSuggest, subStandard;
 
   // FSTs used for ICONV/OCONV, output ord pointing to replacement text
   FST<CharsRef> iconv;
@@ -230,46 +224,42 @@ public class Dictionary {
     this.ignoreCase = ignoreCase;
     this.needsInputCleaning = ignoreCase;
     this.needsOutputCleaning = false; // set if we have an OCONV
+    flagLookup.add(new BytesRef()); // no flags -> ord 0
 
-    try (BufferedInputStream affixStream =
-        new BufferedInputStream(affix, MAX_PROLOGUE_SCAN_WINDOW) {
-          @Override
-          public void close() {
-            // TODO: maybe we should consume and close it? Why does it need to stay open?
-            // Don't close the affix stream as per javadoc.
-          }
-        }) {
-      // I assume we don't support other BOMs (utf16, etc.)? We trivially could,
-      // by adding maybeConsume() with a proper bom... but I don't see hunspell repo to have
-      // any such exotic examples.
-      Charset streamCharset;
-      if (maybeConsume(affixStream, BOM_UTF8)) {
-        streamCharset = StandardCharsets.UTF_8;
-      } else {
-        streamCharset = DEFAULT_CHARSET;
+    Path tempPath = getDefaultTempDir(); // TODO: make this configurable?
+    Path aff = Files.createTempFile(tempPath, "affix", "aff");
+
+    BufferedInputStream aff1 = null;
+    InputStream aff2 = null;
+    boolean success = false;
+    try {
+      // Copy contents of the affix stream to a temp file.
+      try (OutputStream os = Files.newOutputStream(aff)) {
+        affix.transferTo(os);
       }
 
-      /*
-       * pass 1: look for encoding & flag. This is simple but works. We just prefetch
-       * a large enough chunk of the input and scan through it. The buffered data will
-       * be subsequently reused anyway so nothing is wasted.
-       */
-      affixStream.mark(MAX_PROLOGUE_SCAN_WINDOW);
-      byte[] prologue = affixStream.readNBytes(MAX_PROLOGUE_SCAN_WINDOW - 1);
-      affixStream.reset();
-      readConfig(new ByteArrayInputStream(prologue), streamCharset);
+      // pass 1: get encoding & flag
+      aff1 = new BufferedInputStream(Files.newInputStream(aff));
+      readConfig(aff1);
 
       // pass 2: parse affixes
-      FlagEnumerator flagEnumerator = new FlagEnumerator();
-      readAffixFile(affixStream, decoder, flagEnumerator);
+      aff2 = new BufferedInputStream(Files.newInputStream(aff));
+      readAffixFile(aff2, decoder);
 
       // read dictionary entries
       IndexOutput unsorted = mergeDictionaries(tempDir, tempFileNamePrefix, dictionaries, decoder);
       String sortedFile = sortWordsOffline(tempDir, tempFileNamePrefix, unsorted);
-      words = readSortedDictionaries(tempDir, sortedFile, flagEnumerator);
-      flagLookup = flagEnumerator.finish();
+      words = readSortedDictionaries(tempDir, sortedFile);
       aliases = null; // no longer needed
       morphAliases = null; // no longer needed
+      success = true;
+    } finally {
+      IOUtils.closeWhileHandlingException(aff1, aff2);
+      if (success) {
+        Files.delete(aff);
+      } else {
+        IOUtils.deleteFilesIgnoringExceptions(aff);
+      }
     }
   }
 
@@ -331,7 +321,7 @@ public class Dictionary {
    * @param decoder CharsetDecoder to decode the content of the file
    * @throws IOException Can be thrown while reading from the InputStream
    */
-  private void readAffixFile(InputStream affixStream, CharsetDecoder decoder, FlagEnumerator flags)
+  private void readAffixFile(InputStream affixStream, CharsetDecoder decoder)
       throws IOException, ParseException {
     TreeMap<String, List<Integer>> prefixes = new TreeMap<>();
     TreeMap<String, List<Integer>> suffixes = new TreeMap<>();
@@ -356,15 +346,16 @@ public class Dictionary {
       if (line.isEmpty()) continue;
 
       String firstWord = line.split("\\s")[0];
-      // TODO: convert to a switch?
       if ("AF".equals(firstWord)) {
         parseAlias(line);
       } else if ("AM".equals(firstWord)) {
         parseMorphAlias(line);
       } else if ("PFX".equals(firstWord)) {
-        parseAffix(prefixes, line, reader, PREFIX_CONDITION_REGEX, seenPatterns, seenStrips, flags);
+        parseAffix(
+            prefixes, line, reader, PREFIX_CONDITION_REGEX_PATTERN, seenPatterns, seenStrips);
       } else if ("SFX".equals(firstWord)) {
-        parseAffix(suffixes, line, reader, SUFFIX_CONDITION_REGEX, seenPatterns, seenStrips, flags);
+        parseAffix(
+            suffixes, line, reader, SUFFIX_CONDITION_REGEX_PATTERN, seenPatterns, seenStrips);
       } else if (line.equals("COMPLEXPREFIXES")) {
         complexPrefixes =
             true; // 2-stage prefix+1-stage suffix instead of 2-stage suffix+1-stage prefix
@@ -402,40 +393,21 @@ public class Dictionary {
       } else if ("BREAK".equals(firstWord)) {
         breaks = parseBreaks(reader, line);
       } else if ("WORDCHARS".equals(firstWord)) {
-        wordChars = firstArgument(reader, line);
+        wordChars = singleArgument(reader, line);
       } else if ("TRY".equals(firstWord)) {
-        tryChars = firstArgument(reader, line);
+        tryChars = singleArgument(reader, line);
       } else if ("REP".equals(firstWord)) {
         int count = parseNum(reader, line);
         for (int i = 0; i < count; i++) {
           String[] parts = splitBySpace(reader, reader.readLine(), 3, Integer.MAX_VALUE);
           repTable.add(new RepEntry(parts[1], parts[2]));
         }
-      } else if ("MAP".equals(firstWord)) {
-        int count = parseNum(reader, line);
-        for (int i = 0; i < count; i++) {
-          mapTable.add(parseMapEntry(reader, reader.readLine()));
-        }
       } else if ("KEY".equals(firstWord)) {
         neighborKeyGroups = singleArgument(reader, line).split("\\|");
       } else if ("NOSPLITSUGS".equals(firstWord)) {
         enableSplitSuggestions = false;
-      } else if ("MAXNGRAMSUGS".equals(firstWord)) {
-        maxNGramSuggestions = Integer.parseInt(singleArgument(reader, line));
-      } else if ("MAXDIFF".equals(firstWord)) {
-        int i = Integer.parseInt(singleArgument(reader, line));
-        if (i < 0 || i > 10) {
-          throw new ParseException("MAXDIFF should be between 0 and 10", reader.getLineNumber());
-        }
-        maxDiff = i;
-      } else if ("ONLYMAXDIFF".equals(firstWord)) {
-        onlyMaxDiff = true;
       } else if ("FORBIDDENWORD".equals(firstWord)) {
         forbiddenword = flagParsingStrategy.parseFlag(singleArgument(reader, line));
-      } else if ("NOSUGGEST".equals(firstWord)) {
-        noSuggest = flagParsingStrategy.parseFlag(singleArgument(reader, line));
-      } else if ("SUBSTANDARD".equals(firstWord)) {
-        subStandard = flagParsingStrategy.parseFlag(singleArgument(reader, line));
       } else if ("COMPOUNDMIN".equals(firstWord)) {
         compoundMin = Math.max(1, parseNum(reader, line));
       } else if ("COMPOUNDWORDMAX".equals(firstWord)) {
@@ -470,13 +442,6 @@ public class Dictionary {
           checkCompoundPatterns.add(
               new CheckCompoundPattern(reader.readLine(), flagParsingStrategy, this));
         }
-      } else if ("SET".equals(firstWord)) {
-        checkCriticalDirectiveSame(
-            "SET", reader, decoder.charset(), getDecoder(singleArgument(reader, line)).charset());
-      } else if ("FLAG".equals(firstWord)) {
-        FlagParsingStrategy strategy = getFlagParsingStrategy(line, decoder.charset());
-        checkCriticalDirectiveSame(
-            "FLAG", reader, flagParsingStrategy.getClass(), strategy.getClass());
       }
     }
 
@@ -500,39 +465,7 @@ public class Dictionary {
     stripOffsets[currentIndex] = currentOffset;
   }
 
-  private void checkCriticalDirectiveSame(
-      String directive, LineNumberReader reader, Object expected, Object actual)
-      throws ParseException {
-    if (!expected.equals(actual)) {
-      throw new ParseException(
-          directive
-              + " directive should occur at most once, and in the first "
-              + MAX_PROLOGUE_SCAN_WINDOW
-              + " bytes of the *.aff file",
-          reader.getLineNumber());
-    }
-  }
-
-  private List<String> parseMapEntry(LineNumberReader reader, String line) throws ParseException {
-    String unparsed = firstArgument(reader, line);
-    List<String> mapEntry = new ArrayList<>();
-    for (int j = 0; j < unparsed.length(); j++) {
-      if (unparsed.charAt(j) == '(') {
-        int closing = unparsed.indexOf(')', j);
-        if (closing < 0) {
-          throw new ParseException("Unclosed parenthesis: " + line, reader.getLineNumber());
-        }
-
-        mapEntry.add(unparsed.substring(j + 1, closing));
-        j = closing;
-      } else {
-        mapEntry.add(String.valueOf(unparsed.charAt(j)));
-      }
-    }
-    return mapEntry;
-  }
-
-  boolean hasLanguage(String... langCodes) {
+  private boolean hasLanguage(String... langCodes) {
     if (language == null) return false;
     String langCode = extractLanguageCode(language);
     for (String code : langCodes) {
@@ -556,10 +489,6 @@ public class Dictionary {
     return splitBySpace(reader, line, 2)[1];
   }
 
-  private String firstArgument(LineNumberReader reader, String line) throws ParseException {
-    return splitBySpace(reader, line, 2, Integer.MAX_VALUE)[1];
-  }
-
   private String[] splitBySpace(LineNumberReader reader, String line, int expectedParts)
       throws ParseException {
     return splitBySpace(reader, line, expectedParts, expectedParts);
@@ -654,22 +583,17 @@ public class Dictionary {
       LineNumberReader reader,
       String conditionPattern,
       Map<String, Integer> seenPatterns,
-      Map<String, Integer> seenStrips,
-      FlagEnumerator flags)
+      Map<String, Integer> seenStrips)
       throws IOException, ParseException {
 
+    BytesRefBuilder scratch = new BytesRefBuilder();
     StringBuilder sb = new StringBuilder();
     String[] args = header.split("\\s+");
 
     boolean crossProduct = args[2].equals("Y");
-    boolean isSuffix = conditionPattern.equals(SUFFIX_CONDITION_REGEX);
+    boolean isSuffix = conditionPattern.equals(SUFFIX_CONDITION_REGEX_PATTERN);
 
-    int numLines;
-    try {
-      numLines = Integer.parseInt(args[3]);
-    } catch (NumberFormatException e) {
-      return;
-    }
+    int numLines = Integer.parseInt(args[3]);
     affixData = ArrayUtil.grow(affixData, currentAffix * 4 + numLines * 4);
 
     for (int i = 0; i < numLines; i++) {
@@ -693,6 +617,7 @@ public class Dictionary {
         }
 
         appendFlags = flagParsingStrategy.parseFlags(flagPart);
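+        // keep the parsed flags sorted so the binary search in hasFlag(char[], char) stays valid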
+        Arrays.sort(appendFlags);
         twoStageAffix = true;
       }
       // zero affix -> empty string
@@ -751,7 +676,8 @@ public class Dictionary {
         appendFlags = NOFLAGS;
       }
 
-      int appendFlagsOrd = flags.add(appendFlags);
+      encodeFlags(scratch, appendFlags);
+      int appendFlagsOrd = flagLookup.add(scratch.get());
       if (appendFlagsOrd < 0) {
         // already exists in our hash
         appendFlagsOrd = (-appendFlagsOrd) - 1;
@@ -798,14 +724,6 @@ public class Dictionary {
     return affixData[affixIndex * 4 + offset];
   }
 
-  boolean isCrossProduct(int affix) {
-    return (affixData(affix, AFFIX_CONDITION) & 1) == 1;
-  }
-
-  int getAffixCondition(int affix) {
-    return affixData(affix, AFFIX_CONDITION) >>> 1;
-  }
-
   private FST<CharsRef> parseConversions(LineNumberReader reader, int num)
       throws IOException, ParseException {
     Map<String, String> mappings = new TreeMap<>();
@@ -831,36 +749,31 @@ public class Dictionary {
   private static final byte[] BOM_UTF8 = {(byte) 0xef, (byte) 0xbb, (byte) 0xbf};
 
   /** Parses the encoding and flag format specified in the provided InputStream */
-  private void readConfig(InputStream stream, Charset streamCharset)
-      throws IOException, ParseException {
+  private void readConfig(BufferedInputStream stream) throws IOException, ParseException {
+    // I assume we don't support other BOMs (utf16, etc.)? We could trivially add them
+    // via maybeConsume() with the proper BOM, but the hunspell repo doesn't seem to
+    // contain any such exotic examples.
+    Charset streamCharset;
+    if (maybeConsume(stream, BOM_UTF8)) {
+      streamCharset = StandardCharsets.UTF_8;
+    } else {
+      streamCharset = DEFAULT_CHARSET;
+    }
+
+    // TODO: can these flags change throughout the file? If not, we can abort sooner. And
+    // then we wouldn't even need to create a temp file for the affix stream - a large enough
+    // leading buffer (BufferedInputStream) would be sufficient?
     LineNumberReader reader = new LineNumberReader(new InputStreamReader(stream, streamCharset));
     String line;
-    String flagLine = null;
-    boolean charsetFound = false;
-    boolean flagFound = false;
     while ((line = reader.readLine()) != null) {
       if (line.isBlank()) continue;
 
       String firstWord = line.split("\\s")[0];
       if ("SET".equals(firstWord)) {
         decoder = getDecoder(singleArgument(reader, line));
-        charsetFound = true;
       } else if ("FLAG".equals(firstWord)) {
-        // Preserve the flag line for parsing later since we need the decoder's charset
-        // and just in case they come out of order.
-        flagLine = line;
-        flagFound = true;
-      } else {
-        continue;
+        flagParsingStrategy = getFlagParsingStrategy(line, decoder.charset());
       }
-
-      if (charsetFound && flagFound) {
-        break;
-      }
-    }
-
-    if (flagFound) {
-      flagParsingStrategy = getFlagParsingStrategy(flagLine, decoder.charset());
     }
   }
 
@@ -972,15 +885,14 @@ public class Dictionary {
         || ch == MORPH_SEPARATOR; // BINARY EXECUTABLES EMBEDDED IN ZULU DICTIONARIES!!!!!!!
   }
 
-  private static int morphBoundary(String line) {
+  static int morphBoundary(String line) {
     int end = indexOfSpaceOrTab(line, 0);
     if (end == -1) {
       return line.length();
     }
     while (end >= 0 && end < line.length()) {
       if (line.charAt(end) == '\t'
-          || end > 0
-              && end + 3 < line.length()
+          || end + 3 < line.length()
               && Character.isLetter(line.charAt(end + 1))
               && Character.isLetter(line.charAt(end + 2))
               && line.charAt(end + 3) == ':') {
@@ -1152,11 +1064,10 @@ public class Dictionary {
     return sorted;
   }
 
-  private FST<IntsRef> readSortedDictionaries(
-      Directory tempDir, String sorted, FlagEnumerator flags) throws IOException {
+  private FST<IntsRef> readSortedDictionaries(Directory tempDir, String sorted) throws IOException {
     boolean success = false;
 
-    EntryGrouper grouper = new EntryGrouper(flags);
+    EntryGrouper grouper = new EntryGrouper();
 
     try (ByteSequencesReader reader =
         new ByteSequencesReader(tempDir.openChecksumInput(sorted, IOContext.READONCE), sorted)) {
@@ -1193,6 +1104,7 @@ public class Dictionary {
             wordForm = ArrayUtil.growExact(wordForm, wordForm.length + 1);
             wordForm[wordForm.length - 1] = HIDDEN_FLAG;
           }
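+          // sort this entry's flags so binarySearch-based hasFlag checks work on them later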
+          Arrays.sort(wordForm);
           entry = line.substring(0, flagSep);
         }
         // we possibly have morphological data
@@ -1279,13 +1191,9 @@ public class Dictionary {
         new FSTCompiler<>(FST.INPUT_TYPE.BYTE4, IntSequenceOutputs.getSingleton());
     private final List<char[]> group = new ArrayList<>();
     private final List<Integer> stemExceptionIDs = new ArrayList<>();
+    private final BytesRefBuilder flagsScratch = new BytesRefBuilder();
     private final IntsRefBuilder scratchInts = new IntsRefBuilder();
     private String currentEntry = null;
-    private final FlagEnumerator flagEnumerator;
-
-    EntryGrouper(FlagEnumerator flagEnumerator) {
-      this.flagEnumerator = flagEnumerator;
-    }
 
     void add(String entry, char[] flags, int stemExceptionID) throws IOException {
       if (!entry.equals(currentEntry)) {
@@ -1321,7 +1229,12 @@ public class Dictionary {
           continue;
         }
 
-        currentOrds.append(flagEnumerator.add(flags));
+        encodeFlags(flagsScratch, flags);
+        int ord = flagLookup.add(flagsScratch.get());
+        if (ord < 0) {
+          ord = -ord - 1; // already exists in our hash
+        }
+        currentOrds.append(ord);
         if (hasStemExceptions) {
           currentOrds.append(stemExceptionIDs.get(i));
         }
@@ -1335,13 +1248,34 @@ public class Dictionary {
     }
   }
 
-  private static boolean hasHiddenFlag(char[] flags) {
-    for (char flag : flags) {
-      if (flag == HIDDEN_FLAG) {
-        return true;
-      }
+  static boolean hasHiddenFlag(char[] flags) {
+    return hasFlag(flags, HIDDEN_FLAG);
+  }
+
+  char[] decodeFlags(int entryId, BytesRef b) {
+    this.flagLookup.get(entryId, b);
+
+    if (b.length == 0) {
+      return CharsRef.EMPTY_CHARS;
+    }
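+    // each flag was stored as two big-endian bytes (see encodeFlags); reassemble the chars here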
+    int len = b.length >>> 1;
+    char[] flags = new char[len];
+    int upto = 0;
+    int end = b.offset + b.length;
+    for (int i = b.offset; i < end; i += 2) {
+      flags[upto++] = (char) ((b.bytes[i] << 8) | (b.bytes[i + 1] & 0xff));
+    }
+    return flags;
+  }
+
+  private static void encodeFlags(BytesRefBuilder b, char[] flags) {
+    int len = flags.length << 1;
+    b.grow(len);
+    b.clear();
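+    // write each char flag as two bytes, high byte first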
+    for (int flag : flags) {
+      b.append((byte) ((flag >> 8) & 0xff));
+      b.append((byte) (flag & 0xff));
     }
-    return false;
   }
 
   private void parseAlias(String line) {
@@ -1407,10 +1341,18 @@ public class Dictionary {
         .collect(Collectors.toList());
   }
 
-  boolean hasFlag(IntsRef forms, char flag) {
+  boolean isForbiddenWord(char[] word, int length, BytesRef scratch) {
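+    // a word is forbidden when FORBIDDENWORD is configured and any of its entries carries that flag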
+    if (forbiddenword != FLAG_UNSET) {
+      IntsRef forms = lookupWord(word, 0, length);
+      return forms != null && hasFlag(forms, forbiddenword, scratch);
+    }
+    return false;
+  }
+
+  boolean hasFlag(IntsRef forms, char flag, BytesRef scratch) {
     int formStep = formStep();
     for (int i = 0; i < forms.length; i += formStep) {
-      if (hasFlag(forms.ints[forms.offset + i], flag)) {
+      if (hasFlag(forms.ints[forms.offset + i], flag, scratch)) {
         return true;
       }
     }
@@ -1472,26 +1414,30 @@ public class Dictionary {
   private static class NumFlagParsingStrategy extends FlagParsingStrategy {
     @Override
     public char[] parseFlags(String rawFlags) {
-      StringBuilder result = new StringBuilder();
-      StringBuilder group = new StringBuilder();
-      for (int i = 0; i <= rawFlags.length(); i++) {
-        if (i == rawFlags.length() || rawFlags.charAt(i) == ',') {
-          if (group.length() > 0) { // ignoring empty flags (this happens in danish, for example)
-            int flag = Integer.parseInt(group, 0, group.length(), 10);
-            if (flag >= DEFAULT_FLAGS) {
-              // accept 0 due to https://github.com/hunspell/hunspell/issues/708
-              throw new IllegalArgumentException(
-                  "Num flags should be between 0 and " + DEFAULT_FLAGS + ", found " + flag);
-            }
-            result.append((char) flag);
-            group.setLength(0);
-          }
-        } else if (rawFlags.charAt(i) >= '0' && rawFlags.charAt(i) <= '9') {
-          group.append(rawFlags.charAt(i));
+      String[] rawFlagParts = rawFlags.trim().split(",");
+      char[] flags = new char[rawFlagParts.length];
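+      // numeric flags are comma-separated decimal numbers; each parsed value is stored as a single char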
+      int upto = 0;
+
+      for (String rawFlagPart : rawFlagParts) {
+        // note, removing the trailing X/leading I for Nepali... what is the rule here?!
+        String replacement = rawFlagPart.replaceAll("[^0-9]", "");
+        // note, ignoring empty flags (this happens in Danish, for example)
+        if (replacement.isEmpty()) {
+          continue;
+        }
+        int flag = Integer.parseInt(replacement);
+        if (flag >= Character.MAX_VALUE) { // read default flags as well
+          // accept 0 due to https://github.com/hunspell/hunspell/issues/708
+          throw new IllegalArgumentException(
+              "Num flags should be between 0 and " + DEFAULT_FLAGS + ", found " + flag);
         }
+        flags[upto++] = (char) flag;
       }
 
-      return result.toString().toCharArray();
+      if (upto < flags.length) {
+        flags = ArrayUtil.copyOfSubArray(flags, 0, upto);
+      }
+      return flags;
     }
   }
 
@@ -1522,8 +1468,12 @@ public class Dictionary {
     }
   }
 
-  boolean hasFlag(int entryId, char flag) {
-    return flagLookup.hasFlag(entryId, flag);
+  boolean hasFlag(int entryId, char flag, BytesRef scratch) {
+    return flag != FLAG_UNSET && hasFlag(decodeFlags(entryId, scratch), flag);
+  }
+
+  static boolean hasFlag(char[] flags, char flag) {
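+    // relies on the flags array having been sorted when it was parsed/decoded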
+    return flag != FLAG_UNSET && Arrays.binarySearch(flags, flag) >= 0;
   }
 
   CharSequence cleanInput(CharSequence input, StringBuilder reuse) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/FlagEnumerator.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/FlagEnumerator.java
deleted file mode 100644
index 57aac40..0000000
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/FlagEnumerator.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.analysis.hunspell;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.lucene.util.ArrayUtil;
-
-/**
- * A structure similar to {@link org.apache.lucene.util.BytesRefHash}, but specialized for sorted
- * char sequences used for Hunspell flags. It deduplicates flag sequences, gives them unique ids,
- * stores the sequences in a contiguous char[] (via {@link #finish()} and allows to query presence
- * of the flags later via {@link Lookup#hasFlag}.
- */
-class FlagEnumerator {
-  private final StringBuilder builder = new StringBuilder();
-  private final Map<String, Integer> indices = new HashMap<>();
-
-  FlagEnumerator() {
-    add(new char[0]); // no flags -> ord 0
-  }
-
-  int add(char[] chars) {
-    Arrays.sort(chars);
-    String key = new String(chars);
-    if (key.length() > Character.MAX_VALUE) {
-      throw new IllegalArgumentException("Too many flags: " + key);
-    }
-
-    Integer existing = indices.get(key);
-    if (existing != null) {
-      return existing;
-    }
-
-    int result = builder.length();
-    indices.put(key, result);
-    builder.append((char) key.length());
-    builder.append(key);
-    return result;
-  }
-
-  Lookup finish() {
-    char[] result = new char[builder.length()];
-    builder.getChars(0, builder.length(), result, 0);
-    return new Lookup(result);
-  }
-
-  static class Lookup {
-    private final char[] data;
-
-    private Lookup(char[] data) {
-      this.data = data;
-    }
-
-    boolean hasFlag(int entryId, char flag) {
-      if (entryId < 0 || flag == Dictionary.FLAG_UNSET) return false;
-
-      int length = data[entryId];
-      for (int i = entryId + 1; i < entryId + 1 + length; i++) {
-        char c = data[i];
-        if (c == flag) return true;
-        if (c > flag) return false;
-      }
-      return false;
-    }
-
-    char[] getFlags(int entryId) {
-      return ArrayUtil.copyOfSubArray(data, entryId + 1, entryId + 1 + data[entryId]);
-    }
-  }
-}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java
deleted file mode 100644
index 9d9c582..0000000
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.analysis.hunspell;
-
-import static org.apache.lucene.analysis.hunspell.Dictionary.AFFIX_APPEND;
-import static org.apache.lucene.analysis.hunspell.Dictionary.AFFIX_FLAG;
-import static org.apache.lucene.analysis.hunspell.Dictionary.AFFIX_STRIP_ORD;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Objects;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.function.BiConsumer;
-import java.util.stream.Collectors;
-import org.apache.lucene.util.IntsRef;
-import org.apache.lucene.util.fst.FST;
-import org.apache.lucene.util.fst.IntsRefFSTEnum;
-
-/**
- * A class that traverses the entire dictionary and applies affix rules to check if those yield
- * correct suggestions similar enough to the given misspelled word
- */
-class GeneratingSuggester {
-  private static final int MAX_ROOTS = 100;
-  private static final int MAX_WORDS = 100;
-  private static final int MAX_GUESSES = 200;
-  private final Dictionary dictionary;
-  private final Hunspell speller;
-
-  GeneratingSuggester(Hunspell speller) {
-    this.dictionary = speller.dictionary;
-    this.speller = speller;
-  }
-
-  List<String> suggest(String word, WordCase originalCase, Set<String> prevSuggestions) {
-    List<Weighted<Root<String>>> roots = findSimilarDictionaryEntries(word, originalCase);
-    List<Weighted<String>> expanded = expandRoots(word, roots);
-    TreeSet<Weighted<String>> bySimilarity = rankBySimilarity(word, expanded);
-    return getMostRelevantSuggestions(bySimilarity, prevSuggestions);
-  }
-
-  private List<Weighted<Root<String>>> findSimilarDictionaryEntries(
-      String word, WordCase originalCase) {
-    TreeSet<Weighted<Root<String>>> roots = new TreeSet<>();
-    processFST(
-        dictionary.words,
-        (key, forms) -> {
-          if (Math.abs(key.length - word.length()) > 4) return;
-
-          String root = toString(key);
-          List<Root<String>> entries = filterSuitableEntries(root, forms);
-          if (entries.isEmpty()) return;
-
-          if (originalCase == WordCase.LOWER
-              && WordCase.caseOf(root) == WordCase.TITLE
-              && !dictionary.hasLanguage("de")) {
-            return;
-          }
-
-          String lower = dictionary.toLowerCase(root);
-          int sc =
-              ngram(3, word, lower, EnumSet.of(NGramOptions.LONGER_WORSE))
-                  + commonPrefix(word, root);
-
-          entries.forEach(e -> roots.add(new Weighted<>(e, sc)));
-        });
-    return roots.stream().limit(MAX_ROOTS).collect(Collectors.toList());
-  }
-
-  private void processFST(FST<IntsRef> fst, BiConsumer<IntsRef, IntsRef> keyValueConsumer) {
-    if (fst == null) return;
-    try {
-      IntsRefFSTEnum<IntsRef> fstEnum = new IntsRefFSTEnum<>(fst);
-      IntsRefFSTEnum.InputOutput<IntsRef> mapping;
-      while ((mapping = fstEnum.next()) != null) {
-        keyValueConsumer.accept(mapping.input, mapping.output);
-      }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private static String toString(IntsRef key) {
-    char[] chars = new char[key.length];
-    for (int i = 0; i < key.length; i++) {
-      chars[i] = (char) key.ints[i + key.offset];
-    }
-    return new String(chars);
-  }
-
-  private List<Root<String>> filterSuitableEntries(String word, IntsRef forms) {
-    List<Root<String>> result = new ArrayList<>();
-    for (int i = 0; i < forms.length; i += dictionary.formStep()) {
-      int entryId = forms.ints[forms.offset + i];
-      if (dictionary.hasFlag(entryId, dictionary.forbiddenword)
-          || dictionary.hasFlag(entryId, dictionary.noSuggest)
-          || dictionary.hasFlag(entryId, Dictionary.HIDDEN_FLAG)
-          || dictionary.hasFlag(entryId, dictionary.onlyincompound)) {
-        continue;
-      }
-      result.add(new Root<>(word, entryId));
-    }
-
-    return result;
-  }
-
-  private List<Weighted<String>> expandRoots(
-      String misspelled, List<Weighted<Root<String>>> roots) {
-    int thresh = calcThreshold(misspelled);
-
-    TreeSet<Weighted<String>> expanded = new TreeSet<>();
-    for (Weighted<Root<String>> weighted : roots) {
-      for (String guess : expandRoot(weighted.word, misspelled)) {
-        String lower = dictionary.toLowerCase(guess);
-        int sc =
-            ngram(misspelled.length(), misspelled, lower, EnumSet.of(NGramOptions.ANY_MISMATCH))
-                + commonPrefix(misspelled, guess);
-        if (sc > thresh) {
-          expanded.add(new Weighted<>(guess, sc));
-        }
-      }
-    }
-    return expanded.stream().limit(MAX_GUESSES).collect(Collectors.toList());
-  }
-
-  // find minimum threshold for a passable suggestion
-  // mangle original word three different ways
-  // and score them to generate a minimum acceptable score
-  private static int calcThreshold(String word) {
-    int thresh = 0;
-    for (int sp = 1; sp < 4; sp++) {
-      char[] mw = word.toCharArray();
-      for (int k = sp; k < word.length(); k += 4) {
-        mw[k] = '*';
-      }
-
-      thresh += ngram(word.length(), word, new String(mw), EnumSet.of(NGramOptions.ANY_MISMATCH));
-    }
-    return thresh / 3 - 1;
-  }
-
-  private List<String> expandRoot(Root<String> root, String misspelled) {
-    List<String> crossProducts = new ArrayList<>();
-    Set<String> result = new LinkedHashSet<>();
-
-    if (!dictionary.hasFlag(root.entryId, dictionary.needaffix)) {
-      result.add(root.word);
-    }
-
-    // suffixes
-    processFST(
-        dictionary.suffixes,
-        (key, ids) -> {
-          String suffix = new StringBuilder(toString(key)).reverse().toString();
-          if (misspelled.length() <= suffix.length() || !misspelled.endsWith(suffix)) return;
-
-          for (int i = 0; i < ids.length; i++) {
-            int suffixId = ids.ints[ids.offset + i];
-            if (!hasCompatibleFlags(root, suffixId) || !checkAffixCondition(suffixId, root.word)) {
-              continue;
-            }
-
-            String withSuffix =
-                root.word.substring(0, root.word.length() - affixStripLength(suffixId)) + suffix;
-            result.add(withSuffix);
-            if (dictionary.isCrossProduct(suffixId)) {
-              crossProducts.add(withSuffix);
-            }
-          }
-        });
-
-    // cross-product prefixes
-    processFST(
-        dictionary.prefixes,
-        (key, ids) -> {
-          String prefix = toString(key);
-          if (misspelled.length() <= prefix.length() || !misspelled.startsWith(prefix)) return;
-
-          for (int i = 0; i < ids.length; i++) {
-            int prefixId = ids.ints[ids.offset + i];
-            if (!dictionary.hasFlag(root.entryId, dictionary.affixData(prefixId, AFFIX_FLAG))
-                || !dictionary.isCrossProduct(prefixId)) {
-              continue;
-            }
-
-            for (String suffixed : crossProducts) {
-              if (checkAffixCondition(prefixId, suffixed)) {
-                result.add(prefix + suffixed.substring(affixStripLength(prefixId)));
-              }
-            }
-          }
-        });
-
-    // pure prefixes
-    processFST(
-        dictionary.prefixes,
-        (key, ids) -> {
-          String prefix = toString(key);
-          if (misspelled.length() <= prefix.length() || !misspelled.startsWith(prefix)) return;
-
-          for (int i = 0; i < ids.length; i++) {
-            int prefixId = ids.ints[ids.offset + i];
-            if (hasCompatibleFlags(root, prefixId) && checkAffixCondition(prefixId, root.word)) {
-              result.add(prefix + root.word.substring(affixStripLength(prefixId)));
-            }
-          }
-        });
-
-    return result.stream().limit(MAX_WORDS).collect(Collectors.toList());
-  }
-
-  private boolean hasCompatibleFlags(Root<?> root, int affixId) {
-    if (!dictionary.hasFlag(root.entryId, dictionary.affixData(affixId, AFFIX_FLAG))) {
-      return false;
-    }
-
-    int append = dictionary.affixData(affixId, AFFIX_APPEND);
-    return !dictionary.hasFlag(append, dictionary.needaffix)
-        && !dictionary.hasFlag(append, dictionary.circumfix)
-        && !dictionary.hasFlag(append, dictionary.onlyincompound);
-  }
-
-  private boolean checkAffixCondition(int suffixId, String stem) {
-    int condition = dictionary.getAffixCondition(suffixId);
-    return condition == 0 || dictionary.patterns.get(condition).run(stem);
-  }
-
-  private int affixStripLength(int affixId) {
-    char stripOrd = dictionary.affixData(affixId, AFFIX_STRIP_ORD);
-    return dictionary.stripOffsets[stripOrd + 1] - dictionary.stripOffsets[stripOrd];
-  }
-
-  private TreeSet<Weighted<String>> rankBySimilarity(String word, List<Weighted<String>> expanded) {
-    double fact = (10.0 - dictionary.maxDiff) / 5.0;
-    TreeSet<Weighted<String>> bySimilarity = new TreeSet<>();
-    for (Weighted<String> weighted : expanded) {
-      String guess = weighted.word;
-      String lower = dictionary.toLowerCase(guess);
-      if (lower.equals(word)) {
-        bySimilarity.add(new Weighted<>(guess, weighted.score + 2000));
-        break;
-      }
-
-      int re =
-          ngram(2, word, lower, EnumSet.of(NGramOptions.ANY_MISMATCH, NGramOptions.WEIGHTED))
-              + ngram(2, lower, word, EnumSet.of(NGramOptions.ANY_MISMATCH, NGramOptions.WEIGHTED));
-
-      int score =
-          2 * lcs(word, lower)
-              - Math.abs(word.length() - lower.length())
-              + commonCharacterPositionScore(word, lower)
-              + commonPrefix(word, lower)
-              + ngram(4, word, lower, EnumSet.of(NGramOptions.ANY_MISMATCH))
-              + re
-              + (re < (word.length() + lower.length()) * fact ? -1000 : 0);
-      bySimilarity.add(new Weighted<>(guess, score));
-    }
-    return bySimilarity;
-  }
-
-  private List<String> getMostRelevantSuggestions(
-      TreeSet<Weighted<String>> bySimilarity, Set<String> prevSuggestions) {
-    List<String> result = new ArrayList<>();
-    boolean hasExcellent = false;
-    for (Weighted<String> weighted : bySimilarity) {
-      if (weighted.score > 1000) {
-        hasExcellent = true;
-      } else if (hasExcellent) {
-        break; // leave only excellent suggestions, if any
-      }
-
-      boolean bad = weighted.score < -100;
-      // keep the best ngram suggestions, unless in ONLYMAXDIFF mode
-      if (bad && (!result.isEmpty() || dictionary.onlyMaxDiff)) {
-        break;
-      }
-
-      if (prevSuggestions.stream().noneMatch(weighted.word::contains)
-          && result.stream().noneMatch(weighted.word::contains)
-          && speller.checkWord(weighted.word)) {
-        result.add(weighted.word);
-        if (result.size() > dictionary.maxNGramSuggestions) {
-          break;
-        }
-      }
-
-      if (bad) {
-        break;
-      }
-    }
-    return result;
-  }
-
-  private static int commonPrefix(String s1, String s2) {
-    int i = 0;
-    int limit = Math.min(s1.length(), s2.length());
-    while (i < limit && s1.charAt(i) == s2.charAt(i)) {
-      i++;
-    }
-    return i;
-  }
-
-  // generate an n-gram score comparing s1 and s2
-  private static int ngram(int n, String s1, String s2, EnumSet<NGramOptions> opt) {
-    int score = 0;
-    int l1 = s1.length();
-    int l2 = s2.length();
-    if (l2 == 0) {
-      return 0;
-    }
-    for (int j = 1; j <= n; j++) {
-      int ns = 0;
-      for (int i = 0; i <= (l1 - j); i++) {
-        if (s2.contains(s1.substring(i, i + j))) {
-          ns++;
-        } else if (opt.contains(NGramOptions.WEIGHTED)) {
-          ns--;
-          if (i == 0 || i == l1 - j) {
-            ns--; // side weight
-          }
-        }
-      }
-      score = score + ns;
-      if (ns < 2 && !opt.contains(NGramOptions.WEIGHTED)) {
-        break;
-      }
-    }
-
-    int ns = 0;
-    if (opt.contains(NGramOptions.LONGER_WORSE)) {
-      ns = (l2 - l1) - 2;
-    }
-    if (opt.contains(NGramOptions.ANY_MISMATCH)) {
-      ns = Math.abs(l2 - l1) - 2;
-    }
-    return score - Math.max(ns, 0);
-  }
-
-  private static int lcs(String s1, String s2) {
-    int[] lengths = new int[s2.length() + 1];
-
-    for (int i = 1; i <= s1.length(); i++) {
-      int prev = 0;
-      for (int j = 1; j <= s2.length(); j++) {
-        int cur = lengths[j];
-        lengths[j] =
-            s1.charAt(i - 1) == s2.charAt(j - 1) ? prev + 1 : Math.max(cur, lengths[j - 1]);
-        prev = cur;
-      }
-    }
-    return lengths[s2.length()];
-  }
-
-  private static int commonCharacterPositionScore(String s1, String s2) {
-    int num = 0;
-    int diffPos1 = -1;
-    int diffPos2 = -1;
-    int diff = 0;
-    int i;
-    for (i = 0; i < s1.length() && i < s2.length(); ++i) {
-      if (s1.charAt(i) == s2.charAt(i)) {
-        num++;
-      } else {
-        if (diff == 0) diffPos1 = i;
-        else if (diff == 1) diffPos2 = i;
-        diff++;
-      }
-    }
-    int commonScore = num > 0 ? 1 : 0;
-    if (diff == 2
-        && i == s1.length()
-        && i == s2.length()
-        && s1.charAt(diffPos1) == s2.charAt(diffPos2)
-        && s1.charAt(diffPos2) == s2.charAt(diffPos1)) {
-      return commonScore + 10;
-    }
-    return commonScore;
-  }
-
-  private enum NGramOptions {
-    WEIGHTED,
-    LONGER_WORSE,
-    ANY_MISMATCH
-  }
-
-  private static class Weighted<T extends Comparable<T>> implements Comparable<Weighted<T>> {
-    final T word;
-    final int score;
-
-    Weighted(T word, int score) {
-      this.word = word;
-      this.score = score;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) return true;
-      if (!(o instanceof Weighted)) return false;
-      @SuppressWarnings("unchecked")
-      Weighted<T> that = (Weighted<T>) o;
-      return score == that.score && word.equals(that.word);
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(word, score);
-    }
-
-    @Override
-    public String toString() {
-      return word + "(" + score + ")";
-    }
-
-    @Override
-    public int compareTo(Weighted<T> o) {
-      int cmp = Integer.compare(score, o.score);
-      return cmp != 0 ? -cmp : word.compareTo(o.word);
-    }
-  }
-}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ModifyingSuggester.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ModifyingSuggester.java
index 5017ff2..0c60e1b 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ModifyingSuggester.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ModifyingSuggester.java
@@ -23,33 +23,31 @@ import java.util.List;
 import java.util.Locale;
 import java.util.stream.Collectors;
 
-/** A class that modifies the given misspelled word in various ways to get correct suggestions */
 class ModifyingSuggester {
   private static final int MAX_CHAR_DISTANCE = 4;
   private final LinkedHashSet<String> result = new LinkedHashSet<>();
   private final char[] tryChars;
-  private final Hunspell speller;
-  boolean hasGoodSuggestions;
+  private final SpellChecker speller;
 
-  ModifyingSuggester(Hunspell speller) {
+  ModifyingSuggester(SpellChecker speller) {
     this.speller = speller;
     tryChars = speller.dictionary.tryChars.toCharArray();
   }
 
-  LinkedHashSet<String> suggest(String word, WordCase wordCase) {
-    String low = wordCase != WordCase.LOWER ? speller.dictionary.toLowerCase(word) : word;
-    if (wordCase == WordCase.UPPER || wordCase == WordCase.MIXED) {
-      trySuggestion(low);
-    }
-
+  LinkedHashSet<String> suggest(String word) {
     tryVariationsOf(word);
 
-    if (wordCase == WordCase.TITLE) {
-      tryVariationsOf(low);
-    } else if (wordCase == WordCase.UPPER) {
-      tryVariationsOf(low);
+    WordCase wc = WordCase.caseOf(word);
+
+    if (wc == WordCase.UPPER) {
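+      // for all-caps words, also try the lower- and title-cased variants, then restore
+      // upper/title case on the collected results via tryUpperCase below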
+      tryVariationsOf(speller.dictionary.toLowerCase(word));
       tryVariationsOf(speller.dictionary.toTitleCase(word));
-    } else if (wordCase == WordCase.MIXED) {
+      return result.stream()
+          .map(this::tryUpperCase)
+          .collect(Collectors.toCollection(LinkedHashSet::new));
+    }
+
+    if (wc == WordCase.MIXED) {
       int dot = word.indexOf('.');
       if (dot > 0
           && dot < word.length() - 1
@@ -57,46 +55,25 @@ class ModifyingSuggester {
         result.add(word.substring(0, dot + 1) + " " + word.substring(dot + 1));
       }
 
-      boolean capitalized = Character.isUpperCase(word.charAt(0));
-      if (capitalized) {
-        tryVariationsOf(speller.dictionary.caseFold(word.charAt(0)) + word.substring(1));
-      }
-
-      tryVariationsOf(low);
-
-      if (capitalized) {
-        tryVariationsOf(speller.dictionary.toTitleCase(low));
-      }
-
-      return result.stream()
-          .map(s -> capitalizeAfterSpace(low, s))
-          .collect(Collectors.toCollection(LinkedHashSet::new));
+      tryVariationsOf(speller.dictionary.toLowerCase(word));
     }
 
     return result;
   }
 
-  // aNew -> "a New" (instead of "a new")
-  private String capitalizeAfterSpace(String lowMisspelled, String candidate) {
-    int space = candidate.indexOf(' ');
-    int tail = candidate.length() - space - 1;
-    if (space > 0
-        && lowMisspelled.regionMatches(lowMisspelled.length() - tail, candidate, space + 1, tail)) {
-      return candidate.substring(0, space + 1)
-          + Character.toUpperCase(candidate.charAt(space + 1))
-          + candidate.substring(space + 2);
+  private String tryUpperCase(String candidate) {
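+    // prefer an all-caps form when it is itself correct (or contains a space); otherwise
+    // fall back to the title-cased form, and finally to the candidate unchanged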
+    String upper = candidate.toUpperCase(Locale.ROOT);
+    if (upper.contains(" ") || speller.spell(upper)) {
+      return upper;
     }
-    return candidate;
+    String title = speller.dictionary.toTitleCase(candidate);
+    return speller.spell(title) ? title : candidate;
   }
 
   private void tryVariationsOf(String word) {
-    hasGoodSuggestions |= trySuggestion(word.toUpperCase(Locale.ROOT));
+    boolean hasGoodSuggestions = trySuggestion(word.toUpperCase(Locale.ROOT));
     hasGoodSuggestions |= tryRep(word);
 
-    if (!speller.dictionary.mapTable.isEmpty()) {
-      enumerateMapReplacements(word, "", 0);
-    }
-
     trySwappingChars(word);
     tryLongSwap(word);
     tryNeighborKeys(word);
@@ -139,27 +116,6 @@ class ModifyingSuggester {
     return result.size() > before;
   }
 
-  private void enumerateMapReplacements(String word, String accumulated, int offset) {
-    if (offset == word.length()) {
-      trySuggestion(accumulated);
-      return;
-    }
-
-    for (List<String> entries : speller.dictionary.mapTable) {
-      for (String entry : entries) {
-        if (word.regionMatches(offset, entry, 0, entry.length())) {
-          for (String replacement : entries) {
-            if (!entry.equals(replacement)) {
-              enumerateMapReplacements(word, accumulated + replacement, offset + entry.length());
-            }
-          }
-        }
-      }
-    }
-
-    enumerateMapReplacements(word, accumulated + word.charAt(offset), offset + 1);
-  }
-
   private boolean checkSimpleWord(String part) {
     return Boolean.TRUE.equals(speller.checkSimpleWord(part.toCharArray(), part.length(), null));
   }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Root.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Root.java
deleted file mode 100644
index e65992e..0000000
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Root.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.analysis.hunspell;
-
-import java.util.Objects;
-
-class Root<T extends CharSequence> implements Comparable<Root<T>> {
-  final T word;
-  final int entryId;
-
-  Root(T word, int entryId) {
-    this.word = word;
-    this.entryId = entryId;
-  }
-
-  @Override
-  public String toString() {
-    return word.toString();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (!(o instanceof Root)) return false;
-    @SuppressWarnings("unchecked")
-    Root<T> root = (Root<T>) o;
-    return entryId == root.entryId && word.equals(root.word);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(word, entryId);
-  }
-
-  @Override
-  public int compareTo(Root<T> o) {
-    return CharSequence.compare(word, o.word);
-  }
-}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java
similarity index 69%
rename from lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java
rename to lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java
index db0e3e4..53bf53e 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java
@@ -22,37 +22,25 @@ import static org.apache.lucene.analysis.hunspell.WordContext.COMPOUND_END;
 import static org.apache.lucene.analysis.hunspell.WordContext.COMPOUND_MIDDLE;
 import static org.apache.lucene.analysis.hunspell.WordContext.SIMPLE_WORD;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.LinkedHashSet;
 import java.util.List;
-import java.util.Locale;
 import java.util.Set;
-import java.util.stream.Collectors;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 
 /**
- * A spell checker based on Hunspell dictionaries. This class can be used in place of native
- * Hunspell for many languages for spell-checking and suggesting purposes. Note that not all
- * languages are supported yet. For example:
- *
- * <ul>
- *   <li>Hungarian (as it doesn't only rely on dictionaries, but has some logic directly in the
- *       source code
- *   <li>Languages with Unicode characters outside of the Basic Multilingual Plane
- *   <li>PHONE affix file option for suggestions
- * </ul>
- *
- * <p>The objects of this class are not thread-safe (but a single underlying Dictionary can be
- * shared by multiple spell-checkers in different threads).
+ * A spell checker based on Hunspell dictionaries. The objects of this class are not thread-safe
+ * (but a single underlying Dictionary can be shared by multiple spell-checkers in different
+ * threads). Not all Hunspell features are supported yet.
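+ *
+ * <p>A minimal usage sketch (hypothetical; constructing the {@code Dictionary} from the *.aff and
+ * *.dic streams is elided):
+ *
+ * <pre class="prettyprint">
+ *   Dictionary dictionary = ...; // parsed from the affix and dictionary files
+ *   SpellChecker checker = new SpellChecker(dictionary);
+ *   if (!checker.spell("exampel")) {
+ *     List&lt;String&gt; suggestions = checker.suggest("exampel");
+ *   }
+ * </pre>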
  */
-public class Hunspell {
+public class SpellChecker {
   final Dictionary dictionary;
   final Stemmer stemmer;
+  private final BytesRef scratch = new BytesRef();
 
-  public Hunspell(Dictionary dictionary) {
+  public SpellChecker(Dictionary dictionary) {
     this.dictionary = dictionary;
     stemmer = new Stemmer(dictionary);
   }
@@ -78,22 +66,17 @@ public class Hunspell {
     }
 
     char[] wordChars = word.toCharArray();
-    Boolean simpleResult = checkSimpleWord(wordChars, wordChars.length, null);
-    if (simpleResult != null) {
-      return simpleResult;
+    if (dictionary.isForbiddenWord(wordChars, wordChars.length, scratch)) {
+      return false;
     }
 
-    if (checkCompounds(wordChars, wordChars.length, null)) {
+    if (checkWord(wordChars, wordChars.length, null)) {
       return true;
     }
 
     WordCase wc = stemmer.caseOf(wordChars, wordChars.length);
-    if ((wc == WordCase.UPPER || wc == WordCase.TITLE)) {
-      Stemmer.CaseVariationProcessor variationProcessor =
-          (variant, varLength, originalCase) -> !checkWord(variant, varLength, originalCase);
-      if (!stemmer.varyCase(wordChars, wordChars.length, wc, variationProcessor)) {
-        return true;
-      }
+    if ((wc == WordCase.UPPER || wc == WordCase.TITLE) && checkCaseVariants(wordChars, wc)) {
+      return true;
     }
 
     if (dictionary.breaks.isNotEmpty() && !hasTooManyBreakOccurrences(word)) {
@@ -111,14 +94,53 @@ public class Hunspell {
     return spellClean(word.substring(0, length)) || spellClean(word.substring(0, length + 1));
   }
 
+  private boolean checkCaseVariants(char[] wordChars, WordCase wordCase) {
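+    // for ALL-CAPS input, first try the title-cased fold (plus apostrophe and sharp-s variants),
+    // then fall back to the fully lower-cased form unless the dotted-I rule forbids it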
+    char[] caseVariant = wordChars;
+    if (wordCase == WordCase.UPPER) {
+      caseVariant = stemmer.caseFoldTitle(caseVariant, wordChars.length);
+      if (checkWord(caseVariant, wordChars.length, wordCase)) {
+        return true;
+      }
+      char[] aposCase = Stemmer.capitalizeAfterApostrophe(caseVariant, wordChars.length);
+      if (aposCase != null && checkWord(aposCase, aposCase.length, wordCase)) {
+        return true;
+      }
+      for (char[] variation : stemmer.sharpSVariations(caseVariant, wordChars.length)) {
+        if (checkWord(variation, variation.length, null)) {
+          return true;
+        }
+      }
+    }
+
+    if (dictionary.isDotICaseChangeDisallowed(wordChars)) {
+      return false;
+    }
+
+    char[] lower = stemmer.caseFoldLower(caseVariant, wordChars.length);
+    if (checkWord(lower, wordChars.length, wordCase)) {
+      return true;
+    }
+    if (wordCase == WordCase.UPPER) {
+      for (char[] variation : stemmer.sharpSVariations(lower, wordChars.length)) {
+        if (checkWord(variation, variation.length, null)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   boolean checkWord(String word) {
     return checkWord(word.toCharArray(), word.length(), null);
   }
 
   Boolean checkSimpleWord(char[] wordChars, int length, WordCase originalCase) {
-    Root<CharsRef> entry = findStem(wordChars, 0, length, originalCase, SIMPLE_WORD);
-    if (entry != null) {
-      return !dictionary.hasFlag(entry.entryId, dictionary.forbiddenword);
+    if (dictionary.isForbiddenWord(wordChars, length, scratch)) {
+      return false;
+    }
+
+    if (findStem(wordChars, 0, length, originalCase, SIMPLE_WORD) != null) {
+      return true;
     }
 
     return null;
@@ -130,10 +152,6 @@ public class Hunspell {
       return simpleResult;
     }
 
-    return checkCompounds(wordChars, length, originalCase);
-  }
-
-  private boolean checkCompounds(char[] wordChars, int length, WordCase originalCase) {
     if (dictionary.compoundRules != null
         && checkCompoundRules(wordChars, 0, length, new ArrayList<>())) {
       return true;
@@ -146,29 +164,22 @@ public class Hunspell {
     return false;
   }
 
-  private Root<CharsRef> findStem(
+  private CharsRef findStem(
       char[] wordChars, int offset, int length, WordCase originalCase, WordContext context) {
-    @SuppressWarnings({"rawtypes", "unchecked"})
-    Root<CharsRef>[] result = new Root[1];
+    CharsRef[] result = {null};
     stemmer.doStem(
         wordChars,
         offset,
         length,
         originalCase,
         context,
-        (stem, formID, stemException) -> {
-          if (acceptsStem(formID)) {
-            result[0] = new Root<>(stem, formID);
-          }
+        (stem, forms, formID) -> {
+          result[0] = stem;
           return false;
         });
     return result[0];
   }
 
-  boolean acceptsStem(int formID) {
-    return true;
-  }
-
   private boolean checkCompounds(CharsRef word, WordCase originalCase, CompoundPart prev) {
     if (prev != null && prev.index > dictionary.compoundMax - 2) return false;
 
@@ -177,15 +188,13 @@ public class Hunspell {
       WordContext context = prev == null ? COMPOUND_BEGIN : COMPOUND_MIDDLE;
       int breakOffset = word.offset + breakPos;
       if (mayBreakIntoCompounds(word.chars, word.offset, word.length, breakOffset)) {
-        Root<CharsRef> stem = findStem(word.chars, word.offset, breakPos, originalCase, context);
+        CharsRef stem = findStem(word.chars, word.offset, breakPos, originalCase, context);
         if (stem == null
             && dictionary.simplifiedTriple
             && word.chars[breakOffset - 1] == word.chars[breakOffset]) {
           stem = findStem(word.chars, word.offset, breakPos + 1, originalCase, context);
         }
-        if (stem != null
-            && !dictionary.hasFlag(stem.entryId, dictionary.forbiddenword)
-            && (prev == null || prev.mayCompound(stem, breakPos, originalCase))) {
+        if (stem != null && (prev == null || prev.mayCompound(stem, breakPos, originalCase))) {
           CompoundPart part = new CompoundPart(prev, word, breakPos, stem, null);
           if (checkCompoundsAfter(originalCase, part)) {
             return true;
@@ -208,8 +217,7 @@ public class Hunspell {
       if (expanded != null) {
         WordContext context = prev == null ? COMPOUND_BEGIN : COMPOUND_MIDDLE;
         int breakPos = pos + pattern.endLength();
-        Root<CharsRef> stem =
-            findStem(expanded.chars, expanded.offset, breakPos, originalCase, context);
+        CharsRef stem = findStem(expanded.chars, expanded.offset, breakPos, originalCase, context);
         if (stem != null) {
           CompoundPart part = new CompoundPart(prev, expanded, breakPos, stem, pattern);
           if (checkCompoundsAfter(originalCase, part)) {
@@ -226,11 +234,10 @@ public class Hunspell {
     int breakPos = prev.length;
     int remainingLength = word.length - breakPos;
     int breakOffset = word.offset + breakPos;
-    Root<CharsRef> tailStem =
+    CharsRef tailStem =
         findStem(word.chars, breakOffset, remainingLength, originalCase, COMPOUND_END);
     if (tailStem != null
-        && !dictionary.hasFlag(tailStem.entryId, dictionary.forbiddenword)
-        && !(dictionary.checkCompoundDup && equalsIgnoreCase(prev.stem, tailStem.word))
+        && !(dictionary.checkCompoundDup && equalsIgnoreCase(prev.stem, tailStem))
         && !hasForceUCaseProblem(word.chars, breakOffset, remainingLength, originalCase)
         && prev.mayCompound(tailStem, remainingLength, originalCase)) {
       return true;
@@ -246,10 +253,10 @@ public class Hunspell {
     if (originalCase == WordCase.TITLE || originalCase == WordCase.UPPER) return false;
 
     IntsRef forms = dictionary.lookupWord(chars, offset, length);
-    return forms != null && dictionary.hasFlag(forms, dictionary.forceUCase);
+    return forms != null && dictionary.hasFlag(forms, dictionary.forceUCase, scratch);
   }
 
-  private boolean equalsIgnoreCase(CharSequence cr1, CharSequence cr2) {
+  private boolean equalsIgnoreCase(CharsRef cr1, CharsRef cr2) {
     return cr1.toString().equalsIgnoreCase(cr2.toString());
   }
 
@@ -260,15 +267,11 @@ public class Hunspell {
     final CheckCompoundPattern enablingPattern;
 
     CompoundPart(
-        CompoundPart prev,
-        CharsRef tail,
-        int length,
-        Root<CharsRef> stem,
-        CheckCompoundPattern enabler) {
+        CompoundPart prev, CharsRef tail, int length, CharsRef stem, CheckCompoundPattern enabler) {
       this.prev = prev;
       this.tail = tail;
       this.length = length;
-      this.stem = stem.word;
+      this.stem = stem;
       index = prev == null ? 1 : prev.index + 1;
       enablingPattern = enabler;
     }
@@ -278,26 +281,22 @@ public class Hunspell {
       return (prev == null ? "" : prev + "+") + tail.subSequence(0, length);
     }
 
-    boolean mayCompound(Root<CharsRef> nextStem, int nextPartLength, WordCase originalCase) {
+    boolean mayCompound(CharsRef nextStem, int nextPartLength, WordCase originalCase) {
       boolean patternsOk =
           enablingPattern != null
-              ? enablingPattern.prohibitsCompounding(tail, length, stem, nextStem.word)
+              ? enablingPattern.prohibitsCompounding(tail, length, stem, nextStem)
               : dictionary.checkCompoundPatterns.stream()
-                  .noneMatch(p -> p.prohibitsCompounding(tail, length, stem, nextStem.word));
+                  .noneMatch(p -> p.prohibitsCompounding(tail, length, stem, nextStem));
       if (!patternsOk) {
         return false;
       }
 
+      //noinspection RedundantIfStatement
       if (dictionary.checkCompoundRep
           && isMisspelledSimpleWord(length + nextPartLength, originalCase)) {
         return false;
       }
-
-      String spaceSeparated =
-          new String(tail.chars, tail.offset, length)
-              + " "
-              + new String(tail.chars, tail.offset + length, nextPartLength);
-      return !checkWord(spaceSeparated);
+      return true;
     }
 
     private boolean isMisspelledSimpleWord(int length, WordCase originalCase) {
@@ -342,7 +341,7 @@ public class Hunspell {
         words.add(forms);
 
         if (dictionary.compoundRules != null
-            && dictionary.compoundRules.stream().anyMatch(r -> r.mayMatch(words))) {
+            && dictionary.compoundRules.stream().anyMatch(r -> r.mayMatch(words, scratch))) {
           if (checkLastCompoundPart(wordChars, offset + breakPos, length - breakPos, words)) {
             return true;
           }
@@ -365,7 +364,8 @@ public class Hunspell {
     if (forms == null) return false;
 
     words.add(forms);
-    boolean result = dictionary.compoundRules.stream().anyMatch(r -> r.fullyMatches(words));
+    boolean result =
+        dictionary.compoundRules.stream().anyMatch(r -> r.fullyMatches(words, scratch));
     words.remove(words.size() - 1);
     return result;
   }
@@ -450,59 +450,14 @@ public class Hunspell {
       word = dictionary.cleanInput(word, new StringBuilder()).toString();
     }
 
-    WordCase wordCase = WordCase.caseOf(word);
-    if (dictionary.forceUCase != FLAG_UNSET && wordCase == WordCase.LOWER) {
-      String title = dictionary.toTitleCase(word);
-      if (spell(title)) {
-        return Collections.singletonList(title);
-      }
-    }
-
-    Hunspell suggestionSpeller =
-        new Hunspell(dictionary) {
-          @Override
-          boolean acceptsStem(int formID) {
-            return !dictionary.hasFlag(formID, dictionary.noSuggest)
-                && !dictionary.hasFlag(formID, dictionary.subStandard);
-          }
-        };
-    ModifyingSuggester modifier = new ModifyingSuggester(suggestionSpeller);
-    Set<String> suggestions = modifier.suggest(word, wordCase);
-
-    if (!modifier.hasGoodSuggestions && dictionary.maxNGramSuggestions > 0) {
-      suggestions.addAll(
-          new GeneratingSuggester(suggestionSpeller)
-              .suggest(dictionary.toLowerCase(word), wordCase, suggestions));
-    }
+    ModifyingSuggester modifier = new ModifyingSuggester(this);
+    Set<String> result = modifier.suggest(word);
 
-    if (word.contains("-") && suggestions.stream().noneMatch(s -> s.contains("-"))) {
-      suggestions.addAll(modifyChunksBetweenDashes(word));
+    if (word.contains("-") && result.stream().noneMatch(s -> s.contains("-"))) {
+      result.addAll(modifyChunksBetweenDashes(word));
     }
 
-    Set<String> result = new LinkedHashSet<>();
-    for (String candidate : suggestions) {
-      result.add(adjustSuggestionCase(candidate, wordCase, word));
-      if (wordCase == WordCase.UPPER && dictionary.checkSharpS && candidate.contains("ß")) {
-        result.add(candidate);
-      }
-    }
-    return result.stream().map(this::cleanOutput).collect(Collectors.toList());
-  }
-
-  private String adjustSuggestionCase(String candidate, WordCase originalCase, String original) {
-    if (originalCase == WordCase.UPPER) {
-      String upper = candidate.toUpperCase(Locale.ROOT);
-      if (upper.contains(" ") || spell(upper)) {
-        return upper;
-      }
-    }
-    if (Character.isUpperCase(original.charAt(0))) {
-      String title = Character.toUpperCase(candidate.charAt(0)) + candidate.substring(1);
-      if (title.contains(" ") || spell(title)) {
-        return title;
-      }
-    }
-    return candidate;
+    return new ArrayList<>(result);
   }
 
   private List<String> modifyChunksBetweenDashes(String word) {
@@ -519,7 +474,7 @@ public class Hunspell {
         if (!spell(chunk)) {
           for (String chunkSug : suggest(chunk)) {
             String replaced = word.substring(0, chunkStart) + chunkSug + word.substring(chunkEnd);
-            if (spell(replaced)) {
+            if (!dictionary.isForbiddenWord(replaced.toCharArray(), replaced.length(), scratch)) {
               result.add(replaced);
             }
           }
@@ -530,16 +485,4 @@ public class Hunspell {
     }
     return result;
   }
-
-  private String cleanOutput(String s) {
-    if (!dictionary.needsOutputCleaning) return s;
-
-    try {
-      StringBuilder sb = new StringBuilder(s);
-      Dictionary.applyMappings(dictionary.oconv, sb);
-      return sb.toString();
-    } catch (IOException bogus) {
-      throw new RuntimeException(bogus);
-    }
-  }
 }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
index 8afd9fc..44e2675 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
@@ -18,11 +18,13 @@ package org.apache.lucene.analysis.hunspell;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
@@ -35,6 +37,7 @@ import org.apache.lucene.util.fst.FST;
  */
 final class Stemmer {
   private final Dictionary dictionary;
+  private final BytesRef scratch = new BytesRef();
   private final StringBuilder segment = new StringBuilder();
 
   // used for normalization
@@ -93,10 +96,14 @@ final class Stemmer {
       word = scratchBuffer;
     }
 
+    if (dictionary.isForbiddenWord(word, length, scratch)) {
+      return Collections.emptyList();
+    }
+
     List<CharsRef> list = new ArrayList<>();
     RootProcessor processor =
-        (stem, formID, stemException) -> {
-          list.add(newStem(stem, stemException));
+        (stem, forms, formID) -> {
+          list.add(newStem(stem, forms, formID));
           return true;
         };
 
@@ -106,47 +113,46 @@ final class Stemmer {
 
     WordCase wordCase = caseOf(word, length);
     if (wordCase == WordCase.UPPER || wordCase == WordCase.TITLE) {
-      CaseVariationProcessor variationProcessor =
-          (variant, varLength, originalCase) ->
-              doStem(variant, 0, varLength, originalCase, WordContext.SIMPLE_WORD, processor);
-      varyCase(word, length, wordCase, variationProcessor);
+      addCaseVariations(word, length, wordCase, processor);
     }
     return list;
   }
 
-  interface CaseVariationProcessor {
-    boolean process(char[] word, int length, WordCase originalCase);
-  }
-
-  boolean varyCase(char[] word, int length, WordCase wordCase, CaseVariationProcessor processor) {
+  private void addCaseVariations(
+      char[] word, int length, WordCase wordCase, RootProcessor processor) {
     if (wordCase == WordCase.UPPER) {
       caseFoldTitle(word, length);
       char[] aposCase = capitalizeAfterApostrophe(titleBuffer, length);
-      if (aposCase != null && !processor.process(aposCase, length, wordCase)) {
-        return false;
+      if (aposCase != null) {
+        if (!doStem(aposCase, 0, length, wordCase, WordContext.SIMPLE_WORD, processor)) {
+          return;
+        }
       }
-      if (!processor.process(titleBuffer, length, wordCase)) {
-        return false;
+      if (!doStem(titleBuffer, 0, length, wordCase, WordContext.SIMPLE_WORD, processor)) {
+        return;
       }
-      if (dictionary.checkSharpS && !varySharpS(titleBuffer, length, processor)) {
-        return false;
+      for (char[] variation : sharpSVariations(titleBuffer, length)) {
+        if (!doStem(variation, 0, variation.length, null, WordContext.SIMPLE_WORD, processor)) {
+          return;
+        }
       }
     }
 
     if (dictionary.isDotICaseChangeDisallowed(word)) {
-      return true;
+      return;
     }
 
     caseFoldLower(wordCase == WordCase.UPPER ? titleBuffer : word, length);
-    if (!processor.process(lowerBuffer, length, wordCase)) {
-      return false;
+    if (!doStem(lowerBuffer, 0, length, wordCase, WordContext.SIMPLE_WORD, processor)) {
+      return;
     }
-    if (wordCase == WordCase.UPPER
-        && dictionary.checkSharpS
-        && !varySharpS(lowerBuffer, length, processor)) {
-      return false;
+    if (wordCase == WordCase.UPPER) {
+      for (char[] variation : sharpSVariations(lowerBuffer, length)) {
+        if (!doStem(variation, 0, variation.length, null, WordContext.SIMPLE_WORD, processor)) {
+          return;
+        }
+      }
     }
-    return true;
   }
 
   // temporary buffers for case variants
@@ -163,24 +169,26 @@ final class Stemmer {
   }
 
   /** folds titlecase variant of word to titleBuffer */
-  private void caseFoldTitle(char[] word, int length) {
+  char[] caseFoldTitle(char[] word, int length) {
     titleBuffer = ArrayUtil.grow(titleBuffer, length);
     System.arraycopy(word, 0, titleBuffer, 0, length);
     for (int i = 1; i < length; i++) {
       titleBuffer[i] = dictionary.caseFold(titleBuffer[i]);
     }
+    return titleBuffer;
   }
 
   /** folds lowercase variant of word (title cased) to lowerBuffer */
-  private void caseFoldLower(char[] word, int length) {
+  char[] caseFoldLower(char[] word, int length) {
     lowerBuffer = ArrayUtil.grow(lowerBuffer, length);
     System.arraycopy(word, 0, lowerBuffer, 0, length);
     lowerBuffer[0] = dictionary.caseFold(lowerBuffer[0]);
+    return lowerBuffer;
   }
 
   // Special prefix handling for Catalan, French, Italian:
   // prefixes separated by apostrophe (SANT'ELIA -> Sant'+Elia).
-  private static char[] capitalizeAfterApostrophe(char[] word, int length) {
+  static char[] capitalizeAfterApostrophe(char[] word, int length) {
     for (int i = 1; i < length - 1; i++) {
       if (word[i] == '\'') {
         char next = word[i + 1];
@@ -195,7 +203,9 @@ final class Stemmer {
     return null;
   }
 
-  private boolean varySharpS(char[] word, int length, CaseVariationProcessor processor) {
+  List<char[]> sharpSVariations(char[] word, int length) {
+    if (!dictionary.checkSharpS) return Collections.emptyList();
+
     Stream<String> result =
         new Object() {
           int findSS(int start) {
@@ -225,15 +235,10 @@ final class Stemmer {
             }
           }
         }.replaceSS(0, 0);
-    if (result == null) return true;
+    if (result == null) return Collections.emptyList();
 
     String src = new String(word, 0, length);
-    for (String s : result.collect(Collectors.toList())) {
-      if (!s.equals(src) && !processor.process(s.toCharArray(), s.length(), null)) {
-        return false;
-      }
-    }
-    return true;
+    return result.filter(s -> !s.equals(src)).map(String::toCharArray).collect(Collectors.toList());
   }
 
   boolean doStem(
@@ -246,29 +251,29 @@ final class Stemmer {
     IntsRef forms = dictionary.lookupWord(word, offset, length);
     if (forms != null) {
       for (int i = 0; i < forms.length; i += formStep) {
-        int entryId = forms.ints[forms.offset + i];
-        if (!acceptCase(originalCase, entryId, word, offset, length)) {
+        char[] wordFlags = dictionary.decodeFlags(forms.ints[forms.offset + i], scratch);
+        if (!acceptCase(originalCase, wordFlags, word, offset, length)) {
           continue;
         }
         // we can't add this form, it's a pseudostem requiring an affix
-        if (dictionary.hasFlag(entryId, dictionary.needaffix)) {
+        if (Dictionary.hasFlag(wordFlags, dictionary.needaffix)) {
           continue;
         }
         // we can't add this form, it only belongs inside a compound word
-        if (!context.isCompound() && dictionary.hasFlag(entryId, dictionary.onlyincompound)) {
+        if (!context.isCompound() && Dictionary.hasFlag(wordFlags, dictionary.onlyincompound)) {
           continue;
         }
         if (context.isCompound()) {
           if (context != WordContext.COMPOUND_END
-              && dictionary.hasFlag(entryId, dictionary.compoundForbid)) {
+              && Dictionary.hasFlag(wordFlags, dictionary.compoundForbid)) {
             return false;
           }
-          if (!dictionary.hasFlag(entryId, dictionary.compoundFlag)
-              && !dictionary.hasFlag(entryId, context.requiredFlag(dictionary))) {
+          if (!Dictionary.hasFlag(wordFlags, dictionary.compoundFlag)
+              && !Dictionary.hasFlag(wordFlags, context.requiredFlag(dictionary))) {
             continue;
           }
         }
-        if (!callProcessor(word, offset, length, processor, forms, i)) {
+        if (!processor.processRoot(new CharsRef(word, offset, length), forms, i)) {
           return false;
         }
       }
@@ -284,6 +289,8 @@ final class Stemmer {
           -1,
           0,
           true,
+          true,
+          false,
           false,
           originalCase,
           processor);
@@ -293,8 +300,8 @@ final class Stemmer {
   }
 
   private boolean acceptCase(
-      WordCase originalCase, int entryId, char[] word, int offset, int length) {
-    boolean keepCase = dictionary.hasFlag(entryId, dictionary.keepcase);
+      WordCase originalCase, char[] wordFlags, char[] word, int offset, int length) {
+    boolean keepCase = Dictionary.hasFlag(wordFlags, dictionary.keepcase);
     if (originalCase != null) {
       if (keepCase
           && dictionary.checkSharpS
@@ -304,7 +311,7 @@ final class Stemmer {
       }
       return !keepCase;
     }
-    return !dictionary.hasFlag(entryId, Dictionary.HIDDEN_FLAG);
+    return !Dictionary.hasHiddenFlag(wordFlags);
   }
 
   private boolean containsSharpS(char[] word, int offset, int length) {
@@ -339,27 +346,23 @@ final class Stemmer {
   }
 
   interface RootProcessor {
-    /**
-     * @param stem the text of the found dictionary entry
-     * @param formID internal id of the dictionary entry, e.g. to be used in {@link
-     *     Dictionary#hasFlag(int, char)}
-     * @param stemException "st:" morphological data if present, {@code null} otherwise
-     * @return whether the processing should be continued
-     */
-    boolean processRoot(CharsRef stem, int formID, String stemException);
+    /** @return whether the processing should be continued */
+    boolean processRoot(CharsRef stem, IntsRef forms, int formID);
   }
 
-  private String stemException(IntsRef forms, int formIndex) {
+  private CharsRef newStem(CharsRef stem, IntsRef forms, int formID) {
+    final String exception;
     if (dictionary.hasStemExceptions) {
-      int exceptionID = forms.ints[forms.offset + formIndex + 1];
+      int exceptionID = forms.ints[forms.offset + formID + 1];
       if (exceptionID > 0) {
-        return dictionary.getStemException(exceptionID);
+        exception = dictionary.getStemException(exceptionID);
+      } else {
+        exception = null;
       }
+    } else {
+      exception = null;
     }
-    return null;
-  }
 
-  private CharsRef newStem(CharsRef stem, String exception) {
     if (dictionary.needsOutputCleaning) {
       scratchSegment.setLength(0);
       if (exception != null) {
@@ -405,9 +408,12 @@ final class Stemmer {
    *     checked against the word
    * @param recursionDepth current recursiondepth
    * @param doPrefix true if we should remove prefixes
+   * @param doSuffix true if we should remove suffixes
    * @param previousWasPrefix true if the previous removal was a prefix: if we are removing a
    *     suffix, and it has no continuation requirements, it's ok. but two prefixes
    *     (COMPLEXPREFIXES) or two suffixes must have continuation requirements to recurse.
+   * @param circumfix true if the previous prefix removal was flagged as a circumfix; this means
+   *     the innermost suffix must also contain the circumfix flag.
    * @param originalCase if non-null, represents original word case to disallow case variations of
    *     word with KEEPCASE flags
    * @return whether the processing should be continued
@@ -422,7 +428,9 @@ final class Stemmer {
       int prefixId,
       int recursionDepth,
       boolean doPrefix,
+      boolean doSuffix,
       boolean previousWasPrefix,
+      boolean circumfix,
       WordCase originalCase,
       RootProcessor processor)
       throws IOException {
@@ -470,6 +478,7 @@ final class Stemmer {
                 -1,
                 recursionDepth,
                 true,
+                circumfix,
                 originalCase,
                 processor)) {
               return false;
@@ -479,7 +488,7 @@ final class Stemmer {
       }
     }
 
-    if (dictionary.suffixes != null) {
+    if (doSuffix && dictionary.suffixes != null) {
       FST<IntsRef> fst = dictionary.suffixes;
       FST.Arc<IntsRef> arc = suffixArcs[recursionDepth];
       fst.getFirstArc(arc);
@@ -524,6 +533,7 @@ final class Stemmer {
                 prefixId,
                 recursionDepth,
                 false,
+                circumfix,
                 originalCase,
                 processor)) {
               return false;
@@ -583,30 +593,32 @@ final class Stemmer {
     int append = dictionary.affixData(affix, Dictionary.AFFIX_APPEND);
 
     if (context.isCompound()) {
-      if (!isPrefix && dictionary.hasFlag(append, dictionary.compoundForbid)) {
+      if (!isPrefix && dictionary.hasFlag(append, dictionary.compoundForbid, scratch)) {
         return false;
       }
       WordContext allowed = isPrefix ? WordContext.COMPOUND_BEGIN : WordContext.COMPOUND_END;
-      if (context != allowed && !dictionary.hasFlag(append, dictionary.compoundPermit)) {
+      if (context != allowed && !dictionary.hasFlag(append, dictionary.compoundPermit, scratch)) {
         return false;
       }
       if (context == WordContext.COMPOUND_END
           && !isPrefix
           && !previousWasPrefix
-          && dictionary.hasFlag(append, dictionary.onlyincompound)) {
+          && dictionary.hasFlag(append, dictionary.onlyincompound, scratch)) {
         return false;
       }
     }
 
     if (recursionDepth == 0) {
       // check if affix is allowed in a non-compound word
-      return context.isCompound() || !dictionary.hasFlag(append, dictionary.onlyincompound);
+      return context.isCompound()
+          || !dictionary.hasFlag(append, dictionary.onlyincompound, scratch);
     }
 
-    if (dictionary.isCrossProduct(affix)) {
+    if (isCrossProduct(affix)) {
       // cross check incoming continuation class (flag of previous affix) against list.
-      if (context.isCompound() || !dictionary.hasFlag(append, dictionary.onlyincompound)) {
-        return previousWasPrefix || dictionary.hasFlag(append, prevFlag);
+      char[] appendFlags = dictionary.decodeFlags(append, scratch);
+      if (context.isCompound() || !Dictionary.hasFlag(appendFlags, dictionary.onlyincompound)) {
+        return previousWasPrefix || Dictionary.hasFlag(appendFlags, prevFlag);
       }
     }
 
@@ -619,7 +631,7 @@ final class Stemmer {
   // but this is a little bit more complicated.
   private boolean checkCondition(
       int affix, char[] c1, int c1off, int c1len, char[] c2, int c2off, int c2len) {
-    int condition = dictionary.getAffixCondition(affix);
+    int condition = dictionary.affixData(affix, Dictionary.AFFIX_CONDITION) >>> 1;
     if (condition != 0) {
       CharacterRunAutomaton pattern = dictionary.patterns.get(condition);
       int state = 0;
@@ -664,53 +676,69 @@ final class Stemmer {
       int prefixId,
       int recursionDepth,
       boolean prefix,
+      boolean circumfix,
       WordCase originalCase,
       RootProcessor processor)
       throws IOException {
     char flag = dictionary.affixData(affix, Dictionary.AFFIX_FLAG);
 
-    boolean skipLookup = needsAnotherAffix(affix, previousAffix, !prefix, prefixId);
+    boolean skipLookup = needsAnotherAffix(affix, previousAffix, !prefix);
     IntsRef forms = skipLookup ? null : dictionary.lookupWord(strippedWord, offset, length);
     if (forms != null) {
       for (int i = 0; i < forms.length; i += formStep) {
-        int entryId = forms.ints[forms.offset + i];
-        if (dictionary.hasFlag(entryId, flag) || isFlagAppendedByAffix(prefixId, flag)) {
+        char[] wordFlags = dictionary.decodeFlags(forms.ints[forms.offset + i], scratch);
+        if (Dictionary.hasFlag(wordFlags, flag) || isFlagAppendedByAffix(prefixId, flag)) {
           // confusing: in this one exception, we already chained the first prefix against the
           // second,
           // so it doesn't need to be checked against the word
           boolean chainedPrefix = dictionary.complexPrefixes && recursionDepth == 1 && prefix;
           if (!chainedPrefix && prefixId >= 0) {
             char prefixFlag = dictionary.affixData(prefixId, Dictionary.AFFIX_FLAG);
-            if (!dictionary.hasFlag(entryId, prefixFlag)
+            if (!Dictionary.hasFlag(wordFlags, prefixFlag)
                 && !isFlagAppendedByAffix(affix, prefixFlag)) {
               continue;
             }
           }
 
+          // if circumfix was previously set by a prefix, we must check this suffix,
+          // to ensure it has it, and vice versa
+          if (dictionary.circumfix != Dictionary.FLAG_UNSET) {
+            boolean suffixCircumfix = isFlagAppendedByAffix(affix, dictionary.circumfix);
+            if (circumfix != suffixCircumfix) {
+              continue;
+            }
+          }
+
           // we are looking for a case variant, but this word does not allow it
-          if (!acceptCase(originalCase, entryId, strippedWord, offset, length)) {
+          if (!acceptCase(originalCase, wordFlags, strippedWord, offset, length)) {
             continue;
           }
-          if (!context.isCompound() && dictionary.hasFlag(entryId, dictionary.onlyincompound)) {
+          if (!context.isCompound() && Dictionary.hasFlag(wordFlags, dictionary.onlyincompound)) {
             continue;
           }
           if (context.isCompound()) {
             char cFlag = context.requiredFlag(dictionary);
-            if (!dictionary.hasFlag(entryId, cFlag)
+            if (!Dictionary.hasFlag(wordFlags, cFlag)
                 && !isFlagAppendedByAffix(affix, cFlag)
-                && !dictionary.hasFlag(entryId, dictionary.compoundFlag)
+                && !Dictionary.hasFlag(wordFlags, dictionary.compoundFlag)
                 && !isFlagAppendedByAffix(affix, dictionary.compoundFlag)) {
               continue;
             }
           }
-          if (!callProcessor(strippedWord, offset, length, processor, forms, i)) {
+          if (!processor.processRoot(new CharsRef(strippedWord, offset, length), forms, i)) {
             return false;
           }
         }
       }
     }
 
-    if (dictionary.isCrossProduct(affix) && recursionDepth <= 1) {
+    // if a circumfix flag is defined in the dictionary, and we are a prefix, we need to check if we
+    // have that flag
+    if (dictionary.circumfix != Dictionary.FLAG_UNSET && !circumfix && prefix) {
+      circumfix = isFlagAppendedByAffix(affix, dictionary.circumfix);
+    }
+
+    if (isCrossProduct(affix) && recursionDepth <= 1) {
       boolean doPrefix;
       if (recursionDepth == 0) {
         if (prefix) {
@@ -748,7 +776,9 @@ final class Stemmer {
           prefixId,
           recursionDepth + 1,
           doPrefix,
+          true,
           prefix,
+          circumfix,
           originalCase,
           processor);
     }
@@ -756,20 +786,7 @@ final class Stemmer {
     return true;
   }
 
-  private boolean callProcessor(
-      char[] word, int offset, int length, RootProcessor processor, IntsRef forms, int i) {
-    CharsRef stem = new CharsRef(word, offset, length);
-    return processor.processRoot(stem, forms.ints[forms.offset + i], stemException(forms, i));
-  }
-
-  private boolean needsAnotherAffix(int affix, int previousAffix, boolean isSuffix, int prefixId) {
-    char circumfix = dictionary.circumfix;
-    // if circumfix was previously set by a prefix, we must check this suffix,
-    // to ensure it has it, and vice versa
-    if (isSuffix
-        && isFlagAppendedByAffix(prefixId, circumfix) != isFlagAppendedByAffix(affix, circumfix)) {
-      return true;
-    }
+  private boolean needsAnotherAffix(int affix, int previousAffix, boolean isSuffix) {
     if (isFlagAppendedByAffix(affix, dictionary.needaffix)) {
       return !isSuffix
           || previousAffix < 0
@@ -781,6 +798,10 @@ final class Stemmer {
   private boolean isFlagAppendedByAffix(int affixId, char flag) {
     if (affixId < 0 || flag == Dictionary.FLAG_UNSET) return false;
     int appendId = dictionary.affixData(affixId, Dictionary.AFFIX_APPEND);
-    return dictionary.hasFlag(appendId, flag);
+    return dictionary.hasFlag(appendId, flag, scratch);
+  }
+
+  private boolean isCrossProduct(int affix) {
+    return (dictionary.affixData(affix, Dictionary.AFFIX_CONDITION) & 1) == 1;
   }
 }
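
The sharpSVariations method above produces the ß/ss respellings that are stemmed as additional
candidates for CHECKSHARPS dictionaries (it returns nothing when checkSharpS is off, and the
unchanged word is filtered out). The following is a standalone sketch of the same idea in plain
Java, not the Lucene-internal code: each "ss" in the case-folded word may alternatively be written
"ß", and every resulting combination except the original spelling is a candidate.

    import java.util.ArrayList;
    import java.util.List;

    public class SharpSSketch {
      static List<String> sharpSVariations(String word) {
        List<String> all = new ArrayList<>();
        collect(word, 0, all);
        all.remove(word); // the unchanged spelling is not a variation
        return all;
      }

      // enumerate all ways to keep or replace each "ss" from position 'from' onwards
      private static void collect(String word, int from, List<String> out) {
        int ss = word.indexOf("ss", from);
        if (ss < 0) {
          out.add(word);
        } else {
          collect(word, ss + 2, out); // keep this "ss"
          collect(word.substring(0, ss) + "ß" + word.substring(ss + 2), ss + 1, out); // use "ß"
        }
      }

      public static void main(String[] args) {
        System.out.println(sharpSVariations("müssig")); // [müßig], the valid German spelling
      }
    }
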
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/package-info.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/package-info.java
index 4d6cd04..94870a3 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/package-info.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/package-info.java
@@ -17,11 +17,13 @@
 
 /**
  * A Java implementation of <a href="http://hunspell.github.io/">Hunspell</a> stemming and
- * spell-checking algorithms ({@link org.apache.lucene.analysis.hunspell.Hunspell}), and a stemming
- * TokenFilter ({@link org.apache.lucene.analysis.hunspell.HunspellStemFilter}) based on it.
+ * spell-checking algorithms, and a stemming TokenFilter based on it.
  *
  * <p>For dictionaries, see e.g. <a href="https://github.com/LibreOffice/dictionaries">LibreOffice
  * repository</a> or <a href="https://github.com/wooorm/dictionaries">Titus Wormer's collection
  * (UTF)</a>
+ *
+ * @see org.apache.lucene.analysis.hunspell.HunspellStemFilter
+ * @see org.apache.lucene.analysis.hunspell.SpellChecker
  */
 package org.apache.lucene.analysis.hunspell;
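
The package Javadoc above points to SpellChecker as the spell-checking entry point. A minimal
usage sketch follows, assuming the Dictionary constructor and the spell/suggest methods exercised
in SpellCheckerTest below are accessible from user code; the en_US.aff/en_US.dic paths are
illustrative placeholders for any Hunspell dictionary pair.

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.lucene.analysis.hunspell.Dictionary;
    import org.apache.lucene.analysis.hunspell.SpellChecker;
    import org.apache.lucene.store.ByteBuffersDirectory;

    public class SpellCheckSketch {
      public static void main(String[] args) throws Exception {
        try (InputStream aff = Files.newInputStream(Path.of("en_US.aff"));
            InputStream dic = Files.newInputStream(Path.of("en_US.dic"))) {
          // same constructor shape as in SpellCheckerTest.checkSpellCheckerExpectations below
          Dictionary dictionary = new Dictionary(new ByteBuffersDirectory(), "dictionary", aff, dic);
          SpellChecker speller = new SpellChecker(dictionary);
          System.out.println(speller.spell("word"));   // true if the dictionary accepts it
          System.out.println(speller.suggest("wrod")); // correction candidates
        }
      }
    }
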
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestSpellChecking.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java
similarity index 85%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestSpellChecking.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java
index 57adce6..f4ca6b5 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestSpellChecking.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java
@@ -26,7 +26,7 @@ import java.util.stream.Collectors;
 import org.apache.lucene.store.ByteBuffersDirectory;
 import org.apache.lucene.util.IOUtils;
 
-public class TestSpellChecking extends StemmerTestBase {
+public class SpellCheckerTest extends StemmerTestBase {
 
   public void testBase() throws Exception {
     doTest("base");
@@ -132,10 +132,6 @@ public class TestSpellChecking extends StemmerTestBase {
     doTest("checkcompoundrep");
   }
 
-  public void testDisallowCompoundsWhenDictionaryContainsSeparatedWordPair() throws Exception {
-    doTest("wordpair");
-  }
-
   public void testCompoundrule() throws Exception {
     doTest("compoundrule");
   }
@@ -172,26 +168,10 @@ public class TestSpellChecking extends StemmerTestBase {
     doTest("onlyincompound2");
   }
 
-  public void testForbiddenWord() throws Exception {
-    doTest("forbiddenword");
-  }
-
-  public void testForbiddenWord1() throws Exception {
-    doTest("opentaal_forbiddenword1");
-  }
-
-  public void testForbiddenWord2() throws Exception {
-    doTest("opentaal_forbiddenword2");
-  }
-
   public void testGermanCompounding() throws Exception {
     doTest("germancompounding");
   }
 
-  public void testApplyOconvToSuggestions() throws Exception {
-    doTest("oconv");
-  }
-
   public void testModifyingSuggestions() throws Exception {
     doTest("sug");
   }
@@ -200,32 +180,21 @@ public class TestSpellChecking extends StemmerTestBase {
     doTest("sug2");
   }
 
-  public void testMixedCaseSuggestionHeuristics() throws Exception {
-    doTest("i58202");
-  }
-
-  public void testMapSuggestions() throws Exception {
-    doTest("map");
-  }
-
-  public void testNoSuggest() throws Exception {
-    doTest("nosuggest");
-  }
-
   protected void doTest(String name) throws Exception {
     checkSpellCheckerExpectations(
-        Path.of(getClass().getResource(name + ".aff").toURI()).getParent().resolve(name));
+        Path.of(getClass().getResource(name + ".aff").toURI()).getParent().resolve(name), true);
   }
 
-  static void checkSpellCheckerExpectations(Path basePath) throws IOException, ParseException {
+  static void checkSpellCheckerExpectations(Path basePath, boolean checkSuggestions)
+      throws IOException, ParseException {
     InputStream affixStream = Files.newInputStream(Path.of(basePath.toString() + ".aff"));
     InputStream dictStream = Files.newInputStream(Path.of(basePath.toString() + ".dic"));
 
-    Hunspell speller;
+    SpellChecker speller;
     try {
       Dictionary dictionary =
           new Dictionary(new ByteBuffersDirectory(), "dictionary", affixStream, dictStream);
-      speller = new Hunspell(dictionary);
+      speller = new SpellChecker(dictionary);
     } finally {
       IOUtils.closeWhileHandlingException(affixStream);
       IOUtils.closeWhileHandlingException(dictStream);
@@ -245,7 +214,7 @@ public class TestSpellChecking extends StemmerTestBase {
       for (String word : wrongWords) {
         assertFalse("Unexpectedly considered correct: " + word, speller.spell(word.trim()));
       }
-      if (Files.exists(sug)) {
+      if (Files.exists(sug) && checkSuggestions) {
         String suggestions =
             wrongWords.stream()
                 .map(s -> String.join(", ", speller.suggest(s)))
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
index f64c6d8..886272c 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
@@ -16,55 +16,35 @@
  */
 package org.apache.lucene.analysis.hunspell;
 
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.text.ParseException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
-import org.apache.lucene.util.NamedThreadFactory;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.RamUsageTester;
-import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Ignore;
 
 /**
- * Loads all dictionaries from the directory specified in {@code hunspell.dictionaries} system
- * property and prints their memory usage. All *.aff files are traversed recursively inside the
- * given directory. Each *.aff file must have a same-named sibling *.dic file. For examples of such
- * directories, refer to the {@link org.apache.lucene.analysis.hunspell package documentation}.
+ * Loads all dictionaries from the directory specified in {@code -Dhunspell.dictionaries=...} and
+ * prints their memory usage. All *.aff files are traversed directly inside the given directory or
+ * in its immediate subdirectories. Each *.aff file must have a same-named sibling *.dic file. For
+ * examples of such directories, refer to the {@link org.apache.lucene.analysis.hunspell package
+ * documentation}.
  */
+@Ignore("enable manually")
 @SuppressSysoutChecks(bugUrl = "prints important memory utilization stats per dictionary")
 public class TestAllDictionaries extends LuceneTestCase {
+
   static Stream<Path> findAllAffixFiles() throws IOException {
     String dicDir = System.getProperty("hunspell.dictionaries");
-    Assume.assumeFalse(
-        "Requires Hunspell dictionaries at -Dhunspell.dictionaries=...", dicDir == null);
-    Path dicPath = Paths.get(dicDir);
-    return Files.walk(dicPath).filter(f -> f.toString().endsWith(".aff")).sorted();
+    Assume.assumeFalse("Missing -Dhunspell.dictionaries=...", dicDir == null);
+    return Files.walk(Path.of(dicDir), 2).filter(f -> f.toString().endsWith(".aff"));
   }
 
   static Dictionary loadDictionary(Path aff) throws IOException, ParseException {
@@ -78,134 +58,43 @@ public class TestAllDictionaries extends LuceneTestCase {
     }
   }
 
-  /** Hack bais to expose current position. */
-  private static class ExposePosition extends ByteArrayInputStream {
-    public ExposePosition(byte[] buf) {
-      super(buf);
-    }
-
-    public long position() {
-      return super.pos;
-    }
-  }
-
-  @Ignore
-  public void testMaxPrologueNeeded() throws Exception {
-    AtomicBoolean failTest = new AtomicBoolean();
-
-    Map<String, List<Long>> global = new LinkedHashMap<>();
-    for (Path aff : findAllAffixFiles().collect(Collectors.toList())) {
-      Map<String, List<Long>> local = new LinkedHashMap<>();
-      ByteArrayOutputStream baos = new ByteArrayOutputStream();
-      try (ExposePosition is = new ExposePosition(Files.readAllBytes(aff))) {
-        int chr;
-        while ((chr = is.read()) >= 0) {
-          baos.write(chr);
-
-          if (chr == '\n') {
-            String line = baos.toString(StandardCharsets.ISO_8859_1);
-            if (!line.isBlank()) {
-              String firstWord = line.split("\\s")[0];
-              switch (firstWord) {
-                case "SET":
-                case "FLAG":
-                  local.computeIfAbsent(firstWord, (k) -> new ArrayList<>()).add(is.position());
-                  global.computeIfAbsent(firstWord, (k) -> new ArrayList<>()).add(is.position());
-                  break;
-              }
-            }
-
-            baos.reset();
-          }
-        }
-      }
-
-      local.forEach(
-          (flag, positions) -> {
-            if (positions.size() > 1) {
-              System.out.format(
-                  Locale.ROOT,
-                  "Flag %s at more than one position in %s: %s%n",
-                  flag,
-                  aff,
-                  positions);
-              failTest.set(true);
-            }
-          });
-    }
-
-    global.forEach(
-        (flag, positions) -> {
-          long max = positions.stream().mapToLong(v -> v).max().orElse(0);
-          System.out.printf(Locale.ROOT, "Flag %s at maximum offset %s%n", flag, max);
-          Assert.assertTrue(
-              "Flags beyond max prologue scan window: " + max,
-              max < Dictionary.MAX_PROLOGUE_SCAN_WINDOW);
-        });
-
-    if (failTest.get()) {
-      throw new AssertionError("Duplicate flags were present in at least one .aff file.");
-    }
-  }
-
   public void testDictionariesLoadSuccessfully() throws Exception {
-    AtomicLong totalMemory = new AtomicLong();
-    AtomicLong totalWords = new AtomicLong();
-    int threads = Runtime.getRuntime().availableProcessors();
-    ExecutorService executor =
-        Executors.newFixedThreadPool(threads, new NamedThreadFactory("dictCheck-"));
-    List<Path> failures = Collections.synchronizedList(new ArrayList<>());
-    Function<Path, Void> process =
-        (Path aff) -> {
-          try {
-            Dictionary dic = loadDictionary(aff);
-            totalMemory.addAndGet(RamUsageTester.sizeOf(dic));
-            totalWords.addAndGet(RamUsageTester.sizeOf(dic.words));
-            System.out.println(aff + "\t" + memoryUsageSummary(dic));
-          } catch (Throwable e) {
-            failures.add(aff);
-            System.err.println("While checking " + aff + ":");
-            e.printStackTrace();
-          }
-          return null;
-        };
-
-    List<Callable<Void>> tasks =
-        findAllAffixFiles()
-            .map(aff -> (Callable<Void>) () -> process.apply(aff))
-            .collect(Collectors.toList());
-    try {
-      for (Future<?> future : executor.invokeAll(tasks)) {
-        future.get();
-      }
-
-      if (!failures.isEmpty()) {
-        throw new AssertionError(
-            "Certain dictionaries failed to parse:\n  - "
-                + failures.stream()
-                    .map(path -> path.toAbsolutePath().toString())
-                    .collect(Collectors.joining("\n  - ")));
+    int failures = 0;
+    for (Path aff : findAllAffixFiles().collect(Collectors.toList())) {
+      try {
+        System.out.println(aff + "\t" + memoryUsage(loadDictionary(aff)));
+      } catch (Throwable e) {
+        failures++;
+        System.err.println("While checking " + aff + ":");
+        e.printStackTrace();
       }
-    } finally {
-      executor.shutdown();
-      assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
     }
-
-    System.out.println("Total dictionaries loaded: " + tasks.size());
-    System.out.println("Total memory: " + RamUsageEstimator.humanReadableUnits(totalMemory.get()));
-    System.out.println(
-        "Total memory for word storage: " + RamUsageEstimator.humanReadableUnits(totalWords.get()));
+    assertEquals(failures + " failures!", 0, failures);
   }
 
-  private static String memoryUsageSummary(Dictionary dic) {
+  private static String memoryUsage(Dictionary dic) {
     return RamUsageTester.humanSizeOf(dic)
         + "\t("
-        + ("words=" + RamUsageTester.humanSizeOf(dic.words) + ", ")
-        + ("flags=" + RamUsageTester.humanSizeOf(dic.flagLookup) + ", ")
-        + ("strips=" + RamUsageTester.humanSizeOf(dic.stripData) + ", ")
-        + ("conditions=" + RamUsageTester.humanSizeOf(dic.patterns) + ", ")
-        + ("affixData=" + RamUsageTester.humanSizeOf(dic.affixData) + ", ")
-        + ("prefixes=" + RamUsageTester.humanSizeOf(dic.prefixes) + ", ")
-        + ("suffixes=" + RamUsageTester.humanSizeOf(dic.suffixes) + ")");
+        + "words="
+        + RamUsageTester.humanSizeOf(dic.words)
+        + ", "
+        + "flags="
+        + RamUsageTester.humanSizeOf(dic.flagLookup)
+        + ", "
+        + "strips="
+        + RamUsageTester.humanSizeOf(dic.stripData)
+        + ", "
+        + "conditions="
+        + RamUsageTester.humanSizeOf(dic.patterns)
+        + ", "
+        + "affixData="
+        + RamUsageTester.humanSizeOf(dic.affixData)
+        + ", "
+        + "prefixes="
+        + RamUsageTester.humanSizeOf(dic.prefixes)
+        + ", "
+        + "suffixes="
+        + RamUsageTester.humanSizeOf(dic.suffixes)
+        + ")";
   }
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
index 6ef783c..8c4bc30 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
@@ -24,6 +24,7 @@ import java.nio.charset.StandardCharsets;
 import java.text.ParseException;
 import org.apache.lucene.store.ByteBuffersDirectory;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.IntsRefBuilder;
@@ -45,20 +46,16 @@ public class TestDictionary extends LuceneTestCase {
     assertNotNull(ordList);
     assertEquals(1, ordList.length);
 
-    assertEquals('B', assertSingleFlag(dictionary, ordList));
+    BytesRef ref = new BytesRef();
+    char[] flags = dictionary.decodeFlags(ordList.ints[0], ref);
+    assertEquals(1, flags.length);
 
     int offset = random().nextInt(10);
     ordList = dictionary.lookupWord((" ".repeat(offset) + "lucen").toCharArray(), offset, 5);
     assertNotNull(ordList);
     assertEquals(1, ordList.length);
-    assertEquals('A', assertSingleFlag(dictionary, ordList));
-  }
-
-  private static char assertSingleFlag(Dictionary dictionary, IntsRef ordList) {
-    int entryId = ordList.ints[0];
-    char[] flags = dictionary.flagLookup.getFlags(entryId);
+    flags = dictionary.decodeFlags(ordList.ints[0], ref);
     assertEquals(1, flags.length);
-    return flags[0];
   }
 
   public void testCompressedDictionary() throws Exception {
@@ -66,7 +63,9 @@ public class TestDictionary extends LuceneTestCase {
     assertEquals(3, dictionary.lookupSuffix(new char[] {'e'}).length);
     assertEquals(1, dictionary.lookupPrefix(new char[] {'s'}).length);
     IntsRef ordList = dictionary.lookupWord(new char[] {'o', 'l', 'r'}, 0, 3);
-    assertSingleFlag(dictionary, ordList);
+    BytesRef ref = new BytesRef();
+    char[] flags = dictionary.decodeFlags(ordList.ints[0], ref);
+    assertEquals(1, flags.length);
   }
 
   public void testCompressedBeforeSetDictionary() throws Exception {
@@ -74,7 +73,9 @@ public class TestDictionary extends LuceneTestCase {
     assertEquals(3, dictionary.lookupSuffix(new char[] {'e'}).length);
     assertEquals(1, dictionary.lookupPrefix(new char[] {'s'}).length);
     IntsRef ordList = dictionary.lookupWord(new char[] {'o', 'l', 'r'}, 0, 3);
-    assertSingleFlag(dictionary, ordList);
+    BytesRef ref = new BytesRef();
+    char[] flags = dictionary.decodeFlags(ordList.ints[0], ref);
+    assertEquals(1, flags.length);
   }
 
   public void testCompressedEmptyAliasDictionary() throws Exception {
@@ -82,7 +83,9 @@ public class TestDictionary extends LuceneTestCase {
     assertEquals(3, dictionary.lookupSuffix(new char[] {'e'}).length);
     assertEquals(1, dictionary.lookupPrefix(new char[] {'s'}).length);
     IntsRef ordList = dictionary.lookupWord(new char[] {'o', 'l', 'r'}, 0, 3);
-    assertSingleFlag(dictionary, ordList);
+    BytesRef ref = new BytesRef();
+    char[] flags = dictionary.decodeFlags(ordList.ints[0], ref);
+    assertEquals(1, flags.length);
   }
 
   // malformed rule causes ParseException
@@ -108,7 +111,7 @@ public class TestDictionary extends LuceneTestCase {
   }
 
   public void testForgivableErrors() throws Exception {
-    Dictionary dictionary = loadDictionary("forgivable-errors.aff", "forgivable-errors.dic");
+    Dictionary dictionary = loadDictionary("forgivable-errors.aff", "simple.dic");
     assertEquals(1, dictionary.repTable.size());
     assertEquals(2, dictionary.compoundMax);
 
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java
index dc4b897..58477d8 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java
@@ -27,5 +27,6 @@ public class TestDutchIJ extends StemmerTestBase {
   public void testStemming() {
     assertStemsTo("ijs", "ijs");
     assertStemsTo("IJs", "ijs");
+    assertStemsTo("Ijs");
   }
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
index c7a6776..048dc04 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
@@ -20,38 +20,25 @@ import java.io.IOException;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.text.ParseException;
 import java.util.Collection;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
-import org.junit.Assert;
 import org.junit.AssumptionViolatedException;
 import org.junit.Test;
-import org.junit.function.ThrowingRunnable;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 /**
- * Same as {@link TestSpellChecking}, but checks all Hunspell's test data. The path to the checked
- * out Hunspell repository should be in {@code hunspell.repo.path} system property.
+ * Same as {@link SpellCheckerTest}, but checks all Hunspell's test data. The path to the checked
+ * out Hunspell repository should be in the {@code -Dhunspell.repo.path=...} system property.
  */
 @RunWith(Parameterized.class)
 public class TestHunspellRepositoryTestCases {
-  private static final Set<String> EXPECTED_FAILURES =
-      Set.of(
-          "hu", // Hungarian is hard: a lot of its rules are hardcoded in Hunspell code, not aff/dic
-          "morph", // we don't do morphological analysis yet
-          "opentaal_keepcase", // Hunspell bug: https://github.com/hunspell/hunspell/issues/712
-          "forbiddenword", // needs https://github.com/hunspell/hunspell/pull/713 PR to be merged
-          "nepali", // not supported yet
-          "utf8_nonbmp", // code points not supported yet
-          "phone" // not supported yet, used only for suggestions in en_ZA
-          );
-  private final String testName;
   private final Path pathPrefix;
 
   public TestHunspellRepositoryTestCases(String testName, Path pathPrefix) {
-    this.testName = testName;
     this.pathPrefix = pathPrefix;
   }
 
@@ -77,12 +64,7 @@ public class TestHunspellRepositoryTestCases {
   }
 
   @Test
-  public void test() throws Throwable {
-    ThrowingRunnable test = () -> TestSpellChecking.checkSpellCheckerExpectations(pathPrefix);
-    if (EXPECTED_FAILURES.contains(testName)) {
-      Assert.assertThrows(Throwable.class, test);
-    } else {
-      test.run();
-    }
+  public void test() throws IOException, ParseException {
+    SpellCheckerTest.checkSpellCheckerExpectations(pathPrefix, false);
   }
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
index 8ae5642..33da1ca 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
@@ -24,15 +24,13 @@ import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Consumer;
 import java.util.regex.Pattern;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Assume;
-import org.junit.AssumptionViolatedException;
-import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 /**
@@ -42,15 +40,8 @@ import org.junit.Test;
  * en.txt}) in a directory specified in {@code -Dhunspell.corpora=...}
  */
 @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
+@Ignore("enable manually")
 public class TestPerformance extends LuceneTestCase {
-  private static Path corporaDir;
-
-  @BeforeClass
-  public static void resolveCorpora() {
-    String dir = System.getProperty("hunspell.corpora");
-    Assume.assumeFalse("Requires test word corpora at -Dhunspell.corpora=...", dir == null);
-    corporaDir = Paths.get(dir);
-  }
 
   @Test
   public void en() throws Exception {
@@ -59,24 +50,23 @@ public class TestPerformance extends LuceneTestCase {
 
   @Test
   public void de() throws Exception {
-    checkPerformance("de", 200_000);
+    checkPerformance("de", 100_000);
   }
 
   @Test
   public void fr() throws Exception {
-    checkPerformance("fr", 40_000);
+    checkPerformance("fr", 20_000);
   }
 
   private void checkPerformance(String code, int wordCount) throws Exception {
     Path aff = findAffFile(code);
-
     Dictionary dictionary = TestAllDictionaries.loadDictionary(aff);
     System.out.println("Loaded " + aff);
 
     List<String> words = loadWords(code, wordCount, dictionary);
 
     Stemmer stemmer = new Stemmer(dictionary);
-    Hunspell speller = new Hunspell(dictionary);
+    SpellChecker speller = new SpellChecker(dictionary);
     measure(
         "Stemming " + code,
         blackHole -> {
@@ -102,17 +92,15 @@ public class TestPerformance extends LuceneTestCase {
               return code.equals(Dictionary.extractLanguageCode(parentName));
             })
         .findFirst()
-        .orElseThrow(
-            () -> new AssumptionViolatedException("Ignored, cannot find aff/dic for: " + code));
+        .orElseThrow(() -> new IllegalArgumentException("Cannot find aff/dic for " + code));
   }
 
   private List<String> loadWords(String code, int wordCount, Dictionary dictionary)
       throws IOException {
-    Path dataPath = corporaDir.resolve(code + ".txt");
-    if (!Files.isReadable(dataPath)) {
-      throw new AssumptionViolatedException("Missing text corpora at: " + dataPath);
-    }
+    String corpusDir = System.getProperty("hunspell.corpora");
+    Assume.assumeFalse("", corpusDir == null);
 
+    Path dataPath = Path.of(corpusDir).resolve(code + ".txt");
     List<String> words = new ArrayList<>();
     try (InputStream stream = Files.newInputStream(dataPath)) {
       BufferedReader reader =
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.sug
deleted file mode 100644
index d372ff2..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.sug
+++ /dev/null
@@ -1,3 +0,0 @@
-OpenOffice.org
-UNICEF
-UNICEF's
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/base_utf.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/base_utf.sug
deleted file mode 100644
index 03a9c9d..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/base_utf.sug
+++ /dev/null
@@ -1,13 +0,0 @@
-looked, look
-text
-hello
-said
-rotten day, rotten-day, rotten
-tomorrow
-seven
-NASA
-horrifying
-speech
-suggest
-Imply
-IMPLY
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/checksharps.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/checksharps.sug
deleted file mode 100644
index ab68568..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/checksharps.sug
+++ /dev/null
@@ -1 +0,0 @@
-MÜSSIG, müßig
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.aff
deleted file mode 100644
index de7f8ad..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.aff
+++ /dev/null
@@ -1,11 +0,0 @@
-# FORBIDDENWORD flag
-# The signed word, and its suffixed forms are all forbidden,
-# excepts with root homonyms.
-# Useful for forbidding bad suffixed forms or compounds.
-
-
-FORBIDDENWORD X
-COMPOUNDFLAG Y
-
-SFX A Y 1
-SFX A 0 s .
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.dic
deleted file mode 100644
index cb63592..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.dic
+++ /dev/null
@@ -1,11 +0,0 @@
-10
-foo/S
-foo/YX
-bar/YS
-bars/X
-foos/X
-kg
-Kg/X
-KG/X
-cm
-Cm/X
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.good
deleted file mode 100644
index 7bd112e..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.good
+++ /dev/null
@@ -1,3 +0,0 @@
-foo
-bar
-
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.wrong
deleted file mode 100644
index 5752c1e..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.wrong
+++ /dev/null
@@ -1,4 +0,0 @@
-bars
-foos
-foobar
-barfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forceucase.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forceucase.sug
deleted file mode 100644
index 6a77cbd..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forceucase.sug
+++ /dev/null
@@ -1,2 +0,0 @@
-Foobaz
-Foobarbaz
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.aff
index b9b56cc..8d17b4e 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.aff
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.aff
@@ -2,14 +2,8 @@ REP 1
 REP foo bar goo doo zoo
 
 COMPOUNDWORDMAX 2 y
-WORDCHARS 0123456789'.-’ ̃
-TRY a b c
 
 KEEPCASE Aa
 
-MAP 1
-MAP a b
-
 SFX A Y 1
-SFX A   nout        l          [aeiouyáéíóúýůěr][^aeiouyáéíóúýůěrl][^aeiouy
-SFX A b c d
\ No newline at end of file
+SFX A   nout        l          [aeiouyáéíóúýůěr][^aeiouyáéíóúýůěrl][^aeiouy
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.dic
deleted file mode 100644
index 51a4bfb..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.dic
+++ /dev/null
@@ -1,2 +0,0 @@
-1
- st:abc
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.aff
deleted file mode 100644
index 11249d4..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.aff
+++ /dev/null
@@ -1,4 +0,0 @@
-# case suggestions
-MAXNGRAMSUGS 0
-# capitalise baz->Baz
-TRY B
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.dic
deleted file mode 100644
index 19e1980..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.dic
+++ /dev/null
@@ -1,5 +0,0 @@
-4
-foo
-bar
-Baz
-Boo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.good
deleted file mode 100644
index 88a079a..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.good
+++ /dev/null
@@ -1,10 +0,0 @@
-foo
-bar
-Foo
-Bar
-Baz
-Boo
-FOO
-BAR
-BAZ
-BOO
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.sug
deleted file mode 100644
index bc784ac..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.sug
+++ /dev/null
@@ -1,13 +0,0 @@
-foo, Boo
-Bar
-Baz
-Boo
-foo bar
-foo Bar
-Foo bar
-Foo Bar
-foo Baz
-Foo Baz
-Baz foo
-Baz Foo
-Baz Boo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.wrong
deleted file mode 100644
index 886584d..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.wrong
+++ /dev/null
@@ -1,13 +0,0 @@
-fOO
-BAr
-baz
-BOo
-foobar
-fooBar
-Foobar
-FooBar
-fooBaz
-FooBaz
-Bazfoo
-BazFoo
-BazBoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/keepcase.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/keepcase.sug
deleted file mode 100644
index 69e80dd..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/keepcase.sug
+++ /dev/null
@@ -1,8 +0,0 @@
-foo
-foo
-Bar
-Bar, baz.
-baz.
-baz.
-Quux.
-Quux.
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.aff
deleted file mode 100644
index 3e78bab..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.aff
+++ /dev/null
@@ -1,9 +0,0 @@
-# With MAP suggestion, Hunspell can add missing accents to a word.
-
-# switch off ngram suggestion for testing
-MAXNGRAMSUGS 0
-
-MAP 3
-MAP uúü
-MAP oóö
-MAP ß(ss)
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.dic
deleted file mode 100644
index 744394f..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.dic
+++ /dev/null
@@ -1,4 +0,0 @@
-3
-Frühstück
-tükörfúró
-groß
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.sug
deleted file mode 100644
index 81d09e0..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.sug
+++ /dev/null
@@ -1,3 +0,0 @@
-Frühstück
-tükörfúró
-groß
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.wrong
deleted file mode 100644
index 251c8a1..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.wrong
+++ /dev/null
@@ -1,3 +0,0 @@
-Fruhstuck
-tukorfuro
-gross
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.aff
deleted file mode 100644
index c9361da..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.aff
+++ /dev/null
@@ -1,5 +0,0 @@
-# don't suggest word with NOSUGGEST flag (for example vulgar or obscene words)
-# See OpenOffice.org Issue #55498
-# (nosuggest.sug is an empty file)
-NOSUGGEST A
-COMPOUNDFLAG B
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.dic
deleted file mode 100644
index dc80c91..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.dic
+++ /dev/null
@@ -1,3 +0,0 @@
-1
-foo/AB
-bar/B
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.good
deleted file mode 100644
index ad91a5e..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.good
+++ /dev/null
@@ -1,3 +0,0 @@
-foo
-foobar
-barfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.sug
deleted file mode 100644
index e69de29..0000000
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.wrong
deleted file mode 100644
index 89c7a1a..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.wrong
+++ /dev/null
@@ -1,3 +0,0 @@
-foox
-foobarx
-barfoox
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.aff
deleted file mode 100644
index 0059a2d..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.aff
+++ /dev/null
@@ -1,20 +0,0 @@
-# output conversion 
-SET UTF-8
-
-# Testing also whitespace and comments.
-OCONV 7 # space, space
-OCONV	a A # tab, space, space
-OCONV	á	Á # tab, tab, space
-OCONV	b	B	# tab, tab, tab
-OCONV  c  C		# 2xspace, 2xspace, 2xtab
-OCONV	 d 	D # tab+space, space+tab, space
-OCONV e E #
-OCONV é É 	
-# Only comment. Note that line above ends with space+tab.
-
- # space
-  # 2xspace
-	# tab
-		# 2xtab
- 	# space+tab
-	 # tab+space
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.dic
deleted file mode 100644
index 359186c..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.dic
+++ /dev/null
@@ -1,4 +0,0 @@
-3
-bébé
-dádá
-aábcdeé
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.good
deleted file mode 100644
index 6cdaab1..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.good
+++ /dev/null
@@ -1,2 +0,0 @@
-bébé
-dádá
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.sug
deleted file mode 100644
index a191c62..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.sug
+++ /dev/null
@@ -1,3 +0,0 @@
-BÉBÉ
-DÁDÁ
-AÁBCDEÉ
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.wrong
deleted file mode 100644
index 73dcc89..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.wrong
+++ /dev/null
@@ -1,3 +0,0 @@
-béb
-dád
-aábcde
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.aff
deleted file mode 100644
index fa07343..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.aff
+++ /dev/null
@@ -1,9 +0,0 @@
-TRY r
-
-FORBIDDENWORD F
-COMPOUNDRULE 2
-COMPOUNDRULE WW
-COMPOUNDRULE WWW
-
-SFX S Y 1
-SFX S 0 s .
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.dic
deleted file mode 100644
index 4437594..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.dic
+++ /dev/null
@@ -1,5 +0,0 @@
-4
-foo/W
-word/W
-bar/WS
-foowordbar/FS
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.good
deleted file mode 100644
index 73a96a7..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.good
+++ /dev/null
@@ -1,3 +0,0 @@
-fooword
-wordbar
-barwordfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.sug
deleted file mode 100644
index 60111a4..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.sug
+++ /dev/null
@@ -1 +0,0 @@
-barwordfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.wrong
deleted file mode 100644
index 59dfddf..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.wrong
+++ /dev/null
@@ -1,5 +0,0 @@
-foowordbar
-foowordbars
-foowordba
-foowordbas
-barwodfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.aff
deleted file mode 100644
index 441354d..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.aff
+++ /dev/null
@@ -1,7 +0,0 @@
-TRY r
-
-FORBIDDENWORD F
-COMPOUNDFLAG W
-
-SFX S Y 1
-SFX S 0 s .
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.dic
deleted file mode 100644
index 895dd62..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.dic
+++ /dev/null
@@ -1,5 +0,0 @@
-3
-foo/WS
-word/W
-bar/WS
-foowordbar/FS
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.good
deleted file mode 100644
index 17cf47d..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.good
+++ /dev/null
@@ -1,4 +0,0 @@
-fooword
-wordbar
-barwordfoo
-barwordfoos
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.sug
deleted file mode 100644
index 60111a4..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.sug
+++ /dev/null
@@ -1 +0,0 @@
-barwordfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.wrong
deleted file mode 100644
index 59dfddf..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.wrong
+++ /dev/null
@@ -1,5 +0,0 @@
-foowordbar
-foowordbars
-foowordba
-foowordbas
-barwodfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.aff
deleted file mode 100644
index e788b17..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.aff
+++ /dev/null
@@ -1,4 +0,0 @@
-# a dictionary word pair separated by space
-# will avoid its recognition without space
-# at compound word analysis
-COMPOUNDFLAG Y
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.dic
deleted file mode 100644
index 96fc77f..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.dic
+++ /dev/null
@@ -1,4 +0,0 @@
-3
-word/Y
-compound/Y
-compound word
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.good
deleted file mode 100644
index d868fce..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.good
+++ /dev/null
@@ -1,3 +0,0 @@
-word
-compound
-wordcompound
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.wrong
deleted file mode 100644
index 04ca38b..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.wrong
+++ /dev/null
@@ -1 +0,0 @@
-compoundword
diff --git a/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java b/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java
index dd260fb..cea0dfc 100644
--- a/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java
+++ b/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java
@@ -17,6 +17,8 @@
 package org.apache.lucene.analysis.standard;
 
 import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
@@ -24,9 +26,6 @@ import java.io.Writer;
 import java.net.URL;
 import java.net.URLConnection;
 import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.text.DateFormat;
 import java.util.ArrayList;
 import java.util.Comparator;
@@ -103,7 +102,7 @@ public class GenerateJflexTLDMacros {
       Pattern.compile("([-A-Za-z0-9]+)\\.\\s+\\d+\\s+IN\\s+NS\\s+.*");
   private final URL tldFileURL;
   private long tldFileLastModified = -1L;
-  private final Path outputFile;
+  private final File outputFile;
   private final SortedMap<String, Boolean> processedTLDsLongestFirst =
       new TreeMap<>(
           Comparator.comparing(String::length).reversed().thenComparing(String::compareTo));
@@ -112,7 +111,7 @@ public class GenerateJflexTLDMacros {
 
   public GenerateJflexTLDMacros(String tldFileURL, String outputFile) throws Exception {
     this.tldFileURL = new URL(tldFileURL);
-    this.outputFile = Paths.get(outputFile);
+    this.outputFile = new File(outputFile);
   }
 
   /**
@@ -131,10 +130,9 @@ public class GenerateJflexTLDMacros {
     for (int suffixLength = 0; suffixLength < TLDsBySuffixLength.size(); ++suffixLength) {
       int domainsAtThisSuffixLength = TLDsBySuffixLength.get(suffixLength).size();
       totalDomains += domainsAtThisSuffixLength;
-      System.out.printf(
-          Locale.ROOT, "%30s: %4d TLDs%n", getMacroName(suffixLength), domainsAtThisSuffixLength);
+      System.out.printf("%30s: %4d TLDs%n", getMacroName(suffixLength), domainsAtThisSuffixLength);
     }
-    System.out.printf(Locale.ROOT, "%30s: %4d TLDs%n", "Total", totalDomains);
+    System.out.printf("%30s: %4d TLDs%n", "Total", totalDomains);
   }
 
   /**
@@ -218,7 +216,7 @@ public class GenerateJflexTLDMacros {
         DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL, Locale.ROOT);
     dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
     try (Writer writer =
-        new OutputStreamWriter(Files.newOutputStream(outputFile), StandardCharsets.UTF_8)) {
+        new OutputStreamWriter(new FileOutputStream(outputFile), StandardCharsets.UTF_8)) {
       writer.write(APACHE_LICENSE);
       writer.write("// Generated from IANA Root Zone Database <");
       writer.write(tldFileURL.toString());
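
Note on the hunk above: it swaps the NIO.2 output path (Paths.get plus Files.newOutputStream) back to java.io.File plus FileOutputStream, and drops the explicit Locale.ROOT argument from the printf calls, so number formatting falls back to the default locale. A minimal standalone sketch of the two writer idioms, assuming nothing beyond the JDK:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class WriterIdioms {
      public static void main(String[] args) throws Exception {
        // NIO.2 style (what this hunk reverts away from):
        try (Writer w = new OutputStreamWriter(
            Files.newOutputStream(Paths.get("out-nio.txt")), StandardCharsets.UTF_8)) {
          w.write("hello\n");
        }
        // Legacy java.io style (what this hunk restores):
        try (Writer w = new OutputStreamWriter(
            new FileOutputStream(new File("out-io.txt")), StandardCharsets.UTF_8)) {
          w.write("hello\n");
        }
      }
    }
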
diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
index bb7d8a3..18818a3 100644
--- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
+++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
@@ -21,6 +21,10 @@ import com.ibm.icu.lang.UProperty;
 import com.ibm.icu.text.UnicodeSet;
 import com.ibm.icu.text.UnicodeSetIterator;
 import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -30,16 +34,11 @@ import java.io.Writer;
 import java.net.URL;
 import java.net.URLConnection;
 import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
-import java.util.function.Predicate;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import java.util.stream.Collectors;
 
 /**
  * Downloads/generates lucene/analysis/icu/src/data/utr30/*.txt
@@ -83,33 +82,33 @@ public class GenerateUTR30DataFiles {
   }
 
   private static void expandRulesInUTR30DataFiles() throws IOException {
-    Predicate<Path> predicate =
-        (path) -> {
-          String name = path.getFileName().toString();
-          return Files.isRegularFile(path)
-              && name.matches(".*\\.(?s:txt)")
-              && !name.equals(NFC_TXT)
-              && !name.equals(NFKC_TXT)
-              && !name.equals(NFKC_CF_TXT);
+    FileFilter filter =
+        new FileFilter() {
+          @Override
+          public boolean accept(File pathname) {
+            String name = pathname.getName();
+            return pathname.isFile()
+                && name.matches(".*\\.(?s:txt)")
+                && !name.equals(NFC_TXT)
+                && !name.equals(NFKC_TXT)
+                && !name.equals(NFKC_CF_TXT);
+          }
         };
-    try (var stream = Files.list(Paths.get(".")).filter(predicate)) {
-      for (Path file : stream.collect(Collectors.toList())) {
-        expandDataFileRules(file);
-      }
+    for (File file : new File(".").listFiles(filter)) {
+      expandDataFileRules(file);
     }
   }
 
-  private static void expandDataFileRules(Path file) throws IOException {
-    boolean modified = false;
+  private static void expandDataFileRules(File file) throws IOException {
+    final FileInputStream stream = new FileInputStream(file);
+    final InputStreamReader reader = new InputStreamReader(stream, StandardCharsets.UTF_8);
+    final BufferedReader bufferedReader = new BufferedReader(reader);
     StringBuilder builder = new StringBuilder();
-
-    try (InputStream stream = Files.newInputStream(file);
-        InputStreamReader reader = new InputStreamReader(stream, StandardCharsets.UTF_8);
-        BufferedReader bufferedReader = new BufferedReader(reader)) {
-      String line;
-      boolean verbatim = false;
-      int lineNum = 0;
-
+    String line;
+    boolean verbatim = false;
+    boolean modified = false;
+    int lineNum = 0;
+    try {
       while (null != (line = bufferedReader.readLine())) {
         ++lineNum;
         if (VERBATIM_RULE_LINE_PATTERN.matcher(line).matches()) {
@@ -125,7 +124,7 @@ public class GenerateUTR30DataFiles {
               String rightHandSide = ruleMatcher.group(2).trim();
               expandSingleRule(builder, leftHandSide, rightHandSide);
             } catch (IllegalArgumentException e) {
-              System.err.println("ERROR in " + file.getFileName() + " line #" + lineNum + ":");
+              System.err.println("ERROR in " + file.getName() + " line #" + lineNum + ":");
               e.printStackTrace(System.err);
               System.exit(1);
             }
@@ -143,11 +142,18 @@ public class GenerateUTR30DataFiles {
           }
         }
       }
+    } finally {
+      bufferedReader.close();
     }
-
     if (modified) {
-      System.err.println("Expanding rules in and overwriting " + file.getFileName());
-      Files.writeString(file, builder.toString(), StandardCharsets.UTF_8);
+      System.err.println("Expanding rules in and overwriting " + file.getName());
+      final FileOutputStream out = new FileOutputStream(file, false);
+      Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8);
+      try {
+        writer.write(builder.toString());
+      } finally {
+        writer.close();
+      }
     }
   }
 
@@ -165,12 +171,11 @@ public class GenerateUTR30DataFiles {
 
     System.err.print("Downloading " + NFKC_CF_TXT + " and making diacritic rules one-way ... ");
     URLConnection connection = openConnection(new URL(norm2url, NFC_TXT));
-    try (BufferedReader reader =
-            new BufferedReader(
-                new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8));
-        Writer writer =
-            new OutputStreamWriter(
-                Files.newOutputStream(Path.of(NFC_TXT)), StandardCharsets.UTF_8)) {
+    BufferedReader reader =
+        new BufferedReader(
+            new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8));
+    Writer writer = new OutputStreamWriter(new FileOutputStream(NFC_TXT), StandardCharsets.UTF_8);
+    try {
       String line;
 
       while (null != (line = reader.readLine())) {
@@ -203,6 +208,9 @@ public class GenerateUTR30DataFiles {
         writer.write(line);
         writer.write("\n");
       }
+    } finally {
+      reader.close();
+      writer.close();
     }
     System.err.println("done.");
   }
@@ -210,7 +218,7 @@ public class GenerateUTR30DataFiles {
   private static void download(URL url, String outputFile) throws IOException {
     final URLConnection connection = openConnection(url);
     final InputStream inputStream = connection.getInputStream();
-    final OutputStream outputStream = Files.newOutputStream(Path.of(outputFile));
+    final OutputStream outputStream = new FileOutputStream(outputFile);
     int numBytes;
     try {
       while (-1 != (numBytes = inputStream.read(DOWNLOAD_BUFFER))) {
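
The hunks above make the same move for listing and writing the UTR30 data files: Files.list with a Predicate<Path> becomes File.listFiles with a FileFilter, and try-with-resources becomes explicit close() calls in finally. A small standalone sketch of the two listing idioms, assuming only the JDK (the .txt filter stands in for the real data-file filter):

    import java.io.File;
    import java.io.FileFilter;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.List;
    import java.util.stream.Collectors;

    public class ListingIdioms {
      public static void main(String[] args) throws IOException {
        // NIO.2 stream style (reverted away from); the stream itself must be closed:
        try (var stream = Files.list(Paths.get("."))) {
          List<Path> txt =
              stream
                  .filter(p -> Files.isRegularFile(p) && p.getFileName().toString().endsWith(".txt"))
                  .collect(Collectors.toList());
          txt.forEach(p -> System.out.println("nio: " + p.getFileName()));
        }
        // java.io style (restored); listFiles() returns null when the directory is unreadable:
        FileFilter filter = f -> f.isFile() && f.getName().endsWith(".txt");
        File[] files = new File(".").listFiles(filter);
        if (files != null) {
          for (File f : files) {
            System.out.println("io: " + f.getName());
          }
        }
      }
    }
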
diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
index 7797ae8..a210244 100644
--- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
+++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
@@ -18,16 +18,14 @@ package org.apache.lucene.analysis.icu;
 
 import com.ibm.icu.text.RuleBasedBreakIterator;
 import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
-import java.io.OutputStream;
 import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.List;
-import java.util.stream.Collectors;
 
 /**
  * Command-line utility to converts RuleBasedBreakIterator (.rbbi) files into binary compiled form
@@ -35,9 +33,9 @@ import java.util.stream.Collectors;
  */
 public class RBBIRuleCompiler {
 
-  static String getRules(Path ruleFile) throws IOException {
+  static String getRules(File ruleFile) throws IOException {
     StringBuilder rules = new StringBuilder();
-    InputStream in = Files.newInputStream(ruleFile);
+    InputStream in = new FileInputStream(ruleFile);
     BufferedReader cin = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
     String line = null;
     while ((line = cin.readLine()) != null) {
@@ -51,21 +49,20 @@ public class RBBIRuleCompiler {
     return rules.toString();
   }
 
-  static void compile(Path srcDir, Path destDir) throws Exception {
-    List<Path> files;
-    try (var stream = Files.list(srcDir)) {
-      files =
-          stream
-              .filter(name -> name.getFileName().toString().endsWith("rbbi"))
-              .collect(Collectors.toList());
-    }
-
-    if (files.isEmpty()) throw new IOException("No input files matching *.rbbi at: " + srcDir);
-    for (Path file : files) {
-      Path outputFile = destDir.resolve(file.getFileName().toString().replaceAll("rbbi$", "brk"));
+  static void compile(File srcDir, File destDir) throws Exception {
+    File files[] =
+        srcDir.listFiles(
+            new FilenameFilter() {
+              public boolean accept(File dir, String name) {
+                return name.endsWith("rbbi");
+              }
+            });
+    if (files == null) throw new IOException("Path does not exist: " + srcDir);
+    for (int i = 0; i < files.length; i++) {
+      File file = files[i];
+      File outputFile = new File(destDir, file.getName().replaceAll("rbbi$", "brk"));
       String rules = getRules(file);
-      System.err.print(
-          "Compiling " + file.getFileName() + " to " + outputFile.getFileName() + ": ");
+      System.err.print("Compiling " + file.getName() + " to " + outputFile.getName() + ": ");
       /*
        * if there is a syntax error, compileRules() may succeed. the way to
        * check is to try to instantiate from the string. additionally if the
@@ -81,10 +78,10 @@ public class RBBIRuleCompiler {
         System.err.println(e.getMessage());
         System.exit(1);
       }
-      try (OutputStream os = Files.newOutputStream(outputFile)) {
-        RuleBasedBreakIterator.compileRules(rules, os);
-      }
-      System.err.println(Files.size(outputFile) + " bytes.");
+      FileOutputStream os = new FileOutputStream(outputFile);
+      RuleBasedBreakIterator.compileRules(rules, os);
+      os.close();
+      System.err.println(outputFile.length() + " bytes.");
     }
   }
 
@@ -93,7 +90,7 @@ public class RBBIRuleCompiler {
       System.err.println("Usage: RBBIRuleComputer <sourcedir> <destdir>");
       System.exit(1);
     }
-    compile(Paths.get(args[0]), Paths.get(args[1]));
+    compile(new File(args[0]), new File(args[1]));
     System.exit(0);
   }
 }
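
For reference, the core step the compile() loop above performs per *.rbbi file is a single ICU4J call: validate the rules by instantiating a RuleBasedBreakIterator, then compileRules() into the binary .brk output. A minimal sketch for one file, assuming ICU4J on the classpath and a valid word.rbbi in the working directory:

    import com.ibm.icu.text.RuleBasedBreakIterator;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class CompileOneRbbi {
      public static void main(String[] args) throws IOException {
        String rules = new String(
            Files.readAllBytes(Paths.get("word.rbbi")), StandardCharsets.UTF_8);
        // compileRules() can succeed on bad input, so instantiating from the rules
        // string first is the actual syntax check (as the tool's comment notes):
        new RuleBasedBreakIterator(rules);
        try (OutputStream os = new FileOutputStream("word.brk")) {
          RuleBasedBreakIterator.compileRules(rules, os);
        }
      }
    }
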
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundFormat.java
deleted file mode 100644
index 75d6912..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundFormat.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.backward_codecs.lucene50;
-
-import java.io.IOException;
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.CompoundDirectory;
-import org.apache.lucene.codecs.CompoundFormat;
-import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-
-/**
- * Lucene 5.0 compound file format
- *
- * <p>Files:
- *
- * <ul>
- *   <li><code>.cfs</code>: An optional "virtual" file consisting of all the other index files for
- *       systems that frequently run out of file handles.
- *   <li><code>.cfe</code>: The "virtual" compound file's entry table holding all entries in the
- *       corresponding .cfs file.
- * </ul>
- *
- * <p>Description:
- *
- * <ul>
- *   <li>Compound (.cfs) --&gt; Header, FileData <sup>FileCount</sup>, Footer
- *   <li>Compound Entry Table (.cfe) --&gt; Header, FileCount, &lt;FileName, DataOffset,
- *       DataLength&gt; <sup>FileCount</sup>
- *   <li>Header --&gt; {@link CodecUtil#writeIndexHeader IndexHeader}
- *   <li>FileCount --&gt; {@link DataOutput#writeVInt VInt}
- *   <li>DataOffset,DataLength,Checksum --&gt; {@link DataOutput#writeLong UInt64}
- *   <li>FileName --&gt; {@link DataOutput#writeString String}
- *   <li>FileData --&gt; raw file data
- *   <li>Footer --&gt; {@link CodecUtil#writeFooter CodecFooter}
- * </ul>
- *
- * <p>Notes:
- *
- * <ul>
- *   <li>FileCount indicates how many files are contained in this compound file. The entry table
- *       that follows has that many entries.
- *   <li>Each directory entry contains a long pointer to the start of this file's data section, the
- *       files length, and a String with that file's name.
- * </ul>
- */
-public final class Lucene50CompoundFormat extends CompoundFormat {
-
-  /** Extension of compound file */
-  static final String DATA_EXTENSION = "cfs";
-  /** Extension of compound file entries */
-  static final String ENTRIES_EXTENSION = "cfe";
-
-  static final String DATA_CODEC = "Lucene50CompoundData";
-  static final String ENTRY_CODEC = "Lucene50CompoundEntries";
-  static final int VERSION_START = 0;
-  static final int VERSION_CURRENT = VERSION_START;
-
-  /** Sole constructor. */
-  public Lucene50CompoundFormat() {}
-
-  @Override
-  public CompoundDirectory getCompoundReader(Directory dir, SegmentInfo si, IOContext context)
-      throws IOException {
-    return new Lucene50CompoundReader(dir, si, context);
-  }
-
-  @Override
-  public void write(Directory dir, SegmentInfo si, IOContext context) throws IOException {
-    throw new UnsupportedOperationException("Old formats can't be used for writing");
-  }
-}
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
index e6e1d9e..e34502e 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.backward_codecs.lucene70;
 
-import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
@@ -35,6 +34,7 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
@@ -112,7 +112,7 @@ public class Lucene70Codec extends Codec {
   }
 
   @Override
-  public CompoundFormat compoundFormat() {
+  public final CompoundFormat compoundFormat() {
     return compoundFormat;
   }
 
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
index 92b6a21..f39ffa7 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.backward_codecs.lucene80;
 
-import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
@@ -34,6 +33,7 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
index c476e9f..0b3ffb7 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.backward_codecs.lucene84;
 
 import java.util.Objects;
-import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
@@ -37,6 +36,7 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
@@ -125,7 +125,7 @@ public class Lucene84Codec extends Codec {
   }
 
   @Override
-  public CompoundFormat compoundFormat() {
+  public final CompoundFormat compoundFormat() {
     return compoundFormat;
   }
 
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
index b8659f7..db02573 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
@@ -18,7 +18,6 @@
 package org.apache.lucene.backward_codecs.lucene86;
 
 import java.util.Objects;
-import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
@@ -35,6 +34,7 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
@@ -126,7 +126,7 @@ public class Lucene86Codec extends Codec {
   }
 
   @Override
-  public CompoundFormat compoundFormat() {
+  public final CompoundFormat compoundFormat() {
     return compoundFormat;
   }
 
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
index 52bc76c..8543de6 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
@@ -18,7 +18,6 @@
 package org.apache.lucene.backward_codecs.lucene87;
 
 import java.util.Objects;
-import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.Codec;
@@ -34,6 +33,7 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
@@ -138,7 +138,7 @@ public class Lucene87Codec extends Codec {
   }
 
   @Override
-  public CompoundFormat compoundFormat() {
+  public final CompoundFormat compoundFormat() {
     return compoundFormat;
   }
 
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/Lucene87/Lucene87RWCodec.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/Lucene87/Lucene87RWCodec.java
deleted file mode 100644
index 6467bc7..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/Lucene87/Lucene87RWCodec.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.backward_codecs.Lucene87;
-
-import org.apache.lucene.backward_codecs.lucene50.Lucene50RWCompoundFormat;
-import org.apache.lucene.backward_codecs.lucene87.Lucene87Codec;
-import org.apache.lucene.codecs.CompoundFormat;
-
-/** RW impersonation of {@link Lucene87Codec}. */
-public class Lucene87RWCodec extends Lucene87Codec {
-
-  @Override
-  public final CompoundFormat compoundFormat() {
-    return new Lucene50RWCompoundFormat();
-  }
-}
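
The Lucene70/84/86/87 codec hunks above share the same two-line shape: the Lucene50CompoundFormat import moves from org.apache.lucene.backward_codecs.lucene50 back to org.apache.lucene.codecs.lucene50, and compoundFormat() becomes final, so test codecs such as the Lucene87RWCodec deleted above can no longer override it; the remaining RW overrides are removed in the hunks that follow. A sketch of the resulting accessor pattern, using a hypothetical holder class rather than a real codec:

    import org.apache.lucene.codecs.CompoundFormat;
    import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;

    class CompoundFormatHolder {
      private final CompoundFormat compoundFormat = new Lucene50CompoundFormat();

      // final: subclasses (e.g. RW test codecs) cannot substitute another format.
      public final CompoundFormat compoundFormat() {
        return compoundFormat;
      }
    }
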
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50CompoundFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50CompoundFormat.java
deleted file mode 100644
index 28624bf..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50CompoundFormat.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.backward_codecs.lucene50;
-
-import org.apache.lucene.backward_codecs.Lucene87.Lucene87RWCodec;
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.index.BaseCompoundFormatTestCase;
-
-public class TestLucene50CompoundFormat extends BaseCompoundFormatTestCase {
-  ;
-
-  @Override
-  protected Codec getCodec() {
-    return new Lucene87RWCodec();
-  }
-}
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/Lucene70RWCodec.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/Lucene70RWCodec.java
index 7b44821..16041ae9 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/Lucene70RWCodec.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/Lucene70RWCodec.java
@@ -16,10 +16,8 @@
  */
 package org.apache.lucene.backward_codecs.lucene70;
 
-import org.apache.lucene.backward_codecs.lucene50.Lucene50RWCompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50RWPostingsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50RWStoredFieldsFormat;
-import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.NormsFormat;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.SegmentInfoFormat;
@@ -60,9 +58,4 @@ public final class Lucene70RWCodec extends Lucene70Codec {
   public PostingsFormat postingsFormat() {
     return postingsFormat;
   }
-
-  @Override
-  public CompoundFormat compoundFormat() {
-    return new Lucene50RWCompoundFormat();
-  }
 }
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene84/Lucene84RWCodec.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene84/Lucene84RWCodec.java
index 05736d9..dd08c5d 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene84/Lucene84RWCodec.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene84/Lucene84RWCodec.java
@@ -16,11 +16,9 @@
  */
 package org.apache.lucene.backward_codecs.lucene84;
 
-import org.apache.lucene.backward_codecs.lucene50.Lucene50RWCompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50RWStoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60RWPointsFormat;
 import org.apache.lucene.backward_codecs.lucene70.Lucene70RWSegmentInfoFormat;
-import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.PointsFormat;
 import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
@@ -42,9 +40,4 @@ public class Lucene84RWCodec extends Lucene84Codec {
   public StoredFieldsFormat storedFieldsFormat() {
     return new Lucene50RWStoredFieldsFormat();
   }
-
-  @Override
-  public final CompoundFormat compoundFormat() {
-    return new Lucene50RWCompoundFormat();
-  }
 }
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene86/Lucene86RWCodec.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene86/Lucene86RWCodec.java
index c1d278f..d9d3a49 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene86/Lucene86RWCodec.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene86/Lucene86RWCodec.java
@@ -16,10 +16,8 @@
  */
 package org.apache.lucene.backward_codecs.lucene86;
 
-import org.apache.lucene.backward_codecs.lucene50.Lucene50RWCompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50RWStoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 
 /** RW impersonation of {@link Lucene86Codec}. */
@@ -41,9 +39,4 @@ public class Lucene86RWCodec extends Lucene86Codec {
   public StoredFieldsFormat storedFieldsFormat() {
     return storedFieldsFormat;
   }
-
-  @Override
-  public final CompoundFormat compoundFormat() {
-    return new Lucene50RWCompoundFormat();
-  }
 }
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50RWCompoundFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
similarity index 97%
rename from lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50RWCompoundFormat.java
rename to lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
index 7e9b2a6..7c8ae37 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50RWCompoundFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.backward_codecs.lucene50;
+package org.apache.lucene.codecs.lucene50;
 
 import java.io.IOException;
 import org.apache.lucene.codecs.CodecUtil;
@@ -63,7 +63,7 @@ import org.apache.lucene.store.IndexOutput;
  *       files length, and a String with that file's name.
  * </ul>
  */
-public final class Lucene50RWCompoundFormat extends CompoundFormat {
+public final class Lucene50CompoundFormat extends CompoundFormat {
 
   /** Extension of compound file */
   static final String DATA_EXTENSION = "cfs";
@@ -76,7 +76,7 @@ public final class Lucene50RWCompoundFormat extends CompoundFormat {
   static final int VERSION_CURRENT = VERSION_START;
 
   /** Sole constructor. */
-  public Lucene50RWCompoundFormat() {}
+  public Lucene50CompoundFormat() {}
 
   @Override
   public CompoundDirectory getCompoundReader(Directory dir, SegmentInfo si, IOContext context)
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
similarity index 99%
rename from lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundReader.java
rename to lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
index 9ff5161..4c8eb84 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.backward_codecs.lucene50;
+package org.apache.lucene.codecs.lucene50;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsFormat.java
index 0714840..17594c0 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsFormat.java
@@ -35,12 +35,9 @@ import org.apache.lucene.index.SegmentWriteState;
  *   <li>A .kdm file that records metadata about the fields, such as numbers of dimensions or
  *       numbers of bytes per dimension.
  *   <li>A .kdi file that stores inner nodes of the tree.
- *   <li>A .kdd file that stores leaf nodes, where most of the data lives.
+ *   <li>A .kdm file that stores leaf nodes, where most of the data lives.
  * </ul>
  *
- * See <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=173081898">this
- * wiki</a> for detailed data structures of the three files.
- *
  * @lucene.experimental
  */
 public final class Lucene86PointsFormat extends PointsFormat {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90Codec.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90Codec.java
index 3f84280..6250592 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90Codec.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90Codec.java
@@ -30,6 +30,7 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
@@ -72,7 +73,7 @@ public class Lucene90Codec extends Codec {
   private final FieldInfosFormat fieldInfosFormat = new Lucene90FieldInfosFormat();
   private final SegmentInfoFormat segmentInfosFormat = new Lucene86SegmentInfoFormat();
   private final LiveDocsFormat liveDocsFormat = new Lucene90LiveDocsFormat();
-  private final CompoundFormat compoundFormat = new Lucene90CompoundFormat();
+  private final CompoundFormat compoundFormat = new Lucene50CompoundFormat();
   private final PostingsFormat defaultFormat;
 
   private final PostingsFormat postingsFormat =
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundFormat.java
deleted file mode 100644
index d06802c..0000000
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundFormat.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene90;
-
-import java.io.IOException;
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.CompoundDirectory;
-import org.apache.lucene.codecs.CompoundFormat;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.store.ChecksumIndexInput;
-import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexOutput;
-
-/**
- * Lucene 9.0 compound file format
- *
- * <p>Files:
- *
- * <ul>
- *   <li><code>.cfs</code>: An optional "virtual" file consisting of all the other index files for
- *       systems that frequently run out of file handles.
- *   <li><code>.cfe</code>: The "virtual" compound file's entry table holding all entries in the
- *       corresponding .cfs file.
- * </ul>
- *
- * <p>Description:
- *
- * <ul>
- *   <li>Compound (.cfs) --&gt; Header, FileData <sup>FileCount</sup>, Footer
- *   <li>Compound Entry Table (.cfe) --&gt; Header, FileCount, &lt;FileName, DataOffset,
- *       DataLength&gt; <sup>FileCount</sup>
- *   <li>Header --&gt; {@link CodecUtil#writeIndexHeader IndexHeader}
- *   <li>FileCount --&gt; {@link DataOutput#writeVInt VInt}
- *   <li>DataOffset,DataLength,Checksum --&gt; {@link DataOutput#writeLong UInt64}
- *   <li>FileName --&gt; {@link DataOutput#writeString String}
- *   <li>FileData --&gt; raw file data
- *   <li>Footer --&gt; {@link CodecUtil#writeFooter CodecFooter}
- * </ul>
- *
- * <p>Notes:
- *
- * <ul>
- *   <li>FileCount indicates how many files are contained in this compound file. The entry table
- *       that follows has that many entries.
- *   <li>Each directory entry contains a long pointer to the start of this file's data section, the
- *       files length, and a String with that file's name.
- * </ul>
- */
-public final class Lucene90CompoundFormat extends CompoundFormat {
-
-  /** Extension of compound file */
-  static final String DATA_EXTENSION = "cfs";
-  /** Extension of compound file entries */
-  static final String ENTRIES_EXTENSION = "cfe";
-
-  static final String DATA_CODEC = "Lucene90CompoundData";
-  static final String ENTRY_CODEC = "Lucene90CompoundEntries";
-  static final int VERSION_START = 0;
-  static final int VERSION_CURRENT = VERSION_START;
-
-  /** Sole constructor. */
-  public Lucene90CompoundFormat() {}
-
-  @Override
-  public CompoundDirectory getCompoundReader(Directory dir, SegmentInfo si, IOContext context)
-      throws IOException {
-    return new Lucene90CompoundReader(dir, si, context);
-  }
-
-  @Override
-  public void write(Directory dir, SegmentInfo si, IOContext context) throws IOException {
-    String dataFile = IndexFileNames.segmentFileName(si.name, "", DATA_EXTENSION);
-    String entriesFile = IndexFileNames.segmentFileName(si.name, "", ENTRIES_EXTENSION);
-
-    try (IndexOutput data = dir.createOutput(dataFile, context);
-        IndexOutput entries = dir.createOutput(entriesFile, context)) {
-      CodecUtil.writeIndexHeader(data, DATA_CODEC, VERSION_CURRENT, si.getId(), "");
-      CodecUtil.writeIndexHeader(entries, ENTRY_CODEC, VERSION_CURRENT, si.getId(), "");
-
-      writeCompoundFile(entries, data, dir, si);
-
-      CodecUtil.writeFooter(data);
-      CodecUtil.writeFooter(entries);
-    }
-  }
-
-  private void writeCompoundFile(
-      IndexOutput entries, IndexOutput data, Directory dir, SegmentInfo si) throws IOException {
-    // write number of files
-    entries.writeVInt(si.files().size());
-    for (String file : si.files()) {
-      // write bytes for file
-      long startOffset = data.getFilePointer();
-      try (ChecksumIndexInput in = dir.openChecksumInput(file, IOContext.READONCE)) {
-
-        // just copies the index header, verifying that its id matches what we expect
-        CodecUtil.verifyAndCopyIndexHeader(in, data, si.getId());
-
-        // copy all bytes except the footer
-        long numBytesToCopy = in.length() - CodecUtil.footerLength() - in.getFilePointer();
-        data.copyBytes(in, numBytesToCopy);
-
-        // verify footer (checksum) matches for the incoming file we are copying
-        long checksum = CodecUtil.checkFooter(in);
-
-        // this is poached from CodecUtil.writeFooter, but we need to use our own checksum, not
-        // data.getChecksum(), but I think
-        // adding a public method to CodecUtil to do that is somewhat dangerous:
-        data.writeInt(CodecUtil.FOOTER_MAGIC);
-        data.writeInt(0);
-        data.writeLong(checksum);
-      }
-      long endOffset = data.getFilePointer();
-
-      long length = endOffset - startOffset;
-
-      // write entry for file
-      entries.writeString(IndexFileNames.stripSegmentName(file));
-      entries.writeLong(startOffset);
-      entries.writeLong(length);
-    }
-  }
-}
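
The deleted writeCompoundFile() above fixes the on-disk layout of the .cfe entry table: a VInt file count, then one record per file holding a String name (segment prefix stripped), a long start offset into the .cfs data, and a long length. A minimal sketch of just that record layout, written into Lucene's ByteBuffersDataOutput so there is a concrete DataOutput to target (file names and offsets are made up):

    import java.io.IOException;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.lucene.store.ByteBuffersDataOutput;
    import org.apache.lucene.store.DataOutput;

    public class EntryTableSketch {
      static void writeEntryTable(DataOutput entries, Map<String, long[]> files)
          throws IOException {
        entries.writeVInt(files.size()); // number of files in the compound file
        for (Map.Entry<String, long[]> e : files.entrySet()) {
          entries.writeString(e.getKey());    // file name, segment prefix stripped
          entries.writeLong(e.getValue()[0]); // start offset of its data in the .cfs
          entries.writeLong(e.getValue()[1]); // length of that data in bytes
        }
      }

      public static void main(String[] args) throws IOException {
        Map<String, long[]> files = new LinkedHashMap<>();
        files.put(".fdt", new long[] {0L, 128L});
        files.put(".fdx", new long[] {128L, 32L});
        ByteBuffersDataOutput out = new ByteBuffersDataOutput();
        writeEntryTable(out, files);
        System.out.println("entry table bytes: " + out.size());
      }
    }
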
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundReader.java
deleted file mode 100644
index cbf1e0d..0000000
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundReader.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene90;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.CompoundDirectory;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.store.ChecksumIndexInput;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.util.IOUtils;
-
-/**
- * Class for accessing a compound stream. This class implements a directory, but is limited to only
- * read operations. Directory methods that would normally modify data throw an exception.
- *
- * @lucene.experimental
- */
-final class Lucene90CompoundReader extends CompoundDirectory {
-
-  /** Offset/Length for a slice inside of a compound file */
-  public static final class FileEntry {
-    long offset;
-    long length;
-  }
-
-  private final Directory directory;
-  private final String segmentName;
-  private final Map<String, FileEntry> entries;
-  private final IndexInput handle;
-  private int version;
-
-  /** Create a new CompoundFileDirectory. */
-  // TODO: we should just pre-strip "entries" and append segment name up-front like simpletext?
-  // this need not be a "general purpose" directory anymore (it only writes index files)
-  public Lucene90CompoundReader(Directory directory, SegmentInfo si, IOContext context)
-      throws IOException {
-    this.directory = directory;
-    this.segmentName = si.name;
-    String dataFileName =
-        IndexFileNames.segmentFileName(segmentName, "", Lucene90CompoundFormat.DATA_EXTENSION);
-    String entriesFileName =
-        IndexFileNames.segmentFileName(segmentName, "", Lucene90CompoundFormat.ENTRIES_EXTENSION);
-    this.entries = readEntries(si.getId(), directory, entriesFileName);
-    boolean success = false;
-
-    long expectedLength = CodecUtil.indexHeaderLength(Lucene90CompoundFormat.DATA_CODEC, "");
-    for (Map.Entry<String, FileEntry> ent : entries.entrySet()) {
-      expectedLength += ent.getValue().length;
-    }
-    expectedLength += CodecUtil.footerLength();
-
-    handle = directory.openInput(dataFileName, context);
-    try {
-      CodecUtil.checkIndexHeader(
-          handle, Lucene90CompoundFormat.DATA_CODEC, version, version, si.getId(), "");
-
-      // NOTE: data file is too costly to verify checksum against all the bytes on open,
-      // but for now we at least verify proper structure of the checksum footer: which looks
-      // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
-      // such as file truncation.
-      CodecUtil.retrieveChecksum(handle);
-
-      // We also validate length, because e.g. if you strip 16 bytes off the .cfs we otherwise
-      // would not detect it:
-      if (handle.length() != expectedLength) {
-        throw new CorruptIndexException(
-            "length should be " + expectedLength + " bytes, but is " + handle.length() + " instead",
-            handle);
-      }
-
-      success = true;
-    } finally {
-      if (!success) {
-        IOUtils.closeWhileHandlingException(handle);
-      }
-    }
-  }
-
-  /** Helper method that reads CFS entries from an input stream */
-  private Map<String, FileEntry> readEntries(
-      byte[] segmentID, Directory dir, String entriesFileName) throws IOException {
-    Map<String, FileEntry> mapping = null;
-    try (ChecksumIndexInput entriesStream =
-        dir.openChecksumInput(entriesFileName, IOContext.READONCE)) {
-      Throwable priorE = null;
-      try {
-        version =
-            CodecUtil.checkIndexHeader(
-                entriesStream,
-                Lucene90CompoundFormat.ENTRY_CODEC,
-                Lucene90CompoundFormat.VERSION_START,
-                Lucene90CompoundFormat.VERSION_CURRENT,
-                segmentID,
-                "");
-
-        mapping = readMapping(entriesStream);
-
-      } catch (Throwable exception) {
-        priorE = exception;
-      } finally {
-        CodecUtil.checkFooter(entriesStream, priorE);
-      }
-    }
-    return Collections.unmodifiableMap(mapping);
-  }
-
-  private Map<String, FileEntry> readMapping(IndexInput entriesStream) throws IOException {
-    final int numEntries = entriesStream.readVInt();
-    Map<String, FileEntry> mapping = new HashMap<>(numEntries);
-    for (int i = 0; i < numEntries; i++) {
-      final FileEntry fileEntry = new FileEntry();
-      final String id = entriesStream.readString();
-      FileEntry previous = mapping.put(id, fileEntry);
-      if (previous != null) {
-        throw new CorruptIndexException("Duplicate cfs entry id=" + id + " in CFS ", entriesStream);
-      }
-      fileEntry.offset = entriesStream.readLong();
-      fileEntry.length = entriesStream.readLong();
-    }
-    return mapping;
-  }
-
-  @Override
-  public void close() throws IOException {
-    IOUtils.close(handle);
-  }
-
-  @Override
-  public IndexInput openInput(String name, IOContext context) throws IOException {
-    ensureOpen();
-    final String id = IndexFileNames.stripSegmentName(name);
-    final FileEntry entry = entries.get(id);
-    if (entry == null) {
-      String datFileName =
-          IndexFileNames.segmentFileName(segmentName, "", Lucene90CompoundFormat.DATA_EXTENSION);
-      throw new FileNotFoundException(
-          "No sub-file with id "
-              + id
-              + " found in compound file \""
-              + datFileName
-              + "\" (fileName="
-              + name
-              + " files: "
-              + entries.keySet()
-              + ")");
-    }
-    return handle.slice(name, entry.offset, entry.length);
-  }
-
-  /** Returns an array of strings, one for each file in the directory. */
-  @Override
-  public String[] listAll() {
-    ensureOpen();
-    String[] res = entries.keySet().toArray(new String[entries.size()]);
-
-    // Add the segment name
-    for (int i = 0; i < res.length; i++) {
-      res[i] = segmentName + res[i];
-    }
-    return res;
-  }
-
-  /**
-   * Returns the length of a file in the directory.
-   *
-   * @throws IOException if the file does not exist
-   */
-  @Override
-  public long fileLength(String name) throws IOException {
-    ensureOpen();
-    FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
-    if (e == null) throw new FileNotFoundException(name);
-    return e.length;
-  }
-
-  @Override
-  public String toString() {
-    return "CompoundFileDirectory(segment=\"" + segmentName + "\" in dir=" + directory + ")";
-  }
-
-  @Override
-  public Set<String> getPendingDeletions() {
-    return Collections.emptySet();
-  }
-
-  @Override
-  public void checkIntegrity() throws IOException {
-    CodecUtil.checksumEntireFile(handle);
-  }
-}
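
readMapping() in the deleted reader above is the mirror image of the entry-table writer: read a VInt count, then (name, offset, length) records, rejecting duplicate names. A small sketch of that loop against a plain DataInput (ByteArrayDataInput keeps it self-contained; the real code reads a ChecksumIndexInput and verifies header and footer as shown above):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.store.ByteArrayDataInput;
    import org.apache.lucene.store.DataInput;

    public class EntryTableReadSketch {
      static Map<String, long[]> readEntryTable(DataInput in) throws IOException {
        int numEntries = in.readVInt();
        Map<String, long[]> mapping = new HashMap<>(numEntries);
        for (int i = 0; i < numEntries; i++) {
          String id = in.readString();
          long offset = in.readLong();
          long length = in.readLong();
          if (mapping.put(id, new long[] {offset, length}) != null) {
            throw new IOException("Duplicate cfs entry id=" + id);
          }
        }
        return mapping;
      }

      public static void main(String[] args) throws IOException {
        // bytes produced by a writer like EntryTableSketch above would be read back here:
        byte[] entryTableBytes = new byte[] {0}; // a table with zero entries
        Map<String, long[]> entries = readEntryTable(new ByteArrayDataInput(entryTableBytes));
        System.out.println(entries.size() + " entries");
      }
    }
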
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
index 43f4215..3050759 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
@@ -168,7 +168,9 @@ public final class Lucene90FieldInfosFormat extends FieldInfosFormat {
           int pointNumBytes;
           int pointIndexDimensionCount = pointDataDimensionCount;
           if (pointDataDimensionCount != 0) {
-            pointIndexDimensionCount = input.readVInt();
+            if (version >= Lucene90FieldInfosFormat.FORMAT_SELECTIVE_INDEXING) {
+              pointIndexDimensionCount = input.readVInt();
+            }
             pointNumBytes = input.readVInt();
           } else {
             pointNumBytes = 0;
@@ -361,7 +363,9 @@ public final class Lucene90FieldInfosFormat extends FieldInfosFormat {
   // Codec header
   static final String CODEC_NAME = "Lucene90FieldInfos";
   static final int FORMAT_START = 0;
-  static final int FORMAT_CURRENT = FORMAT_START;
+  static final int FORMAT_SOFT_DELETES = 1;
+  static final int FORMAT_SELECTIVE_INDEXING = 2;
+  static final int FORMAT_CURRENT = FORMAT_SELECTIVE_INDEXING;
 
   // Field flags
   static final byte STORE_TERMVECTOR = 0x1;
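
The field-infos hunk above restores two older format versions and gates the extra pointIndexDimensionCount read on the header version, defaulting it to the data dimension count for older segments. A tiny standalone sketch of that gating idiom, with a plain DataInputStream and fixed-width ints standing in for Lucene's IndexInput and VInts, and illustrative version constants:

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    public class VersionGatedRead {
      static final int FORMAT_SELECTIVE_INDEXING = 2; // mirrors the constant above

      static int[] readPointDims(DataInputStream in, int version) throws IOException {
        int dataDims = in.readInt();
        int indexDims = dataDims; // default for segments written before the field existed
        int numBytes = 0;
        if (dataDims != 0) {
          if (version >= FORMAT_SELECTIVE_INDEXING) {
            indexDims = in.readInt();
          }
          numBytes = in.readInt();
        }
        return new int[] {dataDims, indexDims, numBytes};
      }

      public static void main(String[] args) throws IOException {
        // An old-version stream carries only (dataDims, numBytes) when dataDims != 0:
        byte[] oldStream = {0, 0, 0, 2, 0, 0, 0, 8}; // dataDims=2, numBytes=8
        int[] dims = readPointDims(new DataInputStream(new ByteArrayInputStream(oldStream)), 1);
        System.out.println(dims[0] + " data dims, " + dims[1] + " index dims, " + dims[2] + " bytes");
      }
    }
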
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorWriter.java
index e070784..21ab611 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorWriter.java
@@ -54,25 +54,23 @@ public final class Lucene90VectorWriter extends VectorWriter {
     String metaFileName =
         IndexFileNames.segmentFileName(
             state.segmentInfo.name, state.segmentSuffix, Lucene90VectorFormat.META_EXTENSION);
+    meta = state.directory.createOutput(metaFileName, state.context);
 
     String vectorDataFileName =
         IndexFileNames.segmentFileName(
             state.segmentInfo.name,
             state.segmentSuffix,
             Lucene90VectorFormat.VECTOR_DATA_EXTENSION);
+    vectorData = state.directory.createOutput(vectorDataFileName, state.context);
 
     String indexDataFileName =
         IndexFileNames.segmentFileName(
             state.segmentInfo.name,
             state.segmentSuffix,
             Lucene90VectorFormat.VECTOR_INDEX_EXTENSION);
+    vectorIndex = state.directory.createOutput(indexDataFileName, state.context);
 
-    boolean success = false;
     try {
-      meta = state.directory.createOutput(metaFileName, state.context);
-      vectorData = state.directory.createOutput(vectorDataFileName, state.context);
-      vectorIndex = state.directory.createOutput(indexDataFileName, state.context);
-
       CodecUtil.writeIndexHeader(
           meta,
           Lucene90VectorFormat.META_CODEC_NAME,
@@ -91,11 +89,8 @@ public final class Lucene90VectorWriter extends VectorWriter {
           Lucene90VectorFormat.VERSION_CURRENT,
           state.segmentInfo.getId(),
           state.segmentSuffix);
-      success = true;
-    } finally {
-      if (success == false) {
-        IOUtils.closeWhileHandlingException(this);
-      }
+    } catch (IOException e) {
+      IOUtils.closeWhileHandlingException(this);
     }
   }
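
The constructor change above swaps the removed success-flag/finally cleanup for eager createOutput calls plus a catch of IOException that closes the half-built writer; note that, as written in the hunk, the caught exception is not rethrown. For reference, a self-contained sketch of the success-flag idiom the removed lines used (class and method names here are illustrative, not the Lucene types from the hunk):

    import java.io.Closeable;
    import java.io.IOException;

    final class TwoFileWriter implements Closeable {
      private final Closeable meta;
      private final Closeable data;

      TwoFileWriter(Closeable meta, Closeable data) throws IOException {
        this.meta = meta;
        this.data = data;
        boolean success = false;
        try {
          writeHeaders(); // may throw
          success = true;
        } finally {
          if (success == false) {
            try {
              close(); // clean up partially created outputs
            } catch (IOException suppressed) {
              // ignore; let the original exception from writeHeaders() propagate
            }
          }
        }
      }

      private void writeHeaders() throws IOException {
        // header writing would go here
      }

      @Override
      public void close() throws IOException {
        meta.close();
        data.close();
      }
    }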
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
index 4d34a40..b7a9d4a 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
@@ -234,7 +234,7 @@
  * <td>Stores metadata about a segment</td>
  * </tr>
  * <tr>
- * <td>{@link org.apache.lucene.codecs.lucene90.Lucene90CompoundFormat Compound File}</td>
+ * <td>{@link org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat Compound File}</td>
  * <td>.cfs, .cfe</td>
  * <td>An optional "virtual" file consisting of all the other index files for
  * systems that frequently run out of file handles.</td>
diff --git a/lucene/core/src/java/org/apache/lucene/document/FieldType.java b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
index 9ffa198..b04b2f6 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FieldType.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
@@ -367,8 +367,7 @@ public class FieldType implements IndexableFieldType {
     return dimensionNumBytes;
   }
 
-  /** Enable vector indexing, with the specified number of dimensions and distance function. */
-  public void setVectorDimensionsAndSearchStrategy(
+  void setVectorDimensionsAndSearchStrategy(
       int numDimensions, VectorValues.SearchStrategy distFunc) {
     checkIfFrozen();
     if (numDimensions <= 0) {
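
With setVectorDimensionsAndSearchStrategy back to package-private, vector dimensions and the search strategy are set through VectorField (whose constructors and createHnswType factory are exercised by the tests elsewhere in this patch) rather than by mutating a FieldType. A short usage sketch under that assumption:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.VectorField;
    import org.apache.lucene.index.VectorValues;

    class VectorDocExample {
      // Sketch: a vector-valued field is configured via VectorField, not FieldType setters.
      static Document vectorDoc() {
        Document doc = new Document();
        doc.add(
            new VectorField(
                "v", new float[] {2, 3, 5}, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
        return doc;
      }
    }
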
diff --git a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
index 6974eac..4f0d3a1 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
@@ -259,11 +259,6 @@ public abstract class CodecReader extends LeafReader implements Accountable {
       ramBytesUsed += getPointsReader().ramBytesUsed();
     }
 
-    // vectors
-    if (getVectorReader() != null) {
-      ramBytesUsed += getVectorReader().ramBytesUsed();
-    }
-
     return ramBytesUsed;
   }
 
@@ -300,11 +295,6 @@ public abstract class CodecReader extends LeafReader implements Accountable {
       resources.add(Accountables.namedAccountable("points", getPointsReader()));
     }
 
-    // vectors
-    if (getVectorReader() != null) {
-      resources.add(Accountables.namedAccountable("vectors", getVectorReader()));
-    }
-
     return Collections.unmodifiableList(resources);
   }
 
@@ -339,10 +329,5 @@ public abstract class CodecReader extends LeafReader implements Accountable {
     if (getPointsReader() != null) {
       getPointsReader().checkIntegrity();
     }
-
-    // vectors
-    if (getVectorReader() != null) {
-      getVectorReader().checkIntegrity();
-    }
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
index b917be5..3746817 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
@@ -66,8 +66,8 @@ final class ReadersAndUpdates {
 
   // Indicates whether this segment is currently being merged. While a segment
   // is merging, all field updates are also registered in the
-  // mergingDVUpdates map. Also, calls to writeFieldUpdates merge the
-  // updates with mergingDVUpdates.
+  // mergingNumericUpdates map. Also, calls to writeFieldUpdates merge the
+  // updates with mergingNumericUpdates.
   // That way, when the segment is done merging, IndexWriter can apply the
   // updates on the merged segment too.
   private boolean isMerging = false;
diff --git a/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java b/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java
index 6c9a75b..7140546 100644
--- a/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java
+++ b/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java
@@ -20,7 +20,6 @@ import java.lang.reflect.Array;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
-import java.security.AccessControlException;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.text.DecimalFormat;
@@ -528,14 +527,14 @@ public final class RamUsageEstimator {
     // Walk type hierarchy
     for (; clazz != null; clazz = clazz.getSuperclass()) {
       final Class<?> target = clazz;
-      final Field[] fields;
-      try {
-        fields =
-            AccessController.doPrivileged((PrivilegedAction<Field[]>) target::getDeclaredFields);
-      } catch (AccessControlException e) {
-        throw new RuntimeException("Can't access fields of class: " + target, e);
-      }
-
+      final Field[] fields =
+          AccessController.doPrivileged(
+              new PrivilegedAction<Field[]>() {
+                @Override
+                public Field[] run() {
+                  return target.getDeclaredFields();
+                }
+              });
       for (Field f : fields) {
         if (!Modifier.isStatic(f.getModifiers())) {
           size = adjustForField(size, f);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90CompoundFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
similarity index 90%
rename from lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90CompoundFormat.java
rename to lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
index ed78abd..15fdf17 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90CompoundFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
@@ -14,13 +14,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene90;
+package org.apache.lucene.codecs.lucene50;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.index.BaseCompoundFormatTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class TestLucene90CompoundFormat extends BaseCompoundFormatTestCase {
+public class TestLucene50CompoundFormat extends BaseCompoundFormatTestCase {
   private final Codec codec = TestUtil.getDefaultCodec();
 
   @Override
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90FieldInfosFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90FieldInfosFormat.java
deleted file mode 100644
index 83bd56a..0000000
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90FieldInfosFormat.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene90;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.index.BaseFieldInfoFormatTestCase;
-import org.apache.lucene.util.TestUtil;
-
-public class TestLucene90FieldInfosFormat extends BaseFieldInfoFormatTestCase {
-  @Override
-  protected Codec getCodec() {
-    return TestUtil.getDefaultCodec();
-  }
-}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90VectorFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90VectorFormat.java
deleted file mode 100644
index 1906ecf..0000000
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90VectorFormat.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.codecs.lucene90;
-
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.index.BaseVectorFormatTestCase;
-import org.apache.lucene.util.TestUtil;
-
-public class TestLucene90VectorFormat extends BaseVectorFormatTestCase {
-
-  @Override
-  protected Codec getCodec() {
-    return TestUtil.getDefaultCodec();
-  }
-}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseVectorFormatTestCase.java b/lucene/core/src/test/org/apache/lucene/index/TestVectorValues.java
similarity index 87%
rename from lucene/test-framework/src/java/org/apache/lucene/index/BaseVectorFormatTestCase.java
rename to lucene/core/src/test/org/apache/lucene/index/TestVectorValues.java
index 047c373..9691efb 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseVectorFormatTestCase.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestVectorValues.java
@@ -22,45 +22,42 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.VectorFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.VectorField;
+import org.apache.lucene.index.VectorValues.SearchStrategy;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.VectorUtil;
 
-/**
- * Base class aiming at testing {@link VectorFormat vectors formats}. To test a new format, all you
- * need is to register a new {@link Codec} which uses it and extend this class and override {@link
- * #getCodec()}.
- *
- * @lucene.experimental
- */
-public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCase {
+/** Test Indexing/IndexWriter with vectors */
+public class TestVectorValues extends LuceneTestCase {
 
-  @Override
-  protected void addRandomFields(Document doc) {
-    doc.add(new VectorField("v2", randomVector(30), VectorValues.SearchStrategy.NONE));
+  private IndexWriterConfig createIndexWriterConfig() {
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    iwc.setCodec(Codec.forName("Lucene90"));
+    return iwc;
   }
 
   // Suddenly add vectors to an existing field:
   public void testUpgradeFieldToVectors() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
-        doc.add(newStringField("f", "foo", Field.Store.NO));
+        doc.add(newStringField("f", "foo", Store.NO));
         w.addDocument(doc);
       }
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
@@ -82,7 +79,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
     expectThrows(IllegalArgumentException.class, () -> new VectorField("f", null));
     expectThrows(
         IllegalArgumentException.class,
-        () -> new VectorField("f", new float[1], (VectorValues.SearchStrategy) null));
+        () -> new VectorField("f", new float[1], (SearchStrategy) null));
     expectThrows(IllegalArgumentException.class, () -> new VectorField("f", new float[0]));
     expectThrows(
         IllegalArgumentException.class,
@@ -104,15 +101,12 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   public void testFieldCreateFieldType() {
     expectThrows(
         IllegalArgumentException.class,
-        () -> VectorField.createHnswType(0, VectorValues.SearchStrategy.EUCLIDEAN_HNSW, 16, 16));
+        () -> VectorField.createHnswType(0, SearchStrategy.EUCLIDEAN_HNSW, 16, 16));
     expectThrows(
         IllegalArgumentException.class,
         () ->
             VectorField.createHnswType(
-                VectorValues.MAX_DIMENSIONS + 1,
-                VectorValues.SearchStrategy.EUCLIDEAN_HNSW,
-                16,
-                16));
+                VectorValues.MAX_DIMENSIONS + 1, SearchStrategy.EUCLIDEAN_HNSW, 16, 16));
     expectThrows(
         IllegalArgumentException.class,
         () -> VectorField.createHnswType(VectorValues.MAX_DIMENSIONS + 1, null, 16, 16));
@@ -120,14 +114,14 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
         IllegalArgumentException.class,
         () ->
             VectorField.createHnswType(
-                VectorValues.MAX_DIMENSIONS + 1, VectorValues.SearchStrategy.NONE, 16, 16));
+                VectorValues.MAX_DIMENSIONS + 1, SearchStrategy.NONE, 16, 16));
   }
 
   // Illegal schema change tests:
 
   public void testIllegalDimChangeTwoDocs() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
       Document doc = new Document();
       doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
       w.addDocument(doc);
@@ -147,7 +141,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testIllegalSearchStrategyChange() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
       Document doc = new Document();
       doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
       w.addDocument(doc);
@@ -168,13 +162,13 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testIllegalDimChangeTwoWriters() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
 
-      try (IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc2 = new Document();
         doc2.add(new VectorField("f", new float[1], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         IllegalArgumentException expected =
@@ -187,13 +181,13 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testIllegalSearchStrategyChangeTwoWriters() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
 
-      try (IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc2 = new Document();
         doc2.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
         IllegalArgumentException expected =
@@ -211,10 +205,10 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
     doc.add(new VectorField(fieldName, new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         w2.addIndexes(dir);
         w2.forceMerge(1);
         try (IndexReader reader = w2.getReader()) {
@@ -233,12 +227,12 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
     Document doc = new Document();
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         w.addDocument(doc);
       }
       doc.add(
           new VectorField(fieldName, new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         w2.addDocument(doc);
         w2.addIndexes(dir);
         w2.forceMerge(1);
@@ -260,10 +254,10 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
     doc.add(new VectorField(fieldName, vector, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         vector[0] = 1;
         w2.addDocument(doc);
         w2.addIndexes(dir);
@@ -286,12 +280,12 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   public void testIllegalDimChangeViaAddIndexesDirectory() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
-        doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
+        doc.add(new VectorField("f", new float[4], SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[5], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w2.addDocument(doc);
@@ -307,12 +301,12 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   public void testIllegalSearchStrategyChangeViaAddIndexesDirectory() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
         w2.addDocument(doc);
@@ -328,12 +322,12 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   public void testIllegalDimChangeViaAddIndexesCodecReader() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
-        doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
+        doc.add(new VectorField("f", new float[4], SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[5], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w2.addDocument(doc);
@@ -352,12 +346,12 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   public void testIllegalSearchStrategyChangeViaAddIndexesCodecReader() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
         w2.addDocument(doc);
@@ -377,12 +371,12 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   public void testIllegalDimChangeViaAddIndexesSlowCodecReader() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[5], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w2.addDocument(doc);
@@ -399,14 +393,14 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   public void testIllegalSearchStrategyChangeViaAddIndexesSlowCodecReader() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
         Document doc = new Document();
-        doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
+        doc.add(new VectorField("f", new float[4], SearchStrategy.EUCLIDEAN_HNSW));
         w2.addDocument(doc);
         try (DirectoryReader r = DirectoryReader.open(dir)) {
           IllegalArgumentException expected =
@@ -421,7 +415,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testIllegalMultipleValues() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
       Document doc = new Document();
       doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
       doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
@@ -435,7 +429,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testIllegalDimensionTooLarge() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
       Document doc = new Document();
       expectThrows(
           IllegalArgumentException.class,
@@ -454,12 +448,12 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testIllegalEmptyVector() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
       Document doc = new Document();
       Exception e =
           expectThrows(
               IllegalArgumentException.class,
-              () -> doc.add(new VectorField("f", new float[0], VectorValues.SearchStrategy.NONE)));
+              () -> doc.add(new VectorField("f", new float[0], SearchStrategy.NONE)));
       assertEquals("cannot index an empty vector", e.getMessage());
 
       Document doc2 = new Document();
@@ -471,7 +465,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   // Write vectors, one segment with default codec, another with SimpleText, then forceMerge
   public void testDifferentCodecs1() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
@@ -497,7 +491,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
@@ -518,9 +512,9 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testDeleteAllVectorDocs() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
       Document doc = new Document();
-      doc.add(new StringField("id", "0", Field.Store.NO));
+      doc.add(new StringField("id", "0", Store.NO));
       doc.add(
           new VectorField(
               "v", new float[] {2, 3, 5}, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
@@ -541,9 +535,9 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testVectorFieldMissingFromOneSegment() throws Exception {
     try (Directory dir = FSDirectory.open(createTempDir());
-        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
       Document doc = new Document();
-      doc.add(new StringField("id", "0", Field.Store.NO));
+      doc.add(new StringField("id", "0", Store.NO));
       doc.add(
           new VectorField(
               "v0", new float[] {2, 3, 5}, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
@@ -574,7 +568,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
               random().nextInt(VectorValues.SearchStrategy.values().length)];
     }
     try (Directory dir = newDirectory();
-        RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig())) {
+        RandomIndexWriter w = new RandomIndexWriter(random(), dir, createIndexWriterConfig())) {
       for (int i = 0; i < numDocs; i++) {
         Document doc = new Document();
         for (int field = 0; field < numFields; field++) {
@@ -616,7 +610,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
     String fieldName = "field";
     float[] v = {0};
     try (Directory dir = newDirectory();
-        IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig())) {
+        IndexWriter iw = new IndexWriter(dir, createIndexWriterConfig())) {
       Document doc1 = new Document();
       doc1.add(new VectorField(fieldName, v, VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
       v[0] = 1;
@@ -643,7 +637,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   }
 
   public void testSortedIndex() throws Exception {
-    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriterConfig iwc = createIndexWriterConfig();
     iwc.setIndexSort(new Sort(new SortField("sortkey", SortField.Type.INT)));
     String fieldName = "field";
     try (Directory dir = newDirectory();
@@ -681,15 +675,13 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
         IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
       float[] v = new float[] {1};
-      doc.add(new VectorField("field1", v, VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
-      doc.add(new VectorField("field2", new float[] {1, 2, 3}, VectorValues.SearchStrategy.NONE));
+      doc.add(new VectorField("field1", v, SearchStrategy.EUCLIDEAN_HNSW));
+      doc.add(new VectorField("field2", new float[] {1, 2, 3}, SearchStrategy.NONE));
       iw.addDocument(doc);
       v[0] = 2;
       iw.addDocument(doc);
       doc = new Document();
-      doc.add(
-          new VectorField(
-              "field3", new float[] {1, 2, 3}, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
+      doc.add(new VectorField("field3", new float[] {1, 2, 3}, SearchStrategy.DOT_PRODUCT_HNSW));
       iw.addDocument(doc);
       iw.forceMerge(1);
       try (IndexReader reader = iw.getReader()) {
@@ -729,7 +721,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
    * consistently.
    */
   public void testRandom() throws Exception {
-    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriterConfig iwc = createIndexWriterConfig();
     if (random().nextBoolean()) {
       iwc.setIndexSort(new Sort(new SortField("sortkey", SortField.Type.INT)));
     }
@@ -750,9 +742,9 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
         if (random().nextBoolean() && values[i] != null) {
           // sometimes use a shared scratch array
           System.arraycopy(values[i], 0, scratch, 0, scratch.length);
-          add(iw, fieldName, i, scratch, VectorValues.SearchStrategy.NONE);
+          add(iw, fieldName, i, scratch, SearchStrategy.NONE);
         } else {
-          add(iw, fieldName, i, values[i], VectorValues.SearchStrategy.NONE);
+          add(iw, fieldName, i, values[i], SearchStrategy.NONE);
         }
         if (random().nextInt(10) == 2) {
           // sometimes delete a random document
@@ -825,7 +817,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
         values[i] = value;
         id2value[id] = value;
         id2ord[id] = i;
-        add(iw, fieldName, id, value, VectorValues.SearchStrategy.EUCLIDEAN_HNSW);
+        add(iw, fieldName, id, value, SearchStrategy.EUCLIDEAN_HNSW);
       }
       try (IndexReader reader = iw.getReader()) {
         for (LeafReaderContext ctx : reader.leaves()) {
@@ -858,18 +850,14 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
   }
 
   private void add(
-      IndexWriter iw,
-      String field,
-      int id,
-      float[] vector,
-      VectorValues.SearchStrategy searchStrategy)
+      IndexWriter iw, String field, int id, float[] vector, SearchStrategy searchStrategy)
       throws IOException {
     add(iw, field, id, random().nextInt(100), vector, searchStrategy);
   }
 
   private void add(IndexWriter iw, String field, int id, int sortkey, float[] vector)
       throws IOException {
-    add(iw, field, id, sortkey, vector, VectorValues.SearchStrategy.NONE);
+    add(iw, field, id, sortkey, vector, SearchStrategy.NONE);
   }
 
   private void add(
@@ -878,7 +866,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
       int id,
       int sortkey,
       float[] vector,
-      VectorValues.SearchStrategy searchStrategy)
+      SearchStrategy searchStrategy)
       throws IOException {
     Document doc = new Document();
     if (vector != null) {
@@ -902,7 +890,7 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testCheckIndexIncludesVectors() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("v1", randomVector(3), VectorValues.SearchStrategy.NONE));
         w.addDocument(doc);
@@ -936,14 +924,14 @@ public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCa
 
   public void testAdvance() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
         int numdocs = atLeast(1500);
         String fieldName = "field";
         for (int i = 0; i < numdocs; i++) {
           Document doc = new Document();
           // randomly add a vector field
           if (random().nextInt(4) == 3) {
-            doc.add(new VectorField(fieldName, new float[4], VectorValues.SearchStrategy.NONE));
+            doc.add(new VectorField(fieldName, new float[4], SearchStrategy.NONE));
           }
           w.addDocument(doc);
         }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/Intervals.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/Intervals.java
index 1094dbe..e2c7225 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/Intervals.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/Intervals.java
@@ -404,16 +404,6 @@ public final class Intervals {
    * Return intervals that span combinations of intervals from {@code minShouldMatch} of the sources
    */
   public static IntervalsSource atLeast(int minShouldMatch, IntervalsSource... sources) {
-    if (minShouldMatch == sources.length) {
-      return unordered(sources);
-    }
-    if (minShouldMatch > sources.length) {
-      return new NoMatchIntervalsSource(
-          "Too few sources to match minimum of ["
-              + minShouldMatch
-              + "]: "
-              + Arrays.toString(sources));
-    }
     return new MinimumShouldMatchIntervalsSource(sources, minShouldMatch);
   }
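
With the simplifications removed, atLeast no longer special-cases its bounds: minShouldMatch equal to the number of sources used to collapse to unordered(sources), and a value larger than the number of sources used to return a dedicated no-match source (the test removals later in this patch correspond to that behaviour). Both calls below now build a MinimumShouldMatchIntervalsSource directly; a sketch using only methods visible in this patch:

    import org.apache.lucene.queries.intervals.Intervals;
    import org.apache.lucene.queries.intervals.IntervalsSource;

    class AtLeastExample {
      // Previously simplified to Intervals.unordered(term("a"), term("b")).
      static final IntervalsSource ALL_OF =
          Intervals.atLeast(2, Intervals.term("a"), Intervals.term("b"));

      // Previously returned a dedicated "no match" source; after the revert this
      // also goes straight to MinimumShouldMatchIntervalsSource.
      static final IntervalsSource IMPOSSIBLE =
          Intervals.atLeast(3, Intervals.term("a"), Intervals.term("b"));
    }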
 
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimumShouldMatchIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimumShouldMatchIntervalsSource.java
index f9f2677..87cd427 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimumShouldMatchIntervalsSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimumShouldMatchIntervalsSource.java
@@ -42,7 +42,6 @@ class MinimumShouldMatchIntervalsSource extends IntervalsSource {
   private final int minShouldMatch;
 
   MinimumShouldMatchIntervalsSource(IntervalsSource[] sources, int minShouldMatch) {
-    assert minShouldMatch < sources.length;
     this.sources = sources;
     this.minShouldMatch = minShouldMatch;
   }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/NoMatchIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/NoMatchIntervalsSource.java
deleted file mode 100644
index cfa7364..0000000
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/NoMatchIntervalsSource.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.queries.intervals;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Objects;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.QueryVisitor;
-
-/** A source returning no matches */
-class NoMatchIntervalsSource extends IntervalsSource {
-  final String reason;
-
-  NoMatchIntervalsSource(String reason) {
-    this.reason = reason;
-  }
-
-  @Override
-  public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
-    return null;
-  }
-
-  @Override
-  public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc)
-      throws IOException {
-    return null;
-  }
-
-  @Override
-  public void visit(String field, QueryVisitor visitor) {}
-
-  @Override
-  public int minExtent() {
-    return 0;
-  }
-
-  @Override
-  public Collection<IntervalsSource> pullUpDisjunctions() {
-    return Collections.singleton(this);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-    NoMatchIntervalsSource that = (NoMatchIntervalsSource) o;
-    return Objects.equals(reason, that.reason);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(reason);
-  }
-
-  @Override
-  public String toString() {
-    return "NOMATCH(" + reason + ")";
-  }
-}
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
index 57e86f8..f478c00 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
@@ -756,27 +756,6 @@ public class TestIntervals extends LuceneTestCase {
     assertEquals(3, source.minExtent());
   }
 
-  public void testDegenerateMinShouldMatch() throws IOException {
-    IntervalsSource source =
-        Intervals.ordered(
-            Intervals.atLeast(1, Intervals.term("interest")),
-            Intervals.atLeast(1, Intervals.term("anyone")));
-
-    MatchesIterator mi = getMatches(source, 0, "field1");
-    assertMatch(mi, 2, 4, 11, 29);
-    MatchesIterator subs = mi.getSubMatches();
-    assertNotNull(subs);
-    assertMatch(subs, 2, 2, 11, 19);
-    assertMatch(subs, 4, 4, 23, 29);
-    assertFalse(subs.next());
-    assertFalse(mi.next());
-  }
-
-  public void testNoMatchMinShouldMatch() throws IOException {
-    IntervalsSource source = Intervals.atLeast(4, Intervals.term("a"), Intervals.term("b"));
-    checkIntervals(source, "field", 0, new int[][] {});
-  }
-
   public void testDefinedGaps() throws IOException {
     IntervalsSource source =
         Intervals.phrase(
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestSimplifications.java b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestSimplifications.java
index 6bdc2d5..da9531b 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestSimplifications.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestSimplifications.java
@@ -111,13 +111,4 @@ public class TestSimplifications extends LuceneTestCase {
             Intervals.term("a"), Intervals.term("b"), Intervals.term("c"), Intervals.term("d")),
         actual);
   }
-
-  public void testMinShouldMatchSimplifications() {
-    IntervalsSource expected = Intervals.unordered(Intervals.term("a"), Intervals.term("b"));
-    assertEquals(expected, Intervals.atLeast(2, Intervals.term("a"), Intervals.term("b")));
-
-    assertEquals(
-        "NOMATCH(Too few sources to match minimum of [3]: [a, b])",
-        Intervals.atLeast(3, Intervals.term("a"), Intervals.term("b")).toString());
-  }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
index 05f8957..4a9f4fb 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.index;
 
-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;
@@ -64,9 +63,6 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
     assertFalse(infos2.fieldInfo("field").omitsNorms());
     assertFalse(infos2.fieldInfo("field").hasPayloads());
     assertFalse(infos2.fieldInfo("field").hasVectors());
-    assertEquals(0, infos2.fieldInfo("field").getPointDimensionCount());
-    assertEquals(0, infos2.fieldInfo("field").getVectorDimension());
-    assertFalse(infos2.fieldInfo("field").isSoftDeletesField());
     dir.close();
   }
 
@@ -257,12 +253,7 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
     for (int i = 0; i < numFields; i++) {
       fieldNames.add(TestUtil.randomUnicodeString(random()));
     }
-
-    String softDeletesField =
-        random().nextBoolean() ? TestUtil.randomUnicodeString(random()) : null;
-    FieldInfos.Builder builder =
-        new FieldInfos.Builder(new FieldInfos.FieldNumbers(softDeletesField));
-
+    FieldInfos.Builder builder = new FieldInfos.Builder(new FieldInfos.FieldNumbers(null));
     for (String field : fieldNames) {
       IndexableFieldType fieldType = randomFieldType(random());
       FieldInfo fi = builder.getOrAdd(field);
@@ -280,19 +271,6 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
           fi.setStorePayloads();
         }
       }
-
-      if (fieldType.pointDimensionCount() > 0) {
-        fi.setPointDimensions(
-            fieldType.pointDimensionCount(),
-            fieldType.pointIndexDimensionCount(),
-            fieldType.pointNumBytes());
-      }
-
-      if (fieldType.vectorDimension() > 0) {
-        fi.setVectorDimensionAndSearchStrategy(
-            fieldType.vectorDimension(), fieldType.vectorSearchStrategy());
-      }
-
       addAttributes(fi);
     }
     FieldInfos infos = builder.finish();
@@ -302,11 +280,11 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
     dir.close();
   }
 
-  private IndexableFieldType randomFieldType(Random r) {
+  private final IndexableFieldType randomFieldType(Random r) {
     FieldType type = new FieldType();
 
     if (r.nextBoolean()) {
-      IndexOptions[] values = IndexOptions.values();
+      IndexOptions values[] = IndexOptions.values();
       type.setIndexOptions(values[r.nextInt(values.length)]);
       type.setOmitNorms(r.nextBoolean());
 
@@ -323,30 +301,27 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
     }
 
     if (r.nextBoolean()) {
-      DocValuesType[] values = DocValuesType.values();
+      DocValuesType values[] = getDocValuesTypes();
       type.setDocValuesType(values[r.nextInt(values.length)]);
     }
 
-    if (r.nextBoolean()) {
-      int dimension = 1 + r.nextInt(PointValues.MAX_DIMENSIONS);
-      int indexDimension = 1 + r.nextInt(Math.min(dimension, PointValues.MAX_INDEX_DIMENSIONS));
-      int dimensionNumBytes = 1 + r.nextInt(PointValues.MAX_NUM_BYTES);
-      type.setDimensions(dimension, indexDimension, dimensionNumBytes);
-    }
-
-    if (r.nextBoolean()) {
-      int dimension = 1 + r.nextInt(VectorValues.MAX_DIMENSIONS);
-      VectorValues.SearchStrategy searchStrategy =
-          RandomPicks.randomFrom(r, VectorValues.SearchStrategy.values());
-      type.setVectorDimensionsAndSearchStrategy(dimension, searchStrategy);
-    }
-
     return type;
   }
 
   /** Hook to add any codec attributes to fieldinfo instances added in this test. */
   protected void addAttributes(FieldInfo fi) {}
 
+  /**
+   * Docvalues types to test.
+   *
+   * @deprecated only for ancient codecs, which can override this to limit testing to the subset
+   *     of doc-values types they support.
+   */
+  @Deprecated
+  protected DocValuesType[] getDocValuesTypes() {
+    return DocValuesType.values();
+  }
+
   /** equality for entirety of fieldinfos */
   protected void assertEquals(FieldInfos expected, FieldInfos actual) {
     assertEquals(expected.size(), actual.size());
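The restored getDocValuesTypes() hook lets a format test for an older codec narrow the doc-values types that randomFieldType will choose from. A hypothetical subclass (the codec and the chosen subset are illustrative):

    import org.apache.lucene.codecs.Codec;
    import org.apache.lucene.index.BaseFieldInfoFormatTestCase;
    import org.apache.lucene.index.DocValuesType;
    import org.apache.lucene.util.TestUtil;

    public class TestAncientFieldInfosFormat extends BaseFieldInfoFormatTestCase {
      @Override
      protected Codec getCodec() {
        return TestUtil.getDefaultCodec(); // stand-in; a real test would return the older codec
      }

      @Override
      protected DocValuesType[] getDocValuesTypes() {
        // restrict randomization to the subset this (hypothetical) codec supports
        return new DocValuesType[] {
          DocValuesType.NUMERIC, DocValuesType.BINARY, DocValuesType.SORTED
        };
      }
    }
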
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java b/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java
index 2c5c6af..39d2556 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java
@@ -23,14 +23,11 @@ import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
-import java.nio.charset.CharsetDecoder;
-import java.nio.charset.CharsetEncoder;
 import java.nio.file.Path;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.util.AbstractList;
 import java.util.ArrayList;
-import java.util.BitSet;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -154,14 +151,6 @@ public final class RamUsageTester {
       ArrayList<Object> stack,
       Object ob,
       Class<?> obClazz) {
-
-    // Ignore JDK objects we can't access or handle properly.
-    Predicate<Object> isIgnorable =
-        (clazz) -> (clazz instanceof CharsetEncoder) || (clazz instanceof CharsetDecoder);
-    if (isIgnorable.test(ob)) {
-      return accumulator.accumulateObject(ob, 0, Collections.emptyMap(), stack);
-    }
-
     /*
      * Consider an object. Push any references it has to the processing stack
      * and accumulate this object's shallow size.
@@ -170,7 +159,10 @@ public final class RamUsageTester {
       if (Constants.JRE_IS_MINIMUM_JAVA9) {
         long alignedShallowInstanceSize = RamUsageEstimator.shallowSizeOf(ob);
 
-        Predicate<Class<?>> isJavaModule = (clazz) -> clazz.getName().startsWith("java.");
+        Predicate<Class<?>> isJavaModule =
+            (clazz) -> {
+              return clazz.getName().startsWith("java.");
+            };
 
         // Java 9: Best guess for some known types, as we cannot precisely look into runtime
         // classes:
@@ -282,17 +274,13 @@ public final class RamUsageTester {
                           v.length())); // may not be correct with Java 9's compact strings!
               a(StringBuilder.class, v -> charArraySize(v.capacity()));
               a(StringBuffer.class, v -> charArraySize(v.capacity()));
-              // Approximate the underlying long[] buffer.
-              a(BitSet.class, v -> (v.size() / Byte.SIZE));
               // Types with large buffers:
               a(ByteArrayOutputStream.class, v -> byteArraySize(v.size()));
               // For File and Path, we just take the length of String representation as
               // approximation:
               a(File.class, v -> charArraySize(v.toString().length()));
               a(Path.class, v -> charArraySize(v.toString().length()));
-
-              // Ignorable JDK classes.
-              a(ByteOrder.class, v -> 0);
+              a(ByteOrder.class, v -> 0); // Instances of ByteOrder are constants
             }
 
             @SuppressWarnings("unchecked")
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index ddc8c84..a0d436d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -199,8 +199,6 @@ Other Changes
 * SOLR-14067: StatelessScriptUpdateProcessorFactory moved to it's own /contrib/scripting/ package instead
  of shipping as part of Solr due to security concerns.  Renamed to ScriptUpdateProcessorFactory for simpler name. (Eric Pugh)
 
-* SOLR-15118: Switch /v2/collections APIs over to the now-preferred annotated-POJO implementation approach (Jason Gerlowski)
-
 Bug Fixes
 ---------------------
 * SOLR-14546: Fix for a relatively hard to hit issue in OverseerTaskProcessor that could lead to out of order execution
@@ -223,7 +221,7 @@ Improvements
 * SOLR-14234: Unhelpful message in RemoteExecutionException. (ab)
 
 * SOLR-13608: Backups are now done incrementally by default.  Multiple backups can be stored at the same location, and each
-  backup will only upload those files that are new since the last backup. (Jason Gerlowski, shalin , Cao Manh Dat)
+  backup will only upload those files that are new since the last backup. (Jason Gerlowski, Shalin, Cao Manh Dat)
 
 * SOLR-15123: Revamp SolrCLI tool's help descriptions for all commands for consistency and clarity. (Eric Pugh)
 
@@ -240,20 +238,7 @@ Bug Fixes
 
 Other Changes
 ---------------------
-* SOLR-15118: Deprecate CollectionAdminRequest.getV2Request(). (Jason Gerlowski)
-
-==================  8.8.1 ==================
-
-Bug Fixes
----------------------
-
-* SOLR-15145: System property to control whether base_url is stored in state.json to enable back-compat with older SolrJ versions.
-  (Timothy Potter)
-
-* SOLR-15114: Fix bug that caused WAND optimization to be disabled in cases where the max score is requested (such as
-  multi-shard requests in SolrCloud) (Naoto Minami via Tomás Fernández Löbbe)
-
-* SOLR-15136: Reduce excessive logging introduced with Per Replica States feature (Ishan Chattopadhyaya)
+(No changes)
 
 * SOLR-15136: Reduce excessive logging introduced with Per Replica States feature (Ishan Chattopadhyaya)
 
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
index afa1368..8928fbe 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
@@ -16,6 +16,19 @@
  */
 package org.apache.solr.cloud.api.collections;
 
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Optional;
+
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ShardRequestTracker;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -31,7 +44,6 @@ import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupFilePaths;
 import org.apache.solr.core.backup.BackupManager;
 import org.apache.solr.core.backup.BackupProperties;
 import org.apache.solr.core.backup.ShardBackupId;
@@ -40,23 +52,11 @@ import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
 import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
 import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
 import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.core.backup.BackupFilePaths;
 import org.apache.solr.handler.component.ShardHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Optional;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
 public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
@@ -91,25 +91,25 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
 
       // Backup location
       URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-      final URI backupUri = createAndValidateBackupPath(repository, incremental, location, backupName, collectionName);
+      final URI backupPath = createAndValidateBackupPath(repository, incremental, location, backupName, collectionName);
 
       BackupManager backupMgr = (incremental) ?
-              BackupManager.forIncrementalBackup(repository, ocmh.zkStateReader, backupUri) :
-              BackupManager.forBackup(repository, ocmh.zkStateReader, backupUri);
+              BackupManager.forIncrementalBackup(repository, ocmh.zkStateReader, backupPath) :
+              BackupManager.forBackup(repository, ocmh.zkStateReader, backupPath);
 
       String strategy = message.getStr(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
       switch (strategy) {
         case CollectionAdminParams.COPY_FILES_STRATEGY: {
           if (incremental) {
             try {
-              incrementalCopyIndexFiles(backupUri, collectionName, message, results, backupProperties, backupMgr);
+              incrementalCopyIndexFiles(backupPath, collectionName, message, results, backupProperties, backupMgr);
             } catch (SolrException e) {
               log.error("Error happened during incremental backup for collection:{}", collectionName, e);
-              ocmh.cleanBackup(repository, backupUri, backupMgr.getBackupId());
+              ocmh.cleanBackup(repository, backupPath, backupMgr.getBackupId());
               throw e;
             }
           } else {
-            copyIndexFiles(backupUri, collectionName, message, results);
+            copyIndexFiles(backupPath, collectionName, message, results);
           }
           break;
         }
@@ -139,7 +139,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
 
       int maxNumBackup = message.getInt(CoreAdminParams.MAX_NUM_BACKUP_POINTS, -1);
       if (incremental && maxNumBackup != -1) {
-        ocmh.deleteBackup(repository, backupUri, maxNumBackup, results);
+        ocmh.deleteBackup(repository, backupPath, maxNumBackup, results);
       }
     }
   }
@@ -204,7 +204,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
     return r.get();
   }
 
-  private void incrementalCopyIndexFiles(URI backupUri, String collectionName, ZkNodeProps request,
+  private void incrementalCopyIndexFiles(URI backupPath, String collectionName, ZkNodeProps request,
                                          NamedList<Object> results, BackupProperties backupProperties,
                                          BackupManager backupManager) throws IOException {
     String backupName = request.getStr(NAME);
@@ -213,7 +213,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
     ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
 
     log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName,
-            backupUri);
+            backupPath);
 
     Optional<BackupProperties> previousProps = backupManager.tryReadBackupProperties();
     final ShardRequestTracker shardRequestTracker = ocmh.asyncRequestTracker(asyncId);
@@ -227,7 +227,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
       }
       String coreName = replica.getStr(CORE_NAME_PROP);
 
-      ModifiableSolrParams params = coreBackupParams(backupUri, repoName, slice, coreName, true /* incremental backup */);
+      ModifiableSolrParams params = coreBackupParams(backupPath, repoName, slice, coreName, true /* incremental backup */);
       params.set(CoreAdminParams.BACKUP_INCREMENTAL, true);
       previousProps.flatMap(bp -> bp.getShardBackupIdFor(slice.getName()))
               .ifPresent(prevBackupPoint -> params.set(CoreAdminParams.PREV_SHARD_BACKUP_ID, prevBackupPoint.getIdAsString()));
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteBackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteBackupCmd.java
index 0a48f1d..7709751 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteBackupCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteBackupCmd.java
@@ -23,14 +23,14 @@ import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.AggregateBackupStats;
-import org.apache.solr.core.backup.BackupFilePaths;
 import org.apache.solr.core.backup.BackupId;
+import org.apache.solr.core.backup.AggregateBackupStats;
 import org.apache.solr.core.backup.BackupManager;
 import org.apache.solr.core.backup.BackupProperties;
 import org.apache.solr.core.backup.ShardBackupId;
 import org.apache.solr.core.backup.ShardBackupMetadata;
 import org.apache.solr.core.backup.repository.BackupRepository;
+import org.apache.solr.core.backup.BackupFilePaths;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -117,10 +117,10 @@ public class DeleteBackupCmd implements OverseerCollectionMessageHandler.Cmd {
         deleteBackupIds(backupPath, repository, new HashSet<>(backupIdDeletes), results);
     }
 
-    void deleteBackupIds(URI backupUri, BackupRepository repository,
+    void deleteBackupIds(URI backupPath, BackupRepository repository,
                          Set<BackupId> backupIdsDeletes,
                          @SuppressWarnings({"rawtypes"}) NamedList results) throws IOException {
-        BackupFilePaths incBackupFiles = new BackupFilePaths(repository, backupUri);
+        BackupFilePaths incBackupFiles = new BackupFilePaths(repository, backupPath);
         URI shardBackupMetadataDir = incBackupFiles.getShardBackupMetadataDir();
 
         Set<String> referencedIndexFiles = new HashSet<>();
@@ -167,15 +167,15 @@ public class DeleteBackupCmd implements OverseerCollectionMessageHandler.Cmd {
         repository.delete(incBackupFiles.getIndexDir(), unusedFiles, true);
         try {
             for (BackupId backupId : backupIdsDeletes) {
-                repository.deleteDirectory(repository.resolve(backupUri, BackupFilePaths.getZkStateDir(backupId)));
+                repository.deleteDirectory(repository.resolve(backupPath, BackupFilePaths.getZkStateDir(backupId)));
             }
         } catch (FileNotFoundException e) {
             //ignore this
         }
 
         //add details to result before deleting backupPropFiles
-        addResult(backupUri, repository, backupIdsDeletes, backupIdToCollectionBackupPoint, results);
-        repository.delete(backupUri, backupIdsDeletes.stream().map(id -> BackupFilePaths.getBackupPropsName(id)).collect(Collectors.toList()), true);
+        addResult(backupPath, repository, backupIdsDeletes, backupIdToCollectionBackupPoint, results);
+        repository.delete(backupPath, backupIdsDeletes.stream().map(id -> BackupFilePaths.getBackupPropsName(id)).collect(Collectors.toList()), true);
     }
 
     @SuppressWarnings("unchecked")
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
index 53a02fa..b7f9c6d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -16,6 +16,24 @@
  */
 package org.apache.solr.cloud.api.collections;
 
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
 import com.google.common.collect.ImmutableMap;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.solr.client.solrj.SolrResponse;
@@ -77,25 +95,16 @@ import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.solr.common.cloud.ZkStateReader.*;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
 import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
@@ -621,9 +630,9 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
   }
 
   @SuppressWarnings({"rawtypes"})
-  void cleanBackup(BackupRepository  repository, URI backupUri, BackupId backupId) throws Exception {
+  void cleanBackup(BackupRepository  repository, URI backupPath, BackupId backupId) throws Exception {
     ((DeleteBackupCmd)commandMap.get(DELETEBACKUP))
-            .deleteBackupIds(backupUri, repository, Collections.singleton(backupId), new NamedList());
+            .deleteBackupIds(backupPath, repository, Collections.singleton(backupId), new NamedList());
   }
 
   void deleteBackup(BackupRepository repository, URI backupPath,
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 89d9df4..26bb7d3 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -743,9 +743,7 @@ public class CoreContainer {
     createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
     createHandler(ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
     collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
-    final CollectionsAPI collectionsAPI = new CollectionsAPI(collectionsHandler);
-    containerHandlers.getApiBag().registerObject(collectionsAPI);
-    containerHandlers.getApiBag().registerObject(collectionsAPI.collectionsCommands);
+    containerHandlers.getApiBag().registerObject(new CollectionsAPI(collectionsHandler));
     configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
     ClusterAPI clusterAPI = new ClusterAPI(collectionsHandler, configSetsHandler);
     containerHandlers.getApiBag().registerObject(clusterAPI);
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
index e8cfef5..f16db6d 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
@@ -17,16 +17,6 @@
 
 package org.apache.solr.core.backup.repository;
 
-import com.google.common.base.Preconditions;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.NIOFSDirectory;
-import org.apache.lucene.store.NoLockFactory;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.DirectoryFactory;
-
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URI;
@@ -42,13 +32,23 @@ import java.nio.file.attribute.BasicFileAttributes;
 import java.util.Collection;
 import java.util.Objects;
 
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.NIOFSDirectory;
+import org.apache.lucene.store.NoLockFactory;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.DirectoryFactory;
+
+import com.google.common.base.Preconditions;
+
 /**
  * A concrete implementation of {@linkplain BackupRepository} interface supporting backup/restore of Solr indexes to a
  * local file-system. (Note - This can even be used for a shared file-system if it is exposed via a local file-system
  * interface e.g. NFS).
  */
 public class LocalFileSystemRepository implements BackupRepository {
-
   @SuppressWarnings("rawtypes")
   private NamedList config = null;
 
diff --git a/solr/core/src/java/org/apache/solr/handler/CatStream.java b/solr/core/src/java/org/apache/solr/handler/CatStream.java
index e6d58c5..d7f5fe6 100644
--- a/solr/core/src/java/org/apache/solr/handler/CatStream.java
+++ b/solr/core/src/java/org/apache/solr/handler/CatStream.java
@@ -17,10 +17,7 @@
 
 package org.apache.solr.handler;
 
-import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
-import java.io.InputStreamReader;
 import java.lang.invoke.MethodHandles;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -28,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.stream.Stream;
-import java.util.zip.GZIPInputStream;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.LineIterator;
@@ -184,12 +180,7 @@ public class CatStream extends TupleStream implements Expressible {
     while (allFilesToCrawl.hasNext()) {
       closeCurrentFileIfSet();
       currentFilePath = allFilesToCrawl.next();
-      File currentFile = currentFilePath.absolutePath.toFile();
-      if(currentFile.getName().endsWith(".gz")) {
-        currentFileLines = new LineIterator(new InputStreamReader(new GZIPInputStream(new FileInputStream(currentFile)), "UTF-8"));
-      } else {
-        currentFileLines = FileUtils.lineIterator(currentFile, "UTF-8");
-      }
+      currentFileLines = FileUtils.lineIterator(currentFilePath.absolutePath.toFile(), "UTF-8");
       if (currentFileLines.hasNext()) return true;
     }
 
diff --git a/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java b/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java
index aee2571..ee77e3d 100644
--- a/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java
+++ b/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java
@@ -23,9 +23,9 @@ import java.util.Map;
 import org.apache.solr.api.Command;
 import org.apache.solr.api.EndPoint;
 import org.apache.solr.api.PayloadObj;
-import org.apache.solr.client.solrj.request.beans.ClusterPropPayload;
-import org.apache.solr.client.solrj.request.beans.CreateConfigPayload;
-import org.apache.solr.client.solrj.request.beans.RateLimiterPayload;
+import org.apache.solr.client.solrj.request.beans.ClusterPropInfo;
+import org.apache.solr.client.solrj.request.beans.CreateConfigInfo;
+import org.apache.solr.client.solrj.request.beans.RateLimiterMeta;
 import org.apache.solr.cloud.OverseerConfigSetMessageHandler;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.annotation.JsonProperty;
@@ -120,7 +120,7 @@ public class ClusterAPI {
 
     @Command(name = "create")
     @SuppressWarnings("unchecked")
-    public void create(PayloadObj<CreateConfigPayload> obj) throws Exception {
+    public void create(PayloadObj<CreateConfigInfo> obj) throws Exception {
       Map<String, Object> mapVals = obj.get().toMap(new HashMap<>());
       Map<String,Object> customProps = (Map<String, Object>) mapVals.remove("properties");
       if(customProps!= null) {
@@ -223,7 +223,7 @@ public class ClusterAPI {
 
     @Command(name = "set-obj-property")
     @SuppressWarnings({"rawtypes", "unchecked"})
-    public void setObjProperty(PayloadObj<ClusterPropPayload> obj) {
+    public void setObjProperty(PayloadObj<ClusterPropInfo> obj) {
       //Not using the object directly here because the API differentiate between {name:null} and {}
       Map m = obj.getDataMap();
       ClusterProperties clusterProperties = new ClusterProperties(getCoreContainer().getZkController().getZkClient());
@@ -242,8 +242,8 @@ public class ClusterAPI {
     }
 
     @Command(name = "set-ratelimiter")
-    public void setRateLimiters(PayloadObj<RateLimiterPayload> payLoad) {
-      RateLimiterPayload rateLimiterConfig = payLoad.get();
+    public void setRateLimiters(PayloadObj<RateLimiterMeta> payLoad) {
+      RateLimiterMeta rateLimiterConfig = payLoad.get();
       ClusterProperties clusterProperties = new ClusterProperties(getCoreContainer().getZkController().getZkClient());
 
       try {
diff --git a/solr/core/src/java/org/apache/solr/handler/CollectionsAPI.java b/solr/core/src/java/org/apache/solr/handler/CollectionsAPI.java
index a5b6b12..e02e5f2 100644
--- a/solr/core/src/java/org/apache/solr/handler/CollectionsAPI.java
+++ b/solr/core/src/java/org/apache/solr/handler/CollectionsAPI.java
@@ -17,38 +17,16 @@
 
 package org.apache.solr.handler;
 
-import org.apache.commons.collections4.CollectionUtils;
-import org.apache.solr.api.Command;
 import org.apache.solr.api.EndPoint;
-import org.apache.solr.api.PayloadObj;
-import org.apache.solr.client.solrj.request.beans.BackupCollectionPayload;
-import org.apache.solr.client.solrj.request.beans.CreateAliasPayload;
-import org.apache.solr.client.solrj.request.beans.CreatePayload;
-import org.apache.solr.client.solrj.request.beans.DeleteAliasPayload;
-import org.apache.solr.client.solrj.request.beans.RestoreCollectionPayload;
-import org.apache.solr.client.solrj.request.beans.SetAliasPropertyPayload;
-import org.apache.solr.client.solrj.request.beans.V2ApiConstants;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.params.CollectionParams.CollectionAction;
 import org.apache.solr.handler.admin.CollectionsHandler;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import static org.apache.solr.client.solrj.SolrRequest.METHOD.*;
-import static org.apache.solr.client.solrj.request.beans.V2ApiConstants.ROUTER_KEY;
-import static org.apache.solr.cloud.api.collections.RoutedAlias.CREATE_COLLECTION_PREFIX;
-import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_PREFIX;
-import static org.apache.solr.common.params.CollectionAdminParams.ROUTER_PREFIX;
-import static org.apache.solr.common.params.CommonParams.ACTION;
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.DELETE;
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.GET;
 import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.handler.ClusterAPI.wrapParams;
 import static org.apache.solr.security.PermissionNameProvider.Name.COLL_EDIT_PERM;
 import static org.apache.solr.security.PermissionNameProvider.Name.COLL_READ_PERM;
 
@@ -58,16 +36,7 @@ import static org.apache.solr.security.PermissionNameProvider.Name.COLL_READ_PER
  */
 public class CollectionsAPI {
 
-    public static final String V2_CREATE_COLLECTION_CMD = "create";
-    public static final String V2_BACKUP_CMD = "backup-collection";
-    public static final String V2_RESTORE_CMD = "restore-collection";
-    public static final String V2_CREATE_ALIAS_CMD = "create-alias";
-    public static final String V2_SET_ALIAS_PROP_CMD = "set-alias-property";
-    public static final String V2_DELETE_ALIAS_CMD = "delete-alias";
-
-    private final CollectionsHandler collectionsHandler;
-
-  public  final CollectionsCommands collectionsCommands = new CollectionsCommands();
+  private final CollectionsHandler collectionsHandler;
 
   public CollectionsAPI(CollectionsHandler collectionsHandler) {
     this.collectionsHandler = collectionsHandler;
@@ -81,149 +50,11 @@ public class CollectionsAPI {
     CollectionsHandler.CollectionOperation.LIST_OP.execute(req, rsp, collectionsHandler);
   }
 
-    @EndPoint(
-            path = {"/c", "/collections"},
-            method = POST,
-            permission = COLL_EDIT_PERM)
-    public class CollectionsCommands {
-
-        @Command(name = V2_BACKUP_CMD)
-        @SuppressWarnings("unchecked")
-        public void backupCollection(PayloadObj<BackupCollectionPayload> obj) throws Exception {
-            final Map<String, Object> v1Params = obj.get().toMap(new HashMap<>());
-            v1Params.put(ACTION, CollectionAction.BACKUP.toLower());
-
-            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
-        }
-
-        @Command(name = V2_RESTORE_CMD)
-        @SuppressWarnings("unchecked")
-        public void restoreBackup(PayloadObj<RestoreCollectionPayload> obj) throws Exception {
-            final RestoreCollectionPayload v2Body = obj.get();
-            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
-
-            v1Params.put(ACTION, CollectionAction.RESTORE.toLower());
-            if (v2Body.createCollectionParams != null && !v2Body.createCollectionParams.isEmpty()) {
-                final Map<String, Object> createCollParams = (Map<String, Object>) v1Params.remove(V2ApiConstants.CREATE_COLLECTION_KEY);
-                convertV2CreateCollectionMapToV1ParamMap(createCollParams);
-                v1Params.putAll(createCollParams);
-            }
-
-            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
-        }
-
-        @Command(name = V2_CREATE_ALIAS_CMD)
-        @SuppressWarnings("unchecked")
-        public void createAlias(PayloadObj<CreateAliasPayload> obj) throws Exception {
-            final CreateAliasPayload v2Body = obj.get();
-            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
-
-            v1Params.put(ACTION, CollectionAction.CREATEALIAS.toLower());
-            if (! CollectionUtils.isEmpty(v2Body.collections)) {
-                final String collectionsStr = String.join(",", v2Body.collections);
-                v1Params.remove(V2ApiConstants.COLLECTIONS);
-                v1Params.put(V2ApiConstants.COLLECTIONS, collectionsStr);
-            }
-            if (v2Body.router != null) {
-                Map<String, Object> routerProperties = (Map<String, Object>) v1Params.remove(V2ApiConstants.ROUTER_KEY);
-                flattenMapWithPrefix(routerProperties, v1Params, ROUTER_PREFIX);
-            }
-            if (v2Body.createCollectionParams != null && !v2Body.createCollectionParams.isEmpty()) {
-                final Map<String, Object> createCollectionMap = (Map<String, Object>) v1Params.remove(V2ApiConstants.CREATE_COLLECTION_KEY);
-                convertV2CreateCollectionMapToV1ParamMap(createCollectionMap);
-                flattenMapWithPrefix(createCollectionMap, v1Params, CREATE_COLLECTION_PREFIX);
-            }
-
-            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
-        }
-
-        @Command(name= V2_SET_ALIAS_PROP_CMD)
-        @SuppressWarnings("unchecked")
-        public void setAliasProperty(PayloadObj<SetAliasPropertyPayload> obj) throws Exception {
-            final SetAliasPropertyPayload v2Body = obj.get();
-            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
-
-            v1Params.put(ACTION, CollectionAction.ALIASPROP.toLower());
-            // Flatten "properties" map into individual prefixed params
-            final Map<String, Object> propertiesMap = (Map<String, Object>) v1Params.remove(V2ApiConstants.PROPERTIES_KEY);
-            flattenMapWithPrefix(propertiesMap, v1Params, PROPERTY_PREFIX);
-
-            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
-        }
-
-        @Command(name= V2_DELETE_ALIAS_CMD)
-        @SuppressWarnings("unchecked")
-        public void deleteAlias(PayloadObj<DeleteAliasPayload> obj) throws Exception {
-            final DeleteAliasPayload v2Body = obj.get();
-            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
-            v1Params.put(ACTION, CollectionAction.DELETEALIAS.toLower());
-
-            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
-        }
-
-        @Command(name = V2_CREATE_COLLECTION_CMD)
-        @SuppressWarnings("unchecked")
-        public void create(PayloadObj<CreatePayload> obj) throws Exception {
-            final CreatePayload v2Body = obj.get();
-            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
-
-            v1Params.put(ACTION, CollectionAction.CREATE.toLower());
-            convertV2CreateCollectionMapToV1ParamMap(v1Params);
-
-            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
-        }
-
-        @SuppressWarnings("unchecked")
-        private void convertV2CreateCollectionMapToV1ParamMap(Map<String, Object> v2MapVals) {
-            // Keys are copied so that map can be modified as keys are looped through.
-            final Set<String> v2Keys = v2MapVals.keySet().stream().collect(Collectors.toSet());
-            for (String key : v2Keys) {
-                switch (key) {
-                    case V2ApiConstants.PROPERTIES_KEY:
-                        final Map<String, Object> propertiesMap = (Map<String, Object>) v2MapVals.remove(V2ApiConstants.PROPERTIES_KEY);
-                        flattenMapWithPrefix(propertiesMap, v2MapVals, PROPERTY_PREFIX);
-                        break;
-                    case ROUTER_KEY:
-                        final Map<String, Object> routerProperties = (Map<String, Object>) v2MapVals.remove(V2ApiConstants.ROUTER_KEY);
-                        flattenMapWithPrefix(routerProperties, v2MapVals, CollectionAdminParams.ROUTER_PREFIX);
-                        break;
-                    case V2ApiConstants.CONFIG:
-                        v2MapVals.put(CollectionAdminParams.COLL_CONF, v2MapVals.remove(V2ApiConstants.CONFIG));
-                        break;
-                    case V2ApiConstants.SHUFFLE_NODES:
-                        v2MapVals.put(CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM, v2MapVals.remove(V2ApiConstants.SHUFFLE_NODES));
-                        break;
-                    case V2ApiConstants.NODE_SET:
-                        final Object nodeSetValUncast = v2MapVals.remove(V2ApiConstants.NODE_SET);
-                        if (nodeSetValUncast instanceof String) {
-                            v2MapVals.put(CollectionAdminParams.CREATE_NODE_SET_PARAM, nodeSetValUncast);
-                        } else {
-                            final List<String> nodeSetList = (List<String>) nodeSetValUncast;
-                            final String nodeSetStr = String.join(",", nodeSetList);
-                            v2MapVals.put(CollectionAdminParams.CREATE_NODE_SET_PARAM, nodeSetStr);
-                        }
-                        break;
-                    default:
-                        break;
-                }
-            }
-        }
-
-        private void flattenMapWithPrefix(Map<String, Object> toFlatten, Map<String, Object> destination,
-                                          String additionalPrefix) {
-            if (toFlatten == null || toFlatten.isEmpty() || destination == null) {
-                return;
-            }
-
-            toFlatten.forEach((k, v) -> destination.put(additionalPrefix + k, v));
-        }
-  }
-
   @EndPoint(path = {"/c/{collection}", "/collections/{collection}"},
       method = DELETE,
       permission = COLL_EDIT_PERM)
   public void deleteCollection(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    req = wrapParams(req, ACTION,
+    req = ClusterAPI.wrapParams(req, "action",
         CollectionAction.DELETE.toString(),
         NAME, req.getPathTemplateValues().get(ZkStateReader.COLLECTION_PROP));
     collectionsHandler.handleRequestBody(req, rsp);
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java b/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
index 5543f04..c2c7806 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
@@ -60,7 +60,7 @@ class BackupCoreOp implements CoreAdminHandler.CoreAdminOp {
 
       if (incremental) {
         if ("file".equals(locationUri.getScheme())) {
-          core.getCoreContainer().assertPathAllowed(Paths.get(locationUri));
+          core.getCoreContainer().assertPathAllowed(Paths.get(location));
         }
         final ShardBackupId prevShardBackupId = prevShardBackupIdStr != null ? ShardBackupId.from(prevShardBackupIdStr) : null;
         BackupFilePaths incBackupFiles = new BackupFilePaths(repository, locationUri);
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 54aaf28..32061c0 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -1028,6 +1028,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
               + " parameter or as a default repository property or as a cluster property.");
         }
       }
+
       boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
 
       // Check if the specified location is valid for this repository.
diff --git a/solr/core/src/java/org/apache/solr/response/JSONWriter.java b/solr/core/src/java/org/apache/solr/response/JSONWriter.java
index 4e17696..cef8b7d 100644
--- a/solr/core/src/java/org/apache/solr/response/JSONWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/JSONWriter.java
@@ -45,16 +45,16 @@ public class JSONWriter extends TextResponseWriter implements JsonTextWriter {
     this.wrapperFunction = wrapperFunction;
     this.namedListStyle = namedListStyle;
   }
-  private JSONWriter(Writer writer, boolean indent, String namedListStyle) throws IOException {
-    super(writer, indent);
+  private JSONWriter(Writer writer, boolean intend, String namedListStyle) throws IOException {
+    super(writer, intend);
     this.namedListStyle = namedListStyle;
 
   }
 
   /**Strictly for testing only
    */
-  public static void write(Writer writer, boolean indent,  String namedListStyle, Object val) throws IOException {
-    JSONWriter jw = new JSONWriter(writer, indent, namedListStyle);
+  public static void write(Writer writer, boolean intend,  String namedListStyle, Object val) throws IOException {
+    JSONWriter jw = new JSONWriter(writer, intend, namedListStyle);
     jw.writeVal(null, val);
     jw.close();
 
diff --git a/solr/core/src/java/org/apache/solr/search/MaxScoreCollector.java b/solr/core/src/java/org/apache/solr/search/MaxScoreCollector.java
index 0cd76b6..744e576 100644
--- a/solr/core/src/java/org/apache/solr/search/MaxScoreCollector.java
+++ b/solr/core/src/java/org/apache/solr/search/MaxScoreCollector.java
@@ -41,12 +41,7 @@ public class MaxScoreCollector extends SimpleCollector {
   }
 
   @Override
-  public void setScorer(Scorable scorer) throws IOException {
-    if (maxScore == Float.MIN_VALUE) {
-      scorer.setMinCompetitiveScore(0f);
-    } else {
-      scorer.setMinCompetitiveScore(Math.nextUp(maxScore));
-    }
+  public void setScorer(Scorable scorer) {
     this.scorer = scorer;
   }
 
diff --git a/solr/core/src/java/org/apache/solr/servlet/QueryRateLimiter.java b/solr/core/src/java/org/apache/solr/servlet/QueryRateLimiter.java
index 36e5d3b..f746aca 100644
--- a/solr/core/src/java/org/apache/solr/servlet/QueryRateLimiter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/QueryRateLimiter.java
@@ -22,7 +22,7 @@ import java.util.Map;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.request.beans.RateLimiterPayload;
+import org.apache.solr.client.solrj.request.beans.RateLimiterMeta;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.Utils;
@@ -51,7 +51,7 @@ public class QueryRateLimiter extends RequestRateLimiter {
       return;
     }
 
-    RateLimiterPayload rateLimiterMeta = mapper.readValue(configInput, RateLimiterPayload.class);
+    RateLimiterMeta rateLimiterMeta = mapper.readValue(configInput, RateLimiterMeta.class);
 
     constructQueryRateLimiterConfigInternal(rateLimiterMeta, rateLimiterConfig);
   }
@@ -74,7 +74,7 @@ public class QueryRateLimiter extends RequestRateLimiter {
         return rateLimiterConfig;
       }
 
-      RateLimiterPayload rateLimiterMeta = mapper.readValue(configInput, RateLimiterPayload.class);
+      RateLimiterMeta rateLimiterMeta = mapper.readValue(configInput, RateLimiterMeta.class);
 
       constructQueryRateLimiterConfigInternal(rateLimiterMeta, rateLimiterConfig);
 
@@ -88,7 +88,7 @@ public class QueryRateLimiter extends RequestRateLimiter {
     }
   }
 
-  private static void constructQueryRateLimiterConfigInternal(RateLimiterPayload rateLimiterMeta, RateLimiterConfig rateLimiterConfig) {
+  private static void constructQueryRateLimiterConfigInternal(RateLimiterMeta rateLimiterMeta, RateLimiterConfig rateLimiterConfig) {
 
     if (rateLimiterMeta == null) {
       // No Rate limiter configuration defined in clusterprops.json
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
index d51f805..29e1799 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
@@ -24,7 +24,7 @@
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <schemaFactory class="ClassicIndexSchemaFactory"/>
   <requestHandler name="/select" class="solr.SearchHandler" />
-
+  
   <query>
     <!-- Maximum number of clauses in a boolean query... can affect
         range or wildcard queries that expand to big boolean
@@ -54,16 +54,16 @@
       autowarmCount="0"/>
 
     <cache
-      name="user_defined_cache_XXX"
-      enabled="${user_defined_cache_XXX.enabled:false}"
+      name="user_definied_cache_XXX"
+      enabled="${user_definied_cache_XXX.enabled:false}"  
       />
     <cache
-      name="user_defined_cache_ZZZ"
-      enabled="${user_defined_cache_ZZZ.enabled:false}"
+      name="user_definied_cache_ZZZ"
+      enabled="${user_definied_cache_ZZZ.enabled:false}"  
       />
 
 
-
+    
     <!-- If true, stored fields that are not requested will be loaded lazily.
     -->
     <enableLazyFieldLoading>true</enableLazyFieldLoading>
@@ -85,3 +85,6 @@
   </initParams>
 
 </config>
+
+
+
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-memory-circuitbreaker.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-memory-circuitbreaker.xml
index 6ab9f89..699a7bd 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-memory-circuitbreaker.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-memory-circuitbreaker.xml
@@ -54,12 +54,12 @@
       autowarmCount="0"/>
 
     <cache
-      name="user_defined_cache_XXX"
-      enabled="${user_defined_cache_XXX.enabled:false}"
+      name="user_definied_cache_XXX"
+      enabled="${user_definied_cache_XXX.enabled:false}"
       />
     <cache
-      name="user_defined_cache_ZZZ"
-      enabled="${user_defined_cache_ZZZ.enabled:false}"
+      name="user_definied_cache_ZZZ"
+      enabled="${user_definied_cache_ZZZ.enabled:false}"
       />
 
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSCloudIncrementalBackupTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSCloudIncrementalBackupTest.java
index b552c6c..b247ae6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSCloudIncrementalBackupTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSCloudIncrementalBackupTest.java
@@ -20,9 +20,8 @@ package org.apache.solr.cloud.api.collections;
 import org.junit.BeforeClass;
 
 public class LocalFSCloudIncrementalBackupTest extends AbstractIncrementalBackupTest {
-    private static final String SOLR_XML = "<solr>\n" +
+    public static final String SOLR_XML = "<solr>\n" +
             "\n" +
-            "  <str name=\"allowPaths\">ALLOWPATHS_TEMPLATE_VAL</str>\n" +
             "  <str name=\"shareSchema\">${shareSchema:false}</str>\n" +
             "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n" +
             "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n" +
@@ -58,17 +57,17 @@ public class LocalFSCloudIncrementalBackupTest extends AbstractIncrementalBackup
 
     @BeforeClass
     public static void setupClass() throws Exception {
+        configureCluster(NUM_SHARDS)// nodes
+                .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+                .withSolrXml(SOLR_XML)
+                .configure();
+
         boolean whitespacesInPath = random().nextBoolean();
         if (whitespacesInPath) {
             backupLocation = createTempDir("my backup").toAbsolutePath().toString();
         } else {
             backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
         }
-
-        configureCluster(NUM_SHARDS)// nodes
-                .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-                .withSolrXml(SOLR_XML.replace("ALLOWPATHS_TEMPLATE_VAL", backupLocation))
-                .configure();
     }
 
     @Override
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
index 17f34e3..ef718e0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
@@ -69,7 +69,9 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       } else {
         req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 1, 0, 1);
       }
+      setV2(req);
       client.request(req);
+      assertV2CallsCount();
       createCollection(null, COLLECTION_NAME1, 1, 1, client, null, "conf1");
     }
 
@@ -412,7 +414,8 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
   private void clusterStatusZNodeVersion() throws Exception {
     String cname = "clusterStatusZNodeVersion";
     try (CloudSolrClient client = createCloudClient(null)) {
-      CollectionAdminRequest.createCollection(cname, "conf1", 1, 1).process(client);
+      setV2(CollectionAdminRequest.createCollection(cname, "conf1", 1, 1)).process(client);
+      assertV2CallsCount();
       waitForRecoveriesToFinish(cname, true);
 
       ModifiableSolrParams params = new ModifiableSolrParams();
@@ -435,7 +438,9 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       assertNotNull(znodeVersion);
 
       CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(cname, "shard1");
+      setV2(addReplica);
       addReplica.process(client);
+      assertV2CallsCount();
       waitForRecoveriesToFinish(cname, true);
 
       rsp = client.request(request);
diff --git a/solr/core/src/test/org/apache/solr/core/DirectoryFactoriesTest.java b/solr/core/src/test/org/apache/solr/core/DirectoryFactoriesTest.java
index c033e73..4748a6f 100644
--- a/solr/core/src/test/org/apache/solr/core/DirectoryFactoriesTest.java
+++ b/solr/core/src/test/org/apache/solr/core/DirectoryFactoriesTest.java
@@ -75,14 +75,14 @@ public class DirectoryFactoriesTest extends SolrTestCaseJ4 {
           file.writeInt(42);
 
           // TODO: even StandardDirectoryFactory & NRTCachingDirectoryFactory can't agree on this...
-          // ... should we consider this explicitly undefined?
+          // ... should we consider this explicitly undefinied?
           // ... or should *all* Caching DirFactories consult the cache as well as the disk itself?
           // assertFalse(path + " should still not exist until file is closed", dirFac.exists(path));
           
         } // implicitly close file...
         
         // TODO: even StandardDirectoryFactory & NRTCachingDirectoryFactory can't agree on this...
-        // ... should we consider this explicitly undefined?
+        // ... should we consider this explicitly undefinied?
         // ... or should *all* Caching DirFactories consult the cache as well as the disk itself?
         // assertTrue(path + " should exist once file is closed", dirFac.exists(path));
         
diff --git a/solr/core/src/test/org/apache/solr/core/TestConfig.java b/solr/core/src/test/org/apache/solr/core/TestConfig.java
index ccf3114..a1404e1 100644
--- a/solr/core/src/test/org/apache/solr/core/TestConfig.java
+++ b/solr/core/src/test/org/apache/solr/core/TestConfig.java
@@ -124,8 +124,8 @@ public class TestConfig extends SolrTestCaseJ4 {
    System.setProperty("filterCache.enabled", "true");
    System.setProperty("queryResultCache.enabled", "true");
    System.setProperty("documentCache.enabled", "true");
-   System.setProperty("user_defined_cache_XXX.enabled","true");
-   // user_defined_cache_ZZZ.enabled defaults to false in config
+   System.setProperty("user_definied_cache_XXX.enabled","true");
+   // user_definied_cache_ZZZ.enabled defaults to false in config
    
    sc = new SolrConfig(TEST_PATH().resolve("collection1"), "solrconfig-cache-enable-disable.xml");
    assertNotNull(sc.filterCacheConfig);
@@ -134,14 +134,14 @@ public class TestConfig extends SolrTestCaseJ4 {
    //
    assertNotNull(sc.userCacheConfigs);
    assertEquals(1, sc.userCacheConfigs.size());
-   assertNotNull(sc.userCacheConfigs.get("user_defined_cache_XXX"));
+   assertNotNull(sc.userCacheConfigs.get("user_definied_cache_XXX"));
    
    // disable all the core caches (and enable both user caches) via system properties and verify
    System.setProperty("filterCache.enabled", "false");
    System.setProperty("queryResultCache.enabled", "false");
    System.setProperty("documentCache.enabled", "false");
-   System.setProperty("user_defined_cache_XXX.enabled","true");
-   System.setProperty("user_defined_cache_ZZZ.enabled","true");
+   System.setProperty("user_definied_cache_XXX.enabled","true");
+   System.setProperty("user_definied_cache_ZZZ.enabled","true");
 
    sc = new SolrConfig(TEST_PATH().resolve("collection1"), "solrconfig-cache-enable-disable.xml");
    assertNull(sc.filterCacheConfig);
@@ -150,11 +150,11 @@ public class TestConfig extends SolrTestCaseJ4 {
    //
    assertNotNull(sc.userCacheConfigs);
    assertEquals(2, sc.userCacheConfigs.size());
-   assertNotNull(sc.userCacheConfigs.get("user_defined_cache_XXX"));
-   assertNotNull(sc.userCacheConfigs.get("user_defined_cache_ZZZ"));
+   assertNotNull(sc.userCacheConfigs.get("user_definied_cache_XXX"));
+   assertNotNull(sc.userCacheConfigs.get("user_definied_cache_ZZZ"));
    
-   System.clearProperty("user_defined_cache_XXX.enabled");
-   System.clearProperty("user_defined_cache_ZZZ.enabled");
+   System.clearProperty("user_definied_cache_XXX.enabled");
+   System.clearProperty("user_definied_cache_ZZZ.enabled");
    System.clearProperty("filterCache.enabled");
    System.clearProperty("queryResultCache.enabled");
    System.clearProperty("documentCache.enabled");
diff --git a/solr/core/src/test/org/apache/solr/handler/TestIncrementalCoreBackup.java b/solr/core/src/test/org/apache/solr/handler/TestIncrementalCoreBackup.java
index 5035827..531b1be 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestIncrementalCoreBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestIncrementalCoreBackup.java
@@ -31,9 +31,9 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.Arrays;
 
@@ -62,22 +62,21 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
         assertQ(req("q", "id:2"), "//result[@numFound='0']");
 
         //call backup
-        final Path locationPath = createBackupLocation();
-        final URI locationUri = bootstrapBackupLocation(locationPath);
+        final URI location = createAndBootstrapLocationForBackup();
         final ShardBackupId shardBackupId = new ShardBackupId("shard1", BackupId.zero());
 
         final CoreContainer cores = h.getCoreContainer();
-        cores.getAllowPaths().add(Paths.get(locationUri));
+        cores.getAllowPaths().add(Paths.get(location));
         try (final CoreAdminHandler admin = new CoreAdminHandler(cores)) {
             SolrQueryResponse resp = new SolrQueryResponse();
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, shardBackupId.getIdAsString())
                             , resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, shardBackupId);
+            simpleBackupCheck(location, shardBackupId);
         }
     }
 
@@ -101,8 +100,7 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
 
         final CoreContainer cores = h.getCoreContainer();
         final CoreAdminHandler admin = new CoreAdminHandler(cores);
-        final Path locationPath = createBackupLocation();
-        final URI locationUri = bootstrapBackupLocation(locationPath);
+        final URI location = createAndBootstrapLocationForBackup();
 
         final ShardBackupId firstShardBackup = new ShardBackupId("shard1", BackupId.zero());
         { // first a backup before we've ever done *anything*...
@@ -110,11 +108,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, firstShardBackup.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, firstShardBackup, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(location, firstShardBackup, initialEmptyIndexSegmentFileName);
         }
 
         { // Empty (named) snapshot..
@@ -135,11 +133,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, secondShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, secondShardBackupId, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(location, secondShardBackupId, initialEmptyIndexSegmentFileName);
         }
 
         { // Second empty (named) snapshot..
@@ -156,7 +154,7 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
         assertU(commit());
 
         for (ShardBackupId shardBackupId: Arrays.asList(firstShardBackup, secondShardBackupId)) {
-            simpleBackupCheck(locationUri, shardBackupId, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(location, shardBackupId, initialEmptyIndexSegmentFileName);
         }
 
         // Make backups from each of the snapshots and check they are still empty as well...
@@ -167,11 +165,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "commitName", "empty_snapshotA",
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, thirdShardBackup.getIdAsString()),
                             resp);
             assertNull("Backup from snapshot empty_snapshotA should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, thirdShardBackup, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(location, thirdShardBackup, initialEmptyIndexSegmentFileName);
         }
         {
             final ShardBackupId fourthShardBackup = new ShardBackupId("shard1", new BackupId(3));
@@ -180,11 +178,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "commitName", "empty_snapshotB",
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, fourthShardBackup.getIdAsString()),
                             resp);
             assertNull("Backup from snapshot empty_snapshotB should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, fourthShardBackup, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(location, fourthShardBackup, initialEmptyIndexSegmentFileName);
         }
         admin.close();
     }
@@ -211,8 +209,7 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
 
         final CoreContainer cores = h.getCoreContainer();
         final CoreAdminHandler admin = new CoreAdminHandler(cores);
-        final Path locationPath = createBackupLocation();
-        final URI locationUri = bootstrapBackupLocation(locationPath);
+        final URI location = createAndBootstrapLocationForBackup();
 
         final ShardBackupId firstShardBackupId = new ShardBackupId("shard1", BackupId.zero());
         { // take an initial 'backup1a' containing our 1 document
@@ -221,11 +218,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "name", "backup1a",
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, firstShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, firstShardBackupId, oneDocSegmentFile);
+            simpleBackupCheck(location, firstShardBackupId, oneDocSegmentFile);
         }
 
         { // and an initial "snapshot1a' that should eventually match
@@ -254,11 +251,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, secondShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, secondShardBackupId, oneDocSegmentFile);
+            simpleBackupCheck(location, secondShardBackupId, oneDocSegmentFile);
         }
 
         { // and a second "snapshot1b' should also still be identical
@@ -273,8 +270,8 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
 
         // Hard Committing the 2nd doc now should not affect the existing backups or snapshots...
         assertU(commit());
-        simpleBackupCheck(locationUri, firstShardBackupId, oneDocSegmentFile); // backup1a
-        simpleBackupCheck(locationUri, secondShardBackupId, oneDocSegmentFile); // backup1b
+        simpleBackupCheck(location, firstShardBackupId, oneDocSegmentFile); // backup1a
+        simpleBackupCheck(location, secondShardBackupId, oneDocSegmentFile); // backup1b
 
         final ShardBackupId thirdShardBackupId = new ShardBackupId("shard1", new BackupId(2));
         { // But we should be able to confirm both docs appear in a new backup (not based on a previous snapshot)
@@ -282,13 +279,13 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, thirdShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
             // TODO This doesn't actually check that backup has both docs!  Can we do better than this without doing a full restore?
             // Maybe validate the new segments_X file at least to show that it's picked up the latest commit?
-            simpleBackupCheck(locationUri, thirdShardBackupId);
+            simpleBackupCheck(location, thirdShardBackupId);
         }
 
         // if we go back and create backups from our earlier snapshots they should still only
@@ -301,11 +298,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "commitName", "snapshot1a",
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, fourthShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup of snapshot1a should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, fourthShardBackupId, oneDocSegmentFile);
+            simpleBackupCheck(location, fourthShardBackupId, oneDocSegmentFile);
         }
         final ShardBackupId fifthShardBackupId = new ShardBackupId("shard1", new BackupId(4));
         {
@@ -314,11 +311,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "commitName", "snapshot1b",
-                            "location", locationPath.toString(),
+                            "location", location.getPath(),
                             CoreAdminParams.SHARD_BACKUP_ID, fifthShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup of snapshot1b should have succeeded", resp.getException());
-            simpleBackupCheck(locationUri, fifthShardBackupId, oneDocSegmentFile);
+            simpleBackupCheck(location, fifthShardBackupId, oneDocSegmentFile);
         }
 
         admin.close();
@@ -356,18 +353,15 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
         }
     }
 
-    private Path createBackupLocation() {
-        return createTempDir().toAbsolutePath();
-    }
+    private URI createAndBootstrapLocationForBackup() throws IOException {
+        final File locationFile = createTempDir().toFile();
+        final String location = locationFile.getAbsolutePath();
 
-    private URI bootstrapBackupLocation(Path locationPath) throws IOException {
-        final String locationPathStr = locationPath.toString();
-        h.getCoreContainer().getAllowPaths().add(locationPath);
+        h.getCoreContainer().getAllowPaths().add(locationFile.toPath());
         try (BackupRepository backupRepo = h.getCoreContainer().newBackupRepository(null)) {
-            final URI locationUri = backupRepo.createURI(locationPathStr);
-            final BackupFilePaths backupFilePaths = new BackupFilePaths(backupRepo, locationUri);
+            final BackupFilePaths backupFilePaths = new BackupFilePaths(backupRepo, backupRepo.createURI(location));
             backupFilePaths.createIncrementalBackupFolders();
-            return locationUri;
+            return backupRepo.createURI(location);
         }
     }
 }
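
Note on the hunks above: merging the two helpers means a single URI now drives both halves of the test, with `location.getPath()` supplying the filesystem path for the v1 `location` parameter while the URI itself is what `simpleBackupCheck` verifies against. A condensed sketch, using only identifiers visible in the hunks (`firstShardBackupId`, `admin`, `resp` and `req(...)` are assumed to be set up earlier in the test as shown there):

    final URI location = createAndBootstrapLocationForBackup();
    admin.handleRequestBody(
        req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
            "core", DEFAULT_TEST_COLLECTION_NAME,
            "name", "backup1a",
            "location", location.getPath(),          // plain path for the v1 request param
            CoreAdminParams.SHARD_BACKUP_ID, firstShardBackupId.getIdAsString()),
        resp);
    simpleBackupCheck(location, firstShardBackupId);  // verification keys off the URI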
diff --git a/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java b/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
index 0e507a4..871d349 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
@@ -17,6 +17,11 @@
 
 package org.apache.solr.handler;
 
+import java.io.File;
+import java.lang.invoke.MethodHandles;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -27,31 +32,26 @@ import org.apache.solr.client.solrj.response.UpdateResponse;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.params.UpdateParams;
+import org.apache.solr.util.LogLevel;
 import org.junit.After;
 import org.junit.Before;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
 import static org.apache.solr.handler.TestStressThreadBackup.makeDoc;
 
 //@LuceneTestCase.Nightly
 @LuceneTestCase.SuppressCodecs({"SimpleText"})
+@LogLevel("org.apache.solr.handler.SnapShooter=DEBUG;org.apache.solr.core.IndexDeletionPolicyWrapper=DEBUG")
 public class TestStressIncrementalBackup extends SolrCloudTestCase {
     private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-    private Path backupPath;
+    private File backupDir;
     private SolrClient adminClient;
     private SolrClient coreClient;
-    
     @Before
     public void beforeTest() throws Exception {
-        backupPath = createTempDir(getTestClass().getSimpleName() + "_backups");
-        System.setProperty("solr.allowPaths", backupPath.toString());
+        backupDir = createTempDir(getTestClass().getSimpleName() + "_backups").toFile();
 
         // NOTE: we don't actually care about using SolrCloud, but we want to use SolrClient and I can't
         // bring myself to deal with the nonsense that is SolrJettyTestBase.
@@ -87,8 +87,6 @@ public class TestStressIncrementalBackup extends SolrCloudTestCase {
         if (null != coreClient) {
             coreClient.close();
         }
-
-        System.clearProperty("solr.allowPaths");
     }
 
     public void testCoreAdminHandler() throws Exception {
@@ -160,7 +158,7 @@ public class TestStressIncrementalBackup extends SolrCloudTestCase {
 
     public void makeBackup() throws Exception {
         CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(DEFAULT_TEST_COLLECTION_NAME, "stressBackup")
-                .setLocation(backupPath.toString())
+                .setLocation(backupDir.getAbsolutePath())
                 .setIncremental(true)
                 .setMaxNumberBackupPoints(5);
         if (random().nextBoolean()) {
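
Note on the hunks above: the stress test now keeps the backup directory as a plain File and no longer sets the `solr.allowPaths` system property; the location reaches Solr through the request builder. A hedged sketch of the full request this helper issues, using only the builder calls visible above (the generic `process` call is an assumption about how it is submitted):

    CollectionAdminRequest.Backup backup =
        CollectionAdminRequest.backupCollection(DEFAULT_TEST_COLLECTION_NAME, "stressBackup")
            .setLocation(backupDir.getAbsolutePath())   // plain filesystem path
            .setIncremental(true)                       // incremental backup points
            .setMaxNumberBackupPoints(5);               // keep at most five points
    backup.process(adminClient);                        // assumed submission path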
diff --git a/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java b/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
index f8f33a3..8c7128b 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
@@ -37,6 +37,7 @@ import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.params.UpdateParams;
 import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.util.LogLevel;
 import org.apache.solr.util.TimeOut;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -62,6 +63,7 @@ import java.util.regex.Pattern;
 
 @Nightly
 @SuppressCodecs({"SimpleText"})
+@LogLevel("org.apache.solr.handler.SnapShooter=DEBUG;org.apache.solr.core.IndexDeletionPolicyWrapper=DEBUG")
 public class TestStressThreadBackup extends SolrCloudTestCase {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
@@ -367,8 +369,7 @@ public class TestStressThreadBackup extends SolrCloudTestCase {
     public void makeBackup(final String backupName, final String snapName) throws Exception {
       ModifiableSolrParams p = params(CoreAdminParams.CORE, coreName,
                                       CoreAdminParams.NAME, backupName,
-                                      CoreAdminParams.BACKUP_LOCATION, backupDir.getAbsolutePath(),
-                                      CoreAdminParams.BACKUP_INCREMENTAL, "false");
+                                      CoreAdminParams.BACKUP_LOCATION, backupDir.getAbsolutePath());
       if (null != snapName) {
         p.add(CoreAdminParams.COMMIT_NAME, snapName);
       }
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java b/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
index ee6183b..a9b6e9b 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
@@ -18,6 +18,7 @@
 package org.apache.solr.handler.admin;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
@@ -27,12 +28,17 @@ import org.apache.lucene.util.IOUtils;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.GenericSolrRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.SimpleSolrResponse;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.MapSolrParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -91,6 +97,33 @@ public class AdminHandlersProxyTest extends SolrCloudTestCase {
     assertNotNull(((NamedList)nl.get(nl.getName(1))).get("metrics"));
   }
 
+  @Test
+  @BadApple(bugUrl = "https://issues.apache.org/jira/browse/SOLR-15011")
+  public void proxyLoggingHandlerAllNodes() throws IOException, SolrServerException {
+    CollectionAdminRequest.createCollection("collection", "conf", 2, 2).process(solrClient);
+    ModifiableSolrParams mparams = new ModifiableSolrParams();
+
+    mparams.set(CommonParams.QT, "/admin/logging");
+    mparams.set("nodes", "all");
+    mparams.set("set", "com.codahale.metrics.jmx.JmxReporter:WARN");
+    solrClient.query("collection", mparams, SolrRequest.METHOD.GET);
+
+    Set<String> nodes = solrClient.getClusterStateProvider().getLiveNodes();
+    nodes.forEach(node -> {
+      mparams.clear();
+      mparams.set(CommonParams.QT, "/admin/logging");
+      mparams.set("nodes", node);
+      QueryResponse rsp = null;
+      try {
+        rsp = solrClient.query("collection", mparams, SolrRequest.METHOD.GET);
+      } catch (Exception e) {
+        fail("Exception while proxying request to node " + node);
+      }
+      NamedList<Object> nl = rsp.getResponse();
+      assertEquals("WARN", ((SimpleOrderedMap) ((ArrayList)nl.get("loggers")).get(5)).get("level"));
+    });
+  }
+
   @Test(expected = SolrException.class)
   public void proxySystemInfoHandlerNonExistingNode() throws IOException, SolrServerException {
     MapSolrParams params = new MapSolrParams(Collections.singletonMap("nodes", "example.com:1234_solr"));
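
Note on the new proxy test above: it drives the logging handler through `solrClient.query(...)` with `qt=/admin/logging`. Assuming SolrJ's usual treatment of a leading-slash `qt` value as the request path, the cluster-wide call is equivalent to the following (host, port and collection name are placeholders):

    GET http://localhost:8983/solr/collection/admin/logging?nodes=all&set=com.codahale.metrics.jmx.JmxReporter:WARN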
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java b/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
index cb3562c..f423f0f 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
@@ -84,9 +84,7 @@ public class TestCollectionAPIs extends SolrTestCaseJ4 {
     ApiBag apiBag;
     try (MockCollectionsHandler collectionsHandler = new MockCollectionsHandler()) {
       apiBag = new ApiBag(false);
-      final CollectionsAPI collectionsAPI = new CollectionsAPI(collectionsHandler);
       apiBag.registerObject(new CollectionsAPI(collectionsHandler));
-      apiBag.registerObject(collectionsAPI.collectionsCommands);
       Collection<Api> apis = collectionsHandler.getApis();
       for (Api api : apis) apiBag.register(api, Collections.emptyMap());
 
@@ -114,7 +112,7 @@ public class TestCollectionAPIs extends SolrTestCaseJ4 {
 
 
     compareOutput(apiBag, "/collections", POST,
-        "{create-alias:{name: aliasName , collections:[c1,c2] }}", null, "{operation : createalias, name: aliasName, collections:\"c1,c2\" }");
+        "{create-alias:{name: aliasName , collections:[c1,c2] }}", null, "{operation : createalias, name: aliasName, collections:[c1,c2] }");
 
     compareOutput(apiBag, "/collections", POST,
         "{delete-alias:{ name: aliasName}}", null, "{operation : deletealias, name: aliasName}");
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/V2CollectionsAPIMappingTest.java b/solr/core/src/test/org/apache/solr/handler/admin/V2CollectionsAPIMappingTest.java
deleted file mode 100644
index 689073c..0000000
--- a/solr/core/src/test/org/apache/solr/handler/admin/V2CollectionsAPIMappingTest.java
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.admin;
-
-import com.google.common.collect.Maps;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.api.Api;
-import org.apache.solr.api.ApiBag;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.cloud.api.collections.CategoryRoutedAlias;
-import org.apache.solr.cloud.api.collections.RoutedAlias;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.common.util.ContentStreamBase;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.handler.CollectionsAPI;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-import static org.apache.solr.common.params.CommonParams.ACTION;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-/**
- * Unit tests for the API mappings found in {@link org.apache.solr.handler.CollectionsAPI}.
- *
- * This test bears many similarities to {@link TestCollectionAPIs} which appears to test the mappings indirectly by
- * checking message sent to the ZK overseer (which is similar, but not identical to the v1 param list).  If there's no
- * particular benefit to testing the mappings in this way (there very well may be), then we should combine these two
- * test classes at some point in the future using the simpler approach here.
- *
- * Note that the V2 requests made by these tests are not necessarily semantically valid.  They shouldn't be taken as
- * examples. In several instances, mutually exclusive JSON parameters are provided.  This is done to exercise conversion
- * of all parameters, even if particular combinations are never expected in the same request.
- */
-public class V2CollectionsAPIMappingTest extends SolrTestCaseJ4 {
-
-    private ApiBag apiBag;
-
-    private ArgumentCaptor<SolrQueryRequest> queryRequestCaptor;
-    private CollectionsHandler mockCollectionsHandler;
-
-    @BeforeClass
-    public static void ensureWorkingMockito() {
-        assumeWorkingMockito();
-    }
-
-    @Before
-    public void setupApiBag() throws Exception {
-        mockCollectionsHandler = mock(CollectionsHandler.class);
-        queryRequestCaptor = ArgumentCaptor.forClass(SolrQueryRequest.class);
-
-        apiBag = new ApiBag(false);
-        final CollectionsAPI collectionsAPI = new CollectionsAPI(mockCollectionsHandler);
-        apiBag.registerObject(collectionsAPI);
-        apiBag.registerObject(collectionsAPI.collectionsCommands);
-    }
-
-    @Test
-    public void testCreateCollectionAllProperties() throws Exception {
-        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
-                "{'create': {" +
-                        "'name': 'techproducts', " +
-                        "'config':'_default', " +
-                        "'router': {'name': 'composite', 'field': 'routeField', 'foo': 'bar'}, " +
-                        "'shards': 'customShardName,anotherCustomShardName', " +
-                        "'replicationFactor': 3," +
-                        "'nrtReplicas': 1, " +
-                        "'tlogReplicas': 1, " +
-                        "'pullReplicas': 1, " +
-                        "'nodeSet': ['localhost:8983_solr', 'localhost:7574_solr']," +
-                        "'shuffleNodes': true," +
-                        "'properties': {'foo': 'bar', 'foo2': 'bar2'}, " +
-                        "'async': 'requestTrackingId', " +
-                        "'waitForFinalState': false, " +
-                        "'perReplicaState': false," +
-                        "'numShards': 1}}");
-
-        assertEquals(CollectionParams.CollectionAction.CREATE.lowerName, v1Params.get(ACTION));
-        assertEquals("techproducts", v1Params.get(CommonParams.NAME));
-        assertEquals("_default", v1Params.get(CollectionAdminParams.COLL_CONF));
-        assertEquals("composite", v1Params.get("router.name"));
-        assertEquals("routeField", v1Params.get("router.field"));
-        assertEquals("bar", v1Params.get("router.foo"));
-        assertEquals("customShardName,anotherCustomShardName", v1Params.get(ShardParams.SHARDS));
-        assertEquals(3, v1Params.getPrimitiveInt(ZkStateReader.REPLICATION_FACTOR));
-        assertEquals(1, v1Params.getPrimitiveInt(ZkStateReader.NRT_REPLICAS));
-        assertEquals(1, v1Params.getPrimitiveInt(ZkStateReader.TLOG_REPLICAS));
-        assertEquals(1, v1Params.getPrimitiveInt(ZkStateReader.PULL_REPLICAS));
-        assertEquals("localhost:8983_solr,localhost:7574_solr", v1Params.get(CollectionAdminParams.CREATE_NODE_SET_PARAM));
-        assertEquals(true, v1Params.getPrimitiveBool(CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM));
-        assertEquals("bar", v1Params.get("property.foo"));
-        assertEquals("bar2", v1Params.get("property.foo2"));
-        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
-        assertEquals(false, v1Params.getPrimitiveBool(CommonAdminParams.WAIT_FOR_FINAL_STATE));
-        assertEquals(false, v1Params.getPrimitiveBool(DocCollection.PER_REPLICA_STATE));
-        assertEquals(1, v1Params.getPrimitiveInt(CollectionAdminParams.NUM_SHARDS));
-    }
-
-    @Test
-    public void testCreateAliasAllProperties() throws Exception {
-        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
-                "{'create-alias': {" +
-                        "'name': 'aliasName', " +
-                        "'collections': ['techproducts1', 'techproducts2'], " +
-                        "'tz': 'someTimeZone', " +
-                        "'async': 'requestTrackingId', " +
-                        "'router': {" +
-                        "    'name': 'time', " +
-                        "    'field': 'date_dt', " +
-                        "    'interval': '+1HOUR', " +
-                        "     'maxFutureMs': 3600, " +
-                        "     'preemptiveCreateMath': 'somePreemptiveCreateMathString', " +
-                        "     'autoDeleteAge': 'someAutoDeleteAgeExpression', " +
-                        "     'maxCardinality': 36, " +
-                        "     'mustMatch': 'someRegex', " +
-                        "}, " +
-                        "'create-collection': {" +
-                        "     'numShards': 1, " +
-                        "     'properties': {'foo': 'bar', 'foo2': 'bar2'}, " +
-                        "     'replicationFactor': 3 " +
-                        "}" +
-                        "}}");
-
-        assertEquals(CollectionParams.CollectionAction.CREATEALIAS.lowerName, v1Params.get(ACTION));
-        assertEquals("aliasName", v1Params.get(CommonParams.NAME));
-        assertEquals("techproducts1,techproducts2", v1Params.get("collections"));
-        assertEquals("someTimeZone", v1Params.get(CommonParams.TZ.toLowerCase(Locale.ROOT)));
-        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
-        assertEquals("time", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_TYPE_NAME));
-        assertEquals("date_dt", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_FIELD));
-        assertEquals("+1HOUR", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_INTERVAL));
-        assertEquals(3600, v1Params.getPrimitiveInt(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_MAX_FUTURE));
-        assertEquals("somePreemptiveCreateMathString", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_PREEMPTIVE_CREATE_WINDOW));
-        assertEquals("someAutoDeleteAgeExpression", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_AUTO_DELETE_AGE));
-        assertEquals(36, v1Params.getPrimitiveInt(CategoryRoutedAlias.ROUTER_MAX_CARDINALITY));
-        assertEquals("someRegex", v1Params.get(CategoryRoutedAlias.ROUTER_MUST_MATCH));
-        assertEquals(1, v1Params.getPrimitiveInt(RoutedAlias.CREATE_COLLECTION_PREFIX + CollectionAdminParams.NUM_SHARDS));
-        assertEquals("bar", v1Params.get(RoutedAlias.CREATE_COLLECTION_PREFIX + "property.foo"));
-        assertEquals("bar2", v1Params.get(RoutedAlias.CREATE_COLLECTION_PREFIX + "property.foo2"));
-        assertEquals(3, v1Params.getPrimitiveInt(RoutedAlias.CREATE_COLLECTION_PREFIX + ZkStateReader.REPLICATION_FACTOR));
-    }
-
-    @Test
-    public void testDeleteAliasAllProperties() throws Exception {
-        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
-                "{'delete-alias': {" +
-                        "'name': 'aliasName', " +
-                        "'async': 'requestTrackingId'" +
-                        "}}");
-
-        assertEquals(CollectionParams.CollectionAction.DELETEALIAS.lowerName, v1Params.get(ACTION));
-        assertEquals("aliasName", v1Params.get(CommonParams.NAME));
-        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
-    }
-
-    @Test
-    public void testSetAliasAllProperties() throws Exception {
-        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
-                "{'set-alias-property': {" +
-                        "'name': 'aliasName', " +
-                        "'async': 'requestTrackingId', " +
-                        "'properties': {'foo':'bar', 'foo2':'bar2'}" +
-                        "}}");
-
-        assertEquals(CollectionParams.CollectionAction.ALIASPROP.lowerName, v1Params.get(ACTION));
-        assertEquals("aliasName", v1Params.get(CommonParams.NAME));
-        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
-        assertEquals("bar", v1Params.get("property.foo"));
-        assertEquals("bar2", v1Params.get("property.foo2"));
-    }
-
-    @Test
-    public void testBackupAllProperties() throws Exception {
-        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
-                "{'backup-collection': {" +
-                        "'name': 'backupName', " +
-                        "'collection': 'collectionName', " +
-                        "'location': '/some/location/uri', " +
-                        "'repository': 'someRepository', " +
-                        "'followAliases': true, " +
-                        "'indexBackup': 'copy-files', " +
-                        "'commitName': 'someSnapshotName', " +
-                        "'incremental': true, " +
-                        "'async': 'requestTrackingId' " +
-                        "}}");
-
-        assertEquals(CollectionParams.CollectionAction.BACKUP.lowerName, v1Params.get(ACTION));
-        assertEquals("backupName", v1Params.get(CommonParams.NAME));
-        assertEquals("collectionName", v1Params.get(BackupManager.COLLECTION_NAME_PROP));
-        assertEquals("/some/location/uri", v1Params.get(CoreAdminParams.BACKUP_LOCATION));
-        assertEquals("someRepository", v1Params.get(CoreAdminParams.BACKUP_REPOSITORY));
-        assertEquals(true, v1Params.getPrimitiveBool(CollectionAdminParams.FOLLOW_ALIASES));
-        assertEquals("copy-files", v1Params.get(CollectionAdminParams.INDEX_BACKUP_STRATEGY));
-        assertEquals("someSnapshotName", v1Params.get(CoreAdminParams.COMMIT_NAME));
-        assertEquals(true, v1Params.getPrimitiveBool(CoreAdminParams.BACKUP_INCREMENTAL));
-        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
-    }
-
-    @Test
-    public void testRestoreAllProperties() throws Exception {
-        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
-                "{'restore-collection': {" +
-                        "'name': 'backupName', " +
-                        "'collection': 'collectionName', " +
-                        "'location': '/some/location/uri', " +
-                        "'repository': 'someRepository', " +
-                        "'backupId': 123, " +
-                        "'async': 'requestTrackingId', " +
-                        "'create-collection': {" +
-                        "     'numShards': 1, " +
-                        "     'properties': {'foo': 'bar', 'foo2': 'bar2'}, " +
-                        "     'replicationFactor': 3 " +
-                        "}" +
-                        "}}");
-
-        assertEquals(CollectionParams.CollectionAction.RESTORE.lowerName, v1Params.get(ACTION));
-        assertEquals("backupName", v1Params.get(CommonParams.NAME));
-        assertEquals("collectionName", v1Params.get(BackupManager.COLLECTION_NAME_PROP));
-        assertEquals("/some/location/uri", v1Params.get(CoreAdminParams.BACKUP_LOCATION));
-        assertEquals("someRepository", v1Params.get(CoreAdminParams.BACKUP_REPOSITORY));
-        assertEquals(123, v1Params.getPrimitiveInt(CoreAdminParams.BACKUP_ID));
-        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
-        // NOTE: Unlike other v2 APIs that have a nested object for collection-creation params, restore's v1 equivalent
-        // for these properties doesn't have a "create-collection." prefix.
-        assertEquals(1, v1Params.getPrimitiveInt(CollectionAdminParams.NUM_SHARDS));
-        assertEquals("bar", v1Params.get("property.foo"));
-        assertEquals("bar2", v1Params.get("property.foo2"));
-        assertEquals(3, v1Params.getPrimitiveInt(ZkStateReader.REPLICATION_FACTOR));
-    }
-
-    private SolrParams captureConvertedV1Params(String path, String method, String v2RequestBody) throws Exception {
-        final HashMap<String, String> parts = new HashMap<>();
-        final Api api = apiBag.lookup(path, method, parts);
-        final SolrQueryResponse rsp = new SolrQueryResponse();
-        final LocalSolrQueryRequest req = new LocalSolrQueryRequest(null, Maps.newHashMap()) {
-            @Override
-            public List<CommandOperation> getCommands(boolean validateInput) {
-                if (v2RequestBody == null) return Collections.emptyList();
-                return ApiBag.getCommandOperations(new ContentStreamBase.StringStream(v2RequestBody), api.getCommandSchema(), true);
-            }
-
-            @Override
-            public Map<String, String> getPathTemplateValues() {
-                return parts;
-            }
-
-            @Override
-            public String getHttpMethod() {
-                return method;
-            }
-        };
-
-
-        api.call(req, rsp);
-        verify(mockCollectionsHandler).handleRequestBody(queryRequestCaptor.capture(), any());
-        return queryRequestCaptor.getValue().getParams();
-    }
-}
diff --git a/solr/solr-ref-guide/src/_layouts/default.html b/solr/solr-ref-guide/src/_layouts/default.html
index 75b66b2..5b929bc 100755
--- a/solr/solr-ref-guide/src/_layouts/default.html
+++ b/solr/solr-ref-guide/src/_layouts/default.html
@@ -1,5 +1,5 @@
 <!DOCTYPE html>
-{% comment %}NOTE: page_id is also defined in page.html{% endcomment %}
+{% comment %}NOTE: page_id is also definied in page.html{% endcomment %}
 {% assign page_id = page.url | split: '/' | last | remove: '.html' %}
 <head>
     {% include head.html %}
diff --git a/solr/solr-ref-guide/src/_layouts/home.html b/solr/solr-ref-guide/src/_layouts/home.html
index c1d9cb6..e5b7119 100644
--- a/solr/solr-ref-guide/src/_layouts/home.html
+++ b/solr/solr-ref-guide/src/_layouts/home.html
@@ -7,7 +7,7 @@ layout: default
      Its main difference is that it uses a different class for the content
      container in order to have freedom to customize it as needed. -->
 
-{% comment %}NOTE: page_id is also defined in default.html{% endcomment %}
+{% comment %}NOTE: page_id is also definied in default.html{% endcomment %}
 {% assign page_id = page.url | split: '/' | last | remove: '.html' %}
 
 <div class="homepage container">
diff --git a/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc b/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
index 5350fe5..e554c15 100644
--- a/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
+++ b/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
@@ -164,15 +164,6 @@ See for example https://lucene.apache.org/solr/guide/8_5/cluster-node-management
 
 == Configuration and Default Parameter Changes in Solr 9
 
-* base_url removed from stored state*
-
-If you're able to upgrade SolrJ to 8.8.x for all of your client applications, then you can set `-Dsolr.storeBaseUrl=false` (introduced in Solr 8.8.1)
-to better align the stored state in Zookeeper with future versions of Solr; as of Solr 9.x, the `base_url` will no longer be
-persisted in stored state. However, if you are not able to upgrade SolrJ to 8.8.x for all client applications, then you should
-set `-Dsolr.storeBaseUrl=true` so that Solr will continue to store the `base_url` in Zookeeper. For background, see: SOLR-12182 and SOLR-15145.
-
-Support for the `solr.storeBaseUrl` system property will be removed in Solr 10.x and `base_url` will no longer be stored.
-
 === Schema Changes in 9
 
 === Authentication & Security Changes in Solr 9
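
Note on the paragraph removed above: it documents the `solr.storeBaseUrl` toggle. For reference, one common way to set such a system property is via `SOLR_OPTS` in `solr.in.sh` (an assumed setup for illustration, not something this patch prescribes):

    # keep writing base_url into ZooKeeper state while pre-8.8 SolrJ clients remain in use
    SOLR_OPTS="$SOLR_OPTS -Dsolr.storeBaseUrl=true"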
diff --git a/solr/solr-ref-guide/src/parallel-sql-interface.adoc b/solr/solr-ref-guide/src/parallel-sql-interface.adoc
index 95c7584..e1ddd4f 100644
--- a/solr/solr-ref-guide/src/parallel-sql-interface.adoc
+++ b/solr/solr-ref-guide/src/parallel-sql-interface.adoc
@@ -421,7 +421,7 @@ A step-by-step guide for setting up https://www.dbvis.com/[DbVisualizer] is in t
 
 A step-by-step guide for setting up http://squirrel-sql.sourceforge.net[SQuirreL SQL] is in the section <<solr-jdbc-squirrel-sql.adoc#,Solr JDBC - SQuirreL SQL>>.
 
-=== Apache Zeppelin
+=== Apache Zeppelin (incubating)
 
 A step-by-step guide for setting up http://zeppelin.apache.org/[Apache Zeppelin] is in the section <<solr-jdbc-apache-zeppelin.adoc#,Solr JDBC - Apache Zeppelin>>.
 
diff --git a/solr/solr-ref-guide/src/solr-upgrade-notes.adoc b/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
index 0c52226..3bfbf02 100644
--- a/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
+++ b/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
@@ -100,16 +100,6 @@ The default Prometheus Exporter configuration includes metrics like queries-per-
 Plugin developers using `SolrPaths.locateSolrHome()` or 'new `SolrResourceLoader`' should check deprecation warnings as existing some existing functionality will be removed in 9.0.
 Plugin developers using `SolrPaths.locateSolrHome()` or 'new `SolrResourceLoader`' should check deprecation warnings, as some existing functionality will be removed in 9.0.
 https://issues.apache.org/jira/browse/SOLR-14934[SOLR-14934] has more technical details about this change for those concerned.
 
-*base_url removed from stored state*
-
-As of Solr 8.8.0, the `base_url` property was removed from the stored state for replicas (SOLR-12182). If you're able to upgrade SolrJ to 8.8.x
-for all of your client applications, then you can set `-Dsolr.storeBaseUrl=false` (introduced in Solr 8.8.1) to better align the stored state
-in Zookeeper with future versions of Solr. However, if you are not able to upgrade SolrJ to 8.8.x for all client applications,
-then leave the default `-Dsolr.storeBaseUrl=true` so that Solr will continue to store the `base_url` in Zookeeper.
-
-You may also see some NPE in collection state updates during a rolling upgrade to 8.8.0 from a previous version of Solr. After upgrading all nodes in your cluster
-to 8.8.0, collections should fully recover. Trigger another rolling restart if there are any replicas that do not recover after the upgrade to re-elect leaders.
-
 === Solr 8.7
 
 See the https://cwiki.apache.org/confluence/display/SOLR/ReleaseNote87[8.7 Release Notes^]
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index 4037609..e09ebb3 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -16,11 +16,25 @@
  */
 package org.apache.solr.client.solrj.request;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TimeZone;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.solr.client.solrj.RoutedAliasTypes;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.V2RequestSupport;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.RequestStatusState;
 import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
@@ -40,29 +54,26 @@ import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.Set;
-import java.util.TimeZone;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
 import static org.apache.solr.common.cloud.DocCollection.PER_REPLICA_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.*;
-import static org.apache.solr.common.params.CollectionAdminParams.*;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.READ_ONLY;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+import static org.apache.solr.common.params.CollectionAdminParams.ALIAS;
+import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
+import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
+import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_PARAM;
+import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
+import static org.apache.solr.common.params.CollectionAdminParams.ROUTER_PREFIX;
+import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT;
 
 /**
  * This class is experimental and subject to change.
  *
  * @since solr 4.5
  */
-public abstract class CollectionAdminRequest<T extends CollectionAdminResponse> extends SolrRequest<T> implements MapWriter {
+public abstract class CollectionAdminRequest<T extends CollectionAdminResponse> extends SolrRequest<T> implements V2RequestSupport, MapWriter {
 
   /**
    * The set of modifiable collection properties
@@ -87,6 +98,14 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
   }
 
   @Override
+  @SuppressWarnings({"rawtypes"})
+  public SolrRequest getV2Request() {
+    return usev2 ?
+        V1toV2ApiMapper.convert(this).useBinary(useBinaryV2).build() :
+        this;
+  }
+
+  @Override
   public SolrParams getParams() {
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set(CoreAdminParams.ACTION, action.toString());
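
Note on the restored `getV2Request()` override above: it routes a request through `V1toV2ApiMapper` when the v2 flag is set. A hedged usage sketch (`client` is a placeholder SolrClient; `usev2`/`useBinaryV2` are the inherited SolrRequest fields the override consults):

    CollectionAdminRequest.Reload reload = CollectionAdminRequest.reloadCollection("myCollection");
    @SuppressWarnings({"rawtypes"})
    SolrRequest effective = reload.getV2Request();   // V1toV2ApiMapper.convert(...) when usev2 is set
    client.request(effective);                       // otherwise 'effective' is the v1 request itself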
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
index a88a656..9b106ab 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
@@ -18,13 +18,6 @@
 package org.apache.solr.client.solrj.request;
 
 
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.request.beans.V2ApiConstants;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.Utils;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -32,11 +25,26 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.util.CommandOperation;
+import org.apache.solr.common.util.Pair;
+import org.apache.solr.common.util.Utils;
+
 import static org.apache.solr.client.solrj.SolrRequest.METHOD.DELETE;
-import static org.apache.solr.client.solrj.SolrRequest.METHOD.*;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.*;
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.GET;
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.CLUSTER_ALIASES;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.COLLECTIONS_COMMANDS;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.COLLECTION_STATE;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION_PER_SHARD_COMMANDS;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION_PER_SHARD_DELETE;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION_PER_SHARD_PER_REPLICA_DELETE;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION_SHARDS_COMMANDS;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
 import static org.apache.solr.common.params.CommonParams.NAME;
 
@@ -49,6 +57,17 @@ public class CollectionApiMapping {
   public enum Meta implements CommandMeta {
     GET_A_COLLECTION(COLLECTION_STATE, GET, CLUSTERSTATUS),
     LIST_ALIASES(CLUSTER_ALIASES, GET, LISTALIASES),
+    CREATE_COLLECTION(COLLECTIONS_COMMANDS,
+        POST,
+        CREATE,
+        CREATE.toLower(),
+        Utils.makeMap(
+            "collection.configName", "config",
+            "createNodeSet.shuffle", "shuffleNodes",
+            "createNodeSet", "nodeSet"
+        ),
+        Utils.makeMap("property.", "properties.")),
+
     RELOAD_COLL(PER_COLLECTION,
         POST,
         RELOAD,
@@ -72,11 +91,34 @@ public class CollectionApiMapping {
         POST,
         REBALANCELEADERS,
         "rebalance-leaders", null),
+    CREATE_ALIAS(COLLECTIONS_COMMANDS,
+        POST,
+        CREATEALIAS,
+        "create-alias",
+        CREATE_COLLECTION.paramsToAttrs.entrySet().stream().collect(Collectors.toMap(
+            entry -> "create-collection." + entry.getKey(),
+            entry -> "create-collection." + entry.getValue()
+        )),
+        CREATE_COLLECTION.prefixParamsToAttrs.entrySet().stream().collect(Collectors.toMap(
+            entry -> "create-collection." + entry.getKey(),
+            entry -> "create-collection." + entry.getValue()
+        ))),
+    DELETE_ALIAS(COLLECTIONS_COMMANDS,
+        POST,
+        DELETEALIAS,
+        "delete-alias",
+        null),
+    ALIAS_PROP(COLLECTIONS_COMMANDS,
+        POST,
+        ALIASPROP,
+        "set-alias-property",
+        null,
+        Utils.makeMap("property.", "properties.")),
     CREATE_SHARD(PER_COLLECTION_SHARDS_COMMANDS,
         POST,
         CREATESHARD,
         "create",
-        Utils.makeMap("createNodeSet", V2ApiConstants.NODE_SET),
+        Utils.makeMap("createNodeSet", "nodeSet"),
         Utils.makeMap("property.", "coreProperties.")) {
       @Override
       public String getParamSubstitute(String param) {
@@ -127,6 +169,17 @@ public class CollectionApiMapping {
             NAME, "collection",
             "propertyName", "name",
             "propertyValue", "value")),
+    BACKUP_COLLECTION(COLLECTIONS_COMMANDS,
+        POST,
+        BACKUP,
+        "backup-collection", null
+    ),
+    RESTORE_COLLECTION(COLLECTIONS_COMMANDS,
+        POST,
+        RESTORE,
+        "restore-collection",
+        null
+    ),
     FORCE_LEADER(PER_COLLECTION_PER_SHARD_COMMANDS, POST, CollectionAction.FORCELEADER, "force-leader", null),
     BALANCE_SHARD_UNIQUE(PER_COLLECTION, POST, BALANCESHARDUNIQUE,"balance-shard-unique" , null)
     ;
@@ -251,6 +304,7 @@ public class CollectionApiMapping {
 
   public enum EndPoint implements V2EndPoint {
     CLUSTER_ALIASES("cluster.aliases"),
+    COLLECTIONS_COMMANDS("collections.Commands"),
     COLLECTION_STATE("collections.collection"),
     PER_COLLECTION("collections.collection.Commands"),
     PER_COLLECTION_SHARDS_COMMANDS("collections.collection.shards.Commands"),
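
Note on the `Utils.makeMap` pairs in `CREATE_COLLECTION` above: the left-hand names are the v1 request parameters and the right-hand names are the v2 body attributes they surface as (the `property.`/`properties.` pair is a prefix mapping; `CREATE_ALIAS` re-wraps the same maps under a `create-collection.` prefix):

    v1 parameter              v2 attribute
    collection.configName     config
    createNodeSet.shuffle     shuffleNodes
    createNodeSet             nodeSet
    property.<name>           properties.<name>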
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/BackupCollectionPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/BackupCollectionPayload.java
deleted file mode 100644
index 5d5e7ce..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/BackupCollectionPayload.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.request.beans;
-
-import org.apache.solr.common.annotation.JsonProperty;
-import org.apache.solr.common.util.ReflectMapWriter;
-
-/**
- * V2 API POJO for the /v2/collections 'backup-collection' command.
- *
- * Analogous to the request parameters for v1 /admin/collections?action=BACKUP API.
- */
-public class BackupCollectionPayload implements ReflectMapWriter {
-    @JsonProperty(required = true)
-    public String collection;
-
-    @JsonProperty(required = true)
-    public String name;
-
-    @JsonProperty
-    public String location;
-
-    @JsonProperty
-    public String repository;
-
-    @JsonProperty
-    public Boolean followAliases;
-
-    @JsonProperty
-    public String indexBackup;
-
-    @JsonProperty
-    public String commitName;
-
-    @JsonProperty
-    public Boolean incremental;
-
-    @JsonProperty
-    public String async;
-}
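
Note on the deleted bean above: it mirrored the JSON body of the v2 `backup-collection` command field for field. For reference, a body exercising every field, with the illustrative values lifted from the removed V2CollectionsAPIMappingTest earlier in this diff:

    POST /v2/collections
    {
      "backup-collection": {
        "name": "backupName",
        "collection": "collectionName",
        "location": "/some/location/uri",
        "repository": "someRepository",
        "followAliases": true,
        "indexBackup": "copy-files",
        "commitName": "someSnapshotName",
        "incremental": true,
        "async": "requestTrackingId"
      }
    }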
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
similarity index 86%
rename from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropPayload.java
rename to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
index 64b233a..b8de08d 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropPayload.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
@@ -20,7 +20,7 @@ package org.apache.solr.client.solrj.request.beans;
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class ClusterPropPayload implements ReflectMapWriter {
+public class ClusterPropInfo implements ReflectMapWriter {
 
   @JsonProperty
   public String urlScheme;
@@ -31,7 +31,7 @@ public class ClusterPropPayload implements ReflectMapWriter {
   public String location;
 
   @JsonProperty
-  public Defaults defaults;
+  public DefaultsInfo defaults;
 
   @JsonProperty
   public CollectionDefaults collectionDefaults;
@@ -48,17 +48,17 @@ public class ClusterPropPayload implements ReflectMapWriter {
 
   }
 
-  public static class Defaults implements ReflectMapWriter {
+  public static class DefaultsInfo implements ReflectMapWriter {
 
     @JsonProperty
     public CollectionDefaults collection;
 
     @JsonProperty
-    public Cluster cluster;
+    public ClusterInfo cluster;
 
   }
 
-  public static class Cluster implements ReflectMapWriter {
+  public static class ClusterInfo implements ReflectMapWriter {
     @JsonProperty
     public Boolean useLegacyReplicaAssignment;
 
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateAliasPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateAliasPayload.java
deleted file mode 100644
index b84b49c..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateAliasPayload.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.request.beans;
-
-import org.apache.solr.common.annotation.JsonProperty;
-import org.apache.solr.common.util.ReflectMapWriter;
-
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.solr.client.solrj.request.beans.V2ApiConstants.CREATE_COLLECTION_KEY;
-
-public class CreateAliasPayload implements ReflectMapWriter {
-    @JsonProperty(required = true)
-    public String name;
-
-    @JsonProperty
-    public List<String> collections;
-
-    @JsonProperty
-    public AliasRouter router;
-
-    @JsonProperty
-    public String tz;
-
-    @JsonProperty(CREATE_COLLECTION_KEY)
-    public Map<String, Object> createCollectionParams;
-
-    @JsonProperty
-    public String async;
-
-    public static class AliasRouter implements ReflectMapWriter {
-        @JsonProperty(required = true)
-        public String name;
-
-        @JsonProperty
-        public String field;
-
-        @JsonProperty
-        public String start;
-
-        @JsonProperty
-        public String interval;
-
-        @JsonProperty
-        public Integer maxFutureMs;
-
-        @JsonProperty
-        public String preemptiveCreateMath;
-
-        @JsonProperty
-        public String autoDeleteAge;
-
-        @JsonProperty
-        public Integer maxCardinality;
-
-        @JsonProperty
-        public String mustMatch;
-
-        @JsonProperty
-        public List<Map<String, Object>> routerList;
-    }
-}
-
-
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
similarity index 94%
rename from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigPayload.java
rename to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
index 98b22b4..5cd10b6 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigPayload.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
@@ -21,7 +21,7 @@ import java.util.Map;
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class CreateConfigPayload implements ReflectMapWriter {
+public class CreateConfigInfo implements ReflectMapWriter {
   @JsonProperty(required = true)
   public String name;
   @JsonProperty
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreatePayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreatePayload.java
deleted file mode 100644
index 403af4d..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreatePayload.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.request.beans;
-
-import org.apache.solr.common.annotation.JsonProperty;
-import org.apache.solr.common.util.ReflectMapWriter;
-
-import java.util.List;
-import java.util.Map;
-
-public class CreatePayload implements ReflectMapWriter {
-    @JsonProperty(required = true)
-    public String name;
-
-    @JsonProperty
-    public String config;
-
-    @JsonProperty
-    public Map<String, Object> router;
-
-    @JsonProperty
-    public Integer numShards;
-
-    @JsonProperty
-    public String shards;
-
-    @JsonProperty
-    public Integer replicationFactor;
-
-    @JsonProperty
-    public Integer nrtReplicas;
-
-    @JsonProperty
-    public Integer tlogReplicas;
-
-    @JsonProperty
-    public Integer pullReplicas;
-
-    @JsonProperty
-    public List<String> nodeSet;
-
-    @JsonProperty
-    public Boolean shuffleNodes;
-
-    @JsonProperty
-    public Map<String, Object> properties;
-
-    @JsonProperty
-    public String async;
-
-    @JsonProperty
-    public Boolean waitForFinalState;
-
-    @JsonProperty
-    public Boolean perReplicaState;
-}
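
Note on the removed `CreatePayload` above: its fields correspond directly to the attributes of the v2 `create` command on the same /v2/collections endpoint. A representative body, trimmed to a few fields, with values again taken from the removed mapping test:

    POST /v2/collections
    {
      "create": {
        "name": "techproducts",
        "config": "_default",
        "numShards": 1,
        "nrtReplicas": 1,
        "nodeSet": ["localhost:8983_solr", "localhost:7574_solr"],
        "shuffleNodes": true,
        "properties": {"foo": "bar", "foo2": "bar2"},
        "async": "requestTrackingId",
        "perReplicaState": false
      }
    }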
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/DeleteAliasPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/DeleteAliasPayload.java
deleted file mode 100644
index 7565c3d..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/DeleteAliasPayload.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.request.beans;
-
-import org.apache.solr.common.annotation.JsonProperty;
-import org.apache.solr.common.util.ReflectMapWriter;
-
-public class DeleteAliasPayload implements ReflectMapWriter {
-    @JsonProperty(required = true)
-    public String name;
-
-    @JsonProperty
-    public String async;
-}
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterMeta.java
similarity index 89%
rename from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterPayload.java
rename to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterMeta.java
index 42058bc..7cf70fd 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterPayload.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterMeta.java
@@ -25,7 +25,7 @@ import org.apache.solr.common.util.ReflectMapWriter;
 /**
  * POJO for Rate Limiter Metadata Configuration
  */
-public class RateLimiterPayload implements ReflectMapWriter {
+public class RateLimiterMeta implements ReflectMapWriter {
   @JsonProperty
   public Boolean enabled;
 
@@ -41,8 +41,8 @@ public class RateLimiterPayload implements ReflectMapWriter {
   @JsonProperty
   public Integer slotAcquisitionTimeoutInMS;
 
-  public RateLimiterPayload copy() {
-    RateLimiterPayload result = new RateLimiterPayload();
+  public RateLimiterMeta copy() {
+    RateLimiterMeta result = new RateLimiterMeta();
 
     result.enabled = enabled;
     result.guaranteedSlots = guaranteedSlots;
@@ -55,8 +55,8 @@ public class RateLimiterPayload implements ReflectMapWriter {
 
   @Override
   public boolean equals(Object obj) {
-    if (obj instanceof RateLimiterPayload) {
-      RateLimiterPayload that = (RateLimiterPayload) obj;
+    if (obj instanceof RateLimiterMeta) {
+      RateLimiterMeta that = (RateLimiterMeta) obj;
       return Objects.equals(this.enabled, that.enabled) &&
           Objects.equals(this.guaranteedSlots, that.guaranteedSlots) &&
           Objects.equals(this.allowedRequests, that.allowedRequests) &&
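
Note on the rename above: the bean stays a plain ReflectMapWriter, so the renamed class is used the same way. A minimal sketch restricted to the fields visible in these hunks (values are placeholders):

    RateLimiterMeta limits = new RateLimiterMeta();
    limits.enabled = true;
    limits.guaranteedSlots = 5;
    limits.allowedRequests = 20;
    limits.slotAcquisitionTimeoutInMS = 1000;
    RateLimiterMeta snapshot = limits.copy();   // field-by-field copy, per copy() above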
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RestoreCollectionPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RestoreCollectionPayload.java
deleted file mode 100644
index 2634802..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RestoreCollectionPayload.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.request.beans;
-
-import org.apache.solr.common.annotation.JsonProperty;
-import org.apache.solr.common.util.ReflectMapWriter;
-
-import java.util.Map;
-
-import static org.apache.solr.client.solrj.request.beans.V2ApiConstants.CREATE_COLLECTION_KEY;
-
-/**
- * V2 API POJO for the /v2/collections 'restore-collection' command.
- *
- * Analogous to the request parameters for v1 /admin/collections?action=RESTORE API.
- */
-public class RestoreCollectionPayload implements ReflectMapWriter {
-
-    @JsonProperty(required = true)
-    public String collection;
-
-    @JsonProperty(required = true)
-    public String name;
-
-    @JsonProperty
-    public String location;
-
-    @JsonProperty
-    public String repository;
-
-    @JsonProperty
-    public Integer backupId;
-
-    @JsonProperty(CREATE_COLLECTION_KEY)
-    public Map<String, Object> createCollectionParams;
-
-    @JsonProperty
-    public String async;
-}
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/SetAliasPropertyPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/SetAliasPropertyPayload.java
deleted file mode 100644
index c3c8585..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/SetAliasPropertyPayload.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.request.beans;
-
-import org.apache.solr.common.annotation.JsonProperty;
-import org.apache.solr.common.util.ReflectMapWriter;
-
-import java.util.Map;
-
-public class SetAliasPropertyPayload implements ReflectMapWriter {
-    @JsonProperty(required = true)
-    public String name;
-
-    @JsonProperty
-    public String async;
-
-    @JsonProperty
-    public Map<String, Object> properties;
-}
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/V2ApiConstants.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/V2ApiConstants.java
deleted file mode 100644
index 174b8bf..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/V2ApiConstants.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.client.solrj.request.beans;
-
-public class V2ApiConstants {
-    private V2ApiConstants() { /* Private ctor prevents instantiation */ }
-
-    /**
-     * Parent key for collection or alias properties to set.
-     */
-    public static final String PROPERTIES_KEY = "properties";
-    /**
-     * Parent key for v2 params used to create a collection.
-     */
-    public static final String CREATE_COLLECTION_KEY = "create-collection";
-
-    /**
-     * Parent key holding alias-router parameters.
-     */
-    public static final String ROUTER_KEY = "router";
-
-    /**
-     * Parameter name for the configset used by a collection
-     */
-    public static final String CONFIG = "config";
-
-    /**
-     * Property controlling whether 'nodeSet' should be shuffled before use.
-     */
-    public static final String SHUFFLE_NODES = "shuffleNodes";
-
-    /**
-     * The set of nodes to consider as potential locations for a new collection or its constituent components.
-     */
-    public static final String NODE_SET = "nodeSet";
-
-    /**
-     * The collections to be included in an alias.
-     */
-    public static final String COLLECTIONS = "collections";
-}
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
index 357bda0..6f893c9 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
@@ -35,13 +35,6 @@ import static org.apache.solr.common.util.Utils.toJSONString;
  */
 public class ZkNodeProps implements JSONWriter.Writable {
 
-  /**
-   * Feature flag to enable storing the 'base_url' property; base_url will not be stored as of Solr 9.x.
-   * Installations that use an older (pre-8.8) SolrJ against a 8.8.0 or newer server will need to set this system
-   * property to 'true' to avoid NPEs when reading cluster state from Zookeeper, see SOLR-15145.
-   */
-  static final boolean STORE_BASE_URL = Boolean.parseBoolean(System.getProperty("solr.storeBaseUrl", "false"));
-
   protected final Map<String,Object> propMap;
 
   /**
@@ -52,7 +45,7 @@ public class ZkNodeProps implements JSONWriter.Writable {
 
     // don't store base_url if we have a node_name to recompute from when we read back from ZK
     // sub-classes that know they need a base_url (Replica) can eagerly compute in their ctor
-    if (!STORE_BASE_URL && this.propMap.containsKey(ZkStateReader.NODE_NAME_PROP)) {
+    if (this.propMap.containsKey(ZkStateReader.NODE_NAME_PROP)) {
       this.propMap.remove(ZkStateReader.BASE_URL_PROP);
     }
 
@@ -125,9 +118,14 @@ public class ZkNodeProps implements JSONWriter.Writable {
   @Override
   public void write(JSONWriter jsonWriter) {
     // don't write out the base_url if we have a node_name
-    if (!STORE_BASE_URL && propMap.containsKey(ZkStateReader.BASE_URL_PROP) && propMap.containsKey(ZkStateReader.NODE_NAME_PROP)) {
-      final Map<String,Object> filtered = new HashMap<>(propMap);
-      filtered.remove(ZkStateReader.BASE_URL_PROP);
+    if (propMap.containsKey(ZkStateReader.BASE_URL_PROP) && propMap.containsKey(ZkStateReader.NODE_NAME_PROP)) {
+      final Map<String,Object> filtered = new HashMap<>();
+      // a stream/collect copy is not possible here because Collectors.toMap rejects null values
+      propMap.forEach((key, value) -> {
+        if (!ZkStateReader.BASE_URL_PROP.equals(key)) {
+          filtered.put(key, value);
+        }
+      });
       jsonWriter.write(filtered);
     } else {
       jsonWriter.write(propMap);
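The in-line comment in the hunk above points at a real JDK limitation: Collectors.toMap accumulates through Map.merge, which throws a NullPointerException for null values, and ZkNodeProps property maps may legitimately contain nulls. A minimal, self-contained plain-JDK sketch (not part of the patch) of the difference:

    import java.util.HashMap;
    import java.util.Map;

    public class NullValueFilterSketch {
      public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put("node_name", "127.0.0.1:8983_solr");
        props.put("base_url", "http://127.0.0.1:8983/solr");
        props.put("leader", null); // null values are legal in the map

        // Throws NullPointerException, because java.util.stream.Collectors.toMap
        // uses Map.merge, which rejects null values:
        //   props.entrySet().stream()
        //       .filter(e -> !"base_url".equals(e.getKey()))
        //       .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        // The forEach-based copy used in the patch tolerates null values:
        Map<String, Object> filtered = new HashMap<>();
        props.forEach((key, value) -> {
          if (!"base_url".equals(key)) {
            filtered.put(key, value);
          }
        });
        System.out.println(filtered); // base_url dropped, null-valued entries kept
      }
    }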
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
index d259f35..c38f397 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
@@ -39,11 +39,6 @@ public interface CollectionAdminParams {
   String CREATE_NODE_SET_PARAM = "createNodeSet";
 
   /**
-   * The number of shards to create a particular collection with.
-   */
-  String NUM_SHARDS = "numShards";
-
-  /**
    * A parameter which specifies if the provided list of Solr nodes (via {@linkplain #CREATE_NODE_SET_PARAM})
    * should be shuffled before being used.
    */
diff --git a/solr/solrj/src/resources/apispec/collections.Commands.json b/solr/solrj/src/resources/apispec/collections.Commands.json
new file mode 100644
index 0000000..4d4a178
--- /dev/null
+++ b/solr/solrj/src/resources/apispec/collections.Commands.json
@@ -0,0 +1,298 @@
+{
+  "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#create",
+  "description": "Create collections and collection aliases, backup or restore collections, and delete collections and aliases.",
+  "methods": [
+    "POST"
+  ],
+  "url": {
+    "paths": [
+      "/collections",
+      "/c"
+    ]
+  },
+  "commands": {
+    "create": {
+      "type": "object",
+      "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#create",
+      "description": "Create a collection.",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the collection to be created."
+        },
+        "config": {
+          "type": "string",
+          "description": "The name of the configuration set (which must already be stored in ZooKeeper) to use for this collection. If not provided, Solr will default to the collection name as the configuration set name."
+        },
+        "router": {
+          "type": "object",
+          "documentation": "https://lucene.apache.org/solr/guide/shards-and-indexing-data-in-solrcloud.html",
+          "description": "These properties define how to distribute documents across a collection's shards.",
+          "properties": {
+            "name": {
+              "type": "string",
+              "enum":["implicit","compositeId"],
+              "description": "The router implementation to use for this collection. There are two options: compositeId or implicit. The compositeId option has Solr decide how to distribute documents (with some possibilities for customization). The implicit option requires you to define your own routing strategy, and puts the balancing of documents across shards entirely in your hands.",
+              "default": "compositeId"
+            },
+            "field": {
+              "type": "string",
+              "description": "A field to be used by Solr to identify the shard a document should be routed to. By default, the field defined as the unique ID for each document is used, but an alternative field can be defined with this parameter."
+            }
+          }
+        },
+        "numShards": {
+          "type": "integer",
+          "description": "The number of shards to be created as part of the collection. Shards are logical partitions of a single collection. Each shard has at least one replica, but more replicas for each shard can be defined with the replicationFactor property. This is a required parameter when using the 'compositeId' router."
+        },
+        "shards": {
+          "type": "string",
+          "description": "A comma-separated list of shard names, e.g., shard-x,shard-y,shard-z. This is a required parameter when using the 'implicit' router."
+        },
+        "replicationFactor": {
+          "type": "integer",
+          "description": "The number of NRT replicas to be created for each shard. Replicas are physical copies of each shard, acting as failover for the shard."
+        },
+        "nrtReplicas": {
+          "type": "integer",
+          "description": "The number of NRT replicas to be created for each shard. Replicas are physical copies of each shard, acting as failover for the shard. Replicas of type NRT will be updated with each document that is added to the cluster, and can use \"softCommits\" to get a new view of the index in Near Real Time. This parameter works in the same way as 'replicationFactor'"
+        },
+        "tlogReplicas": {
+          "type": "integer",
+          "description": "The number of TLOG replicas to be created for each shard. TLOG replicas update their transaction log for every update to the cluster, but only the shard leader updates the local index; other TLOG replicas use segment replication to copy the latest index files from the leader."
+        },
+        "pullReplicas": {
+          "type": "integer",
+          "description": "The number of PULL replicas to be created for each shard. PULL replicas don't receive copies of the documents on update requests; they just replicate the latest segments periodically from the shard leader. PULL replicas can't become shard leaders, and need at least one active TLOG (recommended) or NRT replica in the shard to replicate from."
+        },
+        "nodeSet": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "description": "Defines nodes to spread the new collection across. If not provided, the collection will be spread across all live Solr nodes. The names to use are the 'node_name', which can be found by a request to the cluster/nodes endpoint. A special value of EMPTY will create no shards or replicas for the new collection. In this case, shards and replicas can be added later with the add-replica command available on the /collections/{collection}/shards endpoint."
+        },
+        "shuffleNodes": {
+          "type": "boolean",
+          "description": "Controls whether or not the shard-replicas created for this collection will be assigned to the nodes specified by the nodeSet property in a sequential manner, or if the list of nodes should be shuffled prior to creating individual replicas. A 'false' value makes the results of a collection creation predictable and gives more exact control over the location of the individual shard-replicas, but 'true' can be a better choice for ensuring replicas are distributed e [...]
+        },
+        "properties": {
+          "type": "object",
+          "documentation": "https://lucene.apache.org/solr/guide/defining-core-properties.html",
+          "description": "Allows adding core.properties for the collection. Some examples of core properties you may want to modify include the config set, the node name, the data directory, among others.",
+          "additionalProperties": true
+        },
+        "async": {
+          "type": "string",
+          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
+        },
+        "waitForFinalState": {
+          "type": "boolean",
+          "description": "If true, the request will complete only when all affected replicas become active.",
+          "default": false
+        },
+        "perReplicaState": {
+          "type": "boolean",
+          "description": "Use Per replica states",
+          "description": "Use per-replica states",
+        }
+      },
+      "required": [
+        "name"
+      ]
+    },
+    "create-alias": {
+      "documentation": "https://lucene.apache.org/solr/guide/collection-aliasing.html#createalias",
+      "description": "Allows one or more collections to be known by another name (including time partitioned collections). If this command is used on an existing alias, the existing alias will be replaced with the new collection details.",
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The alias name to be created."
+        },
+        "collections": {
+          "type": "array",
+          "description": "The list of collections to be known as this alias. Incompatible with any of the routing parameters. Either this parameter or a complete set of routing parameters is required.",
+          "items": {
+            "type": "string"
+          }
+        },
+        "router" : {
+          "type":"object",
+          "documentation": "https://lucene.apache.org/solr/guide/collection-aliasing.html#createalias",
+          "description":"Routing specific properties to define a time routed alias.  Do not specify 'collections' when creating a time routed alias.",
+          "properties" : {
+            "name" : {
+              "type" : "string",
+              "description": "The type of routing to perform. Currently only 'time' is supported, and it's required."
+            },
+            "field" : {
+              "type": "string",
+              "description": "The date field name in incoming documents that is consulted to decide which collection the document should be routed to."
+            },
+            "start": {
+              "type": "string",
+              "description": "The earliest date/time in a document that may be indexed into this alias. Documents with values less than this will return an error. For time based routing this may be a date math expression."
+            },
+            "interval" : {
+              "type": "string",
+              "description": "A specification of the width of the interval for each partition collection. For time based routing this should be a date math expression fragment starting with the + character."
+            },
+            "maxFutureMs": {
+              "type": "integer",
+              "description": "How many milliseconds into the future to accept documents. Documents with a value in router.field that is greater than now() + maxFutureMs will be rejected to avoid provisioning too many resources."
+            },
+            "preemptiveCreateMath":{
+              "type": "string",
+              "description": "If a document arrives with a timestamp that is after the end time of the most recent collection minus this interval, then the next collection will be created asynchronously. Without this setting, collections are created synchronously when required by the document time stamp and thus block the flow of documents until the collection is created (possibly several seconds). Preemptive creation reduces these hiccups. If set to enough time (perhaps an hour or more) [...]
+            },
+            "autoDeleteAge": {
+              "type": "string",
+              "description": "A date math expression yielding a time in the past. Collections covering a period of time entirely before this age will be automatically deleted."
+            },
+            "maxCardinality": {
+              "type": "integer",
+              "description": "The maximum number of categories allowed for this alias."
+            },
+            "mustMatch": {
+              "type": "string",
+              "description": "A regular expression that the value of the field specified by `router.field` must match before a corresponding collection will be created."
+            },
+            "routerList": {
+              "type": "array",
+              "description": "A list of router property sets to be used with router type Dimensional[foo,bar] where foo and bar are valid router type names (i.e. time or category). The order must correspond to the type specification in [] in the Dimensional type, so Dimensional[category,time] would require the first set of router properties to be valid for a category routed alias, and the second set to be valid for a time routed alias. In these sets of properties, router.name will be ign [...]
+              "items": {
+                "type": "object",
+                "additionalProperties": true
+              }
+            }
+          }
+        },
+        "TZ": {
+          "type": "string",
+          "description": "Optional timezone for use with any date math that may exist in other parameters.  Defaults to UTC."
+        },
+        "create-collection": {
+          "type": "object",
+          "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#create",
+          "description": "The settings to use to create a collection for each new time partition. Most options from the collection create command are available, except for 'name', 'async' and 'waitForFinalState'.",
+          "additionalProperties": true
+        },
+        "async": {
+          "type": "string",
+          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
+        }
+      },
+      "required": [
+        "name"
+      ]
+    },
+    "delete-alias": {
+      "documentation": "https://lucene.apache.org/solr/guide/collection-aliasing.html#deletealias",
+      "description": "Deletes a collection alias",
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the alias to delete."
+        },
+        "async": {
+          "type": "string",
+          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
+        }
+      },
+      "required": [
+        "name"
+      ]
+    },
+    "set-alias-property": {
+      "documentation": "https://lucene.apache.org/solr/guide/collection-aliasing.html#modifyalias",
+      "description": "Allows changing the properties on an alias. If a key is set with an empty string then it will be removed",
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The alias name on which to set properties."
+        },
+        "properties" : {
+          "type": "object",
+          "description": "A map of key/value pairs that will be associated with the alias as alias properties (metadata). An empty value will delete any existing value for a given key.",
+          "additionalProperties": true
+        },
+        "async": {
+          "type": "string",
+          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
+        }
+      },
+      "required": [
+        "name"
+      ]
+    },
+    "backup-collection": {
+      "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#backup",
+      "description": "Back up Solr indexes and configurations for a specific collection. One copy of the indexes will be taken from each shard, and the config set for the collection will also be copied.",
+      "type": "object",
+      "properties": {
+        "collection": {
+          "type": "string",
+          "description": "The name of the collection to back up."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the backup."
+        },
+        "location": {
+          "type": "string",
+          "description": "A location on a shared drive for the backup-collection command to write to. Alternatively, it can be set as a cluster property with the cluster endpoint, which also supports setting a location."
+        },
+        "followAliases": {
+          "type": "boolean",
+          "description": "Controls whether aliases are resolved when trying to back up the specified collection, or whether Solr should only back up the provided collection name if it matches a concrete collection."
+        },
+        "incremental": {
+          "type": "boolean",
+          "description": "An internal property that controls whether the backup should use the standard 'incremental' file format or the deprecated 'full-snapshot' based format."
+        },
+        "async": {
+          "type": "string",
+          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
+        }
+      },
+      "required": [
+        "collection",
+        "name"
+      ]
+    },
+    "restore-collection": {
+      "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#restore",
+      "description": "Restore Solr indexes and configurations from a backup. You cannot restore into the same collection you took the backup from. The target collection must not exist before calling this command, as it will be created by the restore action. The new collection will have the same number of shards and replicas as the original collection, and all routing strategies will be retained.",
+      "type": "object",
+      "properties": {
+        "collection": {
+          "type": "string",
+          "description": "The name of the collection the backup will be restored to. This collection must not exist prior to this call."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the backup file."
+        },
+        "location": {
+          "type": "string",
+          "description": "The location on the shared drive for the restore-collection command to read from. Alternatively, it can be set as a cluster property with the cluster endpoint, which also supports setting a location."
+        },
+        "backupId": {
+          "type": "integer",
+          "description": "The ID of the backup to restore, when the provided location and backup name hold multiple backups for the provided collection.  Defaults to the most recent backup if not specified."
+        },
+        "async": {
+          "type": "string",
+          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
+        }
+      },
+      "required": [
+        "collection",
+        "name"
+      ]
+    }
+  }
+}
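For reference, a minimal SolrJ sketch (not part of this patch) of how the 'create' command in the restored collections.Commands.json spec is exercised, including the perReplicaState flag that SOLR-15138 concerns. The collection name, config name, and the pre-built SolrClient ('client') are assumptions for illustration:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.request.V2Request;

    // 'client' is assumed to be an already-configured CloudSolrClient.
    V2Request createReq = new V2Request.Builder("/collections")
        .withMethod(SolrRequest.METHOD.POST)
        .withPayload("{\"create\": {\"name\": \"perReplicaColl\", \"config\": \"conf1\", "
            + "\"numShards\": 3, \"nrtReplicas\": 2, \"perReplicaState\": true}}")
        .build();
    createReq.process(client);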
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 56769c2..7d8a062 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -16,7 +16,9 @@
  */
 package org.apache.solr.client.solrj.io.stream;
 
-import java.io.*;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -28,7 +30,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
-import java.util.zip.GZIPOutputStream;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
@@ -3483,28 +3484,6 @@ public class StreamExpressionTest extends SolrCloudTestCase {
   }
 
   @Test
-  public void testCatStreamSingleGzipFile() throws Exception {
-    final String catStream = "cat(\"topLevel1.txt.gz\")";
-    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
-    paramsLoc.set("expr", catStream);
-    paramsLoc.set("qt", "/stream");
-    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+FILESTREAM_COLLECTION;
-
-    SolrStream solrStream = new SolrStream(url, paramsLoc);
-
-    StreamContext context = new StreamContext();
-    solrStream.setStreamContext(context);
-    List<Tuple> tuples = getTuples(solrStream);
-    assertEquals(4, tuples.size());
-
-    for (int i = 0; i < 4; i++) {
-      Tuple t = tuples.get(i);
-      assertEquals("topLevel1.txt.gz line " + String.valueOf(i+1), t.get("line"));
-      assertEquals("topLevel1.txt.gz", t.get("file"));
-    }
-  }
-
-  @Test
   public void testCatStreamEmptyFile() throws Exception {
     final String catStream = "cat(\"topLevel-empty.txt\")";
     ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
@@ -3669,7 +3648,6 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     Files.createDirectories(dataDir);
     Files.createDirectories(dataDir.resolve("directory1"));
 
-    populateFileWithGzipData(dataDir.resolve("topLevel1.txt.gz"));
     populateFileWithData(dataDir.resolve("topLevel1.txt"));
     populateFileWithData(dataDir.resolve("topLevel2.txt"));
     Files.createFile(dataDir.resolve("topLevel-empty.txt"));
@@ -3687,16 +3665,6 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     }
   }
 
-  private static void populateFileWithGzipData(Path dataFile) throws Exception {
-    Files.createFile(dataFile);
-    try (final BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(dataFile.toFile())), StandardCharsets.UTF_8))) {
-      for (int i = 1; i <=4; i++) {
-        writer.write(dataFile.getFileName() + " line " + i);
-        writer.newLine();
-      }
-    }
-  }
-
   protected List<Tuple> getTuples(TupleStream tupleStream) throws IOException {
     List<Tuple> tuples = new ArrayList<Tuple>();
 
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
index a57d859..b144e40 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
@@ -17,19 +17,39 @@
 
 package org.apache.solr.client.solrj.request;
 
+import java.io.IOException;
+import java.util.Map;
+
+import com.google.common.collect.ImmutableMap;
 import org.apache.solr.SolrTestCase;
 import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
 import org.apache.solr.common.util.ContentStreamBase;
 import org.apache.solr.common.util.Utils;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.Map;
-
 public class TestV1toV2ApiMapper extends SolrTestCase {
 
   @Test
   // commented out on: 24-Dec-2018   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
+  public void testCreate() throws IOException {
+    Create cmd = CollectionAdminRequest
+        .createCollection("mycoll", "conf1", 3, 2)
+        .setProperties(ImmutableMap.<String,String>builder()
+            .put("p1","v1")
+            .put("p2","v2")
+            .build());
+    V2Request v2r = V1toV2ApiMapper.convert(cmd).build();
+    Map<?,?> m = (Map<?,?>) Utils.fromJSON(ContentStreamBase.create(new BinaryRequestWriter(), v2r).getStream());
+    assertEquals("/c", v2r.getPath());
+    assertEquals("v1", Utils.getObjectByPath(m,true,"/create/properties/p1"));
+    assertEquals("v2", Utils.getObjectByPath(m,true,"/create/properties/p2"));
+    assertEquals("3", Utils.getObjectByPath(m,true,"/create/numShards"));
+    assertEquals("2", Utils.getObjectByPath(m,true,"/create/nrtReplicas"));
+  }
+
+  @Test
+  // commented out on: 24-Dec-2018   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
   public void testAddReplica() throws IOException {
     CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard("mycoll", "shard1");
     V2Request v2r = V1toV2ApiMapper.convert(addReplica).build();
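A possible follow-up sketch (not in this patch) extending the testCreate pattern above to the perReplicaState flag; it assumes Create.setPerReplicaState exists on this branch and chains like the other setters, and only checks that the flag is carried through the v1-to-v2 mapping:

    Create prsCmd = CollectionAdminRequest
        .createCollection("mycoll", "conf1", 3, 2)
        .setPerReplicaState(Boolean.TRUE);
    V2Request prsV2 = V1toV2ApiMapper.convert(prsCmd).build();
    Map<?,?> body = (Map<?,?>) Utils.fromJSON(ContentStreamBase.create(new BinaryRequestWriter(), prsV2).getStream());
    // The exact serialized form (Boolean vs String) is not asserted here.
    assertNotNull(Utils.getObjectByPath(body, true, "/create/perReplicaState"));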
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java b/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
index 66aa39f..d539088 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
@@ -28,6 +28,7 @@ import static org.apache.solr.common.util.ValidatingJsonMap.NOT_NULL;
 public class JsonValidatorTest extends SolrTestCaseJ4  {
 
   public void testSchema() {
+    checkSchema("collections.Commands");
     checkSchema("collections.collection.Commands");
     checkSchema("collections.collection.shards.Commands");
     checkSchema("collections.collection.shards.shard.Commands");
@@ -42,42 +43,38 @@ public class JsonValidatorTest extends SolrTestCaseJ4  {
 
 
   public void testSchemaValidation() {
-    // merge-indexes chosen to exercise string and array/list props.
-    ValidatingJsonMap spec = Utils.getSpec("cores.core.Commands").getSpec();
-    final Map<String, Object> mergeIndexesSchema = spec.getMap("commands", NOT_NULL).getMap("merge-indexes", NOT_NULL);
-    final JsonSchemaValidator mergeIndexesSchemaValidator = new JsonSchemaValidator(mergeIndexesSchema);
-
-    List<String> errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : x, indexDir: [ c1 , c2]}"));
+    ValidatingJsonMap spec = Utils.getSpec("collections.Commands").getSpec();
+    @SuppressWarnings({"rawtypes"})
+    Map createSchema = spec.getMap("commands", NOT_NULL).getMap("create-alias", NOT_NULL);
+    JsonSchemaValidator validator = new JsonSchemaValidator(createSchema);
+    List<String> errs = validator.validateJson(Utils.fromJSONString("{name : x, collections: [ c1 , c2]}"));
     assertNull(toJSONString(errs), errs);
-    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : x, indexDir: [c1] }"));
+    errs = validator.validateJson(Utils.fromJSONString("{name : x, collections: [c1] }"));
     assertNull(toJSONString(errs), errs);
-    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : x, x:y, indexDir: [ c1 , c2]}"));
+    errs = validator.validateJson(Utils.fromJSONString("{name : x, x:y, collections: [ c1 , c2]}"));
     assertNotNull(toJSONString(errs), errs);
     assertTrue(toJSONString(errs), errs.get(0).contains("Unknown"));
-    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : 123, indexDir: c1 }"));
+    errs = validator.validateJson(Utils.fromJSONString("{name : 123, collections: c1 }"));
     assertNotNull(toJSONString(errs), errs);
     assertTrue(toJSONString(errs), errs.get(0).contains("expected"));
-    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{x:y, indexDir: [ c1 , c2]}"));
+    errs = validator.validateJson(Utils.fromJSONString("{x:y, collections: [ c1 , c2]}"));
     assertTrue(toJSONString(errs), StrUtils.join(errs, '|').contains("Unknown"));
-    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : x, indexDir: [ 1 , 2]}"));
+    errs = validator.validateJson(Utils.fromJSONString("{name : x, collections: [ 1 , 2]}"));
     assertFalse(toJSONString(errs), errs.isEmpty());
     assertTrue(toJSONString(errs), errs.get(0).contains("expected"));
-
-
-    final JsonSchemaValidator personSchemaValidator = new JsonSchemaValidator("{" +
+    validator = new JsonSchemaValidator("{" +
         "  type:object," +
         "  properties: {" +
         "   age : {type: number}," +
         "   adult : {type: boolean}," +
         "   name: {type: string}}}");
-    errs = personSchemaValidator.validateJson(Utils.fromJSONString("{name:x, age:21, adult:true}"));
+    errs = validator.validateJson(Utils.fromJSONString("{name:x, age:21, adult:true}"));
     assertNull(errs);
-    errs = personSchemaValidator.validateJson(Utils.fromJSONString("{name:x, age:'21', adult:'true'}"));
+    errs = validator.validateJson(Utils.fromJSONString("{name:x, age:'21', adult:'true'}"));
     assertNotNull(errs);
-    errs = personSchemaValidator.validateJson(Utils.fromJSONString("{name:x, age:'x21', adult:'true'}"));
-    assertEquals(1, errs.size());
-
 
+    errs = validator.validateJson(Utils.fromJSONString("{name:x, age:'x21', adult:'true'}"));
+    assertEquals(1, errs.size());
     Exception e = expectThrows(Exception.class, () -> {
       new JsonSchemaValidator("{" +
           "  type:object," +
@@ -109,16 +106,16 @@ public class JsonValidatorTest extends SolrTestCaseJ4  {
     });
     assertTrue(e.getMessage().contains("Unknown key : propertes"));
 
-    final JsonSchemaValidator personWithEnumValidator = new JsonSchemaValidator("{" +
+    validator = new JsonSchemaValidator("{" +
         "  type:object," +
         "  properties: {" +
         "   age : {type: number}," +
         "   sex: {type: string, enum:[M, F]}," +
         "   adult : {type: boolean}," +
         "   name: {type: string}}}");
-    errs = personWithEnumValidator.validateJson(Utils.fromJSONString("{name: 'Joe Average' , sex:M}"));
+    errs = validator.validateJson(Utils.fromJSONString("{name: 'Joe Average' , sex:M}"));
     assertNull("errs are " + errs, errs);
-    errs = personWithEnumValidator.validateJson(Utils.fromJSONString("{name: 'Joe Average' , sex:m}"));
+    errs = validator.validateJson(Utils.fromJSONString("{name: 'Joe Average' , sex:m}"));
     assertEquals(1, errs.size());
     assertTrue(errs.get(0).contains("Value of enum"));
 
@@ -142,8 +139,8 @@ public class JsonValidatorTest extends SolrTestCaseJ4  {
         "\n" +
         "  }\n" +
         "}";
-    final JsonSchemaValidator nestedObjectValidator = new JsonSchemaValidator(schema);
-    nestedObjectValidator.validateJson(Utils.fromJSONString("{\n" +
+    validator = new JsonSchemaValidator(schema);
+    validator.validateJson(Utils.fromJSONString("{\n" +
         "  'links': [\n" +
         "    {\n" +
         "        'rel': 'x',\n" +
@@ -164,12 +161,11 @@ public class JsonValidatorTest extends SolrTestCaseJ4  {
         "'type' : 'object',\n" +
         "'oneOf' : ['a', 'b']\n" +
         "}";
-
-    final JsonSchemaValidator mutuallyExclusivePropertiesValidator = new JsonSchemaValidator(schema);
-    errs = mutuallyExclusivePropertiesValidator.validateJson(Utils.fromJSONString("" +
+    validator = new JsonSchemaValidator(schema);
+    errs = validator.validateJson(Utils.fromJSONString("" +
         "{'c':'val'}"));
     assertNotNull(errs);
-    errs = mutuallyExclusivePropertiesValidator.validateJson(Utils.fromJSONString("" +
+    errs = validator.validateJson(Utils.fromJSONString("" +
         "{'a':'val'}"));
     assertNull(errs);
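To close the loop with the restored spec, a minimal sketch (not part of the patch) of validating a SOLR-15138-style 'create' payload, reusing exactly the lookup pattern from testSchemaValidation above:

    ValidatingJsonMap createSpec = Utils.getSpec("collections.Commands").getSpec();
    @SuppressWarnings({"rawtypes"})
    Map createSchema = createSpec.getMap("commands", NOT_NULL).getMap("create", NOT_NULL);
    JsonSchemaValidator createValidator = new JsonSchemaValidator(createSchema);
    List<String> createErrs = createValidator.validateJson(
        Utils.fromJSONString("{name: mycoll, config: conf1, numShards: 3, perReplicaState: true}"));
    assertNull(toJSONString(createErrs), createErrs); // null means the payload matched the schema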