Posted to commits@lucene.apache.org by is...@apache.org on 2021/02/12 19:30:23 UTC

[lucene-solr] branch jira/solr15138 updated: Merging master to this branch

This is an automated email from the ASF dual-hosted git repository.

ishan pushed a commit to branch jira/solr15138
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git


The following commit(s) were added to refs/heads/jira/solr15138 by this push:
     new 9c62745  Merging master to this branch
9c62745 is described below

commit 9c627459e860eccfa7f9ba04367abf6222cba159
Author: Ishan Chattopadhyaya <is...@apache.org>
AuthorDate: Sat Feb 13 01:00:01 2021 +0530

    Merging master to this branch
---
 .../apache/lucene/missingdoclet/MissingDoclet.java |  13 +-
 gradle/ant-compat/folder-layout.gradle             |  20 +
 gradle/documentation/render-javadoc.gradle         |   2 -
 gradle/testing/randomization/policies/tests.policy |   6 +-
 gradle/validation/forbidden-apis.gradle            |  17 +
 gradle/validation/rat-sources.gradle               |  10 +-
 lucene/CHANGES.txt                                 |   8 +-
 lucene/analysis/common/build.gradle                |  15 +
 .../analysis/hunspell/CheckCompoundPattern.java    |   4 +-
 .../lucene/analysis/hunspell/CompoundRule.java     |  24 +-
 .../lucene/analysis/hunspell/Dictionary.java       | 328 ++++++++-------
 .../lucene/analysis/hunspell/FlagEnumerator.java   |  86 ++++
 .../analysis/hunspell/GeneratingSuggester.java     | 438 +++++++++++++++++++++
 .../hunspell/{SpellChecker.java => Hunspell.java}  | 219 +++++++----
 .../analysis/hunspell/ModifyingSuggester.java      |  86 +++-
 .../org/apache/lucene/analysis/hunspell/Root.java} |  39 +-
 .../apache/lucene/analysis/hunspell/Stemmer.java   | 211 +++++-----
 .../lucene/analysis/hunspell/package-info.java     |   6 +-
 .../analysis/hunspell/TestAllDictionaries.java     | 191 +++++++--
 .../lucene/analysis/hunspell/TestDictionary.java   |  27 +-
 .../lucene/analysis/hunspell/TestDutchIJ.java      |   1 -
 .../hunspell/TestHunspellRepositoryTestCases.java  |  28 +-
 .../lucene/analysis/hunspell/TestPerformance.java  |  30 +-
 ...pellCheckerTest.java => TestSpellChecking.java} |  45 ++-
 .../apache/lucene/analysis/hunspell/allcaps.sug    |   3 +
 .../apache/lucene/analysis/hunspell/base_utf.sug   |  13 +
 .../lucene/analysis/hunspell/checksharps.sug       |   1 +
 .../lucene/analysis/hunspell/forbiddenword.aff     |  11 +
 .../lucene/analysis/hunspell/forbiddenword.dic     |  11 +
 .../lucene/analysis/hunspell/forbiddenword.good    |   3 +
 .../lucene/analysis/hunspell/forbiddenword.wrong   |   4 +
 .../apache/lucene/analysis/hunspell/forceucase.sug |   2 +
 .../lucene/analysis/hunspell/forgivable-errors.aff |   8 +-
 .../lucene/analysis/hunspell/forgivable-errors.dic |   2 +
 .../org/apache/lucene/analysis/hunspell/i58202.aff |   4 +
 .../org/apache/lucene/analysis/hunspell/i58202.dic |   5 +
 .../apache/lucene/analysis/hunspell/i58202.good    |  10 +
 .../org/apache/lucene/analysis/hunspell/i58202.sug |  13 +
 .../apache/lucene/analysis/hunspell/i58202.wrong   |  13 +
 .../apache/lucene/analysis/hunspell/keepcase.sug   |   8 +
 .../org/apache/lucene/analysis/hunspell/map.aff    |   9 +
 .../org/apache/lucene/analysis/hunspell/map.dic    |   4 +
 .../org/apache/lucene/analysis/hunspell/map.sug    |   3 +
 .../org/apache/lucene/analysis/hunspell/map.wrong  |   3 +
 .../apache/lucene/analysis/hunspell/nosuggest.aff  |   5 +
 .../apache/lucene/analysis/hunspell/nosuggest.dic  |   3 +
 .../apache/lucene/analysis/hunspell/nosuggest.good |   3 +
 .../apache/lucene/analysis/hunspell/nosuggest.sug  |   0
 .../lucene/analysis/hunspell/nosuggest.wrong       |   3 +
 .../org/apache/lucene/analysis/hunspell/oconv.aff  |  20 +
 .../org/apache/lucene/analysis/hunspell/oconv.dic  |   4 +
 .../org/apache/lucene/analysis/hunspell/oconv.good |   2 +
 .../org/apache/lucene/analysis/hunspell/oconv.sug  |   3 +
 .../apache/lucene/analysis/hunspell/oconv.wrong    |   3 +
 .../analysis/hunspell/opentaal_forbiddenword1.aff  |   9 +
 .../analysis/hunspell/opentaal_forbiddenword1.dic  |   5 +
 .../analysis/hunspell/opentaal_forbiddenword1.good |   3 +
 .../analysis/hunspell/opentaal_forbiddenword1.sug  |   1 +
 .../hunspell/opentaal_forbiddenword1.wrong         |   5 +
 .../analysis/hunspell/opentaal_forbiddenword2.aff  |   7 +
 .../analysis/hunspell/opentaal_forbiddenword2.dic  |   5 +
 .../analysis/hunspell/opentaal_forbiddenword2.good |   4 +
 .../analysis/hunspell/opentaal_forbiddenword2.sug  |   1 +
 .../hunspell/opentaal_forbiddenword2.wrong         |   5 +
 .../apache/lucene/analysis/hunspell/wordpair.aff   |   4 +
 .../apache/lucene/analysis/hunspell/wordpair.dic   |   4 +
 .../apache/lucene/analysis/hunspell/wordpair.good  |   3 +
 .../apache/lucene/analysis/hunspell/wordpair.wrong |   1 +
 .../analysis/standard/GenerateJflexTLDMacros.java  |  16 +-
 .../analysis/icu/GenerateUTR30DataFiles.java       |  84 ++--
 .../lucene/analysis/icu/RBBIRuleCompiler.java      |  51 +--
 .../lucene50/Lucene50CompoundFormat.java           |  56 +--
 .../lucene50/Lucene50CompoundReader.java           |   2 +-
 .../backward_codecs/lucene70/Lucene70Codec.java    |   4 +-
 .../backward_codecs/lucene80/Lucene80Codec.java    |   2 +-
 .../backward_codecs/lucene84/Lucene84Codec.java    |   4 +-
 .../backward_codecs/lucene86/Lucene86Codec.java    |   4 +-
 .../backward_codecs/lucene87/Lucene87Codec.java    |   4 +-
 .../backward_codecs/Lucene87/Lucene87RWCodec.java} |  16 +-
 .../lucene50/Lucene50RWCompoundFormat.java}        |   6 +-
 .../lucene50/TestLucene50CompoundFormat.java       |   8 +-
 .../backward_codecs/lucene70/Lucene70RWCodec.java  |   7 +
 .../backward_codecs/lucene84/Lucene84RWCodec.java  |   7 +
 .../backward_codecs/lucene86/Lucene86RWCodec.java  |   7 +
 .../codecs/lucene86/Lucene86PointsFormat.java      |   5 +-
 .../lucene/codecs/lucene90/Lucene90Codec.java      |   3 +-
 .../Lucene90CompoundFormat.java}                   |  14 +-
 .../Lucene90CompoundReader.java}                   |  22 +-
 .../codecs/lucene90/Lucene90FieldInfosFormat.java  |   8 +-
 .../codecs/lucene90/Lucene90VectorWriter.java      |  15 +-
 .../lucene/codecs/lucene90/package-info.java       |   2 +-
 .../java/org/apache/lucene/document/FieldType.java |   3 +-
 .../java/org/apache/lucene/index/CodecReader.java  |  15 +
 .../org/apache/lucene/index/ReadersAndUpdates.java |   4 +-
 .../org/apache/lucene/util/RamUsageEstimator.java  |  17 +-
 .../TestLucene90CompoundFormat.java}               |   4 +-
 .../TestLucene90FieldInfosFormat.java}             |  10 +-
 .../TestLucene90VectorFormat.java}                 |   9 +-
 .../apache/lucene/queries/intervals/Intervals.java |  10 +
 .../MinimumShouldMatchIntervalsSource.java         |   1 +
 .../queries/intervals/NoMatchIntervalsSource.java  |  75 ++++
 .../lucene/queries/intervals/TestIntervals.java    |  21 +
 .../queries/intervals/TestSimplifications.java     |   9 +
 .../lucene/index/BaseFieldInfoFormatTestCase.java  |  55 ++-
 .../lucene/index/BaseVectorFormatTestCase.java}    | 150 +++----
 .../org/apache/lucene/util/RamUsageTester.java     |  22 +-
 solr/CHANGES.txt                                   |  19 +-
 .../solr/cloud/api/collections/BackupCmd.java      |  48 +--
 .../cloud/api/collections/DeleteBackupCmd.java     |  14 +-
 .../OverseerCollectionMessageHandler.java          |  51 +--
 .../java/org/apache/solr/core/CoreContainer.java   |   4 +-
 .../repository/LocalFileSystemRepository.java      |  22 +-
 .../java/org/apache/solr/handler/CatStream.java    |  11 +-
 .../java/org/apache/solr/handler/ClusterAPI.java   |  14 +-
 .../org/apache/solr/handler/CollectionsAPI.java    | 177 ++++++++-
 .../apache/solr/handler/admin/BackupCoreOp.java    |   2 +-
 .../solr/handler/admin/CollectionsHandler.java     |   1 -
 .../java/org/apache/solr/response/JSONWriter.java  |   8 +-
 .../org/apache/solr/search/MaxScoreCollector.java  |   7 +-
 .../org/apache/solr/servlet/QueryRateLimiter.java  |   8 +-
 .../conf/solrconfig-cache-enable-disable.xml       |  15 +-
 .../conf/solrconfig-memory-circuitbreaker.xml      |   8 +-
 .../LocalFSCloudIncrementalBackupTest.java         |  13 +-
 .../cloud/api/collections/TestCollectionAPI.java   |   7 +-
 .../apache/solr/core/DirectoryFactoriesTest.java   |   4 +-
 .../src/test/org/apache/solr/core/TestConfig.java  |  18 +-
 .../solr/handler/TestIncrementalCoreBackup.java    |  74 ++--
 .../solr/handler/TestStressIncrementalBackup.java  |  22 +-
 .../solr/handler/TestStressThreadBackup.java       |   5 +-
 .../solr/handler/admin/AdminHandlersProxyTest.java |  33 --
 .../solr/handler/admin/TestCollectionAPIs.java     |   4 +-
 .../handler/admin/V2CollectionsAPIMappingTest.java | 293 ++++++++++++++
 solr/solr-ref-guide/src/_layouts/default.html      |   2 +-
 solr/solr-ref-guide/src/_layouts/home.html         |   2 +-
 .../src/major-changes-in-solr-9.adoc               |   9 +
 .../solr-ref-guide/src/parallel-sql-interface.adoc |   2 +-
 solr/solr-ref-guide/src/solr-upgrade-notes.adoc    |  10 +
 .../solrj/request/CollectionAdminRequest.java      |  51 +--
 .../client/solrj/request/CollectionApiMapping.java |  74 +---
 ...onfigInfo.java => BackupCollectionPayload.java} |  40 +-
 ...lusterPropInfo.java => ClusterPropPayload.java} |  10 +-
 .../solrj/request/beans/CreateAliasPayload.java    |  79 ++++
 ...ateConfigInfo.java => CreateConfigPayload.java} |   2 +-
 .../{ClusterPropInfo.java => CreatePayload.java}   |  54 ++-
 ...eateConfigInfo.java => DeleteAliasPayload.java} |  14 +-
 ...ateLimiterMeta.java => RateLimiterPayload.java} |  10 +-
 ...nfigInfo.java => RestoreCollectionPayload.java} |  39 +-
 ...onfigInfo.java => SetAliasPropertyPayload.java} |  19 +-
 .../client/solrj/request/beans/V2ApiConstants.java |  55 +++
 .../org/apache/solr/common/cloud/ZkNodeProps.java  |  20 +-
 .../solr/common/params/CollectionAdminParams.java  |   5 +
 .../resources/apispec/collections.Commands.json    | 298 --------------
 .../solrj/io/stream/StreamExpressionTest.java      |  38 +-
 .../client/solrj/request/TestV1toV2ApiMapper.java  |  26 +-
 .../apache/solr/common/util/JsonValidatorTest.java |  52 +--
 155 files changed, 3106 insertions(+), 1526 deletions(-)

diff --git a/dev-tools/missing-doclet/src/main/java/org/apache/lucene/missingdoclet/MissingDoclet.java b/dev-tools/missing-doclet/src/main/java/org/apache/lucene/missingdoclet/MissingDoclet.java
index 89c205f..53dc033 100644
--- a/dev-tools/missing-doclet/src/main/java/org/apache/lucene/missingdoclet/MissingDoclet.java
+++ b/dev-tools/missing-doclet/src/main/java/org/apache/lucene/missingdoclet/MissingDoclet.java
@@ -406,7 +406,7 @@ public class MissingDoclet extends StandardDoclet {
   /** logs a new error for the particular element */
   private void error(Element element, String message) {
     var fullMessage = new StringBuilder();
-    switch(element.getKind()) {
+    switch (element.getKind()) {
       case MODULE:
       case PACKAGE:
         // for modules/packages, we don't have filename + line number, fully qualify
@@ -426,10 +426,19 @@ public class MissingDoclet extends StandardDoclet {
         fullMessage.append(element.getSimpleName());
         break;
     }
+
     fullMessage.append(" (");
     fullMessage.append(element.getKind().toString().toLowerCase(Locale.ROOT));
     fullMessage.append("): ");
     fullMessage.append(message);
-    reporter.print(Diagnostic.Kind.ERROR, element, fullMessage.toString());
+
+    if (Runtime.version().feature() == 11 && element.getKind() == ElementKind.PACKAGE) {
+      // Avoid JDK 11 bug:
+      // https://issues.apache.org/jira/browse/LUCENE-9747
+      // https://bugs.openjdk.java.net/browse/JDK-8224082
+      reporter.print(Diagnostic.Kind.ERROR, fullMessage.toString());
+    } else {
+      reporter.print(Diagnostic.Kind.ERROR, element, fullMessage.toString());
+    }
   }
 }
diff --git a/gradle/ant-compat/folder-layout.gradle b/gradle/ant-compat/folder-layout.gradle
index 286c8d1..0186fb7 100644
--- a/gradle/ant-compat/folder-layout.gradle
+++ b/gradle/ant-compat/folder-layout.gradle
@@ -41,3 +41,23 @@ configure(project(":solr:webapp")) {
     webAppDirName = "web"
   }
 }
+
+allprojects {
+  plugins.withType(JavaPlugin) {
+    // if 'src/tools' exists, add it as a separate sourceSet.
+    if (file('src/tools/java').exists()) {
+      sourceSets {
+        tools {
+          java {
+            srcDirs = ['src/tools/java']
+          }
+        }
+      }
+
+      configurations {
+        // Inherit any dependencies from the main source set.
+        toolsImplementation.extendsFrom implementation
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/gradle/documentation/render-javadoc.gradle b/gradle/documentation/render-javadoc.gradle
index 55f904e..66d1539 100644
--- a/gradle/documentation/render-javadoc.gradle
+++ b/gradle/documentation/render-javadoc.gradle
@@ -533,8 +533,6 @@ class RenderJavadocTask extends DefaultTask {
 
         ignoreExitValue true
       }
-
-      logger.lifecycle("Exec returned: ${result}")
     }
 
     if (result.getExitValue() != 0) {
diff --git a/gradle/testing/randomization/policies/tests.policy b/gradle/testing/randomization/policies/tests.policy
index e17af8e..469892c 100644
--- a/gradle/testing/randomization/policies/tests.policy
+++ b/gradle/testing/randomization/policies/tests.policy
@@ -91,10 +91,12 @@ grant {
   // allows LuceneTestCase#runWithRestrictedPermissions to execute with lower (or no) permission
   permission java.security.SecurityPermission "createAccessControlContext";
 
-  // Some Hunspell tests may read from external files specified in system properties
+  // Hunspell regression and validation tests can read from external files
+  // specified in system properties.
   permission java.io.FilePermission "${hunspell.repo.path}${/}-", "read";
-  permission java.io.FilePermission "${hunspell.dictionaries}${/}-", "read";
   permission java.io.FilePermission "${hunspell.corpora}${/}-", "read";
+  permission java.io.FilePermission "${hunspell.dictionaries}", "read";
+  permission java.io.FilePermission "${hunspell.dictionaries}${/}-", "read";
 };
 
 // Permissions to support ant build
diff --git a/gradle/validation/forbidden-apis.gradle b/gradle/validation/forbidden-apis.gradle
index c4fb27d..c23002a 100644
--- a/gradle/validation/forbidden-apis.gradle
+++ b/gradle/validation/forbidden-apis.gradle
@@ -89,6 +89,23 @@ allprojects { prj ->
       ]
     }
 
+    // Configure defaults for sourceSets.tools (if present).
+    tasks.matching { it.name == "forbiddenApisTools" }.all {
+      bundledSignatures += [
+          'jdk-unsafe',
+          'jdk-deprecated',
+          'jdk-non-portable',
+          'jdk-reflection',
+      ]
+
+      suppressAnnotations += [
+          "**.SuppressForbidden"
+      ]
+
+      doFirst dynamicSignatures.curry(configurations.toolsCompileClasspath, "lucene")
+      inputs.dir(file(resources))
+    }
+
     // Disable sysout signatures for these projects.
     if (prj.path in [
         ":lucene:demo",
diff --git a/gradle/validation/rat-sources.gradle b/gradle/validation/rat-sources.gradle
index 5738d01..e13b052 100644
--- a/gradle/validation/rat-sources.gradle
+++ b/gradle/validation/rat-sources.gradle
@@ -154,10 +154,16 @@ class RatTask extends DefaultTask {
             }
 
             if (project.plugins.findPlugin(JavaPlugin)) {
-                [
+                def checkSets = [
                     project.sourceSets.main.java.srcDirs,
                     project.sourceSets.test.java.srcDirs,
-                ].flatten().each { srcLocation ->
+                ]
+
+                project.sourceSets.matching { it.name == 'tools' }.all {
+                    checkSets += project.sourceSets.tools.java.srcDirs
+                }
+
+                checkSets.flatten().each { srcLocation ->
                     ant.fileset(dir: srcLocation, erroronmissingdir: false) {
                         srcExcludes.each { pattern -> ant.exclude(name: pattern) }
                     }
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 104d9b9..337f376 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -89,8 +89,8 @@ API Changes
 
 Improvements
 
-* LUCENE-9687: Hunspell support improvements: add SpellChecker API, support default encoding and
-  BREAK/FORBIDDENWORD/COMPOUNDRULE affix rules, improve stemming of all-caps words (Peter Gromov)
+* LUCENE-9687: Hunspell support improvements: add API for spell-checking and suggestions, support compound words,
+  fix various behavior differences between Java and C++ implementations, improve performance (Peter Gromov, Dawid Weiss)
 
 * LUCENE-9633: Improve match highlighter behavior for degenerate intervals (on non-existing positions).
   (Dawid Weiss)
@@ -288,7 +288,9 @@ Optimizations
 
 Bug Fixes
 ---------------------
-(No changes)
+
+* LUCENE-9744: NPE on a degenerate query in MinimumShouldMatchIntervalsSource
+  $MinimumMatchesIterator.getSubMatches(). (Alan Woodward)
 
 Other
 ---------------------
diff --git a/lucene/analysis/common/build.gradle b/lucene/analysis/common/build.gradle
index a44152c..24c949f 100644
--- a/lucene/analysis/common/build.gradle
+++ b/lucene/analysis/common/build.gradle
@@ -23,3 +23,18 @@ dependencies {
   api project(':lucene:core')
   testImplementation project(':lucene:test-framework')
 }
+
+// Pass all hunspell-tests-specific project properties to tests as system properties.
+tasks.withType(Test) {
+  [
+      "hunspell.dictionaries",
+      "hunspell.corpora",
+      "hunspell.repo.path"
+  ].each {
+    def val = propertyOrDefault(it, null)
+    if (val != null) {
+      logger.lifecycle("Passing property: ${it}=${val}")
+      systemProperty it, val
+    }
+  }
+}
\ No newline at end of file
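
The values forwarded above arrive in the test JVM as ordinary system properties. A minimal
sketch of how a test could pick one up, assuming a JUnit 4 style helper (the class and method
names below are illustrative only; the actual Hunspell tests have their own handling):

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import org.junit.Assume;

    public class HunspellExternalDataExample {
      // Sketch: read the forwarded "hunspell.dictionaries" system property and
      // skip the test (via a JUnit assumption) when it was not supplied.
      static Path requireDictionariesDir() {
        String dir = System.getProperty("hunspell.dictionaries");
        Assume.assumeTrue("hunspell.dictionaries system property not set", dir != null);
        return Paths.get(dir);
      }
    }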
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CheckCompoundPattern.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CheckCompoundPattern.java
index 3d70591..b1c4b3d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CheckCompoundPattern.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CheckCompoundPattern.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis.hunspell;
 
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 
@@ -27,7 +26,6 @@ class CheckCompoundPattern {
   private final char[] endFlags;
   private final char[] beginFlags;
   private final Dictionary dictionary;
-  private final BytesRef scratch = new BytesRef();
 
   CheckCompoundPattern(
       String unparsed, Dictionary.FlagParsingStrategy strategy, Dictionary dictionary) {
@@ -93,7 +91,7 @@ class CheckCompoundPattern {
 
   private boolean hasAllFlags(char[] flags, IntsRef forms) {
     for (char flag : flags) {
-      if (!dictionary.hasFlag(forms, flag, scratch)) {
+      if (!dictionary.hasFlag(forms, flag)) {
         return false;
       }
     }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java
index 0f89de8..726c1dc 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.analysis.hunspell;
 
 import java.util.List;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IntsRef;
 
 class CompoundRule {
@@ -51,16 +50,15 @@ class CompoundRule {
     data = parsedFlags.toString().toCharArray();
   }
 
-  boolean mayMatch(List<IntsRef> words, BytesRef scratch) {
-    return match(words, 0, 0, scratch, false);
+  boolean mayMatch(List<IntsRef> words) {
+    return match(words, 0, 0, false);
   }
 
-  boolean fullyMatches(List<IntsRef> words, BytesRef scratch) {
-    return match(words, 0, 0, scratch, true);
+  boolean fullyMatches(List<IntsRef> words) {
+    return match(words, 0, 0, true);
   }
 
-  private boolean match(
-      List<IntsRef> words, int patternIndex, int wordIndex, BytesRef scratch, boolean fully) {
+  private boolean match(List<IntsRef> words, int patternIndex, int wordIndex, boolean fully) {
     if (patternIndex >= data.length) {
       return wordIndex >= words.size();
     }
@@ -71,12 +69,12 @@ class CompoundRule {
     char flag = data[patternIndex];
     if (patternIndex < data.length - 1 && data[patternIndex + 1] == '*') {
       int startWI = wordIndex;
-      while (wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag, scratch)) {
+      while (wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag)) {
         wordIndex++;
       }
 
       while (wordIndex >= startWI) {
-        if (match(words, patternIndex + 2, wordIndex, scratch, fully)) {
+        if (match(words, patternIndex + 2, wordIndex, fully)) {
           return true;
         }
 
@@ -86,16 +84,16 @@ class CompoundRule {
     }
 
     boolean currentWordMatches =
-        wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag, scratch);
+        wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag);
 
     if (patternIndex < data.length - 1 && data[patternIndex + 1] == '?') {
-      if (currentWordMatches && match(words, patternIndex + 2, wordIndex + 1, scratch, fully)) {
+      if (currentWordMatches && match(words, patternIndex + 2, wordIndex + 1, fully)) {
         return true;
       }
-      return match(words, patternIndex + 2, wordIndex, scratch, fully);
+      return match(words, patternIndex + 2, wordIndex, fully);
     }
 
-    return currentWordMatches && match(words, patternIndex + 1, wordIndex + 1, scratch, fully);
+    return currentWordMatches && match(words, patternIndex + 1, wordIndex + 1, fully);
   }
 
   @Override
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
index ae1a3a1..59536fe 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
@@ -18,11 +18,11 @@ package org.apache.lucene.analysis.hunspell;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.LineNumberReader;
-import java.io.OutputStream;
 import java.nio.charset.Charset;
 import java.nio.charset.CharsetDecoder;
 import java.nio.charset.CodingErrorAction;
@@ -52,8 +52,6 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.IntsRef;
@@ -72,16 +70,19 @@ import org.apache.lucene.util.fst.Util;
 
 /** In-memory structure for the dictionary (.dic) and affix (.aff) data of a hunspell dictionary. */
 public class Dictionary {
+  // Derived from woorm/LibreOffice dictionaries.
+  // See TestAllDictionaries.testMaxPrologueNeeded.
+  static final int MAX_PROLOGUE_SCAN_WINDOW = 30 * 1024;
 
   static final char[] NOFLAGS = new char[0];
 
   static final char FLAG_UNSET = (char) 0;
   private static final int DEFAULT_FLAGS = 65510;
-  private static final char HIDDEN_FLAG = (char) 65511; // called 'ONLYUPCASEFLAG' in Hunspell
+  static final char HIDDEN_FLAG = (char) 65511; // called 'ONLYUPCASEFLAG' in Hunspell
 
   // TODO: really for suffixes we should reverse the automaton and run them backwards
-  private static final String PREFIX_CONDITION_REGEX_PATTERN = "%s.*";
-  private static final String SUFFIX_CONDITION_REGEX_PATTERN = ".*%s";
+  private static final String PREFIX_CONDITION_REGEX = "%s.*";
+  private static final String SUFFIX_CONDITION_REGEX = ".*%s";
   private static final Pattern MORPH_KEY_PATTERN = Pattern.compile("\\s+(?=\\p{Alpha}{2}:)");
   static final Charset DEFAULT_CHARSET = StandardCharsets.ISO_8859_1;
   CharsetDecoder decoder = replacingDecoder(DEFAULT_CHARSET);
@@ -106,7 +107,7 @@ public class Dictionary {
    * The list of unique flagsets (wordforms). theoretically huge, but practically small (for Polish
    * this is 756), otherwise humans wouldn't be able to deal with it either.
    */
-  BytesRefHash flagLookup = new BytesRefHash();
+  final FlagEnumerator.Lookup flagLookup;
 
   // the list of unique strip affixes.
   char[] stripData;
@@ -121,7 +122,7 @@ public class Dictionary {
   // offsets in affixData
   static final int AFFIX_FLAG = 0;
   static final int AFFIX_STRIP_ORD = 1;
-  static final int AFFIX_CONDITION = 2;
+  private static final int AFFIX_CONDITION = 2;
   static final int AFFIX_APPEND = 3;
 
   // Default flag parsing strategy
@@ -170,6 +171,11 @@ public class Dictionary {
   String[] neighborKeyGroups = new String[0];
   boolean enableSplitSuggestions = true;
   List<RepEntry> repTable = new ArrayList<>();
+  List<List<String>> mapTable = new ArrayList<>();
+  int maxDiff = 5;
+  int maxNGramSuggestions = Integer.MAX_VALUE;
+  boolean onlyMaxDiff;
+  char noSuggest, subStandard;
 
   // FSTs used for ICONV/OCONV, output ord pointing to replacement text
   FST<CharsRef> iconv;
@@ -224,42 +230,46 @@ public class Dictionary {
     this.ignoreCase = ignoreCase;
     this.needsInputCleaning = ignoreCase;
     this.needsOutputCleaning = false; // set if we have an OCONV
-    flagLookup.add(new BytesRef()); // no flags -> ord 0
 
-    Path tempPath = getDefaultTempDir(); // TODO: make this configurable?
-    Path aff = Files.createTempFile(tempPath, "affix", "aff");
-
-    BufferedInputStream aff1 = null;
-    InputStream aff2 = null;
-    boolean success = false;
-    try {
-      // Copy contents of the affix stream to a temp file.
-      try (OutputStream os = Files.newOutputStream(aff)) {
-        affix.transferTo(os);
+    try (BufferedInputStream affixStream =
+        new BufferedInputStream(affix, MAX_PROLOGUE_SCAN_WINDOW) {
+          @Override
+          public void close() {
+            // TODO: maybe we should consume and close it? Why does it need to stay open?
+            // Don't close the affix stream as per javadoc.
+          }
+        }) {
+      // I assume we don't support other BOMs (utf16, etc.)? We trivially could,
+      // by adding maybeConsume() with a proper bom... but I don't see hunspell repo to have
+      // any such exotic examples.
+      Charset streamCharset;
+      if (maybeConsume(affixStream, BOM_UTF8)) {
+        streamCharset = StandardCharsets.UTF_8;
+      } else {
+        streamCharset = DEFAULT_CHARSET;
       }
 
-      // pass 1: get encoding & flag
-      aff1 = new BufferedInputStream(Files.newInputStream(aff));
-      readConfig(aff1);
+      /*
+       * pass 1: look for encoding & flag. This is simple but works. We just prefetch
+       * a large enough chunk of the input and scan through it. The buffered data will
+       * be subsequently reused anyway so nothing is wasted.
+       */
+      affixStream.mark(MAX_PROLOGUE_SCAN_WINDOW);
+      byte[] prologue = affixStream.readNBytes(MAX_PROLOGUE_SCAN_WINDOW - 1);
+      affixStream.reset();
+      readConfig(new ByteArrayInputStream(prologue), streamCharset);
 
       // pass 2: parse affixes
-      aff2 = new BufferedInputStream(Files.newInputStream(aff));
-      readAffixFile(aff2, decoder);
+      FlagEnumerator flagEnumerator = new FlagEnumerator();
+      readAffixFile(affixStream, decoder, flagEnumerator);
 
       // read dictionary entries
       IndexOutput unsorted = mergeDictionaries(tempDir, tempFileNamePrefix, dictionaries, decoder);
       String sortedFile = sortWordsOffline(tempDir, tempFileNamePrefix, unsorted);
-      words = readSortedDictionaries(tempDir, sortedFile);
+      words = readSortedDictionaries(tempDir, sortedFile, flagEnumerator);
+      flagLookup = flagEnumerator.finish();
       aliases = null; // no longer needed
       morphAliases = null; // no longer needed
-      success = true;
-    } finally {
-      IOUtils.closeWhileHandlingException(aff1, aff2);
-      if (success) {
-        Files.delete(aff);
-      } else {
-        IOUtils.deleteFilesIgnoringExceptions(aff);
-      }
     }
   }
 
@@ -321,7 +331,7 @@ public class Dictionary {
    * @param decoder CharsetDecoder to decode the content of the file
    * @throws IOException Can be thrown while reading from the InputStream
    */
-  private void readAffixFile(InputStream affixStream, CharsetDecoder decoder)
+  private void readAffixFile(InputStream affixStream, CharsetDecoder decoder, FlagEnumerator flags)
       throws IOException, ParseException {
     TreeMap<String, List<Integer>> prefixes = new TreeMap<>();
     TreeMap<String, List<Integer>> suffixes = new TreeMap<>();
@@ -346,16 +356,15 @@ public class Dictionary {
       if (line.isEmpty()) continue;
 
       String firstWord = line.split("\\s")[0];
+      // TODO: convert to a switch?
       if ("AF".equals(firstWord)) {
         parseAlias(line);
       } else if ("AM".equals(firstWord)) {
         parseMorphAlias(line);
       } else if ("PFX".equals(firstWord)) {
-        parseAffix(
-            prefixes, line, reader, PREFIX_CONDITION_REGEX_PATTERN, seenPatterns, seenStrips);
+        parseAffix(prefixes, line, reader, PREFIX_CONDITION_REGEX, seenPatterns, seenStrips, flags);
       } else if ("SFX".equals(firstWord)) {
-        parseAffix(
-            suffixes, line, reader, SUFFIX_CONDITION_REGEX_PATTERN, seenPatterns, seenStrips);
+        parseAffix(suffixes, line, reader, SUFFIX_CONDITION_REGEX, seenPatterns, seenStrips, flags);
       } else if (line.equals("COMPLEXPREFIXES")) {
         complexPrefixes =
             true; // 2-stage prefix+1-stage suffix instead of 2-stage suffix+1-stage prefix
@@ -393,21 +402,40 @@ public class Dictionary {
       } else if ("BREAK".equals(firstWord)) {
         breaks = parseBreaks(reader, line);
       } else if ("WORDCHARS".equals(firstWord)) {
-        wordChars = singleArgument(reader, line);
+        wordChars = firstArgument(reader, line);
       } else if ("TRY".equals(firstWord)) {
-        tryChars = singleArgument(reader, line);
+        tryChars = firstArgument(reader, line);
       } else if ("REP".equals(firstWord)) {
         int count = parseNum(reader, line);
         for (int i = 0; i < count; i++) {
           String[] parts = splitBySpace(reader, reader.readLine(), 3, Integer.MAX_VALUE);
           repTable.add(new RepEntry(parts[1], parts[2]));
         }
+      } else if ("MAP".equals(firstWord)) {
+        int count = parseNum(reader, line);
+        for (int i = 0; i < count; i++) {
+          mapTable.add(parseMapEntry(reader, reader.readLine()));
+        }
       } else if ("KEY".equals(firstWord)) {
         neighborKeyGroups = singleArgument(reader, line).split("\\|");
       } else if ("NOSPLITSUGS".equals(firstWord)) {
         enableSplitSuggestions = false;
+      } else if ("MAXNGRAMSUGS".equals(firstWord)) {
+        maxNGramSuggestions = Integer.parseInt(singleArgument(reader, line));
+      } else if ("MAXDIFF".equals(firstWord)) {
+        int i = Integer.parseInt(singleArgument(reader, line));
+        if (i < 0 || i > 10) {
+          throw new ParseException("MAXDIFF should be between 0 and 10", reader.getLineNumber());
+        }
+        maxDiff = i;
+      } else if ("ONLYMAXDIFF".equals(firstWord)) {
+        onlyMaxDiff = true;
       } else if ("FORBIDDENWORD".equals(firstWord)) {
         forbiddenword = flagParsingStrategy.parseFlag(singleArgument(reader, line));
+      } else if ("NOSUGGEST".equals(firstWord)) {
+        noSuggest = flagParsingStrategy.parseFlag(singleArgument(reader, line));
+      } else if ("SUBSTANDARD".equals(firstWord)) {
+        subStandard = flagParsingStrategy.parseFlag(singleArgument(reader, line));
       } else if ("COMPOUNDMIN".equals(firstWord)) {
         compoundMin = Math.max(1, parseNum(reader, line));
       } else if ("COMPOUNDWORDMAX".equals(firstWord)) {
@@ -442,6 +470,13 @@ public class Dictionary {
           checkCompoundPatterns.add(
               new CheckCompoundPattern(reader.readLine(), flagParsingStrategy, this));
         }
+      } else if ("SET".equals(firstWord)) {
+        checkCriticalDirectiveSame(
+            "SET", reader, decoder.charset(), getDecoder(singleArgument(reader, line)).charset());
+      } else if ("FLAG".equals(firstWord)) {
+        FlagParsingStrategy strategy = getFlagParsingStrategy(line, decoder.charset());
+        checkCriticalDirectiveSame(
+            "FLAG", reader, flagParsingStrategy.getClass(), strategy.getClass());
       }
     }
 
@@ -465,7 +500,39 @@ public class Dictionary {
     stripOffsets[currentIndex] = currentOffset;
   }
 
-  private boolean hasLanguage(String... langCodes) {
+  private void checkCriticalDirectiveSame(
+      String directive, LineNumberReader reader, Object expected, Object actual)
+      throws ParseException {
+    if (!expected.equals(actual)) {
+      throw new ParseException(
+          directive
+              + " directive should occur at most once, and in the first "
+              + MAX_PROLOGUE_SCAN_WINDOW
+              + " bytes of the *.aff file",
+          reader.getLineNumber());
+    }
+  }
+
+  private List<String> parseMapEntry(LineNumberReader reader, String line) throws ParseException {
+    String unparsed = firstArgument(reader, line);
+    List<String> mapEntry = new ArrayList<>();
+    for (int j = 0; j < unparsed.length(); j++) {
+      if (unparsed.charAt(j) == '(') {
+        int closing = unparsed.indexOf(')', j);
+        if (closing < 0) {
+          throw new ParseException("Unclosed parenthesis: " + line, reader.getLineNumber());
+        }
+
+        mapEntry.add(unparsed.substring(j + 1, closing));
+        j = closing;
+      } else {
+        mapEntry.add(String.valueOf(unparsed.charAt(j)));
+      }
+    }
+    return mapEntry;
+  }
+
+  boolean hasLanguage(String... langCodes) {
     if (language == null) return false;
     String langCode = extractLanguageCode(language);
     for (String code : langCodes) {
@@ -489,6 +556,10 @@ public class Dictionary {
     return splitBySpace(reader, line, 2)[1];
   }
 
+  private String firstArgument(LineNumberReader reader, String line) throws ParseException {
+    return splitBySpace(reader, line, 2, Integer.MAX_VALUE)[1];
+  }
+
   private String[] splitBySpace(LineNumberReader reader, String line, int expectedParts)
       throws ParseException {
     return splitBySpace(reader, line, expectedParts, expectedParts);
@@ -583,17 +654,22 @@ public class Dictionary {
       LineNumberReader reader,
       String conditionPattern,
       Map<String, Integer> seenPatterns,
-      Map<String, Integer> seenStrips)
+      Map<String, Integer> seenStrips,
+      FlagEnumerator flags)
       throws IOException, ParseException {
 
-    BytesRefBuilder scratch = new BytesRefBuilder();
     StringBuilder sb = new StringBuilder();
     String[] args = header.split("\\s+");
 
     boolean crossProduct = args[2].equals("Y");
-    boolean isSuffix = conditionPattern.equals(SUFFIX_CONDITION_REGEX_PATTERN);
+    boolean isSuffix = conditionPattern.equals(SUFFIX_CONDITION_REGEX);
 
-    int numLines = Integer.parseInt(args[3]);
+    int numLines;
+    try {
+      numLines = Integer.parseInt(args[3]);
+    } catch (NumberFormatException e) {
+      return;
+    }
     affixData = ArrayUtil.grow(affixData, currentAffix * 4 + numLines * 4);
 
     for (int i = 0; i < numLines; i++) {
@@ -617,7 +693,6 @@ public class Dictionary {
         }
 
         appendFlags = flagParsingStrategy.parseFlags(flagPart);
-        Arrays.sort(appendFlags);
         twoStageAffix = true;
       }
       // zero affix -> empty string
@@ -676,8 +751,7 @@ public class Dictionary {
         appendFlags = NOFLAGS;
       }
 
-      encodeFlags(scratch, appendFlags);
-      int appendFlagsOrd = flagLookup.add(scratch.get());
+      int appendFlagsOrd = flags.add(appendFlags);
       if (appendFlagsOrd < 0) {
         // already exists in our hash
         appendFlagsOrd = (-appendFlagsOrd) - 1;
@@ -724,6 +798,14 @@ public class Dictionary {
     return affixData[affixIndex * 4 + offset];
   }
 
+  boolean isCrossProduct(int affix) {
+    return (affixData(affix, AFFIX_CONDITION) & 1) == 1;
+  }
+
+  int getAffixCondition(int affix) {
+    return affixData(affix, AFFIX_CONDITION) >>> 1;
+  }
+
   private FST<CharsRef> parseConversions(LineNumberReader reader, int num)
       throws IOException, ParseException {
     Map<String, String> mappings = new TreeMap<>();
@@ -749,31 +831,36 @@ public class Dictionary {
   private static final byte[] BOM_UTF8 = {(byte) 0xef, (byte) 0xbb, (byte) 0xbf};
 
   /** Parses the encoding and flag format specified in the provided InputStream */
-  private void readConfig(BufferedInputStream stream) throws IOException, ParseException {
-    // I assume we don't support other BOMs (utf16, etc.)? We trivially could,
-    // by adding maybeConsume() with a proper bom... but I don't see hunspell repo to have
-    // any such exotic examples.
-    Charset streamCharset;
-    if (maybeConsume(stream, BOM_UTF8)) {
-      streamCharset = StandardCharsets.UTF_8;
-    } else {
-      streamCharset = DEFAULT_CHARSET;
-    }
-
-    // TODO: can these flags change throughout the file? If not then we can abort sooner. And
-    // then we wouldn't even need to create a temp file for the affix stream - a large enough
-    // leading buffer (BufferedInputStream) would be sufficient?
+  private void readConfig(InputStream stream, Charset streamCharset)
+      throws IOException, ParseException {
     LineNumberReader reader = new LineNumberReader(new InputStreamReader(stream, streamCharset));
     String line;
+    String flagLine = null;
+    boolean charsetFound = false;
+    boolean flagFound = false;
     while ((line = reader.readLine()) != null) {
       if (line.isBlank()) continue;
 
       String firstWord = line.split("\\s")[0];
       if ("SET".equals(firstWord)) {
         decoder = getDecoder(singleArgument(reader, line));
+        charsetFound = true;
       } else if ("FLAG".equals(firstWord)) {
-        flagParsingStrategy = getFlagParsingStrategy(line, decoder.charset());
+        // Preserve the flag line for parsing later since we need the decoder's charset
+        // and just in case they come out of order.
+        flagLine = line;
+        flagFound = true;
+      } else {
+        continue;
       }
+
+      if (charsetFound && flagFound) {
+        break;
+      }
+    }
+
+    if (flagFound) {
+      flagParsingStrategy = getFlagParsingStrategy(flagLine, decoder.charset());
     }
   }
 
@@ -885,14 +972,15 @@ public class Dictionary {
         || ch == MORPH_SEPARATOR; // BINARY EXECUTABLES EMBEDDED IN ZULU DICTIONARIES!!!!!!!
   }
 
-  static int morphBoundary(String line) {
+  private static int morphBoundary(String line) {
     int end = indexOfSpaceOrTab(line, 0);
     if (end == -1) {
       return line.length();
     }
     while (end >= 0 && end < line.length()) {
       if (line.charAt(end) == '\t'
-          || end + 3 < line.length()
+          || end > 0
+              && end + 3 < line.length()
               && Character.isLetter(line.charAt(end + 1))
               && Character.isLetter(line.charAt(end + 2))
               && line.charAt(end + 3) == ':') {
@@ -1064,10 +1152,11 @@ public class Dictionary {
     return sorted;
   }
 
-  private FST<IntsRef> readSortedDictionaries(Directory tempDir, String sorted) throws IOException {
+  private FST<IntsRef> readSortedDictionaries(
+      Directory tempDir, String sorted, FlagEnumerator flags) throws IOException {
     boolean success = false;
 
-    EntryGrouper grouper = new EntryGrouper();
+    EntryGrouper grouper = new EntryGrouper(flags);
 
     try (ByteSequencesReader reader =
         new ByteSequencesReader(tempDir.openChecksumInput(sorted, IOContext.READONCE), sorted)) {
@@ -1104,7 +1193,6 @@ public class Dictionary {
             wordForm = ArrayUtil.growExact(wordForm, wordForm.length + 1);
             wordForm[wordForm.length - 1] = HIDDEN_FLAG;
           }
-          Arrays.sort(wordForm);
           entry = line.substring(0, flagSep);
         }
         // we possibly have morphological data
@@ -1191,9 +1279,13 @@ public class Dictionary {
         new FSTCompiler<>(FST.INPUT_TYPE.BYTE4, IntSequenceOutputs.getSingleton());
     private final List<char[]> group = new ArrayList<>();
     private final List<Integer> stemExceptionIDs = new ArrayList<>();
-    private final BytesRefBuilder flagsScratch = new BytesRefBuilder();
     private final IntsRefBuilder scratchInts = new IntsRefBuilder();
     private String currentEntry = null;
+    private final FlagEnumerator flagEnumerator;
+
+    EntryGrouper(FlagEnumerator flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+    }
 
     void add(String entry, char[] flags, int stemExceptionID) throws IOException {
       if (!entry.equals(currentEntry)) {
@@ -1229,12 +1321,7 @@ public class Dictionary {
           continue;
         }
 
-        encodeFlags(flagsScratch, flags);
-        int ord = flagLookup.add(flagsScratch.get());
-        if (ord < 0) {
-          ord = -ord - 1; // already exists in our hash
-        }
-        currentOrds.append(ord);
+        currentOrds.append(flagEnumerator.add(flags));
         if (hasStemExceptions) {
           currentOrds.append(stemExceptionIDs.get(i));
         }
@@ -1248,34 +1335,13 @@ public class Dictionary {
     }
   }
 
-  static boolean hasHiddenFlag(char[] flags) {
-    return hasFlag(flags, HIDDEN_FLAG);
-  }
-
-  char[] decodeFlags(int entryId, BytesRef b) {
-    this.flagLookup.get(entryId, b);
-
-    if (b.length == 0) {
-      return CharsRef.EMPTY_CHARS;
-    }
-    int len = b.length >>> 1;
-    char[] flags = new char[len];
-    int upto = 0;
-    int end = b.offset + b.length;
-    for (int i = b.offset; i < end; i += 2) {
-      flags[upto++] = (char) ((b.bytes[i] << 8) | (b.bytes[i + 1] & 0xff));
-    }
-    return flags;
-  }
-
-  private static void encodeFlags(BytesRefBuilder b, char[] flags) {
-    int len = flags.length << 1;
-    b.grow(len);
-    b.clear();
-    for (int flag : flags) {
-      b.append((byte) ((flag >> 8) & 0xff));
-      b.append((byte) (flag & 0xff));
+  private static boolean hasHiddenFlag(char[] flags) {
+    for (char flag : flags) {
+      if (flag == HIDDEN_FLAG) {
+        return true;
+      }
     }
+    return false;
   }
 
   private void parseAlias(String line) {
@@ -1341,18 +1407,10 @@ public class Dictionary {
         .collect(Collectors.toList());
   }
 
-  boolean isForbiddenWord(char[] word, int length, BytesRef scratch) {
-    if (forbiddenword != FLAG_UNSET) {
-      IntsRef forms = lookupWord(word, 0, length);
-      return forms != null && hasFlag(forms, forbiddenword, scratch);
-    }
-    return false;
-  }
-
-  boolean hasFlag(IntsRef forms, char flag, BytesRef scratch) {
+  boolean hasFlag(IntsRef forms, char flag) {
     int formStep = formStep();
     for (int i = 0; i < forms.length; i += formStep) {
-      if (hasFlag(forms.ints[forms.offset + i], flag, scratch)) {
+      if (hasFlag(forms.ints[forms.offset + i], flag)) {
         return true;
       }
     }
@@ -1414,30 +1472,26 @@ public class Dictionary {
   private static class NumFlagParsingStrategy extends FlagParsingStrategy {
     @Override
     public char[] parseFlags(String rawFlags) {
-      String[] rawFlagParts = rawFlags.trim().split(",");
-      char[] flags = new char[rawFlagParts.length];
-      int upto = 0;
-
-      for (String rawFlagPart : rawFlagParts) {
-        // note, removing the trailing X/leading I for nepali... what is the rule here?!
-        String replacement = rawFlagPart.replaceAll("[^0-9]", "");
-        // note, ignoring empty flags (this happens in danish, for example)
-        if (replacement.isEmpty()) {
-          continue;
-        }
-        int flag = Integer.parseInt(replacement);
-        if (flag >= Character.MAX_VALUE) { // read default flags as well
-          // accept 0 due to https://github.com/hunspell/hunspell/issues/708
-          throw new IllegalArgumentException(
-              "Num flags should be between 0 and " + DEFAULT_FLAGS + ", found " + flag);
+      StringBuilder result = new StringBuilder();
+      StringBuilder group = new StringBuilder();
+      for (int i = 0; i <= rawFlags.length(); i++) {
+        if (i == rawFlags.length() || rawFlags.charAt(i) == ',') {
+          if (group.length() > 0) { // ignoring empty flags (this happens in danish, for example)
+            int flag = Integer.parseInt(group, 0, group.length(), 10);
+            if (flag >= DEFAULT_FLAGS) {
+              // accept 0 due to https://github.com/hunspell/hunspell/issues/708
+              throw new IllegalArgumentException(
+                  "Num flags should be between 0 and " + DEFAULT_FLAGS + ", found " + flag);
+            }
+            result.append((char) flag);
+            group.setLength(0);
+          }
+        } else if (rawFlags.charAt(i) >= '0' && rawFlags.charAt(i) <= '9') {
+          group.append(rawFlags.charAt(i));
         }
-        flags[upto++] = (char) flag;
       }
 
-      if (upto < flags.length) {
-        flags = ArrayUtil.copyOfSubArray(flags, 0, upto);
-      }
-      return flags;
+      return result.toString().toCharArray();
     }
   }
 
@@ -1468,12 +1522,8 @@ public class Dictionary {
     }
   }
 
-  boolean hasFlag(int entryId, char flag, BytesRef scratch) {
-    return flag != FLAG_UNSET && hasFlag(decodeFlags(entryId, scratch), flag);
-  }
-
-  static boolean hasFlag(char[] flags, char flag) {
-    return flag != FLAG_UNSET && Arrays.binarySearch(flags, flag) >= 0;
+  boolean hasFlag(int entryId, char flag) {
+    return flagLookup.hasFlag(entryId, flag);
   }
 
   CharSequence cleanInput(CharSequence input, StringBuilder reuse) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/FlagEnumerator.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/FlagEnumerator.java
new file mode 100644
index 0000000..57aac40
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/FlagEnumerator.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.lucene.util.ArrayUtil;
+
+/**
+ * A structure similar to {@link org.apache.lucene.util.BytesRefHash}, but specialized for sorted
+ * char sequences used for Hunspell flags. It deduplicates flag sequences, gives them unique ids,
+ * stores the sequences in a contiguous char[] (via {@link #finish()} and allows to query presence
+ * of the flags later via {@link Lookup#hasFlag}.
+ */
+class FlagEnumerator {
+  private final StringBuilder builder = new StringBuilder();
+  private final Map<String, Integer> indices = new HashMap<>();
+
+  FlagEnumerator() {
+    add(new char[0]); // no flags -> ord 0
+  }
+
+  int add(char[] chars) {
+    Arrays.sort(chars);
+    String key = new String(chars);
+    if (key.length() > Character.MAX_VALUE) {
+      throw new IllegalArgumentException("Too many flags: " + key);
+    }
+
+    Integer existing = indices.get(key);
+    if (existing != null) {
+      return existing;
+    }
+
+    int result = builder.length();
+    indices.put(key, result);
+    builder.append((char) key.length());
+    builder.append(key);
+    return result;
+  }
+
+  Lookup finish() {
+    char[] result = new char[builder.length()];
+    builder.getChars(0, builder.length(), result, 0);
+    return new Lookup(result);
+  }
+
+  static class Lookup {
+    private final char[] data;
+
+    private Lookup(char[] data) {
+      this.data = data;
+    }
+
+    boolean hasFlag(int entryId, char flag) {
+      if (entryId < 0 || flag == Dictionary.FLAG_UNSET) return false;
+
+      int length = data[entryId];
+      for (int i = entryId + 1; i < entryId + 1 + length; i++) {
+        char c = data[i];
+        if (c == flag) return true;
+        if (c > flag) return false;
+      }
+      return false;
+    }
+
+    char[] getFlags(int entryId) {
+      return ArrayUtil.copyOfSubArray(data, entryId + 1, entryId + 1 + data[entryId]);
+    }
+  }
+}
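
A minimal usage sketch of the enumerator above, assuming same-package access (the class and
its Lookup are package-private); the behavior follows directly from the code shown:

    // Sketch: deduplicate two equal flag sets, then query the frozen lookup.
    FlagEnumerator enumerator = new FlagEnumerator();
    int ordAB = enumerator.add(new char[] {'B', 'A'});   // sorted to "AB", assigned an ord
    int again = enumerator.add(new char[] {'A', 'B'});   // same set -> same ord as ordAB
    FlagEnumerator.Lookup lookup = enumerator.finish();  // all flag sets frozen into one char[]
    boolean hasA = lookup.hasFlag(ordAB, 'A');           // true
    boolean hasC = lookup.hasFlag(ordAB, 'C');           // false, 'C' is not in the set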
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java
new file mode 100644
index 0000000..9d9c582
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java
@@ -0,0 +1,438 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import static org.apache.lucene.analysis.hunspell.Dictionary.AFFIX_APPEND;
+import static org.apache.lucene.analysis.hunspell.Dictionary.AFFIX_FLAG;
+import static org.apache.lucene.analysis.hunspell.Dictionary.AFFIX_STRIP_ORD;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.function.BiConsumer;
+import java.util.stream.Collectors;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.fst.FST;
+import org.apache.lucene.util.fst.IntsRefFSTEnum;
+
+/**
+ * A class that traverses the entire dictionary and applies affix rules to check if those yield
+ * correct suggestions similar enough to the given misspelled word
+ */
+class GeneratingSuggester {
+  private static final int MAX_ROOTS = 100;
+  private static final int MAX_WORDS = 100;
+  private static final int MAX_GUESSES = 200;
+  private final Dictionary dictionary;
+  private final Hunspell speller;
+
+  GeneratingSuggester(Hunspell speller) {
+    this.dictionary = speller.dictionary;
+    this.speller = speller;
+  }
+
+  List<String> suggest(String word, WordCase originalCase, Set<String> prevSuggestions) {
+    List<Weighted<Root<String>>> roots = findSimilarDictionaryEntries(word, originalCase);
+    List<Weighted<String>> expanded = expandRoots(word, roots);
+    TreeSet<Weighted<String>> bySimilarity = rankBySimilarity(word, expanded);
+    return getMostRelevantSuggestions(bySimilarity, prevSuggestions);
+  }
+
+  private List<Weighted<Root<String>>> findSimilarDictionaryEntries(
+      String word, WordCase originalCase) {
+    TreeSet<Weighted<Root<String>>> roots = new TreeSet<>();
+    processFST(
+        dictionary.words,
+        (key, forms) -> {
+          if (Math.abs(key.length - word.length()) > 4) return;
+
+          String root = toString(key);
+          List<Root<String>> entries = filterSuitableEntries(root, forms);
+          if (entries.isEmpty()) return;
+
+          if (originalCase == WordCase.LOWER
+              && WordCase.caseOf(root) == WordCase.TITLE
+              && !dictionary.hasLanguage("de")) {
+            return;
+          }
+
+          String lower = dictionary.toLowerCase(root);
+          int sc =
+              ngram(3, word, lower, EnumSet.of(NGramOptions.LONGER_WORSE))
+                  + commonPrefix(word, root);
+
+          entries.forEach(e -> roots.add(new Weighted<>(e, sc)));
+        });
+    return roots.stream().limit(MAX_ROOTS).collect(Collectors.toList());
+  }
+
+  private void processFST(FST<IntsRef> fst, BiConsumer<IntsRef, IntsRef> keyValueConsumer) {
+    if (fst == null) return;
+    try {
+      IntsRefFSTEnum<IntsRef> fstEnum = new IntsRefFSTEnum<>(fst);
+      IntsRefFSTEnum.InputOutput<IntsRef> mapping;
+      while ((mapping = fstEnum.next()) != null) {
+        keyValueConsumer.accept(mapping.input, mapping.output);
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private static String toString(IntsRef key) {
+    char[] chars = new char[key.length];
+    for (int i = 0; i < key.length; i++) {
+      chars[i] = (char) key.ints[i + key.offset];
+    }
+    return new String(chars);
+  }
+
+  private List<Root<String>> filterSuitableEntries(String word, IntsRef forms) {
+    List<Root<String>> result = new ArrayList<>();
+    for (int i = 0; i < forms.length; i += dictionary.formStep()) {
+      int entryId = forms.ints[forms.offset + i];
+      if (dictionary.hasFlag(entryId, dictionary.forbiddenword)
+          || dictionary.hasFlag(entryId, dictionary.noSuggest)
+          || dictionary.hasFlag(entryId, Dictionary.HIDDEN_FLAG)
+          || dictionary.hasFlag(entryId, dictionary.onlyincompound)) {
+        continue;
+      }
+      result.add(new Root<>(word, entryId));
+    }
+
+    return result;
+  }
+
+  private List<Weighted<String>> expandRoots(
+      String misspelled, List<Weighted<Root<String>>> roots) {
+    int thresh = calcThreshold(misspelled);
+
+    TreeSet<Weighted<String>> expanded = new TreeSet<>();
+    for (Weighted<Root<String>> weighted : roots) {
+      for (String guess : expandRoot(weighted.word, misspelled)) {
+        String lower = dictionary.toLowerCase(guess);
+        int sc =
+            ngram(misspelled.length(), misspelled, lower, EnumSet.of(NGramOptions.ANY_MISMATCH))
+                + commonPrefix(misspelled, guess);
+        if (sc > thresh) {
+          expanded.add(new Weighted<>(guess, sc));
+        }
+      }
+    }
+    return expanded.stream().limit(MAX_GUESSES).collect(Collectors.toList());
+  }
+
+  // find minimum threshold for a passable suggestion
+  // mangle original word three different ways
+  // and score them to generate a minimum acceptable score
+  private static int calcThreshold(String word) {
+    int thresh = 0;
+    for (int sp = 1; sp < 4; sp++) {
+      char[] mw = word.toCharArray();
+      for (int k = sp; k < word.length(); k += 4) {
+        mw[k] = '*';
+      }
+
+      thresh += ngram(word.length(), word, new String(mw), EnumSet.of(NGramOptions.ANY_MISMATCH));
+    }
+    return thresh / 3 - 1;
+  }
+
+  private List<String> expandRoot(Root<String> root, String misspelled) {
+    List<String> crossProducts = new ArrayList<>();
+    Set<String> result = new LinkedHashSet<>();
+
+    if (!dictionary.hasFlag(root.entryId, dictionary.needaffix)) {
+      result.add(root.word);
+    }
+
+    // suffixes
+    processFST(
+        dictionary.suffixes,
+        (key, ids) -> {
+          String suffix = new StringBuilder(toString(key)).reverse().toString();
+          if (misspelled.length() <= suffix.length() || !misspelled.endsWith(suffix)) return;
+
+          for (int i = 0; i < ids.length; i++) {
+            int suffixId = ids.ints[ids.offset + i];
+            if (!hasCompatibleFlags(root, suffixId) || !checkAffixCondition(suffixId, root.word)) {
+              continue;
+            }
+
+            String withSuffix =
+                root.word.substring(0, root.word.length() - affixStripLength(suffixId)) + suffix;
+            result.add(withSuffix);
+            if (dictionary.isCrossProduct(suffixId)) {
+              crossProducts.add(withSuffix);
+            }
+          }
+        });
+
+    // cross-product prefixes
+    processFST(
+        dictionary.prefixes,
+        (key, ids) -> {
+          String prefix = toString(key);
+          if (misspelled.length() <= prefix.length() || !misspelled.startsWith(prefix)) return;
+
+          for (int i = 0; i < ids.length; i++) {
+            int prefixId = ids.ints[ids.offset + i];
+            if (!dictionary.hasFlag(root.entryId, dictionary.affixData(prefixId, AFFIX_FLAG))
+                || !dictionary.isCrossProduct(prefixId)) {
+              continue;
+            }
+
+            for (String suffixed : crossProducts) {
+              if (checkAffixCondition(prefixId, suffixed)) {
+                result.add(prefix + suffixed.substring(affixStripLength(prefixId)));
+              }
+            }
+          }
+        });
+
+    // pure prefixes
+    processFST(
+        dictionary.prefixes,
+        (key, ids) -> {
+          String prefix = toString(key);
+          if (misspelled.length() <= prefix.length() || !misspelled.startsWith(prefix)) return;
+
+          for (int i = 0; i < ids.length; i++) {
+            int prefixId = ids.ints[ids.offset + i];
+            if (hasCompatibleFlags(root, prefixId) && checkAffixCondition(prefixId, root.word)) {
+              result.add(prefix + root.word.substring(affixStripLength(prefixId)));
+            }
+          }
+        });
+
+    return result.stream().limit(MAX_WORDS).collect(Collectors.toList());
+  }
+
+  private boolean hasCompatibleFlags(Root<?> root, int affixId) {
+    if (!dictionary.hasFlag(root.entryId, dictionary.affixData(affixId, AFFIX_FLAG))) {
+      return false;
+    }
+
+    int append = dictionary.affixData(affixId, AFFIX_APPEND);
+    return !dictionary.hasFlag(append, dictionary.needaffix)
+        && !dictionary.hasFlag(append, dictionary.circumfix)
+        && !dictionary.hasFlag(append, dictionary.onlyincompound);
+  }
+
+  private boolean checkAffixCondition(int suffixId, String stem) {
+    int condition = dictionary.getAffixCondition(suffixId);
+    return condition == 0 || dictionary.patterns.get(condition).run(stem);
+  }
+
+  private int affixStripLength(int affixId) {
+    char stripOrd = dictionary.affixData(affixId, AFFIX_STRIP_ORD);
+    return dictionary.stripOffsets[stripOrd + 1] - dictionary.stripOffsets[stripOrd];
+  }
+
+  private TreeSet<Weighted<String>> rankBySimilarity(String word, List<Weighted<String>> expanded) {
+    double fact = (10.0 - dictionary.maxDiff) / 5.0;
+    TreeSet<Weighted<String>> bySimilarity = new TreeSet<>();
+    for (Weighted<String> weighted : expanded) {
+      String guess = weighted.word;
+      String lower = dictionary.toLowerCase(guess);
+      if (lower.equals(word)) {
+        bySimilarity.add(new Weighted<>(guess, weighted.score + 2000));
+        break;
+      }
+
+      int re =
+          ngram(2, word, lower, EnumSet.of(NGramOptions.ANY_MISMATCH, NGramOptions.WEIGHTED))
+              + ngram(2, lower, word, EnumSet.of(NGramOptions.ANY_MISMATCH, NGramOptions.WEIGHTED));
+
+      int score =
+          2 * lcs(word, lower)
+              - Math.abs(word.length() - lower.length())
+              + commonCharacterPositionScore(word, lower)
+              + commonPrefix(word, lower)
+              + ngram(4, word, lower, EnumSet.of(NGramOptions.ANY_MISMATCH))
+              + re
+              + (re < (word.length() + lower.length()) * fact ? -1000 : 0);
+      bySimilarity.add(new Weighted<>(guess, score));
+    }
+    return bySimilarity;
+  }
+
+  private List<String> getMostRelevantSuggestions(
+      TreeSet<Weighted<String>> bySimilarity, Set<String> prevSuggestions) {
+    List<String> result = new ArrayList<>();
+    boolean hasExcellent = false;
+    for (Weighted<String> weighted : bySimilarity) {
+      if (weighted.score > 1000) {
+        hasExcellent = true;
+      } else if (hasExcellent) {
+        break; // leave only excellent suggestions, if any
+      }
+
+      boolean bad = weighted.score < -100;
+      // keep the best ngram suggestions, unless in ONLYMAXDIFF mode
+      if (bad && (!result.isEmpty() || dictionary.onlyMaxDiff)) {
+        break;
+      }
+
+      if (prevSuggestions.stream().noneMatch(weighted.word::contains)
+          && result.stream().noneMatch(weighted.word::contains)
+          && speller.checkWord(weighted.word)) {
+        result.add(weighted.word);
+        if (result.size() > dictionary.maxNGramSuggestions) {
+          break;
+        }
+      }
+
+      if (bad) {
+        break;
+      }
+    }
+    return result;
+  }
+
+  private static int commonPrefix(String s1, String s2) {
+    int i = 0;
+    int limit = Math.min(s1.length(), s2.length());
+    while (i < limit && s1.charAt(i) == s2.charAt(i)) {
+      i++;
+    }
+    return i;
+  }
+
+  // generate an n-gram score comparing s1 and s2
+  private static int ngram(int n, String s1, String s2, EnumSet<NGramOptions> opt) {
+    int score = 0;
+    int l1 = s1.length();
+    int l2 = s2.length();
+    if (l2 == 0) {
+      return 0;
+    }
+    for (int j = 1; j <= n; j++) {
+      int ns = 0;
+      for (int i = 0; i <= (l1 - j); i++) {
+        if (s2.contains(s1.substring(i, i + j))) {
+          ns++;
+        } else if (opt.contains(NGramOptions.WEIGHTED)) {
+          ns--;
+          if (i == 0 || i == l1 - j) {
+            ns--; // side weight
+          }
+        }
+      }
+      score = score + ns;
+      if (ns < 2 && !opt.contains(NGramOptions.WEIGHTED)) {
+        break;
+      }
+    }
+
+    int ns = 0;
+    if (opt.contains(NGramOptions.LONGER_WORSE)) {
+      ns = (l2 - l1) - 2;
+    }
+    if (opt.contains(NGramOptions.ANY_MISMATCH)) {
+      ns = Math.abs(l2 - l1) - 2;
+    }
+    return score - Math.max(ns, 0);
+  }
+
+  private static int lcs(String s1, String s2) {
+    int[] lengths = new int[s2.length() + 1];
+
+    for (int i = 1; i <= s1.length(); i++) {
+      int prev = 0;
+      for (int j = 1; j <= s2.length(); j++) {
+        int cur = lengths[j];
+        lengths[j] =
+            s1.charAt(i - 1) == s2.charAt(j - 1) ? prev + 1 : Math.max(cur, lengths[j - 1]);
+        prev = cur;
+      }
+    }
+    return lengths[s2.length()];
+  }
+
+  private static int commonCharacterPositionScore(String s1, String s2) {
+    int num = 0;
+    int diffPos1 = -1;
+    int diffPos2 = -1;
+    int diff = 0;
+    int i;
+    for (i = 0; i < s1.length() && i < s2.length(); ++i) {
+      if (s1.charAt(i) == s2.charAt(i)) {
+        num++;
+      } else {
+        if (diff == 0) diffPos1 = i;
+        else if (diff == 1) diffPos2 = i;
+        diff++;
+      }
+    }
+    int commonScore = num > 0 ? 1 : 0;
+    if (diff == 2
+        && i == s1.length()
+        && i == s2.length()
+        && s1.charAt(diffPos1) == s2.charAt(diffPos2)
+        && s1.charAt(diffPos2) == s2.charAt(diffPos1)) {
+      return commonScore + 10;
+    }
+    return commonScore;
+  }
+
+  private enum NGramOptions {
+    WEIGHTED,
+    LONGER_WORSE,
+    ANY_MISMATCH
+  }
+
+  private static class Weighted<T extends Comparable<T>> implements Comparable<Weighted<T>> {
+    final T word;
+    final int score;
+
+    Weighted(T word, int score) {
+      this.word = word;
+      this.score = score;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (!(o instanceof Weighted)) return false;
+      @SuppressWarnings("unchecked")
+      Weighted<T> that = (Weighted<T>) o;
+      return score == that.score && word.equals(that.word);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(word, score);
+    }
+
+    @Override
+    public String toString() {
+      return word + "(" + score + ")";
+    }
+
+    @Override
+    public int compareTo(Weighted<T> o) {
+      int cmp = Integer.compare(score, o.score);
+      return cmp != 0 ? -cmp : word.compareTo(o.word);
+    }
+  }
+}
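
To picture the n-gram scoring that GeneratingSuggester relies on above (ngram, commonPrefix and the Weighted ranking), here is a minimal standalone sketch of the core idea; the NGramSketch class and its ngramScore helper are hypothetical code written for this explanation, not part of the Lucene sources:

// Hypothetical, self-contained sketch of the n-gram overlap scoring idea used by
// GeneratingSuggester above (not Lucene code): for each n-gram length up to n,
// count how many substrings of s1 also occur in s2, then penalize large length
// differences, roughly like the ANY_MISMATCH mode.
public final class NGramSketch {
  static int ngramScore(int n, String s1, String s2) {
    int score = 0;
    for (int j = 1; j <= n; j++) {
      int ns = 0;
      for (int i = 0; i <= s1.length() - j; i++) {
        if (s2.contains(s1.substring(i, i + j))) {
          ns++; // this j-gram of s1 occurs somewhere in s2
        }
      }
      score += ns;
      if (ns < 2) {
        break; // longer n-grams cannot match once shorter ones barely do
      }
    }
    return score - Math.max(Math.abs(s2.length() - s1.length()) - 2, 0);
  }

  public static void main(String[] args) {
    // A misspelling shares many short n-grams with its intended word:
    System.out.println(ngramScore(3, "permanent", "permenant")); // high positive score
    System.out.println(ngramScore(3, "permanent", "zebra"));     // much lower score
  }
}
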
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java
similarity index 69%
rename from lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java
rename to lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java
index 53bf53e..db0e3e4 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java
@@ -22,25 +22,37 @@ import static org.apache.lucene.analysis.hunspell.WordContext.COMPOUND_END;
 import static org.apache.lucene.analysis.hunspell.WordContext.COMPOUND_MIDDLE;
 import static org.apache.lucene.analysis.hunspell.WordContext.SIMPLE_WORD;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Locale;
 import java.util.Set;
-import org.apache.lucene.util.BytesRef;
+import java.util.stream.Collectors;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 
 /**
- * A spell checker based on Hunspell dictionaries. The objects of this class are not thread-safe
- * (but a single underlying Dictionary can be shared by multiple spell-checkers in different
- * threads). Not all Hunspell features are supported yet.
+ * A spell checker based on Hunspell dictionaries. This class can be used in place of native
+ * Hunspell for many languages for spell-checking and suggestion purposes. Note that not all
+ * languages and features are supported yet. For example:
+ *
+ * <ul>
+ *   <li>Hungarian (as it doesn't only rely on dictionaries, but has some logic directly in the
+ *       source code)
+ *   <li>Languages with Unicode characters outside of the Basic Multilingual Plane
+ *   <li>PHONE affix file option for suggestions
+ * </ul>
+ *
+ * <p>The objects of this class are not thread-safe (but a single underlying Dictionary can be
+ * shared by multiple spell-checkers in different threads).
  */
-public class SpellChecker {
+public class Hunspell {
   final Dictionary dictionary;
   final Stemmer stemmer;
-  private final BytesRef scratch = new BytesRef();
 
-  public SpellChecker(Dictionary dictionary) {
+  public Hunspell(Dictionary dictionary) {
     this.dictionary = dictionary;
     stemmer = new Stemmer(dictionary);
   }
@@ -66,17 +78,22 @@ public class SpellChecker {
     }
 
     char[] wordChars = word.toCharArray();
-    if (dictionary.isForbiddenWord(wordChars, wordChars.length, scratch)) {
-      return false;
+    Boolean simpleResult = checkSimpleWord(wordChars, wordChars.length, null);
+    if (simpleResult != null) {
+      return simpleResult;
     }
 
-    if (checkWord(wordChars, wordChars.length, null)) {
+    if (checkCompounds(wordChars, wordChars.length, null)) {
       return true;
     }
 
     WordCase wc = stemmer.caseOf(wordChars, wordChars.length);
-    if ((wc == WordCase.UPPER || wc == WordCase.TITLE) && checkCaseVariants(wordChars, wc)) {
-      return true;
+    if ((wc == WordCase.UPPER || wc == WordCase.TITLE)) {
+      Stemmer.CaseVariationProcessor variationProcessor =
+          (variant, varLength, originalCase) -> !checkWord(variant, varLength, originalCase);
+      if (!stemmer.varyCase(wordChars, wordChars.length, wc, variationProcessor)) {
+        return true;
+      }
     }
 
     if (dictionary.breaks.isNotEmpty() && !hasTooManyBreakOccurrences(word)) {
@@ -94,53 +111,14 @@ public class SpellChecker {
     return spellClean(word.substring(0, length)) || spellClean(word.substring(0, length + 1));
   }
 
-  private boolean checkCaseVariants(char[] wordChars, WordCase wordCase) {
-    char[] caseVariant = wordChars;
-    if (wordCase == WordCase.UPPER) {
-      caseVariant = stemmer.caseFoldTitle(caseVariant, wordChars.length);
-      if (checkWord(caseVariant, wordChars.length, wordCase)) {
-        return true;
-      }
-      char[] aposCase = Stemmer.capitalizeAfterApostrophe(caseVariant, wordChars.length);
-      if (aposCase != null && checkWord(aposCase, aposCase.length, wordCase)) {
-        return true;
-      }
-      for (char[] variation : stemmer.sharpSVariations(caseVariant, wordChars.length)) {
-        if (checkWord(variation, variation.length, null)) {
-          return true;
-        }
-      }
-    }
-
-    if (dictionary.isDotICaseChangeDisallowed(wordChars)) {
-      return false;
-    }
-
-    char[] lower = stemmer.caseFoldLower(caseVariant, wordChars.length);
-    if (checkWord(lower, wordChars.length, wordCase)) {
-      return true;
-    }
-    if (wordCase == WordCase.UPPER) {
-      for (char[] variation : stemmer.sharpSVariations(lower, wordChars.length)) {
-        if (checkWord(variation, variation.length, null)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
   boolean checkWord(String word) {
     return checkWord(word.toCharArray(), word.length(), null);
   }
 
   Boolean checkSimpleWord(char[] wordChars, int length, WordCase originalCase) {
-    if (dictionary.isForbiddenWord(wordChars, length, scratch)) {
-      return false;
-    }
-
-    if (findStem(wordChars, 0, length, originalCase, SIMPLE_WORD) != null) {
-      return true;
+    Root<CharsRef> entry = findStem(wordChars, 0, length, originalCase, SIMPLE_WORD);
+    if (entry != null) {
+      return !dictionary.hasFlag(entry.entryId, dictionary.forbiddenword);
     }
 
     return null;
@@ -152,6 +130,10 @@ public class SpellChecker {
       return simpleResult;
     }
 
+    return checkCompounds(wordChars, length, originalCase);
+  }
+
+  private boolean checkCompounds(char[] wordChars, int length, WordCase originalCase) {
     if (dictionary.compoundRules != null
         && checkCompoundRules(wordChars, 0, length, new ArrayList<>())) {
       return true;
@@ -164,22 +146,29 @@ public class SpellChecker {
     return false;
   }
 
-  private CharsRef findStem(
+  private Root<CharsRef> findStem(
       char[] wordChars, int offset, int length, WordCase originalCase, WordContext context) {
-    CharsRef[] result = {null};
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    Root<CharsRef>[] result = new Root[1];
     stemmer.doStem(
         wordChars,
         offset,
         length,
         originalCase,
         context,
-        (stem, forms, formID) -> {
-          result[0] = stem;
+        (stem, formID, stemException) -> {
+          if (acceptsStem(formID)) {
+            result[0] = new Root<>(stem, formID);
+          }
           return false;
         });
     return result[0];
   }
 
+  boolean acceptsStem(int formID) {
+    return true;
+  }
+
   private boolean checkCompounds(CharsRef word, WordCase originalCase, CompoundPart prev) {
     if (prev != null && prev.index > dictionary.compoundMax - 2) return false;
 
@@ -188,13 +177,15 @@ public class SpellChecker {
       WordContext context = prev == null ? COMPOUND_BEGIN : COMPOUND_MIDDLE;
       int breakOffset = word.offset + breakPos;
       if (mayBreakIntoCompounds(word.chars, word.offset, word.length, breakOffset)) {
-        CharsRef stem = findStem(word.chars, word.offset, breakPos, originalCase, context);
+        Root<CharsRef> stem = findStem(word.chars, word.offset, breakPos, originalCase, context);
         if (stem == null
             && dictionary.simplifiedTriple
             && word.chars[breakOffset - 1] == word.chars[breakOffset]) {
           stem = findStem(word.chars, word.offset, breakPos + 1, originalCase, context);
         }
-        if (stem != null && (prev == null || prev.mayCompound(stem, breakPos, originalCase))) {
+        if (stem != null
+            && !dictionary.hasFlag(stem.entryId, dictionary.forbiddenword)
+            && (prev == null || prev.mayCompound(stem, breakPos, originalCase))) {
           CompoundPart part = new CompoundPart(prev, word, breakPos, stem, null);
           if (checkCompoundsAfter(originalCase, part)) {
             return true;
@@ -217,7 +208,8 @@ public class SpellChecker {
       if (expanded != null) {
         WordContext context = prev == null ? COMPOUND_BEGIN : COMPOUND_MIDDLE;
         int breakPos = pos + pattern.endLength();
-        CharsRef stem = findStem(expanded.chars, expanded.offset, breakPos, originalCase, context);
+        Root<CharsRef> stem =
+            findStem(expanded.chars, expanded.offset, breakPos, originalCase, context);
         if (stem != null) {
           CompoundPart part = new CompoundPart(prev, expanded, breakPos, stem, pattern);
           if (checkCompoundsAfter(originalCase, part)) {
@@ -234,10 +226,11 @@ public class SpellChecker {
     int breakPos = prev.length;
     int remainingLength = word.length - breakPos;
     int breakOffset = word.offset + breakPos;
-    CharsRef tailStem =
+    Root<CharsRef> tailStem =
         findStem(word.chars, breakOffset, remainingLength, originalCase, COMPOUND_END);
     if (tailStem != null
-        && !(dictionary.checkCompoundDup && equalsIgnoreCase(prev.stem, tailStem))
+        && !dictionary.hasFlag(tailStem.entryId, dictionary.forbiddenword)
+        && !(dictionary.checkCompoundDup && equalsIgnoreCase(prev.stem, tailStem.word))
         && !hasForceUCaseProblem(word.chars, breakOffset, remainingLength, originalCase)
         && prev.mayCompound(tailStem, remainingLength, originalCase)) {
       return true;
@@ -253,10 +246,10 @@ public class SpellChecker {
     if (originalCase == WordCase.TITLE || originalCase == WordCase.UPPER) return false;
 
     IntsRef forms = dictionary.lookupWord(chars, offset, length);
-    return forms != null && dictionary.hasFlag(forms, dictionary.forceUCase, scratch);
+    return forms != null && dictionary.hasFlag(forms, dictionary.forceUCase);
   }
 
-  private boolean equalsIgnoreCase(CharsRef cr1, CharsRef cr2) {
+  private boolean equalsIgnoreCase(CharSequence cr1, CharSequence cr2) {
     return cr1.toString().equalsIgnoreCase(cr2.toString());
   }
 
@@ -267,11 +260,15 @@ public class SpellChecker {
     final CheckCompoundPattern enablingPattern;
 
     CompoundPart(
-        CompoundPart prev, CharsRef tail, int length, CharsRef stem, CheckCompoundPattern enabler) {
+        CompoundPart prev,
+        CharsRef tail,
+        int length,
+        Root<CharsRef> stem,
+        CheckCompoundPattern enabler) {
       this.prev = prev;
       this.tail = tail;
       this.length = length;
-      this.stem = stem;
+      this.stem = stem.word;
       index = prev == null ? 1 : prev.index + 1;
       enablingPattern = enabler;
     }
@@ -281,22 +278,26 @@ public class SpellChecker {
       return (prev == null ? "" : prev + "+") + tail.subSequence(0, length);
     }
 
-    boolean mayCompound(CharsRef nextStem, int nextPartLength, WordCase originalCase) {
+    boolean mayCompound(Root<CharsRef> nextStem, int nextPartLength, WordCase originalCase) {
       boolean patternsOk =
           enablingPattern != null
-              ? enablingPattern.prohibitsCompounding(tail, length, stem, nextStem)
+              ? enablingPattern.prohibitsCompounding(tail, length, stem, nextStem.word)
               : dictionary.checkCompoundPatterns.stream()
-                  .noneMatch(p -> p.prohibitsCompounding(tail, length, stem, nextStem));
+                  .noneMatch(p -> p.prohibitsCompounding(tail, length, stem, nextStem.word));
       if (!patternsOk) {
         return false;
       }
 
-      //noinspection RedundantIfStatement
       if (dictionary.checkCompoundRep
           && isMisspelledSimpleWord(length + nextPartLength, originalCase)) {
         return false;
       }
-      return true;
+
+      String spaceSeparated =
+          new String(tail.chars, tail.offset, length)
+              + " "
+              + new String(tail.chars, tail.offset + length, nextPartLength);
+      return !checkWord(spaceSeparated);
     }
 
     private boolean isMisspelledSimpleWord(int length, WordCase originalCase) {
@@ -341,7 +342,7 @@ public class SpellChecker {
         words.add(forms);
 
         if (dictionary.compoundRules != null
-            && dictionary.compoundRules.stream().anyMatch(r -> r.mayMatch(words, scratch))) {
+            && dictionary.compoundRules.stream().anyMatch(r -> r.mayMatch(words))) {
           if (checkLastCompoundPart(wordChars, offset + breakPos, length - breakPos, words)) {
             return true;
           }
@@ -364,8 +365,7 @@ public class SpellChecker {
     if (forms == null) return false;
 
     words.add(forms);
-    boolean result =
-        dictionary.compoundRules.stream().anyMatch(r -> r.fullyMatches(words, scratch));
+    boolean result = dictionary.compoundRules.stream().anyMatch(r -> r.fullyMatches(words));
     words.remove(words.size() - 1);
     return result;
   }
@@ -450,14 +450,59 @@ public class SpellChecker {
       word = dictionary.cleanInput(word, new StringBuilder()).toString();
     }
 
-    ModifyingSuggester modifier = new ModifyingSuggester(this);
-    Set<String> result = modifier.suggest(word);
+    WordCase wordCase = WordCase.caseOf(word);
+    if (dictionary.forceUCase != FLAG_UNSET && wordCase == WordCase.LOWER) {
+      String title = dictionary.toTitleCase(word);
+      if (spell(title)) {
+        return Collections.singletonList(title);
+      }
+    }
+
+    Hunspell suggestionSpeller =
+        new Hunspell(dictionary) {
+          @Override
+          boolean acceptsStem(int formID) {
+            return !dictionary.hasFlag(formID, dictionary.noSuggest)
+                && !dictionary.hasFlag(formID, dictionary.subStandard);
+          }
+        };
+    ModifyingSuggester modifier = new ModifyingSuggester(suggestionSpeller);
+    Set<String> suggestions = modifier.suggest(word, wordCase);
+
+    if (!modifier.hasGoodSuggestions && dictionary.maxNGramSuggestions > 0) {
+      suggestions.addAll(
+          new GeneratingSuggester(suggestionSpeller)
+              .suggest(dictionary.toLowerCase(word), wordCase, suggestions));
+    }
 
-    if (word.contains("-") && result.stream().noneMatch(s -> s.contains("-"))) {
-      result.addAll(modifyChunksBetweenDashes(word));
+    if (word.contains("-") && suggestions.stream().noneMatch(s -> s.contains("-"))) {
+      suggestions.addAll(modifyChunksBetweenDashes(word));
     }
 
-    return new ArrayList<>(result);
+    Set<String> result = new LinkedHashSet<>();
+    for (String candidate : suggestions) {
+      result.add(adjustSuggestionCase(candidate, wordCase, word));
+      if (wordCase == WordCase.UPPER && dictionary.checkSharpS && candidate.contains("ß")) {
+        result.add(candidate);
+      }
+    }
+    return result.stream().map(this::cleanOutput).collect(Collectors.toList());
+  }
+
+  private String adjustSuggestionCase(String candidate, WordCase originalCase, String original) {
+    if (originalCase == WordCase.UPPER) {
+      String upper = candidate.toUpperCase(Locale.ROOT);
+      if (upper.contains(" ") || spell(upper)) {
+        return upper;
+      }
+    }
+    if (Character.isUpperCase(original.charAt(0))) {
+      String title = Character.toUpperCase(candidate.charAt(0)) + candidate.substring(1);
+      if (title.contains(" ") || spell(title)) {
+        return title;
+      }
+    }
+    return candidate;
   }
 
   private List<String> modifyChunksBetweenDashes(String word) {
@@ -474,7 +519,7 @@ public class SpellChecker {
         if (!spell(chunk)) {
           for (String chunkSug : suggest(chunk)) {
             String replaced = word.substring(0, chunkStart) + chunkSug + word.substring(chunkEnd);
-            if (!dictionary.isForbiddenWord(replaced.toCharArray(), replaced.length(), scratch)) {
+            if (spell(replaced)) {
               result.add(replaced);
             }
           }
@@ -485,4 +530,16 @@ public class SpellChecker {
     }
     return result;
   }
+
+  private String cleanOutput(String s) {
+    if (!dictionary.needsOutputCleaning) return s;
+
+    try {
+      StringBuilder sb = new StringBuilder(s);
+      Dictionary.applyMappings(dictionary.oconv, sb);
+      return sb.toString();
+    } catch (IOException bogus) {
+      throw new RuntimeException(bogus);
+    }
+  }
 }
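
As a rough usage sketch of the renamed Hunspell entry point (assumptions: the four-argument Dictionary constructor shown here matches the current tree, the spell/suggest methods are public as in this diff, and en_US.aff/en_US.dic exist on disk; adjust to your setup):

// Hypothetical usage sketch, not part of the patch.
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.analysis.hunspell.Hunspell;
import org.apache.lucene.store.ByteBuffersDirectory;

public class HunspellDemo {
  public static void main(String[] args) throws Exception {
    try (InputStream aff = Files.newInputStream(Path.of("en_US.aff"));
        InputStream dic = Files.newInputStream(Path.of("en_US.dic"))) {
      // Assumption: Dictionary(Directory, String, InputStream, InputStream) as in the current sources.
      Dictionary dictionary = new Dictionary(new ByteBuffersDirectory(), "hunspell", aff, dic);
      Hunspell speller = new Hunspell(dictionary);
      System.out.println(speller.spell("hello"));  // expected: true
      System.out.println(speller.suggest("helo")); // e.g. [hello, help, ...]
    }
  }
}
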
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ModifyingSuggester.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ModifyingSuggester.java
index 0c60e1b..5017ff2 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ModifyingSuggester.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/ModifyingSuggester.java
@@ -23,31 +23,33 @@ import java.util.List;
 import java.util.Locale;
 import java.util.stream.Collectors;
 
+/** A class that modifies the given misspelled word in various ways to get correct suggestions. */
 class ModifyingSuggester {
   private static final int MAX_CHAR_DISTANCE = 4;
   private final LinkedHashSet<String> result = new LinkedHashSet<>();
   private final char[] tryChars;
-  private final SpellChecker speller;
+  private final Hunspell speller;
+  boolean hasGoodSuggestions;
 
-  ModifyingSuggester(SpellChecker speller) {
+  ModifyingSuggester(Hunspell speller) {
     this.speller = speller;
     tryChars = speller.dictionary.tryChars.toCharArray();
   }
 
-  LinkedHashSet<String> suggest(String word) {
-    tryVariationsOf(word);
+  LinkedHashSet<String> suggest(String word, WordCase wordCase) {
+    String low = wordCase != WordCase.LOWER ? speller.dictionary.toLowerCase(word) : word;
+    if (wordCase == WordCase.UPPER || wordCase == WordCase.MIXED) {
+      trySuggestion(low);
+    }
 
-    WordCase wc = WordCase.caseOf(word);
+    tryVariationsOf(word);
 
-    if (wc == WordCase.UPPER) {
-      tryVariationsOf(speller.dictionary.toLowerCase(word));
+    if (wordCase == WordCase.TITLE) {
+      tryVariationsOf(low);
+    } else if (wordCase == WordCase.UPPER) {
+      tryVariationsOf(low);
       tryVariationsOf(speller.dictionary.toTitleCase(word));
-      return result.stream()
-          .map(this::tryUpperCase)
-          .collect(Collectors.toCollection(LinkedHashSet::new));
-    }
-
-    if (wc == WordCase.MIXED) {
+    } else if (wordCase == WordCase.MIXED) {
       int dot = word.indexOf('.');
       if (dot > 0
           && dot < word.length() - 1
@@ -55,25 +57,46 @@ class ModifyingSuggester {
         result.add(word.substring(0, dot + 1) + " " + word.substring(dot + 1));
       }
 
-      tryVariationsOf(speller.dictionary.toLowerCase(word));
+      boolean capitalized = Character.isUpperCase(word.charAt(0));
+      if (capitalized) {
+        tryVariationsOf(speller.dictionary.caseFold(word.charAt(0)) + word.substring(1));
+      }
+
+      tryVariationsOf(low);
+
+      if (capitalized) {
+        tryVariationsOf(speller.dictionary.toTitleCase(low));
+      }
+
+      return result.stream()
+          .map(s -> capitalizeAfterSpace(low, s))
+          .collect(Collectors.toCollection(LinkedHashSet::new));
     }
 
     return result;
   }
 
-  private String tryUpperCase(String candidate) {
-    String upper = candidate.toUpperCase(Locale.ROOT);
-    if (upper.contains(" ") || speller.spell(upper)) {
-      return upper;
+  // aNew -> "a New" (instead of "a new")
+  private String capitalizeAfterSpace(String lowMisspelled, String candidate) {
+    int space = candidate.indexOf(' ');
+    int tail = candidate.length() - space - 1;
+    if (space > 0
+        && lowMisspelled.regionMatches(lowMisspelled.length() - tail, candidate, space + 1, tail)) {
+      return candidate.substring(0, space + 1)
+          + Character.toUpperCase(candidate.charAt(space + 1))
+          + candidate.substring(space + 2);
     }
-    String title = speller.dictionary.toTitleCase(candidate);
-    return speller.spell(title) ? title : candidate;
+    return candidate;
   }
 
   private void tryVariationsOf(String word) {
-    boolean hasGoodSuggestions = trySuggestion(word.toUpperCase(Locale.ROOT));
+    hasGoodSuggestions |= trySuggestion(word.toUpperCase(Locale.ROOT));
     hasGoodSuggestions |= tryRep(word);
 
+    if (!speller.dictionary.mapTable.isEmpty()) {
+      enumerateMapReplacements(word, "", 0);
+    }
+
     trySwappingChars(word);
     tryLongSwap(word);
     tryNeighborKeys(word);
@@ -116,6 +139,27 @@ class ModifyingSuggester {
     return result.size() > before;
   }
 
+  private void enumerateMapReplacements(String word, String accumulated, int offset) {
+    if (offset == word.length()) {
+      trySuggestion(accumulated);
+      return;
+    }
+
+    for (List<String> entries : speller.dictionary.mapTable) {
+      for (String entry : entries) {
+        if (word.regionMatches(offset, entry, 0, entry.length())) {
+          for (String replacement : entries) {
+            if (!entry.equals(replacement)) {
+              enumerateMapReplacements(word, accumulated + replacement, offset + entry.length());
+            }
+          }
+        }
+      }
+    }
+
+    enumerateMapReplacements(word, accumulated + word.charAt(offset), offset + 1);
+  }
+
   private boolean checkSimpleWord(String part) {
     return Boolean.TRUE.equals(speller.checkSimpleWord(part.toCharArray(), part.length(), null));
   }
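
The enumerateMapReplacements recursion above can be pictured with a small self-contained sketch (hypothetical code written for illustration, not the Lucene class): each MAP group lists characters or sequences the dictionary treats as related, and the recursion tries every combination of swaps along the word:

// Hypothetical sketch of MAP-style replacement enumeration (not Lucene code).
import java.util.ArrayList;
import java.util.List;

public class MapReplacementSketch {
  static void enumerate(String word, String acc, int offset,
                        List<List<String>> groups, List<String> out) {
    if (offset == word.length()) {
      out.add(acc); // one fully rewritten candidate
      return;
    }
    for (List<String> group : groups) {
      for (String entry : group) {
        if (word.regionMatches(offset, entry, 0, entry.length())) {
          for (String replacement : group) {
            if (!entry.equals(replacement)) {
              // swap this group member for a related one and continue past it
              enumerate(word, acc + replacement, offset + entry.length(), groups, out);
            }
          }
        }
      }
    }
    // also keep the original character and move on
    enumerate(word, acc + word.charAt(offset), offset + 1, groups, out);
  }

  public static void main(String[] args) {
    List<List<String>> groups = List.of(List.of("u", "ü"), List.of("ss", "ß"));
    List<String> out = new ArrayList<>();
    enumerate("strasse", "", 0, groups, out);
    System.out.println(out); // includes "straße" and "strasse"
  }
}
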
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Root.java
similarity index 54%
copy from lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java
copy to lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Root.java
index 58477d8..e65992e 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Root.java
@@ -16,17 +16,38 @@
  */
 package org.apache.lucene.analysis.hunspell;
 
-import org.junit.BeforeClass;
+import java.util.Objects;
 
-public class TestDutchIJ extends StemmerTestBase {
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    init("IJ.aff", "IJ.dic");
+class Root<T extends CharSequence> implements Comparable<Root<T>> {
+  final T word;
+  final int entryId;
+
+  Root(T word, int entryId) {
+    this.word = word;
+    this.entryId = entryId;
+  }
+
+  @Override
+  public String toString() {
+    return word.toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (!(o instanceof Root)) return false;
+    @SuppressWarnings("unchecked")
+    Root<T> root = (Root<T>) o;
+    return entryId == root.entryId && word.equals(root.word);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(word, entryId);
   }
 
-  public void testStemming() {
-    assertStemsTo("ijs", "ijs");
-    assertStemsTo("IJs", "ijs");
-    assertStemsTo("Ijs");
+  @Override
+  public int compareTo(Root<T> o) {
+    return CharSequence.compare(word, o.word);
   }
 }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
index 44e2675..8afd9fc 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
@@ -18,13 +18,11 @@ package org.apache.lucene.analysis.hunspell;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
@@ -37,7 +35,6 @@ import org.apache.lucene.util.fst.FST;
  */
 final class Stemmer {
   private final Dictionary dictionary;
-  private final BytesRef scratch = new BytesRef();
   private final StringBuilder segment = new StringBuilder();
 
   // used for normalization
@@ -96,14 +93,10 @@ final class Stemmer {
       word = scratchBuffer;
     }
 
-    if (dictionary.isForbiddenWord(word, length, scratch)) {
-      return Collections.emptyList();
-    }
-
     List<CharsRef> list = new ArrayList<>();
     RootProcessor processor =
-        (stem, forms, formID) -> {
-          list.add(newStem(stem, forms, formID));
+        (stem, formID, stemException) -> {
+          list.add(newStem(stem, stemException));
           return true;
         };
 
@@ -113,46 +106,47 @@ final class Stemmer {
 
     WordCase wordCase = caseOf(word, length);
     if (wordCase == WordCase.UPPER || wordCase == WordCase.TITLE) {
-      addCaseVariations(word, length, wordCase, processor);
+      CaseVariationProcessor variationProcessor =
+          (variant, varLength, originalCase) ->
+              doStem(variant, 0, varLength, originalCase, WordContext.SIMPLE_WORD, processor);
+      varyCase(word, length, wordCase, variationProcessor);
     }
     return list;
   }
 
-  private void addCaseVariations(
-      char[] word, int length, WordCase wordCase, RootProcessor processor) {
+  interface CaseVariationProcessor {
+    boolean process(char[] word, int length, WordCase originalCase);
+  }
+
+  boolean varyCase(char[] word, int length, WordCase wordCase, CaseVariationProcessor processor) {
     if (wordCase == WordCase.UPPER) {
       caseFoldTitle(word, length);
       char[] aposCase = capitalizeAfterApostrophe(titleBuffer, length);
-      if (aposCase != null) {
-        if (!doStem(aposCase, 0, length, wordCase, WordContext.SIMPLE_WORD, processor)) {
-          return;
-        }
+      if (aposCase != null && !processor.process(aposCase, length, wordCase)) {
+        return false;
       }
-      if (!doStem(titleBuffer, 0, length, wordCase, WordContext.SIMPLE_WORD, processor)) {
-        return;
+      if (!processor.process(titleBuffer, length, wordCase)) {
+        return false;
       }
-      for (char[] variation : sharpSVariations(titleBuffer, length)) {
-        if (!doStem(variation, 0, variation.length, null, WordContext.SIMPLE_WORD, processor)) {
-          return;
-        }
+      if (dictionary.checkSharpS && !varySharpS(titleBuffer, length, processor)) {
+        return false;
       }
     }
 
     if (dictionary.isDotICaseChangeDisallowed(word)) {
-      return;
+      return true;
     }
 
     caseFoldLower(wordCase == WordCase.UPPER ? titleBuffer : word, length);
-    if (!doStem(lowerBuffer, 0, length, wordCase, WordContext.SIMPLE_WORD, processor)) {
-      return;
+    if (!processor.process(lowerBuffer, length, wordCase)) {
+      return false;
     }
-    if (wordCase == WordCase.UPPER) {
-      for (char[] variation : sharpSVariations(lowerBuffer, length)) {
-        if (!doStem(variation, 0, variation.length, null, WordContext.SIMPLE_WORD, processor)) {
-          return;
-        }
-      }
+    if (wordCase == WordCase.UPPER
+        && dictionary.checkSharpS
+        && !varySharpS(lowerBuffer, length, processor)) {
+      return false;
     }
+    return true;
   }
 
   // temporary buffers for case variants
@@ -169,26 +163,24 @@ final class Stemmer {
   }
 
   /** folds titlecase variant of word to titleBuffer */
-  char[] caseFoldTitle(char[] word, int length) {
+  private void caseFoldTitle(char[] word, int length) {
     titleBuffer = ArrayUtil.grow(titleBuffer, length);
     System.arraycopy(word, 0, titleBuffer, 0, length);
     for (int i = 1; i < length; i++) {
       titleBuffer[i] = dictionary.caseFold(titleBuffer[i]);
     }
-    return titleBuffer;
   }
 
   /** folds lowercase variant of word (title cased) to lowerBuffer */
-  char[] caseFoldLower(char[] word, int length) {
+  private void caseFoldLower(char[] word, int length) {
     lowerBuffer = ArrayUtil.grow(lowerBuffer, length);
     System.arraycopy(word, 0, lowerBuffer, 0, length);
     lowerBuffer[0] = dictionary.caseFold(lowerBuffer[0]);
-    return lowerBuffer;
   }
 
   // Special prefix handling for Catalan, French, Italian:
   // prefixes separated by apostrophe (SANT'ELIA -> Sant'+Elia).
-  static char[] capitalizeAfterApostrophe(char[] word, int length) {
+  private static char[] capitalizeAfterApostrophe(char[] word, int length) {
     for (int i = 1; i < length - 1; i++) {
       if (word[i] == '\'') {
         char next = word[i + 1];
@@ -203,9 +195,7 @@ final class Stemmer {
     return null;
   }
 
-  List<char[]> sharpSVariations(char[] word, int length) {
-    if (!dictionary.checkSharpS) return Collections.emptyList();
-
+  private boolean varySharpS(char[] word, int length, CaseVariationProcessor processor) {
     Stream<String> result =
         new Object() {
           int findSS(int start) {
@@ -235,10 +225,15 @@ final class Stemmer {
             }
           }
         }.replaceSS(0, 0);
-    if (result == null) return Collections.emptyList();
+    if (result == null) return true;
 
     String src = new String(word, 0, length);
-    return result.filter(s -> !s.equals(src)).map(String::toCharArray).collect(Collectors.toList());
+    for (String s : result.collect(Collectors.toList())) {
+      if (!s.equals(src) && !processor.process(s.toCharArray(), s.length(), null)) {
+        return false;
+      }
+    }
+    return true;
   }
 
   boolean doStem(
@@ -251,29 +246,29 @@ final class Stemmer {
     IntsRef forms = dictionary.lookupWord(word, offset, length);
     if (forms != null) {
       for (int i = 0; i < forms.length; i += formStep) {
-        char[] wordFlags = dictionary.decodeFlags(forms.ints[forms.offset + i], scratch);
-        if (!acceptCase(originalCase, wordFlags, word, offset, length)) {
+        int entryId = forms.ints[forms.offset + i];
+        if (!acceptCase(originalCase, entryId, word, offset, length)) {
           continue;
         }
         // we can't add this form, it's a pseudostem requiring an affix
-        if (Dictionary.hasFlag(wordFlags, dictionary.needaffix)) {
+        if (dictionary.hasFlag(entryId, dictionary.needaffix)) {
           continue;
         }
         // we can't add this form, it only belongs inside a compound word
-        if (!context.isCompound() && Dictionary.hasFlag(wordFlags, dictionary.onlyincompound)) {
+        if (!context.isCompound() && dictionary.hasFlag(entryId, dictionary.onlyincompound)) {
           continue;
         }
         if (context.isCompound()) {
           if (context != WordContext.COMPOUND_END
-              && Dictionary.hasFlag(wordFlags, dictionary.compoundForbid)) {
+              && dictionary.hasFlag(entryId, dictionary.compoundForbid)) {
             return false;
           }
-          if (!Dictionary.hasFlag(wordFlags, dictionary.compoundFlag)
-              && !Dictionary.hasFlag(wordFlags, context.requiredFlag(dictionary))) {
+          if (!dictionary.hasFlag(entryId, dictionary.compoundFlag)
+              && !dictionary.hasFlag(entryId, context.requiredFlag(dictionary))) {
             continue;
           }
         }
-        if (!processor.processRoot(new CharsRef(word, offset, length), forms, i)) {
+        if (!callProcessor(word, offset, length, processor, forms, i)) {
           return false;
         }
       }
@@ -289,8 +284,6 @@ final class Stemmer {
           -1,
           0,
           true,
-          true,
-          false,
           false,
           originalCase,
           processor);
@@ -300,8 +293,8 @@ final class Stemmer {
   }
 
   private boolean acceptCase(
-      WordCase originalCase, char[] wordFlags, char[] word, int offset, int length) {
-    boolean keepCase = Dictionary.hasFlag(wordFlags, dictionary.keepcase);
+      WordCase originalCase, int entryId, char[] word, int offset, int length) {
+    boolean keepCase = dictionary.hasFlag(entryId, dictionary.keepcase);
     if (originalCase != null) {
       if (keepCase
           && dictionary.checkSharpS
@@ -311,7 +304,7 @@ final class Stemmer {
       }
       return !keepCase;
     }
-    return !Dictionary.hasHiddenFlag(wordFlags);
+    return !dictionary.hasFlag(entryId, Dictionary.HIDDEN_FLAG);
   }
 
   private boolean containsSharpS(char[] word, int offset, int length) {
@@ -346,23 +339,27 @@ final class Stemmer {
   }
 
   interface RootProcessor {
-    /** @return whether the processing should be continued */
-    boolean processRoot(CharsRef stem, IntsRef forms, int formID);
+    /**
+     * @param stem the text of the found dictionary entry
+     * @param formID internal id of the dictionary entry, e.g. to be used in {@link
+     *     Dictionary#hasFlag(int, char)}
+     * @param stemException "st:" morphological data if present, {@code null} otherwise
+     * @return whether the processing should be continued
+     */
+    boolean processRoot(CharsRef stem, int formID, String stemException);
   }
 
-  private CharsRef newStem(CharsRef stem, IntsRef forms, int formID) {
-    final String exception;
+  private String stemException(IntsRef forms, int formIndex) {
     if (dictionary.hasStemExceptions) {
-      int exceptionID = forms.ints[forms.offset + formID + 1];
+      int exceptionID = forms.ints[forms.offset + formIndex + 1];
       if (exceptionID > 0) {
-        exception = dictionary.getStemException(exceptionID);
-      } else {
-        exception = null;
+        return dictionary.getStemException(exceptionID);
       }
-    } else {
-      exception = null;
     }
+    return null;
+  }
 
+  private CharsRef newStem(CharsRef stem, String exception) {
     if (dictionary.needsOutputCleaning) {
       scratchSegment.setLength(0);
       if (exception != null) {
@@ -408,12 +405,9 @@ final class Stemmer {
    *     checked against the word
    * @param recursionDepth current recursion depth
    * @param doPrefix true if we should remove prefixes
-   * @param doSuffix true if we should remove suffixes
    * @param previousWasPrefix true if the previous removal was a prefix: if we are removing a
    *     suffix, and it has no continuation requirements, it's ok. but two prefixes
    *     (COMPLEXPREFIXES) or two suffixes must have continuation requirements to recurse.
-   * @param circumfix true if the previous prefix removal was signed as a circumfix this means inner
-   *     most suffix must also contain circumfix flag.
    * @param originalCase if non-null, represents original word case to disallow case variations of
    *     word with KEEPCASE flags
    * @return whether the processing should be continued
@@ -428,9 +422,7 @@ final class Stemmer {
       int prefixId,
       int recursionDepth,
       boolean doPrefix,
-      boolean doSuffix,
       boolean previousWasPrefix,
-      boolean circumfix,
       WordCase originalCase,
       RootProcessor processor)
       throws IOException {
@@ -478,7 +470,6 @@ final class Stemmer {
                 -1,
                 recursionDepth,
                 true,
-                circumfix,
                 originalCase,
                 processor)) {
               return false;
@@ -488,7 +479,7 @@ final class Stemmer {
       }
     }
 
-    if (doSuffix && dictionary.suffixes != null) {
+    if (dictionary.suffixes != null) {
       FST<IntsRef> fst = dictionary.suffixes;
       FST.Arc<IntsRef> arc = suffixArcs[recursionDepth];
       fst.getFirstArc(arc);
@@ -533,7 +524,6 @@ final class Stemmer {
                 prefixId,
                 recursionDepth,
                 false,
-                circumfix,
                 originalCase,
                 processor)) {
               return false;
@@ -593,32 +583,30 @@ final class Stemmer {
     int append = dictionary.affixData(affix, Dictionary.AFFIX_APPEND);
 
     if (context.isCompound()) {
-      if (!isPrefix && dictionary.hasFlag(append, dictionary.compoundForbid, scratch)) {
+      if (!isPrefix && dictionary.hasFlag(append, dictionary.compoundForbid)) {
         return false;
       }
       WordContext allowed = isPrefix ? WordContext.COMPOUND_BEGIN : WordContext.COMPOUND_END;
-      if (context != allowed && !dictionary.hasFlag(append, dictionary.compoundPermit, scratch)) {
+      if (context != allowed && !dictionary.hasFlag(append, dictionary.compoundPermit)) {
         return false;
       }
       if (context == WordContext.COMPOUND_END
           && !isPrefix
           && !previousWasPrefix
-          && dictionary.hasFlag(append, dictionary.onlyincompound, scratch)) {
+          && dictionary.hasFlag(append, dictionary.onlyincompound)) {
         return false;
       }
     }
 
     if (recursionDepth == 0) {
       // check if affix is allowed in a non-compound word
-      return context.isCompound()
-          || !dictionary.hasFlag(append, dictionary.onlyincompound, scratch);
+      return context.isCompound() || !dictionary.hasFlag(append, dictionary.onlyincompound);
     }
 
-    if (isCrossProduct(affix)) {
+    if (dictionary.isCrossProduct(affix)) {
       // cross check incoming continuation class (flag of previous affix) against list.
-      char[] appendFlags = dictionary.decodeFlags(append, scratch);
-      if (context.isCompound() || !Dictionary.hasFlag(appendFlags, dictionary.onlyincompound)) {
-        return previousWasPrefix || Dictionary.hasFlag(appendFlags, prevFlag);
+      if (context.isCompound() || !dictionary.hasFlag(append, dictionary.onlyincompound)) {
+        return previousWasPrefix || dictionary.hasFlag(append, prevFlag);
       }
     }
 
@@ -631,7 +619,7 @@ final class Stemmer {
   // but this is a little bit more complicated.
   private boolean checkCondition(
       int affix, char[] c1, int c1off, int c1len, char[] c2, int c2off, int c2len) {
-    int condition = dictionary.affixData(affix, Dictionary.AFFIX_CONDITION) >>> 1;
+    int condition = dictionary.getAffixCondition(affix);
     if (condition != 0) {
       CharacterRunAutomaton pattern = dictionary.patterns.get(condition);
       int state = 0;
@@ -676,69 +664,53 @@ final class Stemmer {
       int prefixId,
       int recursionDepth,
       boolean prefix,
-      boolean circumfix,
       WordCase originalCase,
       RootProcessor processor)
       throws IOException {
     char flag = dictionary.affixData(affix, Dictionary.AFFIX_FLAG);
 
-    boolean skipLookup = needsAnotherAffix(affix, previousAffix, !prefix);
+    boolean skipLookup = needsAnotherAffix(affix, previousAffix, !prefix, prefixId);
     IntsRef forms = skipLookup ? null : dictionary.lookupWord(strippedWord, offset, length);
     if (forms != null) {
       for (int i = 0; i < forms.length; i += formStep) {
-        char[] wordFlags = dictionary.decodeFlags(forms.ints[forms.offset + i], scratch);
-        if (Dictionary.hasFlag(wordFlags, flag) || isFlagAppendedByAffix(prefixId, flag)) {
+        int entryId = forms.ints[forms.offset + i];
+        if (dictionary.hasFlag(entryId, flag) || isFlagAppendedByAffix(prefixId, flag)) {
           // confusing: in this one exception, we already chained the first prefix against the
           // second,
           // so it doesn't need to be checked against the word
           boolean chainedPrefix = dictionary.complexPrefixes && recursionDepth == 1 && prefix;
           if (!chainedPrefix && prefixId >= 0) {
             char prefixFlag = dictionary.affixData(prefixId, Dictionary.AFFIX_FLAG);
-            if (!Dictionary.hasFlag(wordFlags, prefixFlag)
+            if (!dictionary.hasFlag(entryId, prefixFlag)
                 && !isFlagAppendedByAffix(affix, prefixFlag)) {
               continue;
             }
           }
 
-          // if circumfix was previously set by a prefix, we must check this suffix,
-          // to ensure it has it, and vice versa
-          if (dictionary.circumfix != Dictionary.FLAG_UNSET) {
-            boolean suffixCircumfix = isFlagAppendedByAffix(affix, dictionary.circumfix);
-            if (circumfix != suffixCircumfix) {
-              continue;
-            }
-          }
-
           // we are looking for a case variant, but this word does not allow it
-          if (!acceptCase(originalCase, wordFlags, strippedWord, offset, length)) {
+          if (!acceptCase(originalCase, entryId, strippedWord, offset, length)) {
             continue;
           }
-          if (!context.isCompound() && Dictionary.hasFlag(wordFlags, dictionary.onlyincompound)) {
+          if (!context.isCompound() && dictionary.hasFlag(entryId, dictionary.onlyincompound)) {
             continue;
           }
           if (context.isCompound()) {
             char cFlag = context.requiredFlag(dictionary);
-            if (!Dictionary.hasFlag(wordFlags, cFlag)
+            if (!dictionary.hasFlag(entryId, cFlag)
                 && !isFlagAppendedByAffix(affix, cFlag)
-                && !Dictionary.hasFlag(wordFlags, dictionary.compoundFlag)
+                && !dictionary.hasFlag(entryId, dictionary.compoundFlag)
                 && !isFlagAppendedByAffix(affix, dictionary.compoundFlag)) {
               continue;
             }
           }
-          if (!processor.processRoot(new CharsRef(strippedWord, offset, length), forms, i)) {
+          if (!callProcessor(strippedWord, offset, length, processor, forms, i)) {
             return false;
           }
         }
       }
     }
 
-    // if a circumfix flag is defined in the dictionary, and we are a prefix, we need to check if we
-    // have that flag
-    if (dictionary.circumfix != Dictionary.FLAG_UNSET && !circumfix && prefix) {
-      circumfix = isFlagAppendedByAffix(affix, dictionary.circumfix);
-    }
-
-    if (isCrossProduct(affix) && recursionDepth <= 1) {
+    if (dictionary.isCrossProduct(affix) && recursionDepth <= 1) {
       boolean doPrefix;
       if (recursionDepth == 0) {
         if (prefix) {
@@ -776,9 +748,7 @@ final class Stemmer {
           prefixId,
           recursionDepth + 1,
           doPrefix,
-          true,
           prefix,
-          circumfix,
           originalCase,
           processor);
     }
@@ -786,7 +756,20 @@ final class Stemmer {
     return true;
   }
 
-  private boolean needsAnotherAffix(int affix, int previousAffix, boolean isSuffix) {
+  private boolean callProcessor(
+      char[] word, int offset, int length, RootProcessor processor, IntsRef forms, int i) {
+    CharsRef stem = new CharsRef(word, offset, length);
+    return processor.processRoot(stem, forms.ints[forms.offset + i], stemException(forms, i));
+  }
+
+  private boolean needsAnotherAffix(int affix, int previousAffix, boolean isSuffix, int prefixId) {
+    char circumfix = dictionary.circumfix;
+    // if circumfix was previously set by a prefix, we must check this suffix,
+    // to ensure it has it, and vice versa
+    if (isSuffix
+        && isFlagAppendedByAffix(prefixId, circumfix) != isFlagAppendedByAffix(affix, circumfix)) {
+      return true;
+    }
     if (isFlagAppendedByAffix(affix, dictionary.needaffix)) {
       return !isSuffix
           || previousAffix < 0
@@ -798,10 +781,6 @@ final class Stemmer {
   private boolean isFlagAppendedByAffix(int affixId, char flag) {
     if (affixId < 0 || flag == Dictionary.FLAG_UNSET) return false;
     int appendId = dictionary.affixData(affixId, Dictionary.AFFIX_APPEND);
-    return dictionary.hasFlag(appendId, flag, scratch);
-  }
-
-  private boolean isCrossProduct(int affix) {
-    return (dictionary.affixData(affix, Dictionary.AFFIX_CONDITION) & 1) == 1;
+    return dictionary.hasFlag(appendId, flag);
   }
 }
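
The varyCase flow above amounts to trying, for an all-caps token, the original form, its title-cased form and its fully lower-cased form (plus apostrophe and sharp-s variants when applicable). A toy sketch of that ordering, using plain Locale.ROOT lowercasing instead of the dictionary's own case folding (hypothetical helper, not the Lucene Stemmer):

// Hypothetical illustration of the case-variation order, not Lucene code.
import java.util.LinkedHashSet;
import java.util.Locale;
import java.util.Set;

public class CaseVariationSketch {
  static Set<String> caseVariants(String upperWord) {
    Set<String> variants = new LinkedHashSet<>();
    variants.add(upperWord);                                                    // OPENOFFICE
    String lower = upperWord.toLowerCase(Locale.ROOT);
    variants.add(Character.toUpperCase(lower.charAt(0)) + lower.substring(1));  // Openoffice
    variants.add(lower);                                                        // openoffice
    return variants;
  }

  public static void main(String[] args) {
    System.out.println(caseVariants("OPENOFFICE")); // [OPENOFFICE, Openoffice, openoffice]
  }
}
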
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/package-info.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/package-info.java
index 94870a3..4d6cd04 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/package-info.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/package-info.java
@@ -17,13 +17,11 @@
 
 /**
  * A Java implementation of <a href="http://hunspell.github.io/">Hunspell</a> stemming and
- * spell-checking algorithms, and a stemming TokenFilter based on it.
+ * spell-checking algorithms ({@link org.apache.lucene.analysis.hunspell.Hunspell}), and a stemming
+ * TokenFilter ({@link org.apache.lucene.analysis.hunspell.HunspellStemFilter}) based on it.
  *
  * <p>For dictionaries, see e.g. <a href="https://github.com/LibreOffice/dictionaries">LibreOffice
  * repository</a> or <a href="https://github.com/wooorm/dictionaries">Titus Wormer's collection
  * (UTF)</a>
- *
- * @see org.apache.lucene.analysis.hunspell.HunspellStemFilter
- * @see org.apache.lucene.analysis.hunspell.SpellChecker
  */
 package org.apache.lucene.analysis.hunspell;
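
Since the package documentation now points at HunspellStemFilter for stemming, a sketch of wiring it into an analysis chain might look as follows (assumptions: the HunspellStemFilter(TokenStream, Dictionary) constructor and StandardTokenizer are available in the same tree; written for illustration only):

// Hypothetical analyzer sketch, not part of the patch.
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.analysis.hunspell.HunspellStemFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;

public class HunspellAnalyzerSketch extends Analyzer {
  private final Dictionary dictionary;

  public HunspellAnalyzerSketch(Dictionary dictionary) {
    this.dictionary = dictionary;
  }

  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer source = new StandardTokenizer();
    // Replace each token with its Hunspell stem(s) using the shared Dictionary.
    TokenStream result = new HunspellStemFilter(source, dictionary);
    return new TokenStreamComponents(source, result);
  }
}
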
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
index 886272c..f64c6d8 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
@@ -16,35 +16,55 @@
  */
 package org.apache.lucene.analysis.hunspell;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
+import org.apache.lucene.util.NamedThreadFactory;
+import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.RamUsageTester;
+import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Ignore;
 
 /**
- * Loads all dictionaries from the directory specified in {@code -Dhunspell.dictionaries=...} and
- * prints their memory usage. All *.aff files are traversed directly inside the given directory or
- * in its immediate subdirectories. Each *.aff file must have a same-named sibling *.dic file. For
- * examples of such directories, refer to the {@link org.apache.lucene.analysis.hunspell package
- * documentation}
+ * Loads all dictionaries from the directory specified in the {@code hunspell.dictionaries} system
+ * property and prints their memory usage. All *.aff files are traversed recursively inside the
+ * given directory. Each *.aff file must have a same-named sibling *.dic file. For examples of such
+ * directories, refer to the {@link org.apache.lucene.analysis.hunspell package documentation}.
  */
-@Ignore("enable manually")
 @SuppressSysoutChecks(bugUrl = "prints important memory utilization stats per dictionary")
 public class TestAllDictionaries extends LuceneTestCase {
-
   static Stream<Path> findAllAffixFiles() throws IOException {
     String dicDir = System.getProperty("hunspell.dictionaries");
-    Assume.assumeFalse("Missing -Dhunspell.dictionaries=...", dicDir == null);
-    return Files.walk(Path.of(dicDir), 2).filter(f -> f.toString().endsWith(".aff"));
+    Assume.assumeFalse(
+        "Requires Hunspell dictionaries at -Dhunspell.dictionaries=...", dicDir == null);
+    Path dicPath = Paths.get(dicDir);
+    return Files.walk(dicPath).filter(f -> f.toString().endsWith(".aff")).sorted();
   }
 
   static Dictionary loadDictionary(Path aff) throws IOException, ParseException {
@@ -58,43 +78,134 @@ public class TestAllDictionaries extends LuceneTestCase {
     }
   }
 
-  public void testDictionariesLoadSuccessfully() throws Exception {
-    int failures = 0;
+  /** Hack ByteArrayInputStream to expose the current read position. */
+  private static class ExposePosition extends ByteArrayInputStream {
+    public ExposePosition(byte[] buf) {
+      super(buf);
+    }
+
+    public long position() {
+      return super.pos;
+    }
+  }
+
+  @Ignore
+  public void testMaxPrologueNeeded() throws Exception {
+    AtomicBoolean failTest = new AtomicBoolean();
+
+    Map<String, List<Long>> global = new LinkedHashMap<>();
     for (Path aff : findAllAffixFiles().collect(Collectors.toList())) {
-      try {
-        System.out.println(aff + "\t" + memoryUsage(loadDictionary(aff)));
-      } catch (Throwable e) {
-        failures++;
-        System.err.println("While checking " + aff + ":");
-        e.printStackTrace();
+      Map<String, List<Long>> local = new LinkedHashMap<>();
+      ByteArrayOutputStream baos = new ByteArrayOutputStream();
+      try (ExposePosition is = new ExposePosition(Files.readAllBytes(aff))) {
+        int chr;
+        while ((chr = is.read()) >= 0) {
+          baos.write(chr);
+
+          if (chr == '\n') {
+            String line = baos.toString(StandardCharsets.ISO_8859_1);
+            if (!line.isBlank()) {
+              String firstWord = line.split("\\s")[0];
+              switch (firstWord) {
+                case "SET":
+                case "FLAG":
+                  local.computeIfAbsent(firstWord, (k) -> new ArrayList<>()).add(is.position());
+                  global.computeIfAbsent(firstWord, (k) -> new ArrayList<>()).add(is.position());
+                  break;
+              }
+            }
+
+            baos.reset();
+          }
+        }
       }
+
+      local.forEach(
+          (flag, positions) -> {
+            if (positions.size() > 1) {
+              System.out.format(
+                  Locale.ROOT,
+                  "Flag %s at more than one position in %s: %s%n",
+                  flag,
+                  aff,
+                  positions);
+              failTest.set(true);
+            }
+          });
     }
-    assertEquals(failures + " failures!", 0, failures);
+
+    global.forEach(
+        (flag, positions) -> {
+          long max = positions.stream().mapToLong(v -> v).max().orElse(0);
+          System.out.printf(Locale.ROOT, "Flag %s at maximum offset %s%n", flag, max);
+          Assert.assertTrue(
+              "Flags beyond max prologue scan window: " + max,
+              max < Dictionary.MAX_PROLOGUE_SCAN_WINDOW);
+        });
+
+    if (failTest.get()) {
+      throw new AssertionError("Duplicate flags were present in at least one .aff file.");
+    }
+  }
+
+  public void testDictionariesLoadSuccessfully() throws Exception {
+    AtomicLong totalMemory = new AtomicLong();
+    AtomicLong totalWords = new AtomicLong();
+    int threads = Runtime.getRuntime().availableProcessors();
+    ExecutorService executor =
+        Executors.newFixedThreadPool(threads, new NamedThreadFactory("dictCheck-"));
+    List<Path> failures = Collections.synchronizedList(new ArrayList<>());
+    Function<Path, Void> process =
+        (Path aff) -> {
+          try {
+            Dictionary dic = loadDictionary(aff);
+            totalMemory.addAndGet(RamUsageTester.sizeOf(dic));
+            totalWords.addAndGet(RamUsageTester.sizeOf(dic.words));
+            System.out.println(aff + "\t" + memoryUsageSummary(dic));
+          } catch (Throwable e) {
+            failures.add(aff);
+            System.err.println("While checking " + aff + ":");
+            e.printStackTrace();
+          }
+          return null;
+        };
+
+    List<Callable<Void>> tasks =
+        findAllAffixFiles()
+            .map(aff -> (Callable<Void>) () -> process.apply(aff))
+            .collect(Collectors.toList());
+    try {
+      for (Future<?> future : executor.invokeAll(tasks)) {
+        future.get();
+      }
+
+      if (!failures.isEmpty()) {
+        throw new AssertionError(
+            "Certain dictionaries failed to parse:\n  - "
+                + failures.stream()
+                    .map(path -> path.toAbsolutePath().toString())
+                    .collect(Collectors.joining("\n  - ")));
+      }
+    } finally {
+      executor.shutdown();
+      assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
+    }
+
+    System.out.println("Total dictionaries loaded: " + tasks.size());
+    System.out.println("Total memory: " + RamUsageEstimator.humanReadableUnits(totalMemory.get()));
+    System.out.println(
+        "Total memory for word storage: " + RamUsageEstimator.humanReadableUnits(totalWords.get()));
   }
 
-  private static String memoryUsage(Dictionary dic) {
+  private static String memoryUsageSummary(Dictionary dic) {
     return RamUsageTester.humanSizeOf(dic)
         + "\t("
-        + "words="
-        + RamUsageTester.humanSizeOf(dic.words)
-        + ", "
-        + "flags="
-        + RamUsageTester.humanSizeOf(dic.flagLookup)
-        + ", "
-        + "strips="
-        + RamUsageTester.humanSizeOf(dic.stripData)
-        + ", "
-        + "conditions="
-        + RamUsageTester.humanSizeOf(dic.patterns)
-        + ", "
-        + "affixData="
-        + RamUsageTester.humanSizeOf(dic.affixData)
-        + ", "
-        + "prefixes="
-        + RamUsageTester.humanSizeOf(dic.prefixes)
-        + ", "
-        + "suffixes="
-        + RamUsageTester.humanSizeOf(dic.suffixes)
-        + ")";
+        + ("words=" + RamUsageTester.humanSizeOf(dic.words) + ", ")
+        + ("flags=" + RamUsageTester.humanSizeOf(dic.flagLookup) + ", ")
+        + ("strips=" + RamUsageTester.humanSizeOf(dic.stripData) + ", ")
+        + ("conditions=" + RamUsageTester.humanSizeOf(dic.patterns) + ", ")
+        + ("affixData=" + RamUsageTester.humanSizeOf(dic.affixData) + ", ")
+        + ("prefixes=" + RamUsageTester.humanSizeOf(dic.prefixes) + ", ")
+        + ("suffixes=" + RamUsageTester.humanSizeOf(dic.suffixes) + ")");
   }
 }
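
The testMaxPrologueNeeded check above exists because, judging by the MAX_PROLOGUE_SCAN_WINDOW assertion, Dictionary only scans a bounded prefix of each .aff file to discover the SET (charset) and FLAG (flag format) directives before parsing the rest. Below is a minimal, self-contained sketch of that prologue-sniffing idea; it is not Lucene's actual parser, and the AffPrologueSniffer name, the SCAN_WINDOW constant and the ISO-8859-1 fallback are illustrative assumptions only.

    import java.io.IOException;
    import java.nio.charset.Charset;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class AffPrologueSniffer {
      // Illustrative limit; the real bound lives in Dictionary.MAX_PROLOGUE_SCAN_WINDOW.
      private static final int SCAN_WINDOW = 8 * 1024;

      /** Looks at most SCAN_WINDOW bytes into an .aff file for a SET directive and returns its charset. */
      static Charset sniffCharset(Path aff) throws IOException {
        byte[] bytes = Files.readAllBytes(aff);
        int limit = Math.min(bytes.length, SCAN_WINDOW);
        // The prologue directives are plain ASCII, so decoding the prefix as ISO-8859-1 is safe.
        String prologue = new String(bytes, 0, limit, StandardCharsets.ISO_8859_1);
        for (String line : prologue.split("\n")) {
          String trimmed = line.trim();
          if (trimmed.startsWith("SET ")) {
            // May throw for unrecognized charset names; good enough for a sketch.
            return Charset.forName(trimmed.substring(4).trim());
          }
        }
        return StandardCharsets.ISO_8859_1; // Hunspell's documented default encoding
      }
    }
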
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
index 8c4bc30..6ef783c 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
@@ -24,7 +24,6 @@ import java.nio.charset.StandardCharsets;
 import java.text.ParseException;
 import org.apache.lucene.store.ByteBuffersDirectory;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.IntsRefBuilder;
@@ -46,16 +45,20 @@ public class TestDictionary extends LuceneTestCase {
     assertNotNull(ordList);
     assertEquals(1, ordList.length);
 
-    BytesRef ref = new BytesRef();
-    char[] flags = dictionary.decodeFlags(ordList.ints[0], ref);
-    assertEquals(1, flags.length);
+    assertEquals('B', assertSingleFlag(dictionary, ordList));
 
     int offset = random().nextInt(10);
     ordList = dictionary.lookupWord((" ".repeat(offset) + "lucen").toCharArray(), offset, 5);
     assertNotNull(ordList);
     assertEquals(1, ordList.length);
-    flags = dictionary.decodeFlags(ordList.ints[0], ref);
+    assertEquals('A', assertSingleFlag(dictionary, ordList));
+  }
+
+  private static char assertSingleFlag(Dictionary dictionary, IntsRef ordList) {
+    int entryId = ordList.ints[0];
+    char[] flags = dictionary.flagLookup.getFlags(entryId);
     assertEquals(1, flags.length);
+    return flags[0];
   }
 
   public void testCompressedDictionary() throws Exception {
@@ -63,9 +66,7 @@ public class TestDictionary extends LuceneTestCase {
     assertEquals(3, dictionary.lookupSuffix(new char[] {'e'}).length);
     assertEquals(1, dictionary.lookupPrefix(new char[] {'s'}).length);
     IntsRef ordList = dictionary.lookupWord(new char[] {'o', 'l', 'r'}, 0, 3);
-    BytesRef ref = new BytesRef();
-    char[] flags = dictionary.decodeFlags(ordList.ints[0], ref);
-    assertEquals(1, flags.length);
+    assertSingleFlag(dictionary, ordList);
   }
 
   public void testCompressedBeforeSetDictionary() throws Exception {
@@ -73,9 +74,7 @@ public class TestDictionary extends LuceneTestCase {
     assertEquals(3, dictionary.lookupSuffix(new char[] {'e'}).length);
     assertEquals(1, dictionary.lookupPrefix(new char[] {'s'}).length);
     IntsRef ordList = dictionary.lookupWord(new char[] {'o', 'l', 'r'}, 0, 3);
-    BytesRef ref = new BytesRef();
-    char[] flags = dictionary.decodeFlags(ordList.ints[0], ref);
-    assertEquals(1, flags.length);
+    assertSingleFlag(dictionary, ordList);
   }
 
   public void testCompressedEmptyAliasDictionary() throws Exception {
@@ -83,9 +82,7 @@ public class TestDictionary extends LuceneTestCase {
     assertEquals(3, dictionary.lookupSuffix(new char[] {'e'}).length);
     assertEquals(1, dictionary.lookupPrefix(new char[] {'s'}).length);
     IntsRef ordList = dictionary.lookupWord(new char[] {'o', 'l', 'r'}, 0, 3);
-    BytesRef ref = new BytesRef();
-    char[] flags = dictionary.decodeFlags(ordList.ints[0], ref);
-    assertEquals(1, flags.length);
+    assertSingleFlag(dictionary, ordList);
   }
 
   // malformed rule causes ParseException
@@ -111,7 +108,7 @@ public class TestDictionary extends LuceneTestCase {
   }
 
   public void testForgivableErrors() throws Exception {
-    Dictionary dictionary = loadDictionary("forgivable-errors.aff", "simple.dic");
+    Dictionary dictionary = loadDictionary("forgivable-errors.aff", "forgivable-errors.dic");
     assertEquals(1, dictionary.repTable.size());
     assertEquals(2, dictionary.compoundMax);
 
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java
index 58477d8..dc4b897 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDutchIJ.java
@@ -27,6 +27,5 @@ public class TestDutchIJ extends StemmerTestBase {
   public void testStemming() {
     assertStemsTo("ijs", "ijs");
     assertStemsTo("IJs", "ijs");
-    assertStemsTo("Ijs");
   }
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
index 048dc04..c7a6776 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
@@ -20,25 +20,38 @@ import java.io.IOException;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.text.ParseException;
 import java.util.Collection;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
+import org.junit.Assert;
 import org.junit.AssumptionViolatedException;
 import org.junit.Test;
+import org.junit.function.ThrowingRunnable;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 /**
- * Same as {@link SpellCheckerTest}, but checks all Hunspell's test data. The path to the checked
- * out Hunspell repository should be in {@code -Dhunspell.repo.path=...} system property.
+ * Same as {@link TestSpellChecking}, but checks all of Hunspell's test data. The path to the
+ * checked-out Hunspell repository should be in the {@code hunspell.repo.path} system property.
  */
 @RunWith(Parameterized.class)
 public class TestHunspellRepositoryTestCases {
+  private static final Set<String> EXPECTED_FAILURES =
+      Set.of(
+          "hu", // Hungarian is hard: a lot of its rules are hardcoded in Hunspell code, not aff/dic
+          "morph", // we don't do morphological analysis yet
+          "opentaal_keepcase", // Hunspell bug: https://github.com/hunspell/hunspell/issues/712
+          "forbiddenword", // needs https://github.com/hunspell/hunspell/pull/713 PR to be merged
+          "nepali", // not supported yet
+          "utf8_nonbmp", // code points not supported yet
+          "phone" // not supported yet, used only for suggestions in en_ZA
+          );
+  private final String testName;
   private final Path pathPrefix;
 
   public TestHunspellRepositoryTestCases(String testName, Path pathPrefix) {
+    this.testName = testName;
     this.pathPrefix = pathPrefix;
   }
 
@@ -64,7 +77,12 @@ public class TestHunspellRepositoryTestCases {
   }
 
   @Test
-  public void test() throws IOException, ParseException {
-    SpellCheckerTest.checkSpellCheckerExpectations(pathPrefix, false);
+  public void test() throws Throwable {
+    ThrowingRunnable test = () -> TestSpellChecking.checkSpellCheckerExpectations(pathPrefix);
+    if (EXPECTED_FAILURES.contains(testName)) {
+      Assert.assertThrows(Throwable.class, test);
+    } else {
+      test.run();
+    }
   }
 }
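
The EXPECTED_FAILURES handling above wraps a parameterized run in JUnit's Assert.assertThrows whenever the corresponding Hunspell feature is still unsupported, so the suite flags the entry the moment the underlying feature starts working. A tiny standalone sketch of that pattern follows; ExpectedFailureDemo and runPossiblyFailing are illustrative names, not part of the patch.

    import java.util.Set;
    import org.junit.Assert;
    import org.junit.function.ThrowingRunnable;

    class ExpectedFailureDemo {
      static void runPossiblyFailing(String testName, Set<String> expectedFailures, ThrowingRunnable body)
          throws Throwable {
        if (expectedFailures.contains(testName)) {
          // The test must currently fail; once it starts passing, assertThrows itself fails,
          // which is the reminder to drop the name from the expected-failures set.
          Assert.assertThrows(Throwable.class, body);
        } else {
          body.run();
        }
      }
    }
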
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
index 33da1ca..8ae5642 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
@@ -24,13 +24,15 @@ import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Consumer;
 import java.util.regex.Pattern;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Assume;
-import org.junit.Ignore;
+import org.junit.AssumptionViolatedException;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
@@ -40,8 +42,15 @@ import org.junit.Test;
  * en.txt}) in a directory specified in {@code -Dhunspell.corpora=...}
  */
 @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
-@Ignore("enable manually")
 public class TestPerformance extends LuceneTestCase {
+  private static Path corporaDir;
+
+  @BeforeClass
+  public static void resolveCorpora() {
+    String dir = System.getProperty("hunspell.corpora");
+    Assume.assumeFalse("Requires test word corpora at -Dhunspell.corpora=...", dir == null);
+    corporaDir = Paths.get(dir);
+  }
 
   @Test
   public void en() throws Exception {
@@ -50,23 +59,24 @@ public class TestPerformance extends LuceneTestCase {
 
   @Test
   public void de() throws Exception {
-    checkPerformance("de", 100_000);
+    checkPerformance("de", 200_000);
   }
 
   @Test
   public void fr() throws Exception {
-    checkPerformance("fr", 20_000);
+    checkPerformance("fr", 40_000);
   }
 
   private void checkPerformance(String code, int wordCount) throws Exception {
     Path aff = findAffFile(code);
+
     Dictionary dictionary = TestAllDictionaries.loadDictionary(aff);
     System.out.println("Loaded " + aff);
 
     List<String> words = loadWords(code, wordCount, dictionary);
 
     Stemmer stemmer = new Stemmer(dictionary);
-    SpellChecker speller = new SpellChecker(dictionary);
+    Hunspell speller = new Hunspell(dictionary);
     measure(
         "Stemming " + code,
         blackHole -> {
@@ -92,15 +102,17 @@ public class TestPerformance extends LuceneTestCase {
               return code.equals(Dictionary.extractLanguageCode(parentName));
             })
         .findFirst()
-        .orElseThrow(() -> new IllegalArgumentException("Cannot find aff/dic for " + code));
+        .orElseThrow(
+            () -> new AssumptionViolatedException("Ignored, cannot find aff/dic for: " + code));
   }
 
   private List<String> loadWords(String code, int wordCount, Dictionary dictionary)
       throws IOException {
-    String corpusDir = System.getProperty("hunspell.corpora");
-    Assume.assumeFalse("", corpusDir == null);
+    Path dataPath = corporaDir.resolve(code + ".txt");
+    if (!Files.isReadable(dataPath)) {
+      throw new AssumptionViolatedException("Missing text corpora at: " + dataPath);
+    }
 
-    Path dataPath = Path.of(corpusDir).resolve(code + ".txt");
     List<String> words = new ArrayList<>();
     try (InputStream stream = Files.newInputStream(dataPath)) {
       BufferedReader reader =
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestSpellChecking.java
similarity index 85%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestSpellChecking.java
index f4ca6b5..57adce6 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestSpellChecking.java
@@ -26,7 +26,7 @@ import java.util.stream.Collectors;
 import org.apache.lucene.store.ByteBuffersDirectory;
 import org.apache.lucene.util.IOUtils;
 
-public class SpellCheckerTest extends StemmerTestBase {
+public class TestSpellChecking extends StemmerTestBase {
 
   public void testBase() throws Exception {
     doTest("base");
@@ -132,6 +132,10 @@ public class SpellCheckerTest extends StemmerTestBase {
     doTest("checkcompoundrep");
   }
 
+  public void testDisallowCompoundsWhenDictionaryContainsSeparatedWordPair() throws Exception {
+    doTest("wordpair");
+  }
+
   public void testCompoundrule() throws Exception {
     doTest("compoundrule");
   }
@@ -168,10 +172,26 @@ public class SpellCheckerTest extends StemmerTestBase {
     doTest("onlyincompound2");
   }
 
+  public void testForbiddenWord() throws Exception {
+    doTest("forbiddenword");
+  }
+
+  public void testForbiddenWord1() throws Exception {
+    doTest("opentaal_forbiddenword1");
+  }
+
+  public void testForbiddenWord2() throws Exception {
+    doTest("opentaal_forbiddenword2");
+  }
+
   public void testGermanCompounding() throws Exception {
     doTest("germancompounding");
   }
 
+  public void testApplyOconvToSuggestions() throws Exception {
+    doTest("oconv");
+  }
+
   public void testModifyingSuggestions() throws Exception {
     doTest("sug");
   }
@@ -180,21 +200,32 @@ public class SpellCheckerTest extends StemmerTestBase {
     doTest("sug2");
   }
 
+  public void testMixedCaseSuggestionHeuristics() throws Exception {
+    doTest("i58202");
+  }
+
+  public void testMapSuggestions() throws Exception {
+    doTest("map");
+  }
+
+  public void testNoSuggest() throws Exception {
+    doTest("nosuggest");
+  }
+
   protected void doTest(String name) throws Exception {
     checkSpellCheckerExpectations(
-        Path.of(getClass().getResource(name + ".aff").toURI()).getParent().resolve(name), true);
+        Path.of(getClass().getResource(name + ".aff").toURI()).getParent().resolve(name));
   }
 
-  static void checkSpellCheckerExpectations(Path basePath, boolean checkSuggestions)
-      throws IOException, ParseException {
+  static void checkSpellCheckerExpectations(Path basePath) throws IOException, ParseException {
     InputStream affixStream = Files.newInputStream(Path.of(basePath.toString() + ".aff"));
     InputStream dictStream = Files.newInputStream(Path.of(basePath.toString() + ".dic"));
 
-    SpellChecker speller;
+    Hunspell speller;
     try {
       Dictionary dictionary =
           new Dictionary(new ByteBuffersDirectory(), "dictionary", affixStream, dictStream);
-      speller = new SpellChecker(dictionary);
+      speller = new Hunspell(dictionary);
     } finally {
       IOUtils.closeWhileHandlingException(affixStream);
       IOUtils.closeWhileHandlingException(dictStream);
@@ -214,7 +245,7 @@ public class SpellCheckerTest extends StemmerTestBase {
       for (String word : wrongWords) {
         assertFalse("Unexpectedly considered correct: " + word, speller.spell(word.trim()));
       }
-      if (Files.exists(sug) && checkSuggestions) {
+      if (Files.exists(sug)) {
         String suggestions =
             wrongWords.stream()
                 .map(s -> String.join(", ", speller.suggest(s)))
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.sug
new file mode 100644
index 0000000..d372ff2
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.sug
@@ -0,0 +1,3 @@
+OpenOffice.org
+UNICEF
+UNICEF's
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/base_utf.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/base_utf.sug
new file mode 100644
index 0000000..03a9c9d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/base_utf.sug
@@ -0,0 +1,13 @@
+looked, look
+text
+hello
+said
+rotten day, rotten-day, rotten
+tomorrow
+seven
+NASA
+horrifying
+speech
+suggest
+Imply
+IMPLY
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/checksharps.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/checksharps.sug
new file mode 100644
index 0000000..ab68568
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/checksharps.sug
@@ -0,0 +1 @@
+MÜSSIG, müßig
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.aff
new file mode 100644
index 0000000..de7f8ad
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.aff
@@ -0,0 +1,11 @@
+# FORBIDDENWORD flag
+# The flagged word and its suffixed forms are all forbidden,
+# except for root homonyms.
+# Useful for forbidding bad suffixed forms or compounds.
+
+
+FORBIDDENWORD X
+COMPOUNDFLAG Y
+
+SFX A Y 1
+SFX A 0 s .
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.dic
new file mode 100644
index 0000000..cb63592
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.dic
@@ -0,0 +1,11 @@
+10
+foo/S
+foo/YX
+bar/YS
+bars/X
+foos/X
+kg
+Kg/X
+KG/X
+cm
+Cm/X
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.good
new file mode 100644
index 0000000..7bd112e
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.good
@@ -0,0 +1,3 @@
+foo
+bar
+
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.wrong
new file mode 100644
index 0000000..5752c1e
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forbiddenword.wrong
@@ -0,0 +1,4 @@
+bars
+foos
+foobar
+barfoo
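
The forbiddenword.* fixtures above exercise the FORBIDDENWORD flag: bars/X and foos/X carry the forbidden flag in the .dic, so the speller must reject them even though the SFX rule could otherwise derive them, and the compounds foobar and barfoo are rejected as well, while the bare roots foo and bar stay correct. A minimal usage sketch against these files follows; it reuses the Dictionary and Hunspell constructors shown in TestSpellChecking, is written as if it lived in the same package as the tests, and the hard-coded resource path is an assumption for illustration only (the tests load these files from the classpath instead).

    package org.apache.lucene.analysis.hunspell;

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.lucene.store.ByteBuffersDirectory;

    class ForbiddenWordDemo {
      static void demo() throws Exception {
        // Illustrative path into the source tree.
        Path base = Path.of("lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell");
        try (InputStream aff = Files.newInputStream(base.resolve("forbiddenword.aff"));
            InputStream dic = Files.newInputStream(base.resolve("forbiddenword.dic"))) {
          Dictionary dictionary = new Dictionary(new ByteBuffersDirectory(), "dictionary", aff, dic);
          Hunspell speller = new Hunspell(dictionary);
          System.out.println(speller.spell("foo"));  // expected: true  (forbiddenword.good)
          System.out.println(speller.spell("foos")); // expected: false (forbiddenword.wrong)
        }
      }
    }
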
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forceucase.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forceucase.sug
new file mode 100644
index 0000000..6a77cbd
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forceucase.sug
@@ -0,0 +1,2 @@
+Foobaz
+Foobarbaz
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.aff
index 8d17b4e..b9b56cc 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.aff
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.aff
@@ -2,8 +2,14 @@ REP 1
 REP foo bar goo doo zoo
 
 COMPOUNDWORDMAX 2 y
+WORDCHARS 0123456789'.-’ ̃
+TRY a b c
 
 KEEPCASE Aa
 
+MAP 1
+MAP a b
+
 SFX A Y 1
-SFX A   nout        l          [aeiouyáéíóúýůěr][^aeiouyáéíóúýůěrl][^aeiouy
\ No newline at end of file
+SFX A   nout        l          [aeiouyáéíóúýůěr][^aeiouyáéíóúýůěrl][^aeiouy
+SFX A b c d
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.dic
new file mode 100644
index 0000000..51a4bfb
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/forgivable-errors.dic
@@ -0,0 +1,2 @@
+1
+ st:abc
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.aff
new file mode 100644
index 0000000..11249d4
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.aff
@@ -0,0 +1,4 @@
+# case suggestions
+MAXNGRAMSUGS 0
+# capitalise baz->Baz
+TRY B
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.dic
new file mode 100644
index 0000000..19e1980
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.dic
@@ -0,0 +1,5 @@
+4
+foo
+bar
+Baz
+Boo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.good
new file mode 100644
index 0000000..88a079a
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.good
@@ -0,0 +1,10 @@
+foo
+bar
+Foo
+Bar
+Baz
+Boo
+FOO
+BAR
+BAZ
+BOO
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.sug
new file mode 100644
index 0000000..bc784ac
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.sug
@@ -0,0 +1,13 @@
+foo, Boo
+Bar
+Baz
+Boo
+foo bar
+foo Bar
+Foo bar
+Foo Bar
+foo Baz
+Foo Baz
+Baz foo
+Baz Foo
+Baz Boo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.wrong
new file mode 100644
index 0000000..886584d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/i58202.wrong
@@ -0,0 +1,13 @@
+fOO
+BAr
+baz
+BOo
+foobar
+fooBar
+Foobar
+FooBar
+fooBaz
+FooBaz
+Bazfoo
+BazFoo
+BazBoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/keepcase.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/keepcase.sug
new file mode 100644
index 0000000..69e80dd
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/keepcase.sug
@@ -0,0 +1,8 @@
+foo
+foo
+Bar
+Bar, baz.
+baz.
+baz.
+Quux.
+Quux.
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.aff
new file mode 100644
index 0000000..3e78bab
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.aff
@@ -0,0 +1,9 @@
+# With MAP suggestion, Hunspell can add missing accents to a word.
+
+# switch off ngram suggestion for testing
+MAXNGRAMSUGS 0
+
+MAP 3
+MAP uúü
+MAP oóö
+MAP ß(ss)
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.dic
new file mode 100644
index 0000000..744394f
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.dic
@@ -0,0 +1,4 @@
+3
+Frühstück
+tükörfúró
+groß
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.sug
new file mode 100644
index 0000000..81d09e0
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.sug
@@ -0,0 +1,3 @@
+Frühstück
+tükörfúró
+groß
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.wrong
new file mode 100644
index 0000000..251c8a1
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/map.wrong
@@ -0,0 +1,3 @@
+Fruhstuck
+tukorfuro
+gross
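
The map.* fixtures check MAP-based suggestions: the MAP groups relate plain and accented characters (u/ú/ü, o/ó/ö, ß/(ss)), so each unaccented misspelling in map.wrong should yield the corresponding accented dictionary word from map.sug. A hedged sketch of asserting that behavior follows; it assumes the same package and constructors as the tests above, and suggest(...) returning a list of strings is inferred from the String.join call in TestSpellChecking.

    package org.apache.lucene.analysis.hunspell;

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import org.apache.lucene.store.ByteBuffersDirectory;

    class MapSuggestionDemo {
      static void demo(Path base) throws Exception {
        try (InputStream aff = Files.newInputStream(base.resolve("map.aff"));
            InputStream dic = Files.newInputStream(base.resolve("map.dic"))) {
          Hunspell speller =
              new Hunspell(new Dictionary(new ByteBuffersDirectory(), "dictionary", aff, dic));
          // "Fruhstuck" differs from the dictionary word only in MAP-related characters,
          // so the accented form is expected among the suggestions.
          List<String> suggestions = speller.suggest("Fruhstuck");
          if (!suggestions.contains("Frühstück")) {
            throw new AssertionError("Expected MAP-based suggestion, got: " + suggestions);
          }
        }
      }
    }
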
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.aff
new file mode 100644
index 0000000..c9361da
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.aff
@@ -0,0 +1,5 @@
+# don't suggest word with NOSUGGEST flag (for example vulgar or obscene words)
+# See OpenOffice.org Issue #55498
+# (nosuggest.sug is an empty file)
+NOSUGGEST A
+COMPOUNDFLAG B
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.dic
new file mode 100644
index 0000000..dc80c91
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.dic
@@ -0,0 +1,3 @@
+1
+foo/AB
+bar/B
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.good
new file mode 100644
index 0000000..ad91a5e
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.good
@@ -0,0 +1,3 @@
+foo
+foobar
+barfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.sug
new file mode 100644
index 0000000..e69de29
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.wrong
new file mode 100644
index 0000000..89c7a1a
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/nosuggest.wrong
@@ -0,0 +1,3 @@
+foox
+foobarx
+barfoox
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.aff
new file mode 100644
index 0000000..0059a2d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.aff
@@ -0,0 +1,20 @@
+# output conversion 
+SET UTF-8
+
+# Testing also whitespace and comments.
+OCONV 7 # space, space
+OCONV	a A # tab, space, space
+OCONV	á	Á # tab, tab, space
+OCONV	b	B	# tab, tab, tab
+OCONV  c  C		# 2xspace, 2xspace, 2xtab
+OCONV	 d 	D # tab+space, space+tab, space
+OCONV e E #
+OCONV é É 	
+# Only comment. Note that line above ends with space+tab.
+
+ # space
+  # 2xspace
+	# tab
+		# 2xtab
+ 	# space+tab
+	 # tab+space
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.dic
new file mode 100644
index 0000000..359186c
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.dic
@@ -0,0 +1,4 @@
+3
+bébé
+dádá
+aábcdeé
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.good
new file mode 100644
index 0000000..6cdaab1
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.good
@@ -0,0 +1,2 @@
+bébé
+dádá
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.sug
new file mode 100644
index 0000000..a191c62
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.sug
@@ -0,0 +1,3 @@
+BÉBÉ
+DÁDÁ
+AÁBCDEÉ
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.wrong
new file mode 100644
index 0000000..73dcc89
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/oconv.wrong
@@ -0,0 +1,3 @@
+béb
+dád
+aábcde
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.aff
new file mode 100644
index 0000000..fa07343
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.aff
@@ -0,0 +1,9 @@
+TRY r
+
+FORBIDDENWORD F
+COMPOUNDRULE 2
+COMPOUNDRULE WW
+COMPOUNDRULE WWW
+
+SFX S Y 1
+SFX S 0 s .
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.dic
new file mode 100644
index 0000000..4437594
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.dic
@@ -0,0 +1,5 @@
+4
+foo/W
+word/W
+bar/WS
+foowordbar/FS
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.good
new file mode 100644
index 0000000..73a96a7
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.good
@@ -0,0 +1,3 @@
+fooword
+wordbar
+barwordfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.sug
new file mode 100644
index 0000000..60111a4
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.sug
@@ -0,0 +1 @@
+barwordfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.wrong
new file mode 100644
index 0000000..59dfddf
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword1.wrong
@@ -0,0 +1,5 @@
+foowordbar
+foowordbars
+foowordba
+foowordbas
+barwodfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.aff
new file mode 100644
index 0000000..441354d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.aff
@@ -0,0 +1,7 @@
+TRY r
+
+FORBIDDENWORD F
+COMPOUNDFLAG W
+
+SFX S Y 1
+SFX S 0 s .
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.dic
new file mode 100644
index 0000000..895dd62
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.dic
@@ -0,0 +1,5 @@
+3
+foo/WS
+word/W
+bar/WS
+foowordbar/FS
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.good
new file mode 100644
index 0000000..17cf47d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.good
@@ -0,0 +1,4 @@
+fooword
+wordbar
+barwordfoo
+barwordfoos
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.sug b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.sug
new file mode 100644
index 0000000..60111a4
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.sug
@@ -0,0 +1 @@
+barwordfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.wrong
new file mode 100644
index 0000000..59dfddf
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/opentaal_forbiddenword2.wrong
@@ -0,0 +1,5 @@
+foowordbar
+foowordbars
+foowordba
+foowordbas
+barwodfoo
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.aff
new file mode 100644
index 0000000..e788b17
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.aff
@@ -0,0 +1,4 @@
+# a dictionary word pair separated by a space
+# prevents the same words from being recognized without the space
+# during compound word analysis
+COMPOUNDFLAG Y
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.dic
new file mode 100644
index 0000000..96fc77f
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.dic
@@ -0,0 +1,4 @@
+3
+word/Y
+compound/Y
+compound word
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.good
new file mode 100644
index 0000000..d868fce
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.good
@@ -0,0 +1,3 @@
+word
+compound
+wordcompound
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.wrong
new file mode 100644
index 0000000..04ca38b
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/wordpair.wrong
@@ -0,0 +1 @@
+compoundword
diff --git a/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java b/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java
index cea0dfc..dd260fb 100644
--- a/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java
+++ b/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java
@@ -17,8 +17,6 @@
 package org.apache.lucene.analysis.standard;
 
 import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
@@ -26,6 +24,9 @@ import java.io.Writer;
 import java.net.URL;
 import java.net.URLConnection;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.text.DateFormat;
 import java.util.ArrayList;
 import java.util.Comparator;
@@ -102,7 +103,7 @@ public class GenerateJflexTLDMacros {
       Pattern.compile("([-A-Za-z0-9]+)\\.\\s+\\d+\\s+IN\\s+NS\\s+.*");
   private final URL tldFileURL;
   private long tldFileLastModified = -1L;
-  private final File outputFile;
+  private final Path outputFile;
   private final SortedMap<String, Boolean> processedTLDsLongestFirst =
       new TreeMap<>(
           Comparator.comparing(String::length).reversed().thenComparing(String::compareTo));
@@ -111,7 +112,7 @@ public class GenerateJflexTLDMacros {
 
   public GenerateJflexTLDMacros(String tldFileURL, String outputFile) throws Exception {
     this.tldFileURL = new URL(tldFileURL);
-    this.outputFile = new File(outputFile);
+    this.outputFile = Paths.get(outputFile);
   }
 
   /**
@@ -130,9 +131,10 @@ public class GenerateJflexTLDMacros {
     for (int suffixLength = 0; suffixLength < TLDsBySuffixLength.size(); ++suffixLength) {
       int domainsAtThisSuffixLength = TLDsBySuffixLength.get(suffixLength).size();
       totalDomains += domainsAtThisSuffixLength;
-      System.out.printf("%30s: %4d TLDs%n", getMacroName(suffixLength), domainsAtThisSuffixLength);
+      System.out.printf(
+          Locale.ROOT, "%30s: %4d TLDs%n", getMacroName(suffixLength), domainsAtThisSuffixLength);
     }
-    System.out.printf("%30s: %4d TLDs%n", "Total", totalDomains);
+    System.out.printf(Locale.ROOT, "%30s: %4d TLDs%n", "Total", totalDomains);
   }
 
   /**
@@ -216,7 +218,7 @@ public class GenerateJflexTLDMacros {
         DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL, Locale.ROOT);
     dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
     try (Writer writer =
-        new OutputStreamWriter(new FileOutputStream(outputFile), StandardCharsets.UTF_8)) {
+        new OutputStreamWriter(Files.newOutputStream(outputFile), StandardCharsets.UTF_8)) {
       writer.write(APACHE_LICENSE);
       writer.write("// Generated from IANA Root Zone Database <");
       writer.write(tldFileURL.toString());
diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
index 18818a3..bb7d8a3 100644
--- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
+++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
@@ -21,10 +21,6 @@ import com.ibm.icu.lang.UProperty;
 import com.ibm.icu.text.UnicodeSet;
 import com.ibm.icu.text.UnicodeSetIterator;
 import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileFilter;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -34,11 +30,16 @@ import java.io.Writer;
 import java.net.URL;
 import java.net.URLConnection;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
+import java.util.function.Predicate;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
 /**
  * Downloads/generates lucene/analysis/icu/src/data/utr30/*.txt
@@ -82,33 +83,33 @@ public class GenerateUTR30DataFiles {
   }
 
   private static void expandRulesInUTR30DataFiles() throws IOException {
-    FileFilter filter =
-        new FileFilter() {
-          @Override
-          public boolean accept(File pathname) {
-            String name = pathname.getName();
-            return pathname.isFile()
-                && name.matches(".*\\.(?s:txt)")
-                && !name.equals(NFC_TXT)
-                && !name.equals(NFKC_TXT)
-                && !name.equals(NFKC_CF_TXT);
-          }
+    Predicate<Path> predicate =
+        (path) -> {
+          String name = path.getFileName().toString();
+          return Files.isRegularFile(path)
+              && name.matches(".*\\.(?s:txt)")
+              && !name.equals(NFC_TXT)
+              && !name.equals(NFKC_TXT)
+              && !name.equals(NFKC_CF_TXT);
         };
-    for (File file : new File(".").listFiles(filter)) {
-      expandDataFileRules(file);
+    try (var stream = Files.list(Paths.get(".")).filter(predicate)) {
+      for (Path file : stream.collect(Collectors.toList())) {
+        expandDataFileRules(file);
+      }
     }
   }
 
-  private static void expandDataFileRules(File file) throws IOException {
-    final FileInputStream stream = new FileInputStream(file);
-    final InputStreamReader reader = new InputStreamReader(stream, StandardCharsets.UTF_8);
-    final BufferedReader bufferedReader = new BufferedReader(reader);
-    StringBuilder builder = new StringBuilder();
-    String line;
-    boolean verbatim = false;
+  private static void expandDataFileRules(Path file) throws IOException {
     boolean modified = false;
-    int lineNum = 0;
-    try {
+    StringBuilder builder = new StringBuilder();
+
+    try (InputStream stream = Files.newInputStream(file);
+        InputStreamReader reader = new InputStreamReader(stream, StandardCharsets.UTF_8);
+        BufferedReader bufferedReader = new BufferedReader(reader)) {
+      String line;
+      boolean verbatim = false;
+      int lineNum = 0;
+
       while (null != (line = bufferedReader.readLine())) {
         ++lineNum;
         if (VERBATIM_RULE_LINE_PATTERN.matcher(line).matches()) {
@@ -124,7 +125,7 @@ public class GenerateUTR30DataFiles {
               String rightHandSide = ruleMatcher.group(2).trim();
               expandSingleRule(builder, leftHandSide, rightHandSide);
             } catch (IllegalArgumentException e) {
-              System.err.println("ERROR in " + file.getName() + " line #" + lineNum + ":");
+              System.err.println("ERROR in " + file.getFileName() + " line #" + lineNum + ":");
               e.printStackTrace(System.err);
               System.exit(1);
             }
@@ -142,18 +143,11 @@ public class GenerateUTR30DataFiles {
           }
         }
       }
-    } finally {
-      bufferedReader.close();
     }
+
     if (modified) {
-      System.err.println("Expanding rules in and overwriting " + file.getName());
-      final FileOutputStream out = new FileOutputStream(file, false);
-      Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8);
-      try {
-        writer.write(builder.toString());
-      } finally {
-        writer.close();
-      }
+      System.err.println("Expanding rules in and overwriting " + file.getFileName());
+      Files.writeString(file, builder.toString(), StandardCharsets.UTF_8);
     }
   }
 
@@ -171,11 +165,12 @@ public class GenerateUTR30DataFiles {
 
     System.err.print("Downloading " + NFKC_CF_TXT + " and making diacritic rules one-way ... ");
     URLConnection connection = openConnection(new URL(norm2url, NFC_TXT));
-    BufferedReader reader =
-        new BufferedReader(
-            new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8));
-    Writer writer = new OutputStreamWriter(new FileOutputStream(NFC_TXT), StandardCharsets.UTF_8);
-    try {
+    try (BufferedReader reader =
+            new BufferedReader(
+                new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8));
+        Writer writer =
+            new OutputStreamWriter(
+                Files.newOutputStream(Path.of(NFC_TXT)), StandardCharsets.UTF_8)) {
       String line;
 
       while (null != (line = reader.readLine())) {
@@ -208,9 +203,6 @@ public class GenerateUTR30DataFiles {
         writer.write(line);
         writer.write("\n");
       }
-    } finally {
-      reader.close();
-      writer.close();
     }
     System.err.println("done.");
   }
@@ -218,7 +210,7 @@ public class GenerateUTR30DataFiles {
   private static void download(URL url, String outputFile) throws IOException {
     final URLConnection connection = openConnection(url);
     final InputStream inputStream = connection.getInputStream();
-    final OutputStream outputStream = new FileOutputStream(outputFile);
+    final OutputStream outputStream = Files.newOutputStream(Path.of(outputFile));
     int numBytes;
     try {
       while (-1 != (numBytes = inputStream.read(DOWNLOAD_BUFFER))) {
diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
index a210244..7797ae8 100644
--- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
+++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
@@ -18,14 +18,16 @@ package org.apache.lucene.analysis.icu;
 
 import com.ibm.icu.text.RuleBasedBreakIterator;
 import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.io.OutputStream;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+import java.util.stream.Collectors;
 
 /**
  * Command-line utility to converts RuleBasedBreakIterator (.rbbi) files into binary compiled form
@@ -33,9 +35,9 @@ import java.nio.charset.StandardCharsets;
  */
 public class RBBIRuleCompiler {
 
-  static String getRules(File ruleFile) throws IOException {
+  static String getRules(Path ruleFile) throws IOException {
     StringBuilder rules = new StringBuilder();
-    InputStream in = new FileInputStream(ruleFile);
+    InputStream in = Files.newInputStream(ruleFile);
     BufferedReader cin = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
     String line = null;
     while ((line = cin.readLine()) != null) {
@@ -49,20 +51,21 @@ public class RBBIRuleCompiler {
     return rules.toString();
   }
 
-  static void compile(File srcDir, File destDir) throws Exception {
-    File files[] =
-        srcDir.listFiles(
-            new FilenameFilter() {
-              public boolean accept(File dir, String name) {
-                return name.endsWith("rbbi");
-              }
-            });
-    if (files == null) throw new IOException("Path does not exist: " + srcDir);
-    for (int i = 0; i < files.length; i++) {
-      File file = files[i];
-      File outputFile = new File(destDir, file.getName().replaceAll("rbbi$", "brk"));
+  static void compile(Path srcDir, Path destDir) throws Exception {
+    List<Path> files;
+    try (var stream = Files.list(srcDir)) {
+      files =
+          stream
+              .filter(name -> name.getFileName().toString().endsWith("rbbi"))
+              .collect(Collectors.toList());
+    }
+
+    if (files.isEmpty()) throw new IOException("No input files matching *.rbbi at: " + srcDir);
+    for (Path file : files) {
+      Path outputFile = destDir.resolve(file.getFileName().toString().replaceAll("rbbi$", "brk"));
       String rules = getRules(file);
-      System.err.print("Compiling " + file.getName() + " to " + outputFile.getName() + ": ");
+      System.err.print(
+          "Compiling " + file.getFileName() + " to " + outputFile.getFileName() + ": ");
       /*
        * if there is a syntax error, compileRules() may succeed. the way to
        * check is to try to instantiate from the string. additionally if the
@@ -78,10 +81,10 @@ public class RBBIRuleCompiler {
         System.err.println(e.getMessage());
         System.exit(1);
       }
-      FileOutputStream os = new FileOutputStream(outputFile);
-      RuleBasedBreakIterator.compileRules(rules, os);
-      os.close();
-      System.err.println(outputFile.length() + " bytes.");
+      try (OutputStream os = Files.newOutputStream(outputFile)) {
+        RuleBasedBreakIterator.compileRules(rules, os);
+      }
+      System.err.println(Files.size(outputFile) + " bytes.");
     }
   }
 
@@ -90,7 +93,7 @@ public class RBBIRuleCompiler {
       System.err.println("Usage: RBBIRuleComputer <sourcedir> <destdir>");
       System.exit(1);
     }
-    compile(new File(args[0]), new File(args[1]));
+    compile(Paths.get(args[0]), Paths.get(args[1]));
     System.exit(0);
   }
 }
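
The three tool changes above (GenerateJflexTLDMacros, GenerateUTR30DataFiles, RBBIRuleCompiler) follow one migration pattern: java.io.File, FileInputStream and FileOutputStream are replaced by java.nio.file.Path plus the Files factory methods, with every stream opened in try-with-resources. A condensed sketch of the resulting idiom follows; it uses only plain JDK APIs, and the class name and file-extension choices are placeholders rather than anything from the patch.

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.stream.Collectors;

    class NioMigrationSketch {
      // Old: new File(dir).listFiles(filter)  ->  New: Files.list(dir) inside try-with-resources,
      // because the returned stream holds an open directory handle until it is closed.
      static List<Path> listRuleFiles(Path dir) throws IOException {
        try (var stream = Files.list(dir)) {
          return stream
              .filter(p -> p.getFileName().toString().endsWith(".rbbi"))
              .collect(Collectors.toList());
        }
      }

      // Old: new FileOutputStream(file)  ->  New: Files.newOutputStream(path) in try-with-resources.
      static void writeCompiled(Path outputFile, byte[] data) throws IOException {
        try (OutputStream os = Files.newOutputStream(outputFile)) {
          os.write(data);
        }
      }
    }
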
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundFormat.java
similarity index 59%
copy from lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
copy to lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundFormat.java
index 7c8ae37..75d6912 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundFormat.java
@@ -14,19 +14,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.backward_codecs.lucene50;
 
 import java.io.IOException;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.CompoundDirectory;
 import org.apache.lucene.codecs.CompoundFormat;
-import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.store.ChecksumIndexInput;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexOutput;
 
 /**
  * Lucene 5.0 compound file format
@@ -86,55 +83,6 @@ public final class Lucene50CompoundFormat extends CompoundFormat {
 
   @Override
   public void write(Directory dir, SegmentInfo si, IOContext context) throws IOException {
-    String dataFile = IndexFileNames.segmentFileName(si.name, "", DATA_EXTENSION);
-    String entriesFile = IndexFileNames.segmentFileName(si.name, "", ENTRIES_EXTENSION);
-
-    try (IndexOutput data = dir.createOutput(dataFile, context);
-        IndexOutput entries = dir.createOutput(entriesFile, context)) {
-      CodecUtil.writeIndexHeader(data, DATA_CODEC, VERSION_CURRENT, si.getId(), "");
-      CodecUtil.writeIndexHeader(entries, ENTRY_CODEC, VERSION_CURRENT, si.getId(), "");
-
-      writeCompoundFile(entries, data, dir, si);
-
-      CodecUtil.writeFooter(data);
-      CodecUtil.writeFooter(entries);
-    }
-  }
-
-  private void writeCompoundFile(
-      IndexOutput entries, IndexOutput data, Directory dir, SegmentInfo si) throws IOException {
-    // write number of files
-    entries.writeVInt(si.files().size());
-    for (String file : si.files()) {
-      // write bytes for file
-      long startOffset = data.getFilePointer();
-      try (ChecksumIndexInput in = dir.openChecksumInput(file, IOContext.READONCE)) {
-
-        // just copies the index header, verifying that its id matches what we expect
-        CodecUtil.verifyAndCopyIndexHeader(in, data, si.getId());
-
-        // copy all bytes except the footer
-        long numBytesToCopy = in.length() - CodecUtil.footerLength() - in.getFilePointer();
-        data.copyBytes(in, numBytesToCopy);
-
-        // verify footer (checksum) matches for the incoming file we are copying
-        long checksum = CodecUtil.checkFooter(in);
-
-        // this is poached from CodecUtil.writeFooter, but we need to use our own checksum, not
-        // data.getChecksum(), but I think
-        // adding a public method to CodecUtil to do that is somewhat dangerous:
-        data.writeInt(CodecUtil.FOOTER_MAGIC);
-        data.writeInt(0);
-        data.writeLong(checksum);
-      }
-      long endOffset = data.getFilePointer();
-
-      long length = endOffset - startOffset;
-
-      // write entry for file
-      entries.writeString(IndexFileNames.stripSegmentName(file));
-      entries.writeLong(startOffset);
-      entries.writeLong(length);
-    }
+    throw new UnsupportedOperationException("Old formats can't be used for writing");
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundReader.java
similarity index 99%
copy from lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
copy to lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundReader.java
index 4c8eb84..9ff5161 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50CompoundReader.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.backward_codecs.lucene50;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
index e34502e..e6e1d9e 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
@@ -16,6 +16,7 @@
  */
 package org.apache.lucene.backward_codecs.lucene70;
 
+import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
@@ -34,7 +35,6 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
@@ -112,7 +112,7 @@ public class Lucene70Codec extends Codec {
   }
 
   @Override
-  public final CompoundFormat compoundFormat() {
+  public CompoundFormat compoundFormat() {
     return compoundFormat;
   }
 
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
index f39ffa7..92b6a21 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
@@ -16,6 +16,7 @@
  */
 package org.apache.lucene.backward_codecs.lucene80;
 
+import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
@@ -33,7 +34,6 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
index 0b3ffb7..c476e9f 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
@@ -17,6 +17,7 @@
 package org.apache.lucene.backward_codecs.lucene84;
 
 import java.util.Objects;
+import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
@@ -36,7 +37,6 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
@@ -125,7 +125,7 @@ public class Lucene84Codec extends Codec {
   }
 
   @Override
-  public final CompoundFormat compoundFormat() {
+  public CompoundFormat compoundFormat() {
     return compoundFormat;
   }
 
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
index db02573..b8659f7 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
@@ -18,6 +18,7 @@
 package org.apache.lucene.backward_codecs.lucene86;
 
 import java.util.Objects;
+import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
@@ -34,7 +35,6 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
@@ -126,7 +126,7 @@ public class Lucene86Codec extends Codec {
   }
 
   @Override
-  public final CompoundFormat compoundFormat() {
+  public CompoundFormat compoundFormat() {
     return compoundFormat;
   }
 
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
index 8543de6..52bc76c 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
@@ -18,6 +18,7 @@
 package org.apache.lucene.backward_codecs.lucene87;
 
 import java.util.Objects;
+import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.Codec;
@@ -33,7 +34,6 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
@@ -138,7 +138,7 @@ public class Lucene87Codec extends Codec {
   }
 
   @Override
-  public final CompoundFormat compoundFormat() {
+  public CompoundFormat compoundFormat() {
     return compoundFormat;
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/Lucene87/Lucene87RWCodec.java
similarity index 65%
copy from lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
copy to lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/Lucene87/Lucene87RWCodec.java
index 15fdf17..6467bc7 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/Lucene87/Lucene87RWCodec.java
@@ -14,17 +14,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.backward_codecs.Lucene87;
 
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.index.BaseCompoundFormatTestCase;
-import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.backward_codecs.lucene50.Lucene50RWCompoundFormat;
+import org.apache.lucene.backward_codecs.lucene87.Lucene87Codec;
+import org.apache.lucene.codecs.CompoundFormat;
 
-public class TestLucene50CompoundFormat extends BaseCompoundFormatTestCase {
-  private final Codec codec = TestUtil.getDefaultCodec();
+/** RW impersonation of {@link Lucene87Codec}. */
+public class Lucene87RWCodec extends Lucene87Codec {
 
   @Override
-  protected Codec getCodec() {
-    return codec;
+  public final CompoundFormat compoundFormat() {
+    return new Lucene50RWCompoundFormat();
   }
 }
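
The RW codec above exists so back-compat tests can still write segments in the 8.7 file layout now that the production codec is read-only. A minimal usage sketch (not part of this commit; the helper class name and the default-analyzer config are assumptions) would install it on an IndexWriterConfig:

    import org.apache.lucene.backward_codecs.Lucene87.Lucene87RWCodec;
    import org.apache.lucene.index.IndexWriterConfig;

    // Sketch only: produce a writer config that writes segments with the 8.7 impersonation codec.
    class Lucene87RWCodecUsageSketch {
      static IndexWriterConfig oldFormatConfig() {
        // IndexWriterConfig's no-arg constructor uses the default analyzer; any analyzer works here.
        return new IndexWriterConfig().setCodec(new Lucene87RWCodec());
      }
    }
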
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50RWCompoundFormat.java
similarity index 97%
copy from lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
copy to lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50RWCompoundFormat.java
index 7c8ae37..7e9b2a6 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50RWCompoundFormat.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.backward_codecs.lucene50;
 
 import java.io.IOException;
 import org.apache.lucene.codecs.CodecUtil;
@@ -63,7 +63,7 @@ import org.apache.lucene.store.IndexOutput;
  *       files length, and a String with that file's name.
  * </ul>
  */
-public final class Lucene50CompoundFormat extends CompoundFormat {
+public final class Lucene50RWCompoundFormat extends CompoundFormat {
 
   /** Extension of compound file */
   static final String DATA_EXTENSION = "cfs";
@@ -76,7 +76,7 @@ public final class Lucene50CompoundFormat extends CompoundFormat {
   static final int VERSION_CURRENT = VERSION_START;
 
   /** Sole constructor. */
-  public Lucene50CompoundFormat() {}
+  public Lucene50RWCompoundFormat() {}
 
   @Override
   public CompoundDirectory getCompoundReader(Directory dir, SegmentInfo si, IOContext context)
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50CompoundFormat.java
similarity index 86%
copy from lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
copy to lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50CompoundFormat.java
index 15fdf17..28624bf 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/TestLucene50CompoundFormat.java
@@ -14,17 +14,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.backward_codecs.lucene50;
 
+import org.apache.lucene.backward_codecs.Lucene87.Lucene87RWCodec;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.index.BaseCompoundFormatTestCase;
-import org.apache.lucene.util.TestUtil;
 
 public class TestLucene50CompoundFormat extends BaseCompoundFormatTestCase {
-  private final Codec codec = TestUtil.getDefaultCodec();
+  ;
 
   @Override
   protected Codec getCodec() {
-    return codec;
+    return new Lucene87RWCodec();
   }
 }
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/Lucene70RWCodec.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/Lucene70RWCodec.java
index 16041ae9..7b44821 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/Lucene70RWCodec.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene70/Lucene70RWCodec.java
@@ -16,8 +16,10 @@
  */
 package org.apache.lucene.backward_codecs.lucene70;
 
+import org.apache.lucene.backward_codecs.lucene50.Lucene50RWCompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50RWPostingsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50RWStoredFieldsFormat;
+import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.NormsFormat;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.SegmentInfoFormat;
@@ -58,4 +60,9 @@ public final class Lucene70RWCodec extends Lucene70Codec {
   public PostingsFormat postingsFormat() {
     return postingsFormat;
   }
+
+  @Override
+  public CompoundFormat compoundFormat() {
+    return new Lucene50RWCompoundFormat();
+  }
 }
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene84/Lucene84RWCodec.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene84/Lucene84RWCodec.java
index dd08c5d..05736d9 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene84/Lucene84RWCodec.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene84/Lucene84RWCodec.java
@@ -16,9 +16,11 @@
  */
 package org.apache.lucene.backward_codecs.lucene84;
 
+import org.apache.lucene.backward_codecs.lucene50.Lucene50RWCompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50RWStoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60RWPointsFormat;
 import org.apache.lucene.backward_codecs.lucene70.Lucene70RWSegmentInfoFormat;
+import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.PointsFormat;
 import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
@@ -40,4 +42,9 @@ public class Lucene84RWCodec extends Lucene84Codec {
   public StoredFieldsFormat storedFieldsFormat() {
     return new Lucene50RWStoredFieldsFormat();
   }
+
+  @Override
+  public final CompoundFormat compoundFormat() {
+    return new Lucene50RWCompoundFormat();
+  }
 }
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene86/Lucene86RWCodec.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene86/Lucene86RWCodec.java
index d9d3a49..c1d278f 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene86/Lucene86RWCodec.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene86/Lucene86RWCodec.java
@@ -16,8 +16,10 @@
  */
 package org.apache.lucene.backward_codecs.lucene86;
 
+import org.apache.lucene.backward_codecs.lucene50.Lucene50RWCompoundFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50RWStoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
+import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 
 /** RW impersonation of {@link Lucene86Codec}. */
@@ -39,4 +41,9 @@ public class Lucene86RWCodec extends Lucene86Codec {
   public StoredFieldsFormat storedFieldsFormat() {
     return storedFieldsFormat;
   }
+
+  @Override
+  public final CompoundFormat compoundFormat() {
+    return new Lucene50RWCompoundFormat();
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsFormat.java
index 17594c0..0714840 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsFormat.java
@@ -35,9 +35,12 @@ import org.apache.lucene.index.SegmentWriteState;
  *   <li>A .kdm file that records metadata about the fields, such as numbers of dimensions or
  *       numbers of bytes per dimension.
  *   <li>A .kdi file that stores inner nodes of the tree.
- *   <li>A .kdm file that stores leaf nodes, where most of the data lives.
+ *   <li>A .kdd file that stores leaf nodes, where most of the data lives.
  * </ul>
  *
+ * See <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=173081898">this
+ * wiki</a> for detailed data structures of the three files.
+ *
  * @lucene.experimental
  */
 public final class Lucene86PointsFormat extends PointsFormat {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90Codec.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90Codec.java
index 6250592..3f84280 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90Codec.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90Codec.java
@@ -30,7 +30,6 @@ import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.VectorFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
@@ -73,7 +72,7 @@ public class Lucene90Codec extends Codec {
   private final FieldInfosFormat fieldInfosFormat = new Lucene90FieldInfosFormat();
   private final SegmentInfoFormat segmentInfosFormat = new Lucene86SegmentInfoFormat();
   private final LiveDocsFormat liveDocsFormat = new Lucene90LiveDocsFormat();
-  private final CompoundFormat compoundFormat = new Lucene50CompoundFormat();
+  private final CompoundFormat compoundFormat = new Lucene90CompoundFormat();
   private final PostingsFormat defaultFormat;
 
   private final PostingsFormat postingsFormat =
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundFormat.java
similarity index 93%
rename from lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
rename to lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundFormat.java
index 7c8ae37..d06802c 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundFormat.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.codecs.lucene90;
 
 import java.io.IOException;
 import org.apache.lucene.codecs.CodecUtil;
@@ -29,7 +29,7 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
 
 /**
- * Lucene 5.0 compound file format
+ * Lucene 9.0 compound file format
  *
  * <p>Files:
  *
@@ -63,25 +63,25 @@ import org.apache.lucene.store.IndexOutput;
  *       files length, and a String with that file's name.
  * </ul>
  */
-public final class Lucene50CompoundFormat extends CompoundFormat {
+public final class Lucene90CompoundFormat extends CompoundFormat {
 
   /** Extension of compound file */
   static final String DATA_EXTENSION = "cfs";
   /** Extension of compound file entries */
   static final String ENTRIES_EXTENSION = "cfe";
 
-  static final String DATA_CODEC = "Lucene50CompoundData";
-  static final String ENTRY_CODEC = "Lucene50CompoundEntries";
+  static final String DATA_CODEC = "Lucene90CompoundData";
+  static final String ENTRY_CODEC = "Lucene90CompoundEntries";
   static final int VERSION_START = 0;
   static final int VERSION_CURRENT = VERSION_START;
 
   /** Sole constructor. */
-  public Lucene50CompoundFormat() {}
+  public Lucene90CompoundFormat() {}
 
   @Override
   public CompoundDirectory getCompoundReader(Directory dir, SegmentInfo si, IOContext context)
       throws IOException {
-    return new Lucene50CompoundReader(dir, si, context);
+    return new Lucene90CompoundReader(dir, si, context);
   }
 
   @Override
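
As a rough illustration of what the renamed format exposes, the sketch below (not from this commit; the helper class is hypothetical and the SegmentInfo is assumed to come from the caller) opens the compound reader and lists the sub-files recorded in the .cfe entries table:

    import java.io.IOException;
    import org.apache.lucene.codecs.CompoundDirectory;
    import org.apache.lucene.codecs.CompoundFormat;
    import org.apache.lucene.codecs.lucene90.Lucene90CompoundFormat;
    import org.apache.lucene.index.SegmentInfo;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;

    // Sketch only: list the files packed into a segment's .cfs via the renamed format.
    class CompoundListingSketch {
      static void listCompoundFiles(Directory dir, SegmentInfo si) throws IOException {
        CompoundFormat format = new Lucene90CompoundFormat();
        try (CompoundDirectory cfs = format.getCompoundReader(dir, si, IOContext.READ)) {
          for (String file : cfs.listAll()) {
            System.out.println(file + " -> " + cfs.fileLength(file) + " bytes");
          }
        }
      }
    }
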
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundReader.java
similarity index 91%
rename from lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
rename to lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundReader.java
index 4c8eb84..cbf1e0d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50CompoundReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90CompoundReader.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.codecs.lucene90;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -39,7 +39,7 @@ import org.apache.lucene.util.IOUtils;
  *
  * @lucene.experimental
  */
-final class Lucene50CompoundReader extends CompoundDirectory {
+final class Lucene90CompoundReader extends CompoundDirectory {
 
   /** Offset/Length for a slice inside of a compound file */
   public static final class FileEntry {
@@ -56,18 +56,18 @@ final class Lucene50CompoundReader extends CompoundDirectory {
   /** Create a new CompoundFileDirectory. */
   // TODO: we should just pre-strip "entries" and append segment name up-front like simpletext?
   // this need not be a "general purpose" directory anymore (it only writes index files)
-  public Lucene50CompoundReader(Directory directory, SegmentInfo si, IOContext context)
+  public Lucene90CompoundReader(Directory directory, SegmentInfo si, IOContext context)
       throws IOException {
     this.directory = directory;
     this.segmentName = si.name;
     String dataFileName =
-        IndexFileNames.segmentFileName(segmentName, "", Lucene50CompoundFormat.DATA_EXTENSION);
+        IndexFileNames.segmentFileName(segmentName, "", Lucene90CompoundFormat.DATA_EXTENSION);
     String entriesFileName =
-        IndexFileNames.segmentFileName(segmentName, "", Lucene50CompoundFormat.ENTRIES_EXTENSION);
+        IndexFileNames.segmentFileName(segmentName, "", Lucene90CompoundFormat.ENTRIES_EXTENSION);
     this.entries = readEntries(si.getId(), directory, entriesFileName);
     boolean success = false;
 
-    long expectedLength = CodecUtil.indexHeaderLength(Lucene50CompoundFormat.DATA_CODEC, "");
+    long expectedLength = CodecUtil.indexHeaderLength(Lucene90CompoundFormat.DATA_CODEC, "");
     for (Map.Entry<String, FileEntry> ent : entries.entrySet()) {
       expectedLength += ent.getValue().length;
     }
@@ -76,7 +76,7 @@ final class Lucene50CompoundReader extends CompoundDirectory {
     handle = directory.openInput(dataFileName, context);
     try {
       CodecUtil.checkIndexHeader(
-          handle, Lucene50CompoundFormat.DATA_CODEC, version, version, si.getId(), "");
+          handle, Lucene90CompoundFormat.DATA_CODEC, version, version, si.getId(), "");
 
       // NOTE: data file is too costly to verify checksum against all the bytes on open,
       // but for now we at least verify proper structure of the checksum footer: which looks
@@ -111,9 +111,9 @@ final class Lucene50CompoundReader extends CompoundDirectory {
         version =
             CodecUtil.checkIndexHeader(
                 entriesStream,
-                Lucene50CompoundFormat.ENTRY_CODEC,
-                Lucene50CompoundFormat.VERSION_START,
-                Lucene50CompoundFormat.VERSION_CURRENT,
+                Lucene90CompoundFormat.ENTRY_CODEC,
+                Lucene90CompoundFormat.VERSION_START,
+                Lucene90CompoundFormat.VERSION_CURRENT,
                 segmentID,
                 "");
 
@@ -156,7 +156,7 @@ final class Lucene50CompoundReader extends CompoundDirectory {
     final FileEntry entry = entries.get(id);
     if (entry == null) {
       String datFileName =
-          IndexFileNames.segmentFileName(segmentName, "", Lucene50CompoundFormat.DATA_EXTENSION);
+          IndexFileNames.segmentFileName(segmentName, "", Lucene90CompoundFormat.DATA_EXTENSION);
       throw new FileNotFoundException(
           "No sub-file with id "
               + id
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
index 3050759..43f4215 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
@@ -168,9 +168,7 @@ public final class Lucene90FieldInfosFormat extends FieldInfosFormat {
           int pointNumBytes;
           int pointIndexDimensionCount = pointDataDimensionCount;
           if (pointDataDimensionCount != 0) {
-            if (version >= Lucene90FieldInfosFormat.FORMAT_SELECTIVE_INDEXING) {
-              pointIndexDimensionCount = input.readVInt();
-            }
+            pointIndexDimensionCount = input.readVInt();
             pointNumBytes = input.readVInt();
           } else {
             pointNumBytes = 0;
@@ -363,9 +361,7 @@ public final class Lucene90FieldInfosFormat extends FieldInfosFormat {
   // Codec header
   static final String CODEC_NAME = "Lucene90FieldInfos";
   static final int FORMAT_START = 0;
-  static final int FORMAT_SOFT_DELETES = 1;
-  static final int FORMAT_SELECTIVE_INDEXING = 2;
-  static final int FORMAT_CURRENT = FORMAT_SELECTIVE_INDEXING;
+  static final int FORMAT_CURRENT = FORMAT_START;
 
   // Field flags
   static final byte STORE_TERMVECTOR = 0x1;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorWriter.java
index 21ab611..e070784 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorWriter.java
@@ -54,23 +54,25 @@ public final class Lucene90VectorWriter extends VectorWriter {
     String metaFileName =
         IndexFileNames.segmentFileName(
             state.segmentInfo.name, state.segmentSuffix, Lucene90VectorFormat.META_EXTENSION);
-    meta = state.directory.createOutput(metaFileName, state.context);
 
     String vectorDataFileName =
         IndexFileNames.segmentFileName(
             state.segmentInfo.name,
             state.segmentSuffix,
             Lucene90VectorFormat.VECTOR_DATA_EXTENSION);
-    vectorData = state.directory.createOutput(vectorDataFileName, state.context);
 
     String indexDataFileName =
         IndexFileNames.segmentFileName(
             state.segmentInfo.name,
             state.segmentSuffix,
             Lucene90VectorFormat.VECTOR_INDEX_EXTENSION);
-    vectorIndex = state.directory.createOutput(indexDataFileName, state.context);
 
+    boolean success = false;
     try {
+      meta = state.directory.createOutput(metaFileName, state.context);
+      vectorData = state.directory.createOutput(vectorDataFileName, state.context);
+      vectorIndex = state.directory.createOutput(indexDataFileName, state.context);
+
       CodecUtil.writeIndexHeader(
           meta,
           Lucene90VectorFormat.META_CODEC_NAME,
@@ -89,8 +91,11 @@ public final class Lucene90VectorWriter extends VectorWriter {
           Lucene90VectorFormat.VERSION_CURRENT,
           state.segmentInfo.getId(),
           state.segmentSuffix);
-    } catch (IOException e) {
-      IOUtils.closeWhileHandlingException(this);
+      success = true;
+    } finally {
+      if (success == false) {
+        IOUtils.closeWhileHandlingException(this);
+      }
     }
   }
 
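The hunk above moves the three createOutput calls inside the try block and replaces the IOException catch with a success flag, so a failure while opening any output or writing any header closes whatever was already opened. A stripped-down sketch of that idiom (not from this commit; class and method names are illustrative):

    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexOutput;
    import org.apache.lucene.util.IOUtils;

    // Sketch only: open several outputs, closing the ones already opened if any later open fails.
    class SafeOpenSketch {
      static IndexOutput[] openAll(Directory dir, String... names) throws IOException {
        IndexOutput[] outs = new IndexOutput[names.length];
        boolean success = false;
        try {
          for (int i = 0; i < names.length; i++) {
            outs[i] = dir.createOutput(names[i], IOContext.DEFAULT);
          }
          success = true;
          return outs;
        } finally {
          if (success == false) {
            // closeWhileHandlingException ignores null entries and swallows close failures.
            IOUtils.closeWhileHandlingException(outs);
          }
        }
      }
    }
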
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
index b7a9d4a..4d34a40 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
@@ -234,7 +234,7 @@
  * <td>Stores metadata about a segment</td>
  * </tr>
  * <tr>
- * <td>{@link org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat Compound File}</td>
+ * <td>{@link org.apache.lucene.codecs.lucene90.Lucene90CompoundFormat Compound File}</td>
  * <td>.cfs, .cfe</td>
  * <td>An optional "virtual" file consisting of all the other index files for
  * systems that frequently run out of file handles.</td>
diff --git a/lucene/core/src/java/org/apache/lucene/document/FieldType.java b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
index b04b2f6..9ffa198 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FieldType.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
@@ -367,7 +367,8 @@ public class FieldType implements IndexableFieldType {
     return dimensionNumBytes;
   }
 
-  void setVectorDimensionsAndSearchStrategy(
+  /** Enable vector indexing, with the specified number of dimensions and distance function. */
+  public void setVectorDimensionsAndSearchStrategy(
       int numDimensions, VectorValues.SearchStrategy distFunc) {
     checkIfFrozen();
     if (numDimensions <= 0) {
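
Since setVectorDimensionsAndSearchStrategy is now public, callers can build a vector-enabled FieldType directly instead of relying on VectorField's preset types. A hedged sketch (not from this commit; the dimension count and strategy are arbitrary):

    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.index.VectorValues;

    // Sketch only: a FieldType for 4-dimensional vectors indexed for HNSW dot-product search.
    class VectorFieldTypeSketch {
      static FieldType dotProductVectorType() {
        FieldType type = new FieldType();
        type.setVectorDimensionsAndSearchStrategy(4, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW);
        type.freeze(); // make the type immutable before reusing it across fields
        return type;
      }
    }
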
diff --git a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
index 4f0d3a1..6974eac 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
@@ -259,6 +259,11 @@ public abstract class CodecReader extends LeafReader implements Accountable {
       ramBytesUsed += getPointsReader().ramBytesUsed();
     }
 
+    // vectors
+    if (getVectorReader() != null) {
+      ramBytesUsed += getVectorReader().ramBytesUsed();
+    }
+
     return ramBytesUsed;
   }
 
@@ -295,6 +300,11 @@ public abstract class CodecReader extends LeafReader implements Accountable {
       resources.add(Accountables.namedAccountable("points", getPointsReader()));
     }
 
+    // vectors
+    if (getVectorReader() != null) {
+      resources.add(Accountables.namedAccountable("vectors", getVectorReader()));
+    }
+
     return Collections.unmodifiableList(resources);
   }
 
@@ -329,5 +339,10 @@ public abstract class CodecReader extends LeafReader implements Accountable {
     if (getPointsReader() != null) {
       getPointsReader().checkIntegrity();
     }
+
+    // vectors
+    if (getVectorReader() != null) {
+      getVectorReader().checkIntegrity();
+    }
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
index 3746817..b917be5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
@@ -66,8 +66,8 @@ final class ReadersAndUpdates {
 
   // Indicates whether this segment is currently being merged. While a segment
   // is merging, all field updates are also registered in the
-  // mergingNumericUpdates map. Also, calls to writeFieldUpdates merge the
-  // updates with mergingNumericUpdates.
+  // mergingDVUpdates map. Also, calls to writeFieldUpdates merge the
+  // updates with mergingDVUpdates.
   // That way, when the segment is done merging, IndexWriter can apply the
   // updates on the merged segment too.
   private boolean isMerging = false;
diff --git a/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java b/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java
index 7140546..6c9a75b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java
+++ b/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java
@@ -20,6 +20,7 @@ import java.lang.reflect.Array;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
+import java.security.AccessControlException;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.text.DecimalFormat;
@@ -527,14 +528,14 @@ public final class RamUsageEstimator {
     // Walk type hierarchy
     for (; clazz != null; clazz = clazz.getSuperclass()) {
       final Class<?> target = clazz;
-      final Field[] fields =
-          AccessController.doPrivileged(
-              new PrivilegedAction<Field[]>() {
-                @Override
-                public Field[] run() {
-                  return target.getDeclaredFields();
-                }
-              });
+      final Field[] fields;
+      try {
+        fields =
+            AccessController.doPrivileged((PrivilegedAction<Field[]>) target::getDeclaredFields);
+      } catch (AccessControlException e) {
+        throw new RuntimeException("Can't access fields of class: " + target, e);
+      }
+
       for (Field f : fields) {
         if (!Modifier.isStatic(f.getModifiers())) {
           size = adjustForField(size, f);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90CompoundFormat.java
similarity index 90%
copy from lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
copy to lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90CompoundFormat.java
index 15fdf17..ed78abd 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90CompoundFormat.java
@@ -14,13 +14,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.codecs.lucene90;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.index.BaseCompoundFormatTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class TestLucene50CompoundFormat extends BaseCompoundFormatTestCase {
+public class TestLucene90CompoundFormat extends BaseCompoundFormatTestCase {
   private final Codec codec = TestUtil.getDefaultCodec();
 
   @Override
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90FieldInfosFormat.java
similarity index 78%
copy from lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
copy to lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90FieldInfosFormat.java
index 15fdf17..83bd56a 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90FieldInfosFormat.java
@@ -14,17 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.codecs.lucene90;
 
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.index.BaseCompoundFormatTestCase;
+import org.apache.lucene.index.BaseFieldInfoFormatTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class TestLucene50CompoundFormat extends BaseCompoundFormatTestCase {
-  private final Codec codec = TestUtil.getDefaultCodec();
-
+public class TestLucene90FieldInfosFormat extends BaseFieldInfoFormatTestCase {
   @Override
   protected Codec getCodec() {
-    return codec;
+    return TestUtil.getDefaultCodec();
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90VectorFormat.java
similarity index 78%
rename from lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
rename to lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90VectorFormat.java
index 15fdf17..1906ecf 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50CompoundFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestLucene90VectorFormat.java
@@ -14,17 +14,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.codecs.lucene90;
 
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.index.BaseCompoundFormatTestCase;
+import org.apache.lucene.index.BaseVectorFormatTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class TestLucene50CompoundFormat extends BaseCompoundFormatTestCase {
-  private final Codec codec = TestUtil.getDefaultCodec();
+public class TestLucene90VectorFormat extends BaseVectorFormatTestCase {
 
   @Override
   protected Codec getCodec() {
-    return codec;
+    return TestUtil.getDefaultCodec();
   }
 }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/Intervals.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/Intervals.java
index e2c7225..1094dbe 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/Intervals.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/Intervals.java
@@ -404,6 +404,16 @@ public final class Intervals {
    * Return intervals that span combinations of intervals from {@code minShouldMatch} of the sources
    */
   public static IntervalsSource atLeast(int minShouldMatch, IntervalsSource... sources) {
+    if (minShouldMatch == sources.length) {
+      return unordered(sources);
+    }
+    if (minShouldMatch > sources.length) {
+      return new NoMatchIntervalsSource(
+          "Too few sources to match minimum of ["
+              + minShouldMatch
+              + "]: "
+              + Arrays.toString(sources));
+    }
     return new MinimumShouldMatchIntervalsSource(sources, minShouldMatch);
   }
 
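With the change above, atLeast() simplifies degenerate arguments up front: requiring every source falls back to unordered(), and requiring more sources than were supplied yields a source that never matches rather than a MinimumShouldMatch source that could never be satisfied. A small caller sketch (not from this commit; the field name and terms are made up):

    import org.apache.lucene.queries.intervals.IntervalQuery;
    import org.apache.lucene.queries.intervals.Intervals;
    import org.apache.lucene.queries.intervals.IntervalsSource;
    import org.apache.lucene.search.Query;

    // Sketch only: require any two of three terms to co-occur in the "body" field.
    class AtLeastSketch {
      static Query twoOfThree() {
        IntervalsSource source =
            Intervals.atLeast(
                2, Intervals.term("lucene"), Intervals.term("solr"), Intervals.term("search"));
        return new IntervalQuery("body", source);
      }
    }
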
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimumShouldMatchIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimumShouldMatchIntervalsSource.java
index 87cd427..f9f2677 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimumShouldMatchIntervalsSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimumShouldMatchIntervalsSource.java
@@ -42,6 +42,7 @@ class MinimumShouldMatchIntervalsSource extends IntervalsSource {
   private final int minShouldMatch;
 
   MinimumShouldMatchIntervalsSource(IntervalsSource[] sources, int minShouldMatch) {
+    assert minShouldMatch < sources.length;
     this.sources = sources;
     this.minShouldMatch = minShouldMatch;
   }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/NoMatchIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/NoMatchIntervalsSource.java
new file mode 100644
index 0000000..cfa7364
--- /dev/null
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/NoMatchIntervalsSource.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.queries.intervals;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Objects;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.QueryVisitor;
+
+/** A source returning no matches */
+class NoMatchIntervalsSource extends IntervalsSource {
+  final String reason;
+
+  NoMatchIntervalsSource(String reason) {
+    this.reason = reason;
+  }
+
+  @Override
+  public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
+    return null;
+  }
+
+  @Override
+  public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc)
+      throws IOException {
+    return null;
+  }
+
+  @Override
+  public void visit(String field, QueryVisitor visitor) {}
+
+  @Override
+  public int minExtent() {
+    return 0;
+  }
+
+  @Override
+  public Collection<IntervalsSource> pullUpDisjunctions() {
+    return Collections.singleton(this);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    NoMatchIntervalsSource that = (NoMatchIntervalsSource) o;
+    return Objects.equals(reason, that.reason);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(reason);
+  }
+
+  @Override
+  public String toString() {
+    return "NOMATCH(" + reason + ")";
+  }
+}
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
index f478c00..57e86f8 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
@@ -756,6 +756,27 @@ public class TestIntervals extends LuceneTestCase {
     assertEquals(3, source.minExtent());
   }
 
+  public void testDegenerateMinShouldMatch() throws IOException {
+    IntervalsSource source =
+        Intervals.ordered(
+            Intervals.atLeast(1, Intervals.term("interest")),
+            Intervals.atLeast(1, Intervals.term("anyone")));
+
+    MatchesIterator mi = getMatches(source, 0, "field1");
+    assertMatch(mi, 2, 4, 11, 29);
+    MatchesIterator subs = mi.getSubMatches();
+    assertNotNull(subs);
+    assertMatch(subs, 2, 2, 11, 19);
+    assertMatch(subs, 4, 4, 23, 29);
+    assertFalse(subs.next());
+    assertFalse(mi.next());
+  }
+
+  public void testNoMatchMinShouldMatch() throws IOException {
+    IntervalsSource source = Intervals.atLeast(4, Intervals.term("a"), Intervals.term("b"));
+    checkIntervals(source, "field", 0, new int[][] {});
+  }
+
   public void testDefinedGaps() throws IOException {
     IntervalsSource source =
         Intervals.phrase(
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestSimplifications.java b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestSimplifications.java
index da9531b..6bdc2d5 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestSimplifications.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestSimplifications.java
@@ -111,4 +111,13 @@ public class TestSimplifications extends LuceneTestCase {
             Intervals.term("a"), Intervals.term("b"), Intervals.term("c"), Intervals.term("d")),
         actual);
   }
+
+  public void testMinShouldMatchSimplifications() {
+    IntervalsSource expected = Intervals.unordered(Intervals.term("a"), Intervals.term("b"));
+    assertEquals(expected, Intervals.atLeast(2, Intervals.term("a"), Intervals.term("b")));
+
+    assertEquals(
+        "NOMATCH(Too few sources to match minimum of [3]: [a, b])",
+        Intervals.atLeast(3, Intervals.term("a"), Intervals.term("b")).toString());
+  }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
index 4a9f4fb..05f8957 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseFieldInfoFormatTestCase.java
@@ -16,6 +16,7 @@
  */
 package org.apache.lucene.index;
 
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;
@@ -63,6 +64,9 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
     assertFalse(infos2.fieldInfo("field").omitsNorms());
     assertFalse(infos2.fieldInfo("field").hasPayloads());
     assertFalse(infos2.fieldInfo("field").hasVectors());
+    assertEquals(0, infos2.fieldInfo("field").getPointDimensionCount());
+    assertEquals(0, infos2.fieldInfo("field").getVectorDimension());
+    assertFalse(infos2.fieldInfo("field").isSoftDeletesField());
     dir.close();
   }
 
@@ -253,7 +257,12 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
     for (int i = 0; i < numFields; i++) {
       fieldNames.add(TestUtil.randomUnicodeString(random()));
     }
-    FieldInfos.Builder builder = new FieldInfos.Builder(new FieldInfos.FieldNumbers(null));
+
+    String softDeletesField =
+        random().nextBoolean() ? TestUtil.randomUnicodeString(random()) : null;
+    FieldInfos.Builder builder =
+        new FieldInfos.Builder(new FieldInfos.FieldNumbers(softDeletesField));
+
     for (String field : fieldNames) {
       IndexableFieldType fieldType = randomFieldType(random());
       FieldInfo fi = builder.getOrAdd(field);
@@ -271,6 +280,19 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
           fi.setStorePayloads();
         }
       }
+
+      if (fieldType.pointDimensionCount() > 0) {
+        fi.setPointDimensions(
+            fieldType.pointDimensionCount(),
+            fieldType.pointIndexDimensionCount(),
+            fieldType.pointNumBytes());
+      }
+
+      if (fieldType.vectorDimension() > 0) {
+        fi.setVectorDimensionAndSearchStrategy(
+            fieldType.vectorDimension(), fieldType.vectorSearchStrategy());
+      }
+
       addAttributes(fi);
     }
     FieldInfos infos = builder.finish();
@@ -280,11 +302,11 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
     dir.close();
   }
 
-  private final IndexableFieldType randomFieldType(Random r) {
+  private IndexableFieldType randomFieldType(Random r) {
     FieldType type = new FieldType();
 
     if (r.nextBoolean()) {
-      IndexOptions values[] = IndexOptions.values();
+      IndexOptions[] values = IndexOptions.values();
       type.setIndexOptions(values[r.nextInt(values.length)]);
       type.setOmitNorms(r.nextBoolean());
 
@@ -301,27 +323,30 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
     }
 
     if (r.nextBoolean()) {
-      DocValuesType values[] = getDocValuesTypes();
+      DocValuesType[] values = DocValuesType.values();
       type.setDocValuesType(values[r.nextInt(values.length)]);
     }
 
+    if (r.nextBoolean()) {
+      int dimension = 1 + r.nextInt(PointValues.MAX_DIMENSIONS);
+      int indexDimension = 1 + r.nextInt(Math.min(dimension, PointValues.MAX_INDEX_DIMENSIONS));
+      int dimensionNumBytes = 1 + r.nextInt(PointValues.MAX_NUM_BYTES);
+      type.setDimensions(dimension, indexDimension, dimensionNumBytes);
+    }
+
+    if (r.nextBoolean()) {
+      int dimension = 1 + r.nextInt(VectorValues.MAX_DIMENSIONS);
+      VectorValues.SearchStrategy searchStrategy =
+          RandomPicks.randomFrom(r, VectorValues.SearchStrategy.values());
+      type.setVectorDimensionsAndSearchStrategy(dimension, searchStrategy);
+    }
+
     return type;
   }
 
   /** Hook to add any codec attributes to fieldinfo instances added in this test. */
   protected void addAttributes(FieldInfo fi) {}
 
-  /**
-   * Docvalues types to test.
-   *
-   * @deprecated only for Only available to ancient codecs can limit this to the subset of types
-   *     they support.
-   */
-  @Deprecated
-  protected DocValuesType[] getDocValuesTypes() {
-    return DocValuesType.values();
-  }
-
   /** equality for entirety of fieldinfos */
   protected void assertEquals(FieldInfos expected, FieldInfos actual) {
     assertEquals(expected.size(), actual.size());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestVectorValues.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseVectorFormatTestCase.java
similarity index 87%
rename from lucene/core/src/test/org/apache/lucene/index/TestVectorValues.java
rename to lucene/test-framework/src/java/org/apache/lucene/index/BaseVectorFormatTestCase.java
index 9691efb..047c373 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestVectorValues.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseVectorFormatTestCase.java
@@ -22,42 +22,45 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.VectorFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.VectorField;
-import org.apache.lucene.index.VectorValues.SearchStrategy;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.VectorUtil;
 
-/** Test Indexing/IndexWriter with vectors */
-public class TestVectorValues extends LuceneTestCase {
+/**
+ * Base class for testing {@link VectorFormat vector formats}. To test a new format, all you need
+ * to do is register a new {@link Codec} which uses it, extend this class, and override {@link
+ * #getCodec()}.
+ *
+ * @lucene.experimental
+ */
+public abstract class BaseVectorFormatTestCase extends BaseIndexFileFormatTestCase {
 
-  private IndexWriterConfig createIndexWriterConfig() {
-    IndexWriterConfig iwc = newIndexWriterConfig();
-    iwc.setCodec(Codec.forName("Lucene90"));
-    return iwc;
+  @Override
+  protected void addRandomFields(Document doc) {
+    doc.add(new VectorField("v2", randomVector(30), VectorValues.SearchStrategy.NONE));
   }
 
   // Suddenly add vectors to an existing field:
   public void testUpgradeFieldToVectors() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
-        doc.add(newStringField("f", "foo", Store.NO));
+        doc.add(newStringField("f", "foo", Field.Store.NO));
         w.addDocument(doc);
       }
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
@@ -79,7 +82,7 @@ public class TestVectorValues extends LuceneTestCase {
     expectThrows(IllegalArgumentException.class, () -> new VectorField("f", null));
     expectThrows(
         IllegalArgumentException.class,
-        () -> new VectorField("f", new float[1], (SearchStrategy) null));
+        () -> new VectorField("f", new float[1], (VectorValues.SearchStrategy) null));
     expectThrows(IllegalArgumentException.class, () -> new VectorField("f", new float[0]));
     expectThrows(
         IllegalArgumentException.class,
@@ -101,12 +104,15 @@ public class TestVectorValues extends LuceneTestCase {
   public void testFieldCreateFieldType() {
     expectThrows(
         IllegalArgumentException.class,
-        () -> VectorField.createHnswType(0, SearchStrategy.EUCLIDEAN_HNSW, 16, 16));
+        () -> VectorField.createHnswType(0, VectorValues.SearchStrategy.EUCLIDEAN_HNSW, 16, 16));
     expectThrows(
         IllegalArgumentException.class,
         () ->
             VectorField.createHnswType(
-                VectorValues.MAX_DIMENSIONS + 1, SearchStrategy.EUCLIDEAN_HNSW, 16, 16));
+                VectorValues.MAX_DIMENSIONS + 1,
+                VectorValues.SearchStrategy.EUCLIDEAN_HNSW,
+                16,
+                16));
     expectThrows(
         IllegalArgumentException.class,
         () -> VectorField.createHnswType(VectorValues.MAX_DIMENSIONS + 1, null, 16, 16));
@@ -114,14 +120,14 @@ public class TestVectorValues extends LuceneTestCase {
         IllegalArgumentException.class,
         () ->
             VectorField.createHnswType(
-                VectorValues.MAX_DIMENSIONS + 1, SearchStrategy.NONE, 16, 16));
+                VectorValues.MAX_DIMENSIONS + 1, VectorValues.SearchStrategy.NONE, 16, 16));
   }
 
   // Illegal schema change tests:
 
   public void testIllegalDimChangeTwoDocs() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
       doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
       w.addDocument(doc);
@@ -141,7 +147,7 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testIllegalSearchStrategyChange() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
       doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
       w.addDocument(doc);
@@ -162,13 +168,13 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testIllegalDimChangeTwoWriters() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
 
-      try (IndexWriter w2 = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc2 = new Document();
         doc2.add(new VectorField("f", new float[1], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         IllegalArgumentException expected =
@@ -181,13 +187,13 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testIllegalSearchStrategyChangeTwoWriters() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
 
-      try (IndexWriter w2 = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc2 = new Document();
         doc2.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
         IllegalArgumentException expected =
@@ -205,10 +211,10 @@ public class TestVectorValues extends LuceneTestCase {
     doc.add(new VectorField(fieldName, new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         w2.addIndexes(dir);
         w2.forceMerge(1);
         try (IndexReader reader = w2.getReader()) {
@@ -227,12 +233,12 @@ public class TestVectorValues extends LuceneTestCase {
     Document doc = new Document();
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         w.addDocument(doc);
       }
       doc.add(
           new VectorField(fieldName, new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         w2.addDocument(doc);
         w2.addIndexes(dir);
         w2.forceMerge(1);
@@ -254,10 +260,10 @@ public class TestVectorValues extends LuceneTestCase {
     doc.add(new VectorField(fieldName, vector, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         vector[0] = 1;
         w2.addDocument(doc);
         w2.addIndexes(dir);
@@ -280,12 +286,12 @@ public class TestVectorValues extends LuceneTestCase {
   public void testIllegalDimChangeViaAddIndexesDirectory() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
-        doc.add(new VectorField("f", new float[4], SearchStrategy.DOT_PRODUCT_HNSW));
+        doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[5], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w2.addDocument(doc);
@@ -301,12 +307,12 @@ public class TestVectorValues extends LuceneTestCase {
   public void testIllegalSearchStrategyChangeViaAddIndexesDirectory() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
         w2.addDocument(doc);
@@ -322,12 +328,12 @@ public class TestVectorValues extends LuceneTestCase {
   public void testIllegalDimChangeViaAddIndexesCodecReader() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
-        doc.add(new VectorField("f", new float[4], SearchStrategy.DOT_PRODUCT_HNSW));
+        doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[5], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w2.addDocument(doc);
@@ -346,12 +352,12 @@ public class TestVectorValues extends LuceneTestCase {
   public void testIllegalSearchStrategyChangeViaAddIndexesCodecReader() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
         w2.addDocument(doc);
@@ -371,12 +377,12 @@ public class TestVectorValues extends LuceneTestCase {
   public void testIllegalDimChangeViaAddIndexesSlowCodecReader() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[5], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w2.addDocument(doc);
@@ -393,14 +399,14 @@ public class TestVectorValues extends LuceneTestCase {
   public void testIllegalSearchStrategyChangeViaAddIndexesSlowCodecReader() throws Exception {
     try (Directory dir = newDirectory();
         Directory dir2 = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w2 = new IndexWriter(dir2, createIndexWriterConfig())) {
+      try (IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig())) {
         Document doc = new Document();
-        doc.add(new VectorField("f", new float[4], SearchStrategy.EUCLIDEAN_HNSW));
+        doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
         w2.addDocument(doc);
         try (DirectoryReader r = DirectoryReader.open(dir)) {
           IllegalArgumentException expected =
@@ -415,7 +421,7 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testIllegalMultipleValues() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
       doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
       doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
@@ -429,7 +435,7 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testIllegalDimensionTooLarge() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
       expectThrows(
           IllegalArgumentException.class,
@@ -448,12 +454,12 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testIllegalEmptyVector() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
       Exception e =
           expectThrows(
               IllegalArgumentException.class,
-              () -> doc.add(new VectorField("f", new float[0], SearchStrategy.NONE)));
+              () -> doc.add(new VectorField("f", new float[0], VectorValues.SearchStrategy.NONE)));
       assertEquals("cannot index an empty vector", e.getMessage());
 
       Document doc2 = new Document();
@@ -465,7 +471,7 @@ public class TestVectorValues extends LuceneTestCase {
   // Write vectors, one segment with default codec, another with SimpleText, then forceMerge
   public void testDifferentCodecs1() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
@@ -491,7 +497,7 @@ public class TestVectorValues extends LuceneTestCase {
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
       }
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("f", new float[4], VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
         w.addDocument(doc);
@@ -512,9 +518,9 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testDeleteAllVectorDocs() throws Exception {
     try (Directory dir = newDirectory();
-        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
-      doc.add(new StringField("id", "0", Store.NO));
+      doc.add(new StringField("id", "0", Field.Store.NO));
       doc.add(
           new VectorField(
               "v", new float[] {2, 3, 5}, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
@@ -535,9 +541,9 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testVectorFieldMissingFromOneSegment() throws Exception {
     try (Directory dir = FSDirectory.open(createTempDir());
-        IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
-      doc.add(new StringField("id", "0", Store.NO));
+      doc.add(new StringField("id", "0", Field.Store.NO));
       doc.add(
           new VectorField(
               "v0", new float[] {2, 3, 5}, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
@@ -568,7 +574,7 @@ public class TestVectorValues extends LuceneTestCase {
               random().nextInt(VectorValues.SearchStrategy.values().length)];
     }
     try (Directory dir = newDirectory();
-        RandomIndexWriter w = new RandomIndexWriter(random(), dir, createIndexWriterConfig())) {
+        RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig())) {
       for (int i = 0; i < numDocs; i++) {
         Document doc = new Document();
         for (int field = 0; field < numFields; field++) {
@@ -610,7 +616,7 @@ public class TestVectorValues extends LuceneTestCase {
     String fieldName = "field";
     float[] v = {0};
     try (Directory dir = newDirectory();
-        IndexWriter iw = new IndexWriter(dir, createIndexWriterConfig())) {
+        IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc1 = new Document();
       doc1.add(new VectorField(fieldName, v, VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
       v[0] = 1;
@@ -637,7 +643,7 @@ public class TestVectorValues extends LuceneTestCase {
   }
 
   public void testSortedIndex() throws Exception {
-    IndexWriterConfig iwc = createIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     iwc.setIndexSort(new Sort(new SortField("sortkey", SortField.Type.INT)));
     String fieldName = "field";
     try (Directory dir = newDirectory();
@@ -675,13 +681,15 @@ public class TestVectorValues extends LuceneTestCase {
         IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig())) {
       Document doc = new Document();
       float[] v = new float[] {1};
-      doc.add(new VectorField("field1", v, SearchStrategy.EUCLIDEAN_HNSW));
-      doc.add(new VectorField("field2", new float[] {1, 2, 3}, SearchStrategy.NONE));
+      doc.add(new VectorField("field1", v, VectorValues.SearchStrategy.EUCLIDEAN_HNSW));
+      doc.add(new VectorField("field2", new float[] {1, 2, 3}, VectorValues.SearchStrategy.NONE));
       iw.addDocument(doc);
       v[0] = 2;
       iw.addDocument(doc);
       doc = new Document();
-      doc.add(new VectorField("field3", new float[] {1, 2, 3}, SearchStrategy.DOT_PRODUCT_HNSW));
+      doc.add(
+          new VectorField(
+              "field3", new float[] {1, 2, 3}, VectorValues.SearchStrategy.DOT_PRODUCT_HNSW));
       iw.addDocument(doc);
       iw.forceMerge(1);
       try (IndexReader reader = iw.getReader()) {
@@ -721,7 +729,7 @@ public class TestVectorValues extends LuceneTestCase {
    * consistently.
    */
   public void testRandom() throws Exception {
-    IndexWriterConfig iwc = createIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     if (random().nextBoolean()) {
       iwc.setIndexSort(new Sort(new SortField("sortkey", SortField.Type.INT)));
     }
@@ -742,9 +750,9 @@ public class TestVectorValues extends LuceneTestCase {
         if (random().nextBoolean() && values[i] != null) {
           // sometimes use a shared scratch array
           System.arraycopy(values[i], 0, scratch, 0, scratch.length);
-          add(iw, fieldName, i, scratch, SearchStrategy.NONE);
+          add(iw, fieldName, i, scratch, VectorValues.SearchStrategy.NONE);
         } else {
-          add(iw, fieldName, i, values[i], SearchStrategy.NONE);
+          add(iw, fieldName, i, values[i], VectorValues.SearchStrategy.NONE);
         }
         if (random().nextInt(10) == 2) {
           // sometimes delete a random document
@@ -817,7 +825,7 @@ public class TestVectorValues extends LuceneTestCase {
         values[i] = value;
         id2value[id] = value;
         id2ord[id] = i;
-        add(iw, fieldName, id, value, SearchStrategy.EUCLIDEAN_HNSW);
+        add(iw, fieldName, id, value, VectorValues.SearchStrategy.EUCLIDEAN_HNSW);
       }
       try (IndexReader reader = iw.getReader()) {
         for (LeafReaderContext ctx : reader.leaves()) {
@@ -850,14 +858,18 @@ public class TestVectorValues extends LuceneTestCase {
   }
 
   private void add(
-      IndexWriter iw, String field, int id, float[] vector, SearchStrategy searchStrategy)
+      IndexWriter iw,
+      String field,
+      int id,
+      float[] vector,
+      VectorValues.SearchStrategy searchStrategy)
       throws IOException {
     add(iw, field, id, random().nextInt(100), vector, searchStrategy);
   }
 
   private void add(IndexWriter iw, String field, int id, int sortkey, float[] vector)
       throws IOException {
-    add(iw, field, id, sortkey, vector, SearchStrategy.NONE);
+    add(iw, field, id, sortkey, vector, VectorValues.SearchStrategy.NONE);
   }
 
   private void add(
@@ -866,7 +878,7 @@ public class TestVectorValues extends LuceneTestCase {
       int id,
       int sortkey,
       float[] vector,
-      SearchStrategy searchStrategy)
+      VectorValues.SearchStrategy searchStrategy)
       throws IOException {
     Document doc = new Document();
     if (vector != null) {
@@ -890,7 +902,7 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testCheckIndexIncludesVectors() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         Document doc = new Document();
         doc.add(new VectorField("v1", randomVector(3), VectorValues.SearchStrategy.NONE));
         w.addDocument(doc);
@@ -924,14 +936,14 @@ public class TestVectorValues extends LuceneTestCase {
 
   public void testAdvance() throws Exception {
     try (Directory dir = newDirectory()) {
-      try (IndexWriter w = new IndexWriter(dir, createIndexWriterConfig())) {
+      try (IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
         int numdocs = atLeast(1500);
         String fieldName = "field";
         for (int i = 0; i < numdocs; i++) {
           Document doc = new Document();
           // randomly add a vector field
           if (random().nextInt(4) == 3) {
-            doc.add(new VectorField(fieldName, new float[4], SearchStrategy.NONE));
+            doc.add(new VectorField(fieldName, new float[4], VectorValues.SearchStrategy.NONE));
           }
           w.addDocument(doc);
         }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java b/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java
index 39d2556..2c5c6af 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/RamUsageTester.java
@@ -23,11 +23,14 @@ import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
+import java.nio.charset.CharsetDecoder;
+import java.nio.charset.CharsetEncoder;
 import java.nio.file.Path;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.util.AbstractList;
 import java.util.ArrayList;
+import java.util.BitSet;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -151,6 +154,14 @@ public final class RamUsageTester {
       ArrayList<Object> stack,
       Object ob,
       Class<?> obClazz) {
+
+    // Ignore JDK objects we can't access or handle properly.
+    Predicate<Object> isIgnorable =
+        (clazz) -> (clazz instanceof CharsetEncoder) || (clazz instanceof CharsetDecoder);
+    if (isIgnorable.test(ob)) {
+      return accumulator.accumulateObject(ob, 0, Collections.emptyMap(), stack);
+    }
+
     /*
      * Consider an object. Push any references it has to the processing stack
      * and accumulate this object's shallow size.
@@ -159,10 +170,7 @@ public final class RamUsageTester {
       if (Constants.JRE_IS_MINIMUM_JAVA9) {
         long alignedShallowInstanceSize = RamUsageEstimator.shallowSizeOf(ob);
 
-        Predicate<Class<?>> isJavaModule =
-            (clazz) -> {
-              return clazz.getName().startsWith("java.");
-            };
+        Predicate<Class<?>> isJavaModule = (clazz) -> clazz.getName().startsWith("java.");
 
         // Java 9: Best guess for some known types, as we cannot precisely look into runtime
         // classes:
@@ -274,13 +282,17 @@ public final class RamUsageTester {
                           v.length())); // may not be correct with Java 9's compact strings!
               a(StringBuilder.class, v -> charArraySize(v.capacity()));
               a(StringBuffer.class, v -> charArraySize(v.capacity()));
+              // Approximate the underlying long[] buffer.
+              a(BitSet.class, v -> (v.size() / Byte.SIZE));
               // Types with large buffers:
               a(ByteArrayOutputStream.class, v -> byteArraySize(v.size()));
               // For File and Path, we just take the length of String representation as
               // approximation:
               a(File.class, v -> charArraySize(v.toString().length()));
               a(Path.class, v -> charArraySize(v.toString().length()));
-              a(ByteOrder.class, v -> 0); // Instances of ByteOrder are constants
+
+              // Ignorable JDK classes.
+              a(ByteOrder.class, v -> 0);
             }
 
             @SuppressWarnings("unchecked")
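
A note on the BitSet accounting added above: BitSet.size() reports the capacity in bits of the backing long[] (rounded up to whole 64-bit words), so dividing by Byte.SIZE approximates the buffer's footprint in bytes. A minimal, standalone JDK sketch of that arithmetic (class name illustrative):

    import java.util.BitSet;

    public class BitSetFootprintSketch {
      public static void main(String[] args) {
        BitSet bits = new BitSet(1000);
        // size() rounds up to whole 64-bit words: 1000 bits -> 1024 bits -> 128 bytes.
        long approxBytes = bits.size() / Byte.SIZE;
        System.out.println(approxBytes); // prints 128
      }
    }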
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index a0d436d..ddc8c84 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -199,6 +199,8 @@ Other Changes
 * SOLR-14067: StatelessScriptUpdateProcessorFactory moved to it's own /contrib/scripting/ package instead
  of shipping as part of Solr due to security concerns.  Renamed to ScriptUpdateProcessorFactory for simpler name. (Eric Pugh)
 
+* SOLR-15118: Switch /v2/collections APIs over to the now-preferred annotated-POJO implementation approach (Jason Gerlowski)
+
 Bug Fixes
 ---------------------
 * SOLR-14546: Fix for a relatively hard to hit issue in OverseerTaskProcessor that could lead to out of order execution
@@ -221,7 +223,7 @@ Improvements
 * SOLR-14234: Unhelpful message in RemoteExecutionException. (ab)
 
 * SOLR-13608: Backups are now done incrementally by default.  Multiple backups can be stored at the same location, and each
-  backup will only upload those files that are new since the last backup. (Jason Gerlowski, Shalin , Cao Manh Dat)
+  backup will only upload those files that are new since the last backup. (Jason Gerlowski, shalin , Cao Manh Dat)
 
 * SOLR-15123: Revamp SolrCLI tool's help descriptions for all commands for consistency and clarity. (Eric Pugh)
 
@@ -238,7 +240,20 @@ Bug Fixes
 
 Other Changes
 ---------------------
-(No changes)
+* SOLR-15118: Deprecate CollectionAdminRequest.getV2Request(). (Jason Gerlowski)
+
+==================  8.8.1 ==================
+
+Bug Fixes
+---------------------
+
+* SOLR-15145: System property to control whether base_url is stored in state.json to enable back-compat with older SolrJ versions.
+  (Timothy Potter)
+
+* SOLR-15114: Fix bug that caused WAND optimization to be disabled in cases where the max score is requested (such as
+  multi-shard requests in SolrCloud) (Naoto Minami via Tomás Fernández Löbbe)
+
+* SOLR-15136: Reduce excessive logging introduced with Per Replica States feature (Ishan Chattopadhyaya)
 
 * SOLR-15136: Reduce excessive logging introduced with Per Replica States feature (Ishan Chattopadhyaya)
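
The incremental-backup behaviour described in the SOLR-13608 entry above can be exercised from SolrJ; a minimal sketch, assuming a locally running SolrCloud node — the ZooKeeper address, collection name, backup name and location are placeholders:

    import java.util.List;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class IncrementalBackupSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder ZooKeeper address; adjust for a real cluster.
        try (CloudSolrClient client =
            new CloudSolrClient.Builder(List.of("localhost:9983"), Optional.empty()).build()) {
          // Re-running this against the same location only uploads files that are
          // new since the previous backup (incremental is the default per SOLR-13608).
          CollectionAdminRequest.backupCollection("techproducts", "nightly")
              .setLocation("/var/backups/solr")
              .process(client);
        }
      }
    }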
 
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
index 8928fbe..afa1368 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
@@ -16,19 +16,6 @@
  */
 package org.apache.solr.cloud.api.collections;
 
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Optional;
-
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ShardRequestTracker;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -44,6 +31,7 @@ import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.backup.BackupFilePaths;
 import org.apache.solr.core.backup.BackupManager;
 import org.apache.solr.core.backup.BackupProperties;
 import org.apache.solr.core.backup.ShardBackupId;
@@ -52,11 +40,23 @@ import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
 import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
 import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
 import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.core.backup.BackupFilePaths;
 import org.apache.solr.handler.component.ShardHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Optional;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
 public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
@@ -91,25 +91,25 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
 
       // Backup location
       URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-      final URI backupPath = createAndValidateBackupPath(repository, incremental, location, backupName, collectionName);
+      final URI backupUri = createAndValidateBackupPath(repository, incremental, location, backupName, collectionName);
 
       BackupManager backupMgr = (incremental) ?
-              BackupManager.forIncrementalBackup(repository, ocmh.zkStateReader, backupPath) :
-              BackupManager.forBackup(repository, ocmh.zkStateReader, backupPath);
+              BackupManager.forIncrementalBackup(repository, ocmh.zkStateReader, backupUri) :
+              BackupManager.forBackup(repository, ocmh.zkStateReader, backupUri);
 
       String strategy = message.getStr(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
       switch (strategy) {
         case CollectionAdminParams.COPY_FILES_STRATEGY: {
           if (incremental) {
             try {
-              incrementalCopyIndexFiles(backupPath, collectionName, message, results, backupProperties, backupMgr);
+              incrementalCopyIndexFiles(backupUri, collectionName, message, results, backupProperties, backupMgr);
             } catch (SolrException e) {
               log.error("Error happened during incremental backup for collection:{}", collectionName, e);
-              ocmh.cleanBackup(repository, backupPath, backupMgr.getBackupId());
+              ocmh.cleanBackup(repository, backupUri, backupMgr.getBackupId());
               throw e;
             }
           } else {
-            copyIndexFiles(backupPath, collectionName, message, results);
+            copyIndexFiles(backupUri, collectionName, message, results);
           }
           break;
         }
@@ -139,7 +139,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
 
       int maxNumBackup = message.getInt(CoreAdminParams.MAX_NUM_BACKUP_POINTS, -1);
       if (incremental && maxNumBackup != -1) {
-        ocmh.deleteBackup(repository, backupPath, maxNumBackup, results);
+        ocmh.deleteBackup(repository, backupUri, maxNumBackup, results);
       }
     }
   }
@@ -204,7 +204,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
     return r.get();
   }
 
-  private void incrementalCopyIndexFiles(URI backupPath, String collectionName, ZkNodeProps request,
+  private void incrementalCopyIndexFiles(URI backupUri, String collectionName, ZkNodeProps request,
                                          NamedList<Object> results, BackupProperties backupProperties,
                                          BackupManager backupManager) throws IOException {
     String backupName = request.getStr(NAME);
@@ -213,7 +213,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
     ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
 
     log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName,
-            backupPath);
+            backupUri);
 
     Optional<BackupProperties> previousProps = backupManager.tryReadBackupProperties();
     final ShardRequestTracker shardRequestTracker = ocmh.asyncRequestTracker(asyncId);
@@ -227,7 +227,7 @@ public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
       }
       String coreName = replica.getStr(CORE_NAME_PROP);
 
-      ModifiableSolrParams params = coreBackupParams(backupPath, repoName, slice, coreName, true /* incremental backup */);
+      ModifiableSolrParams params = coreBackupParams(backupUri, repoName, slice, coreName, true /* incremental backup */);
       params.set(CoreAdminParams.BACKUP_INCREMENTAL, true);
       previousProps.flatMap(bp -> bp.getShardBackupIdFor(slice.getName()))
               .ifPresent(prevBackupPoint -> params.set(CoreAdminParams.PREV_SHARD_BACKUP_ID, prevBackupPoint.getIdAsString()));
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteBackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteBackupCmd.java
index 7709751..0a48f1d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteBackupCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteBackupCmd.java
@@ -23,14 +23,14 @@ import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupId;
 import org.apache.solr.core.backup.AggregateBackupStats;
+import org.apache.solr.core.backup.BackupFilePaths;
+import org.apache.solr.core.backup.BackupId;
 import org.apache.solr.core.backup.BackupManager;
 import org.apache.solr.core.backup.BackupProperties;
 import org.apache.solr.core.backup.ShardBackupId;
 import org.apache.solr.core.backup.ShardBackupMetadata;
 import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.backup.BackupFilePaths;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -117,10 +117,10 @@ public class DeleteBackupCmd implements OverseerCollectionMessageHandler.Cmd {
         deleteBackupIds(backupPath, repository, new HashSet<>(backupIdDeletes), results);
     }
 
-    void deleteBackupIds(URI backupPath, BackupRepository repository,
+    void deleteBackupIds(URI backupUri, BackupRepository repository,
                          Set<BackupId> backupIdsDeletes,
                          @SuppressWarnings({"rawtypes"}) NamedList results) throws IOException {
-        BackupFilePaths incBackupFiles = new BackupFilePaths(repository, backupPath);
+        BackupFilePaths incBackupFiles = new BackupFilePaths(repository, backupUri);
         URI shardBackupMetadataDir = incBackupFiles.getShardBackupMetadataDir();
 
         Set<String> referencedIndexFiles = new HashSet<>();
@@ -167,15 +167,15 @@ public class DeleteBackupCmd implements OverseerCollectionMessageHandler.Cmd {
         repository.delete(incBackupFiles.getIndexDir(), unusedFiles, true);
         try {
             for (BackupId backupId : backupIdsDeletes) {
-                repository.deleteDirectory(repository.resolve(backupPath, BackupFilePaths.getZkStateDir(backupId)));
+                repository.deleteDirectory(repository.resolve(backupUri, BackupFilePaths.getZkStateDir(backupId)));
             }
         } catch (FileNotFoundException e) {
             //ignore this
         }
 
         //add details to result before deleting backupPropFiles
-        addResult(backupPath, repository, backupIdsDeletes, backupIdToCollectionBackupPoint, results);
-        repository.delete(backupPath, backupIdsDeletes.stream().map(id -> BackupFilePaths.getBackupPropsName(id)).collect(Collectors.toList()), true);
+        addResult(backupUri, repository, backupIdsDeletes, backupIdToCollectionBackupPoint, results);
+        repository.delete(backupUri, backupIdsDeletes.stream().map(id -> BackupFilePaths.getBackupPropsName(id)).collect(Collectors.toList()), true);
     }
 
     @SuppressWarnings("unchecked")
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
index b7f9c6d..53a02fa 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -16,24 +16,6 @@
  */
 package org.apache.solr.cloud.api.collections;
 
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
 import com.google.common.collect.ImmutableMap;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.solr.client.solrj.SolrResponse;
@@ -95,16 +77,25 @@ import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.solr.common.cloud.ZkStateReader.*;
 import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
@@ -630,9 +621,9 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
   }
 
   @SuppressWarnings({"rawtypes"})
-  void cleanBackup(BackupRepository  repository, URI backupPath, BackupId backupId) throws Exception {
+  void cleanBackup(BackupRepository  repository, URI backupUri, BackupId backupId) throws Exception {
     ((DeleteBackupCmd)commandMap.get(DELETEBACKUP))
-            .deleteBackupIds(backupPath, repository, Collections.singleton(backupId), new NamedList());
+            .deleteBackupIds(backupUri, repository, Collections.singleton(backupId), new NamedList());
   }
 
   void deleteBackup(BackupRepository repository, URI backupPath,
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 26bb7d3..89d9df4 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -743,7 +743,9 @@ public class CoreContainer {
     createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
     createHandler(ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
     collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
-    containerHandlers.getApiBag().registerObject(new CollectionsAPI(collectionsHandler));
+    final CollectionsAPI collectionsAPI = new CollectionsAPI(collectionsHandler);
+    containerHandlers.getApiBag().registerObject(collectionsAPI);
+    containerHandlers.getApiBag().registerObject(collectionsAPI.collectionsCommands);
     configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
     ClusterAPI clusterAPI = new ClusterAPI(collectionsHandler, configSetsHandler);
     containerHandlers.getApiBag().registerObject(clusterAPI);
diff --git a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
index f16db6d..e8cfef5 100644
--- a/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
+++ b/solr/core/src/java/org/apache/solr/core/backup/repository/LocalFileSystemRepository.java
@@ -17,6 +17,16 @@
 
 package org.apache.solr.core.backup.repository;
 
+import com.google.common.base.Preconditions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.NIOFSDirectory;
+import org.apache.lucene.store.NoLockFactory;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.DirectoryFactory;
+
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URI;
@@ -32,23 +42,13 @@ import java.nio.file.attribute.BasicFileAttributes;
 import java.util.Collection;
 import java.util.Objects;
 
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.NIOFSDirectory;
-import org.apache.lucene.store.NoLockFactory;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.DirectoryFactory;
-
-import com.google.common.base.Preconditions;
-
 /**
  * A concrete implementation of {@linkplain BackupRepository} interface supporting backup/restore of Solr indexes to a
  * local file-system. (Note - This can even be used for a shared file-system if it is exposed via a local file-system
  * interface e.g. NFS).
  */
 public class LocalFileSystemRepository implements BackupRepository {
+
   @SuppressWarnings("rawtypes")
   private NamedList config = null;
 
diff --git a/solr/core/src/java/org/apache/solr/handler/CatStream.java b/solr/core/src/java/org/apache/solr/handler/CatStream.java
index d7f5fe6..e6d58c5 100644
--- a/solr/core/src/java/org/apache/solr/handler/CatStream.java
+++ b/solr/core/src/java/org/apache/solr/handler/CatStream.java
@@ -17,7 +17,10 @@
 
 package org.apache.solr.handler;
 
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.lang.invoke.MethodHandles;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -25,6 +28,7 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.stream.Stream;
+import java.util.zip.GZIPInputStream;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.LineIterator;
@@ -180,7 +184,12 @@ public class CatStream extends TupleStream implements Expressible {
     while (allFilesToCrawl.hasNext()) {
       closeCurrentFileIfSet();
       currentFilePath = allFilesToCrawl.next();
-      currentFileLines = FileUtils.lineIterator(currentFilePath.absolutePath.toFile(), "UTF-8");
+      File currentFile = currentFilePath.absolutePath.toFile();
+      if(currentFile.getName().endsWith(".gz")) {
+        currentFileLines = new LineIterator(new InputStreamReader(new GZIPInputStream(new FileInputStream(currentFile)), "UTF-8"));
+      } else {
+        currentFileLines = FileUtils.lineIterator(currentFile, "UTF-8");
+      }
       if (currentFileLines.hasNext()) return true;
     }
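
The new branch above wraps gzipped inputs in a GZIPInputStream before handing them to a LineIterator; the same plain-JDK reading pattern in isolation (the file path is a placeholder):

    import java.io.BufferedReader;
    import java.io.FileInputStream;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.GZIPInputStream;

    public class GzipLineReadSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder path; any gzipped text file works.
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(
                new GZIPInputStream(new FileInputStream("/tmp/data.csv.gz")),
                StandardCharsets.UTF_8))) {
          reader.lines().forEach(System.out::println); // iterate lines, as the LineIterator above does
        }
      }
    }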
 
diff --git a/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java b/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java
index ee77e3d..aee2571 100644
--- a/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java
+++ b/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java
@@ -23,9 +23,9 @@ import java.util.Map;
 import org.apache.solr.api.Command;
 import org.apache.solr.api.EndPoint;
 import org.apache.solr.api.PayloadObj;
-import org.apache.solr.client.solrj.request.beans.ClusterPropInfo;
-import org.apache.solr.client.solrj.request.beans.CreateConfigInfo;
-import org.apache.solr.client.solrj.request.beans.RateLimiterMeta;
+import org.apache.solr.client.solrj.request.beans.ClusterPropPayload;
+import org.apache.solr.client.solrj.request.beans.CreateConfigPayload;
+import org.apache.solr.client.solrj.request.beans.RateLimiterPayload;
 import org.apache.solr.cloud.OverseerConfigSetMessageHandler;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.annotation.JsonProperty;
@@ -120,7 +120,7 @@ public class ClusterAPI {
 
     @Command(name = "create")
     @SuppressWarnings("unchecked")
-    public void create(PayloadObj<CreateConfigInfo> obj) throws Exception {
+    public void create(PayloadObj<CreateConfigPayload> obj) throws Exception {
       Map<String, Object> mapVals = obj.get().toMap(new HashMap<>());
       Map<String,Object> customProps = (Map<String, Object>) mapVals.remove("properties");
       if(customProps!= null) {
@@ -223,7 +223,7 @@ public class ClusterAPI {
 
     @Command(name = "set-obj-property")
     @SuppressWarnings({"rawtypes", "unchecked"})
-    public void setObjProperty(PayloadObj<ClusterPropInfo> obj) {
+    public void setObjProperty(PayloadObj<ClusterPropPayload> obj) {
       //Not using the object directly here because the API differentiate between {name:null} and {}
       Map m = obj.getDataMap();
       ClusterProperties clusterProperties = new ClusterProperties(getCoreContainer().getZkController().getZkClient());
@@ -242,8 +242,8 @@ public class ClusterAPI {
     }
 
     @Command(name = "set-ratelimiter")
-    public void setRateLimiters(PayloadObj<RateLimiterMeta> payLoad) {
-      RateLimiterMeta rateLimiterConfig = payLoad.get();
+    public void setRateLimiters(PayloadObj<RateLimiterPayload> payLoad) {
+      RateLimiterPayload rateLimiterConfig = payLoad.get();
       ClusterProperties clusterProperties = new ClusterProperties(getCoreContainer().getZkController().getZkClient());
 
       try {
diff --git a/solr/core/src/java/org/apache/solr/handler/CollectionsAPI.java b/solr/core/src/java/org/apache/solr/handler/CollectionsAPI.java
index e02e5f2..a5b6b12 100644
--- a/solr/core/src/java/org/apache/solr/handler/CollectionsAPI.java
+++ b/solr/core/src/java/org/apache/solr/handler/CollectionsAPI.java
@@ -17,16 +17,38 @@
 
 package org.apache.solr.handler;
 
+import org.apache.commons.collections4.CollectionUtils;
+import org.apache.solr.api.Command;
 import org.apache.solr.api.EndPoint;
+import org.apache.solr.api.PayloadObj;
+import org.apache.solr.client.solrj.request.beans.BackupCollectionPayload;
+import org.apache.solr.client.solrj.request.beans.CreateAliasPayload;
+import org.apache.solr.client.solrj.request.beans.CreatePayload;
+import org.apache.solr.client.solrj.request.beans.DeleteAliasPayload;
+import org.apache.solr.client.solrj.request.beans.RestoreCollectionPayload;
+import org.apache.solr.client.solrj.request.beans.SetAliasPropertyPayload;
+import org.apache.solr.client.solrj.request.beans.V2ApiConstants;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.params.CollectionParams.CollectionAction;
 import org.apache.solr.handler.admin.CollectionsHandler;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 
-import static org.apache.solr.client.solrj.SolrRequest.METHOD.DELETE;
-import static org.apache.solr.client.solrj.SolrRequest.METHOD.GET;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.*;
+import static org.apache.solr.client.solrj.request.beans.V2ApiConstants.ROUTER_KEY;
+import static org.apache.solr.cloud.api.collections.RoutedAlias.CREATE_COLLECTION_PREFIX;
+import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_PREFIX;
+import static org.apache.solr.common.params.CollectionAdminParams.ROUTER_PREFIX;
+import static org.apache.solr.common.params.CommonParams.ACTION;
 import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.handler.ClusterAPI.wrapParams;
 import static org.apache.solr.security.PermissionNameProvider.Name.COLL_EDIT_PERM;
 import static org.apache.solr.security.PermissionNameProvider.Name.COLL_READ_PERM;
 
@@ -36,7 +58,16 @@ import static org.apache.solr.security.PermissionNameProvider.Name.COLL_READ_PER
  */
 public class CollectionsAPI {
 
-  private final CollectionsHandler collectionsHandler;
+    public static final String V2_CREATE_COLLECTION_CMD = "create";
+    public static final String V2_BACKUP_CMD = "backup-collection";
+    public static final String V2_RESTORE_CMD = "restore-collection";
+    public static final String V2_CREATE_ALIAS_CMD = "create-alias";
+    public static final String V2_SET_ALIAS_PROP_CMD = "set-alias-property";
+    public static final String V2_DELETE_ALIAS_CMD = "delete-alias";
+
+    private final CollectionsHandler collectionsHandler;
+
+  public  final CollectionsCommands collectionsCommands = new CollectionsCommands();
 
   public CollectionsAPI(CollectionsHandler collectionsHandler) {
     this.collectionsHandler = collectionsHandler;
@@ -50,11 +81,149 @@ public class CollectionsAPI {
     CollectionsHandler.CollectionOperation.LIST_OP.execute(req, rsp, collectionsHandler);
   }
 
+    @EndPoint(
+            path = {"/c", "/collections"},
+            method = POST,
+            permission = COLL_EDIT_PERM)
+    public class CollectionsCommands {
+
+        @Command(name = V2_BACKUP_CMD)
+        @SuppressWarnings("unchecked")
+        public void backupCollection(PayloadObj<BackupCollectionPayload> obj) throws Exception {
+            final Map<String, Object> v1Params = obj.get().toMap(new HashMap<>());
+            v1Params.put(ACTION, CollectionAction.BACKUP.toLower());
+
+            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
+        }
+
+        @Command(name = V2_RESTORE_CMD)
+        @SuppressWarnings("unchecked")
+        public void restoreBackup(PayloadObj<RestoreCollectionPayload> obj) throws Exception {
+            final RestoreCollectionPayload v2Body = obj.get();
+            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
+
+            v1Params.put(ACTION, CollectionAction.RESTORE.toLower());
+            if (v2Body.createCollectionParams != null && !v2Body.createCollectionParams.isEmpty()) {
+                final Map<String, Object> createCollParams = (Map<String, Object>) v1Params.remove(V2ApiConstants.CREATE_COLLECTION_KEY);
+                convertV2CreateCollectionMapToV1ParamMap(createCollParams);
+                v1Params.putAll(createCollParams);
+            }
+
+            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
+        }
+
+        @Command(name = V2_CREATE_ALIAS_CMD)
+        @SuppressWarnings("unchecked")
+        public void createAlias(PayloadObj<CreateAliasPayload> obj) throws Exception {
+            final CreateAliasPayload v2Body = obj.get();
+            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
+
+            v1Params.put(ACTION, CollectionAction.CREATEALIAS.toLower());
+            if (! CollectionUtils.isEmpty(v2Body.collections)) {
+                final String collectionsStr = String.join(",", v2Body.collections);
+                v1Params.remove(V2ApiConstants.COLLECTIONS);
+                v1Params.put(V2ApiConstants.COLLECTIONS, collectionsStr);
+            }
+            if (v2Body.router != null) {
+                Map<String, Object> routerProperties = (Map<String, Object>) v1Params.remove(V2ApiConstants.ROUTER_KEY);
+                flattenMapWithPrefix(routerProperties, v1Params, ROUTER_PREFIX);
+            }
+            if (v2Body.createCollectionParams != null && !v2Body.createCollectionParams.isEmpty()) {
+                final Map<String, Object> createCollectionMap = (Map<String, Object>) v1Params.remove(V2ApiConstants.CREATE_COLLECTION_KEY);
+                convertV2CreateCollectionMapToV1ParamMap(createCollectionMap);
+                flattenMapWithPrefix(createCollectionMap, v1Params, CREATE_COLLECTION_PREFIX);
+            }
+
+            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
+        }
+
+        @Command(name= V2_SET_ALIAS_PROP_CMD)
+        @SuppressWarnings("unchecked")
+        public void setAliasProperty(PayloadObj<SetAliasPropertyPayload> obj) throws Exception {
+            final SetAliasPropertyPayload v2Body = obj.get();
+            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
+
+            v1Params.put(ACTION, CollectionAction.ALIASPROP.toLower());
+            // Flatten "properties" map into individual prefixed params
+            final Map<String, Object> propertiesMap = (Map<String, Object>) v1Params.remove(V2ApiConstants.PROPERTIES_KEY);
+            flattenMapWithPrefix(propertiesMap, v1Params, PROPERTY_PREFIX);
+
+            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
+        }
+
+        @Command(name= V2_DELETE_ALIAS_CMD)
+        @SuppressWarnings("unchecked")
+        public void deleteAlias(PayloadObj<DeleteAliasPayload> obj) throws Exception {
+            final DeleteAliasPayload v2Body = obj.get();
+            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
+            v1Params.put(ACTION, CollectionAction.DELETEALIAS.toLower());
+
+            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
+        }
+
+        @Command(name = V2_CREATE_COLLECTION_CMD)
+        @SuppressWarnings("unchecked")
+        public void create(PayloadObj<CreatePayload> obj) throws Exception {
+            final CreatePayload v2Body = obj.get();
+            final Map<String, Object> v1Params = v2Body.toMap(new HashMap<>());
+
+            v1Params.put(ACTION, CollectionAction.CREATE.toLower());
+            convertV2CreateCollectionMapToV1ParamMap(v1Params);
+
+            collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), v1Params), obj.getResponse());
+        }
+
+        @SuppressWarnings("unchecked")
+        private void convertV2CreateCollectionMapToV1ParamMap(Map<String, Object> v2MapVals) {
+            // Keys are copied so that map can be modified as keys are looped through.
+            final Set<String> v2Keys = v2MapVals.keySet().stream().collect(Collectors.toSet());
+            for (String key : v2Keys) {
+                switch (key) {
+                    case V2ApiConstants.PROPERTIES_KEY:
+                        final Map<String, Object> propertiesMap = (Map<String, Object>) v2MapVals.remove(V2ApiConstants.PROPERTIES_KEY);
+                        flattenMapWithPrefix(propertiesMap, v2MapVals, PROPERTY_PREFIX);
+                        break;
+                    case ROUTER_KEY:
+                        final Map<String, Object> routerProperties = (Map<String, Object>) v2MapVals.remove(V2ApiConstants.ROUTER_KEY);
+                        flattenMapWithPrefix(routerProperties, v2MapVals, CollectionAdminParams.ROUTER_PREFIX);
+                        break;
+                    case V2ApiConstants.CONFIG:
+                        v2MapVals.put(CollectionAdminParams.COLL_CONF, v2MapVals.remove(V2ApiConstants.CONFIG));
+                        break;
+                    case V2ApiConstants.SHUFFLE_NODES:
+                        v2MapVals.put(CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM, v2MapVals.remove(V2ApiConstants.SHUFFLE_NODES));
+                        break;
+                    case V2ApiConstants.NODE_SET:
+                        final Object nodeSetValUncast = v2MapVals.remove(V2ApiConstants.NODE_SET);
+                        if (nodeSetValUncast instanceof String) {
+                            v2MapVals.put(CollectionAdminParams.CREATE_NODE_SET_PARAM, nodeSetValUncast);
+                        } else {
+                            final List<String> nodeSetList = (List<String>) nodeSetValUncast;
+                            final String nodeSetStr = String.join(",", nodeSetList);
+                            v2MapVals.put(CollectionAdminParams.CREATE_NODE_SET_PARAM, nodeSetStr);
+                        }
+                        break;
+                    default:
+                        break;
+                }
+            }
+        }
+
+        private void flattenMapWithPrefix(Map<String, Object> toFlatten, Map<String, Object> destination,
+                                          String additionalPrefix) {
+            if (toFlatten == null || toFlatten.isEmpty() || destination == null) {
+                return;
+            }
+
+            toFlatten.forEach((k, v) -> destination.put(additionalPrefix + k, v));
+        }
+  }
+
   @EndPoint(path = {"/c/{collection}", "/collections/{collection}"},
       method = DELETE,
       permission = COLL_EDIT_PERM)
   public void deleteCollection(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
-    req = ClusterAPI.wrapParams(req, "action",
+    req = wrapParams(req, ACTION,
         CollectionAction.DELETE.toString(),
         NAME, req.getPathTemplateValues().get(ZkStateReader.COLLECTION_PROP));
     collectionsHandler.handleRequestBody(req, rsp);
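
The new @Command methods above bind JSON command bodies POSTed to /c or /collections under the v2 API. A hedged sketch of invoking the "create" command with the JDK HttpClient — the /api/collections URL, port, and the body keys ("name", "config", "numShards") are assumptions inferred from the V2ApiConstants mapping above, not taken verbatim from this change:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class V2CreateCollectionSketch {
      public static void main(String[] args) throws Exception {
        // "create" matches V2_CREATE_COLLECTION_CMD; the body keys are illustrative guesses.
        String body = "{\"create\": {\"name\": \"demo\", \"config\": \"_default\", \"numShards\": 2}}";
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:8983/api/collections"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();
        HttpResponse<String> response =
            HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + "\n" + response.body());
      }
    }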
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java b/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
index c2c7806..5543f04 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
@@ -60,7 +60,7 @@ class BackupCoreOp implements CoreAdminHandler.CoreAdminOp {
 
       if (incremental) {
         if ("file".equals(locationUri.getScheme())) {
-          core.getCoreContainer().assertPathAllowed(Paths.get(location));
+          core.getCoreContainer().assertPathAllowed(Paths.get(locationUri));
         }
         final ShardBackupId prevShardBackupId = prevShardBackupIdStr != null ? ShardBackupId.from(prevShardBackupIdStr) : null;
         BackupFilePaths incBackupFiles = new BackupFilePaths(repository, locationUri);
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 32061c0..54aaf28 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -1028,7 +1028,6 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
               + " parameter or as a default repository property or as a cluster property.");
         }
       }
-
       boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
 
       // Check if the specified location is valid for this repository.
diff --git a/solr/core/src/java/org/apache/solr/response/JSONWriter.java b/solr/core/src/java/org/apache/solr/response/JSONWriter.java
index cef8b7d..4e17696 100644
--- a/solr/core/src/java/org/apache/solr/response/JSONWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/JSONWriter.java
@@ -45,16 +45,16 @@ public class JSONWriter extends TextResponseWriter implements JsonTextWriter {
     this.wrapperFunction = wrapperFunction;
     this.namedListStyle = namedListStyle;
   }
-  private JSONWriter(Writer writer, boolean intend, String namedListStyle) throws IOException {
-    super(writer, intend);
+  private JSONWriter(Writer writer, boolean indent, String namedListStyle) throws IOException {
+    super(writer, indent);
     this.namedListStyle = namedListStyle;
 
   }
 
   /**Strictly for testing only
    */
-  public static void write(Writer writer, boolean intend,  String namedListStyle, Object val) throws IOException {
-    JSONWriter jw = new JSONWriter(writer, intend, namedListStyle);
+  public static void write(Writer writer, boolean indent,  String namedListStyle, Object val) throws IOException {
+    JSONWriter jw = new JSONWriter(writer, indent, namedListStyle);
     jw.writeVal(null, val);
     jw.close();
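
For context, a quick usage sketch of the test-only helper whose misspelled "intend" parameter was just renamed; the "flat" namedListStyle value is an assumption made for illustration:

    import java.io.IOException;
    import java.io.StringWriter;
    import java.util.Map;
    import org.apache.solr.response.JSONWriter;

    public class JsonWriterSketch {
      public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        JSONWriter.write(out, true /* indent */, "flat", Map.of("greeting", "hello"));
        System.out.println(out);   // pretty-printed JSON; write(...) closes the writer itself
      }
    }
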
 
diff --git a/solr/core/src/java/org/apache/solr/search/MaxScoreCollector.java b/solr/core/src/java/org/apache/solr/search/MaxScoreCollector.java
index 744e576..0cd76b6 100644
--- a/solr/core/src/java/org/apache/solr/search/MaxScoreCollector.java
+++ b/solr/core/src/java/org/apache/solr/search/MaxScoreCollector.java
@@ -41,7 +41,12 @@ public class MaxScoreCollector extends SimpleCollector {
   }
 
   @Override
-  public void setScorer(Scorable scorer) {
+  public void setScorer(Scorable scorer) throws IOException {
+    if (maxScore == Float.MIN_VALUE) {
+      scorer.setMinCompetitiveScore(0f);
+    } else {
+      scorer.setMinCompetitiveScore(Math.nextUp(maxScore));
+    }
     this.scorer = scorer;
   }
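
The MaxScoreCollector change above hooks into Lucene's block-max (WAND) skipping: a collector that only needs the top score can declare everything at or below the current maximum non-competitive, so the scorer may skip whole blocks of documents. A stripped-down sketch of the pattern, not Solr's class itself:

    import java.io.IOException;
    import org.apache.lucene.search.Scorable;
    import org.apache.lucene.search.ScoreMode;
    import org.apache.lucene.search.SimpleCollector;

    final class TopScoreOnlySketch extends SimpleCollector {
      private Scorable scorer;
      private float max = Float.MIN_VALUE;   // sentinel: no score seen yet

      @Override
      public void setScorer(Scorable scorer) throws IOException {
        // Same move as above: seed the minimum competitive score for each new scorer.
        scorer.setMinCompetitiveScore(max == Float.MIN_VALUE ? 0f : Math.nextUp(max));
        this.scorer = scorer;
      }

      @Override
      public void collect(int doc) throws IOException {
        float score = scorer.score();
        if (score > max) {
          max = score;
          scorer.setMinCompetitiveScore(Math.nextUp(max));   // scores <= max are no longer interesting
        }
      }

      @Override
      public ScoreMode scoreMode() {
        return ScoreMode.TOP_SCORES;   // required for setMinCompetitiveScore to take effect
      }
    }
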
 
diff --git a/solr/core/src/java/org/apache/solr/servlet/QueryRateLimiter.java b/solr/core/src/java/org/apache/solr/servlet/QueryRateLimiter.java
index f746aca..36e5d3b 100644
--- a/solr/core/src/java/org/apache/solr/servlet/QueryRateLimiter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/QueryRateLimiter.java
@@ -22,7 +22,7 @@ import java.util.Map;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.request.beans.RateLimiterMeta;
+import org.apache.solr.client.solrj.request.beans.RateLimiterPayload;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.Utils;
@@ -51,7 +51,7 @@ public class QueryRateLimiter extends RequestRateLimiter {
       return;
     }
 
-    RateLimiterMeta rateLimiterMeta = mapper.readValue(configInput, RateLimiterMeta.class);
+    RateLimiterPayload rateLimiterMeta = mapper.readValue(configInput, RateLimiterPayload.class);
 
     constructQueryRateLimiterConfigInternal(rateLimiterMeta, rateLimiterConfig);
   }
@@ -74,7 +74,7 @@ public class QueryRateLimiter extends RequestRateLimiter {
         return rateLimiterConfig;
       }
 
-      RateLimiterMeta rateLimiterMeta = mapper.readValue(configInput, RateLimiterMeta.class);
+      RateLimiterPayload rateLimiterMeta = mapper.readValue(configInput, RateLimiterPayload.class);
 
       constructQueryRateLimiterConfigInternal(rateLimiterMeta, rateLimiterConfig);
 
@@ -88,7 +88,7 @@ public class QueryRateLimiter extends RequestRateLimiter {
     }
   }
 
-  private static void constructQueryRateLimiterConfigInternal(RateLimiterMeta rateLimiterMeta, RateLimiterConfig rateLimiterConfig) {
+  private static void constructQueryRateLimiterConfigInternal(RateLimiterPayload rateLimiterMeta, RateLimiterConfig rateLimiterConfig) {
 
     if (rateLimiterMeta == null) {
       // No Rate limiter configuration defined in clusterprops.json
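
Functionally the QueryRateLimiter change is only a rename of the SolrJ bean it binds to (RateLimiterMeta becomes RateLimiterPayload); the Jackson parsing is untouched. A sketch, assuming configInput holds the raw JSON bytes read from clusterprops.json:

    import java.io.IOException;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.solr.client.solrj.request.beans.RateLimiterPayload;

    class RateLimiterConfigSketch {
      private static final ObjectMapper mapper = new ObjectMapper();

      /** configInput: raw JSON bytes from clusterprops.json, or null when no config is present. */
      static RateLimiterPayload parse(byte[] configInput) throws IOException {
        return configInput == null ? null : mapper.readValue(configInput, RateLimiterPayload.class);
      }
    }
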
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
index 29e1799..d51f805 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
@@ -24,7 +24,7 @@
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <schemaFactory class="ClassicIndexSchemaFactory"/>
   <requestHandler name="/select" class="solr.SearchHandler" />
-  
+
   <query>
     <!-- Maximum number of clauses in a boolean query... can affect
         range or wildcard queries that expand to big boolean
@@ -54,16 +54,16 @@
       autowarmCount="0"/>
 
     <cache
-      name="user_definied_cache_XXX"
-      enabled="${user_definied_cache_XXX.enabled:false}"  
+      name="user_defined_cache_XXX"
+      enabled="${user_defined_cache_XXX.enabled:false}"
       />
     <cache
-      name="user_definied_cache_ZZZ"
-      enabled="${user_definied_cache_ZZZ.enabled:false}"  
+      name="user_defined_cache_ZZZ"
+      enabled="${user_defined_cache_ZZZ.enabled:false}"
       />
 
 
-    
+
     <!-- If true, stored fields that are not requested will be loaded lazily.
     -->
     <enableLazyFieldLoading>true</enableLazyFieldLoading>
@@ -85,6 +85,3 @@
   </initParams>
 
 </config>
-
-
-
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-memory-circuitbreaker.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-memory-circuitbreaker.xml
index 699a7bd..6ab9f89 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-memory-circuitbreaker.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-memory-circuitbreaker.xml
@@ -54,12 +54,12 @@
       autowarmCount="0"/>
 
     <cache
-      name="user_definied_cache_XXX"
-      enabled="${user_definied_cache_XXX.enabled:false}"
+      name="user_defined_cache_XXX"
+      enabled="${user_defined_cache_XXX.enabled:false}"
       />
     <cache
-      name="user_definied_cache_ZZZ"
-      enabled="${user_definied_cache_ZZZ.enabled:false}"
+      name="user_defined_cache_ZZZ"
+      enabled="${user_defined_cache_ZZZ.enabled:false}"
       />
 
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSCloudIncrementalBackupTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSCloudIncrementalBackupTest.java
index b247ae6..b552c6c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSCloudIncrementalBackupTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/LocalFSCloudIncrementalBackupTest.java
@@ -20,8 +20,9 @@ package org.apache.solr.cloud.api.collections;
 import org.junit.BeforeClass;
 
 public class LocalFSCloudIncrementalBackupTest extends AbstractIncrementalBackupTest {
-    public static final String SOLR_XML = "<solr>\n" +
+    private static final String SOLR_XML = "<solr>\n" +
             "\n" +
+            "  <str name=\"allowPaths\">ALLOWPATHS_TEMPLATE_VAL</str>\n" +
             "  <str name=\"shareSchema\">${shareSchema:false}</str>\n" +
             "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n" +
             "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n" +
@@ -57,17 +58,17 @@ public class LocalFSCloudIncrementalBackupTest extends AbstractIncrementalBackup
 
     @BeforeClass
     public static void setupClass() throws Exception {
-        configureCluster(NUM_SHARDS)// nodes
-                .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-                .withSolrXml(SOLR_XML)
-                .configure();
-
         boolean whitespacesInPath = random().nextBoolean();
         if (whitespacesInPath) {
             backupLocation = createTempDir("my backup").toAbsolutePath().toString();
         } else {
             backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
         }
+
+        configureCluster(NUM_SHARDS)// nodes
+                .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+                .withSolrXml(SOLR_XML.replace("ALLOWPATHS_TEMPLATE_VAL", backupLocation))
+                .configure();
     }
 
     @Override
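
The reordering above matters because the backup directory must be known before the cluster is configured: its absolute path is substituted into the solr.xml allowPaths template, so creating the temp dir after configureCluster(...) would leave the placeholder unresolved and the backup location outside Solr's allowed paths. In outline (names from the test itself):

    backupLocation = createTempDir(random().nextBoolean() ? "my backup" : "mybackup")
        .toAbsolutePath().toString();

    configureCluster(NUM_SHARDS)
        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
        .withSolrXml(SOLR_XML.replace("ALLOWPATHS_TEMPLATE_VAL", backupLocation))
        .configure();
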
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
index ef718e0..17f34e3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
@@ -69,9 +69,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       } else {
         req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1",2, 1, 0, 1);
       }
-      setV2(req);
       client.request(req);
-      assertV2CallsCount();
       createCollection(null, COLLECTION_NAME1, 1, 1, client, null, "conf1");
     }
 
@@ -414,8 +412,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
   private void clusterStatusZNodeVersion() throws Exception {
     String cname = "clusterStatusZNodeVersion";
     try (CloudSolrClient client = createCloudClient(null)) {
-      setV2(CollectionAdminRequest.createCollection(cname, "conf1", 1, 1)).process(client);
-      assertV2CallsCount();
+      CollectionAdminRequest.createCollection(cname, "conf1", 1, 1).process(client);
       waitForRecoveriesToFinish(cname, true);
 
       ModifiableSolrParams params = new ModifiableSolrParams();
@@ -438,9 +435,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       assertNotNull(znodeVersion);
 
       CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(cname, "shard1");
-      setV2(addReplica);
       addReplica.process(client);
-      assertV2CallsCount();
       waitForRecoveriesToFinish(cname, true);
 
       rsp = client.request(request);
diff --git a/solr/core/src/test/org/apache/solr/core/DirectoryFactoriesTest.java b/solr/core/src/test/org/apache/solr/core/DirectoryFactoriesTest.java
index 4748a6f..c033e73 100644
--- a/solr/core/src/test/org/apache/solr/core/DirectoryFactoriesTest.java
+++ b/solr/core/src/test/org/apache/solr/core/DirectoryFactoriesTest.java
@@ -75,14 +75,14 @@ public class DirectoryFactoriesTest extends SolrTestCaseJ4 {
           file.writeInt(42);
 
           // TODO: even StandardDirectoryFactory & NRTCachingDirectoryFactory can't agree on this...
-          // ... should we consider this explicitly undefinied?
+          // ... should we consider this explicitly undefined?
           // ... or should *all* Caching DirFactories consult the cache as well as the disk itself?
           // assertFalse(path + " should still not exist until file is closed", dirFac.exists(path));
           
         } // implicitly close file...
         
         // TODO: even StandardDirectoryFactory & NRTCachingDirectoryFactory can't agree on this...
-        // ... should we consider this explicitly undefinied?
+        // ... should we consider this explicitly undefined?
         // ... or should *all* Caching DirFactories consult the cache as well as the disk itself?
         // assertTrue(path + " should exist once file is closed", dirFac.exists(path));
         
diff --git a/solr/core/src/test/org/apache/solr/core/TestConfig.java b/solr/core/src/test/org/apache/solr/core/TestConfig.java
index a1404e1..ccf3114 100644
--- a/solr/core/src/test/org/apache/solr/core/TestConfig.java
+++ b/solr/core/src/test/org/apache/solr/core/TestConfig.java
@@ -124,8 +124,8 @@ public class TestConfig extends SolrTestCaseJ4 {
    System.setProperty("filterCache.enabled", "true");
    System.setProperty("queryResultCache.enabled", "true");
    System.setProperty("documentCache.enabled", "true");
-   System.setProperty("user_definied_cache_XXX.enabled","true");
-   // user_definied_cache_ZZZ.enabled defaults to false in config
+   System.setProperty("user_defined_cache_XXX.enabled","true");
+   // user_defined_cache_ZZZ.enabled defaults to false in config
    
    sc = new SolrConfig(TEST_PATH().resolve("collection1"), "solrconfig-cache-enable-disable.xml");
    assertNotNull(sc.filterCacheConfig);
@@ -134,14 +134,14 @@ public class TestConfig extends SolrTestCaseJ4 {
    //
    assertNotNull(sc.userCacheConfigs);
    assertEquals(1, sc.userCacheConfigs.size());
-   assertNotNull(sc.userCacheConfigs.get("user_definied_cache_XXX"));
+   assertNotNull(sc.userCacheConfigs.get("user_defined_cache_XXX"));
    
    // disable all the core caches (and enable both user caches) via system properties and verify
    System.setProperty("filterCache.enabled", "false");
    System.setProperty("queryResultCache.enabled", "false");
    System.setProperty("documentCache.enabled", "false");
-   System.setProperty("user_definied_cache_XXX.enabled","true");
-   System.setProperty("user_definied_cache_ZZZ.enabled","true");
+   System.setProperty("user_defined_cache_XXX.enabled","true");
+   System.setProperty("user_defined_cache_ZZZ.enabled","true");
 
    sc = new SolrConfig(TEST_PATH().resolve("collection1"), "solrconfig-cache-enable-disable.xml");
    assertNull(sc.filterCacheConfig);
@@ -150,11 +150,11 @@ public class TestConfig extends SolrTestCaseJ4 {
    //
    assertNotNull(sc.userCacheConfigs);
    assertEquals(2, sc.userCacheConfigs.size());
-   assertNotNull(sc.userCacheConfigs.get("user_definied_cache_XXX"));
-   assertNotNull(sc.userCacheConfigs.get("user_definied_cache_ZZZ"));
+   assertNotNull(sc.userCacheConfigs.get("user_defined_cache_XXX"));
+   assertNotNull(sc.userCacheConfigs.get("user_defined_cache_ZZZ"));
    
-   System.clearProperty("user_definied_cache_XXX.enabled");
-   System.clearProperty("user_definied_cache_ZZZ.enabled");
+   System.clearProperty("user_defined_cache_XXX.enabled");
+   System.clearProperty("user_defined_cache_ZZZ.enabled");
    System.clearProperty("filterCache.enabled");
    System.clearProperty("queryResultCache.enabled");
    System.clearProperty("documentCache.enabled");
diff --git a/solr/core/src/test/org/apache/solr/handler/TestIncrementalCoreBackup.java b/solr/core/src/test/org/apache/solr/handler/TestIncrementalCoreBackup.java
index 531b1be..5035827 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestIncrementalCoreBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestIncrementalCoreBackup.java
@@ -31,9 +31,9 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.Arrays;
 
@@ -62,21 +62,22 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
         assertQ(req("q", "id:2"), "//result[@numFound='0']");
 
         //call backup
-        final URI location = createAndBootstrapLocationForBackup();
+        final Path locationPath = createBackupLocation();
+        final URI locationUri = bootstrapBackupLocation(locationPath);
         final ShardBackupId shardBackupId = new ShardBackupId("shard1", BackupId.zero());
 
         final CoreContainer cores = h.getCoreContainer();
-        cores.getAllowPaths().add(Paths.get(location));
+        cores.getAllowPaths().add(Paths.get(locationUri));
         try (final CoreAdminHandler admin = new CoreAdminHandler(cores)) {
             SolrQueryResponse resp = new SolrQueryResponse();
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, shardBackupId.getIdAsString())
                             , resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(location, shardBackupId);
+            simpleBackupCheck(locationUri, shardBackupId);
         }
     }
 
@@ -100,7 +101,8 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
 
         final CoreContainer cores = h.getCoreContainer();
         final CoreAdminHandler admin = new CoreAdminHandler(cores);
-        final URI location = createAndBootstrapLocationForBackup();
+        final Path locationPath = createBackupLocation();
+        final URI locationUri = bootstrapBackupLocation(locationPath);
 
         final ShardBackupId firstShardBackup = new ShardBackupId("shard1", BackupId.zero());
         { // first a backup before we've ever done *anything*...
@@ -108,11 +110,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, firstShardBackup.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(location, firstShardBackup, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(locationUri, firstShardBackup, initialEmptyIndexSegmentFileName);
         }
 
         { // Empty (named) snapshot..
@@ -133,11 +135,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, secondShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(location, secondShardBackupId, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(locationUri, secondShardBackupId, initialEmptyIndexSegmentFileName);
         }
 
         { // Second empty (named) snapshot..
@@ -154,7 +156,7 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
         assertU(commit());
 
         for (ShardBackupId shardBackupId: Arrays.asList(firstShardBackup, secondShardBackupId)) {
-            simpleBackupCheck(location, shardBackupId, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(locationUri, shardBackupId, initialEmptyIndexSegmentFileName);
         }
 
         // Make backups from each of the snapshots and check they are still empty as well...
@@ -165,11 +167,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "commitName", "empty_snapshotA",
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, thirdShardBackup.getIdAsString()),
                             resp);
             assertNull("Backup from snapshot empty_snapshotA should have succeeded", resp.getException());
-            simpleBackupCheck(location, thirdShardBackup, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(locationUri, thirdShardBackup, initialEmptyIndexSegmentFileName);
         }
         {
             final ShardBackupId fourthShardBackup = new ShardBackupId("shard1", new BackupId(3));
@@ -178,11 +180,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "commitName", "empty_snapshotB",
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, fourthShardBackup.getIdAsString()),
                             resp);
             assertNull("Backup from snapshot empty_snapshotB should have succeeded", resp.getException());
-            simpleBackupCheck(location, fourthShardBackup, initialEmptyIndexSegmentFileName);
+            simpleBackupCheck(locationUri, fourthShardBackup, initialEmptyIndexSegmentFileName);
         }
         admin.close();
     }
@@ -209,7 +211,8 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
 
         final CoreContainer cores = h.getCoreContainer();
         final CoreAdminHandler admin = new CoreAdminHandler(cores);
-        final URI location = createAndBootstrapLocationForBackup();
+        final Path locationPath = createBackupLocation();
+        final URI locationUri = bootstrapBackupLocation(locationPath);
 
         final ShardBackupId firstShardBackupId = new ShardBackupId("shard1", BackupId.zero());
         { // take an initial 'backup1a' containing our 1 document
@@ -218,11 +221,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "name", "backup1a",
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, firstShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(location, firstShardBackupId, oneDocSegmentFile);
+            simpleBackupCheck(locationUri, firstShardBackupId, oneDocSegmentFile);
         }
 
         { // and an initial "snapshot1a' that should eventually match
@@ -251,11 +254,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, secondShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
-            simpleBackupCheck(location, secondShardBackupId, oneDocSegmentFile);
+            simpleBackupCheck(locationUri, secondShardBackupId, oneDocSegmentFile);
         }
 
         { // and a second "snapshot1b' should also still be identical
@@ -270,8 +273,8 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
 
         // Hard Committing the 2nd doc now should not affect the existing backups or snapshots...
         assertU(commit());
-        simpleBackupCheck(location, firstShardBackupId, oneDocSegmentFile); // backup1a
-        simpleBackupCheck(location, secondShardBackupId, oneDocSegmentFile); // backup1b
+        simpleBackupCheck(locationUri, firstShardBackupId, oneDocSegmentFile); // backup1a
+        simpleBackupCheck(locationUri, secondShardBackupId, oneDocSegmentFile); // backup1b
 
         final ShardBackupId thirdShardBackupId = new ShardBackupId("shard1", new BackupId(2));
         { // But we should be able to confirm both docs appear in a new backup (not based on a previous snapshot)
@@ -279,13 +282,13 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
             admin.handleRequestBody
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, thirdShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup should have succeeded", resp.getException());
             // TODO This doesn't actually check that backup has both docs!  Can we do better than this without doing a full restore?
             // Maybe validate the new segments_X file at least to show that it's picked up the latest commit?
-            simpleBackupCheck(location, thirdShardBackupId);
+            simpleBackupCheck(locationUri, thirdShardBackupId);
         }
 
         // if we go back and create backups from our earlier snapshots they should still only
@@ -298,11 +301,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "commitName", "snapshot1a",
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, fourthShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup of snapshot1a should have succeeded", resp.getException());
-            simpleBackupCheck(location, fourthShardBackupId, oneDocSegmentFile);
+            simpleBackupCheck(locationUri, fourthShardBackupId, oneDocSegmentFile);
         }
         final ShardBackupId fifthShardBackupId = new ShardBackupId("shard1", new BackupId(4));
         {
@@ -311,11 +314,11 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
                     (req(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString(),
                             "core", DEFAULT_TEST_COLLECTION_NAME,
                             "commitName", "snapshot1b",
-                            "location", location.getPath(),
+                            "location", locationPath.toString(),
                             CoreAdminParams.SHARD_BACKUP_ID, fifthShardBackupId.getIdAsString()),
                             resp);
             assertNull("Backup of snapshot1b should have succeeded", resp.getException());
-            simpleBackupCheck(location, fifthShardBackupId, oneDocSegmentFile);
+            simpleBackupCheck(locationUri, fifthShardBackupId, oneDocSegmentFile);
         }
 
         admin.close();
@@ -353,15 +356,18 @@ public class TestIncrementalCoreBackup extends SolrTestCaseJ4 {
         }
     }
 
-    private URI createAndBootstrapLocationForBackup() throws IOException {
-        final File locationFile = createTempDir().toFile();
-        final String location = locationFile.getAbsolutePath();
+    private Path createBackupLocation() {
+        return createTempDir().toAbsolutePath();
+    }
 
-        h.getCoreContainer().getAllowPaths().add(locationFile.toPath());
+    private URI bootstrapBackupLocation(Path locationPath) throws IOException {
+        final String locationPathStr = locationPath.toString();
+        h.getCoreContainer().getAllowPaths().add(locationPath);
         try (BackupRepository backupRepo = h.getCoreContainer().newBackupRepository(null)) {
-            final BackupFilePaths backupFilePaths = new BackupFilePaths(backupRepo, backupRepo.createURI(location));
+            final URI locationUri = backupRepo.createURI(locationPathStr);
+            final BackupFilePaths backupFilePaths = new BackupFilePaths(backupRepo, locationUri);
             backupFilePaths.createIncrementalBackupFolders();
-            return backupRepo.createURI(location);
+            return locationUri;
         }
     }
 }
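
The helper split above keeps the two shapes of the backup location apart: the plain filesystem path feeds the core-admin "location" parameter, while the repository URI from BackupRepository.createURI(...) is what allowPaths registration, incremental folder bootstrapping, and simpleBackupCheck(...) work against. In outline:

    Path locationPath = createBackupLocation();                 // temp dir, as a Path
    URI locationUri = bootstrapBackupLocation(locationPath);    // allowPaths + incremental folder layout

    // core-admin request parameters use the plain path:
    //     "location", locationPath.toString()
    // verification runs against the repository URI:
    //     simpleBackupCheck(locationUri, shardBackupId);
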
diff --git a/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java b/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
index 871d349..0e507a4 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
@@ -17,11 +17,6 @@
 
 package org.apache.solr.handler;
 
-import java.io.File;
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -32,26 +27,31 @@ import org.apache.solr.client.solrj.response.UpdateResponse;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.util.LogLevel;
 import org.junit.After;
 import org.junit.Before;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.lang.invoke.MethodHandles;
+import java.nio.file.Path;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
 import static org.apache.solr.handler.TestStressThreadBackup.makeDoc;
 
 //@LuceneTestCase.Nightly
 @LuceneTestCase.SuppressCodecs({"SimpleText"})
-@LogLevel("org.apache.solr.handler.SnapShooter=DEBUG;org.apache.solr.core.IndexDeletionPolicyWrapper=DEBUG")
 public class TestStressIncrementalBackup extends SolrCloudTestCase {
     private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-    private File backupDir;
+    private Path backupPath;
     private SolrClient adminClient;
     private SolrClient coreClient;
+    
     @Before
     public void beforeTest() throws Exception {
-        backupDir = createTempDir(getTestClass().getSimpleName() + "_backups").toFile();
+        backupPath = createTempDir(getTestClass().getSimpleName() + "_backups");
+        System.setProperty("solr.allowPaths", backupPath.toString());
 
         // NOTE: we don't actually care about using SolrCloud, but we want to use SolrClient and I can't
         // bring myself to deal with the nonsense that is SolrJettyTestBase.
@@ -87,6 +87,8 @@ public class TestStressIncrementalBackup extends SolrCloudTestCase {
         if (null != coreClient) {
             coreClient.close();
         }
+
+        System.clearProperty("solr.allowPaths");
     }
 
     public void testCoreAdminHandler() throws Exception {
@@ -158,7 +160,7 @@ public class TestStressIncrementalBackup extends SolrCloudTestCase {
 
     public void makeBackup() throws Exception {
         CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(DEFAULT_TEST_COLLECTION_NAME, "stressBackup")
-                .setLocation(backupDir.getAbsolutePath())
+                .setLocation(backupPath.toString())
                 .setIncremental(true)
                 .setMaxNumberBackupPoints(5);
         if (random().nextBoolean()) {
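
Because backups now land in a temp directory outside SOLR_HOME, the test whitelists it through the solr.allowPaths system property before the cluster starts and clears it in teardown. The same pattern in isolation:

    Path backupPath = createTempDir(getTestClass().getSimpleName() + "_backups");
    System.setProperty("solr.allowPaths", backupPath.toString());   // allow Solr to write backups there
    try {
      // start the mini cluster, index documents, then run
      // CollectionAdminRequest.backupCollection(...).setLocation(backupPath.toString()).setIncremental(true)
    } finally {
      System.clearProperty("solr.allowPaths");   // avoid leaking the property into other tests
    }
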
diff --git a/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java b/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
index 8c7128b..f8f33a3 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
@@ -37,7 +37,6 @@ import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.params.UpdateParams;
 import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.util.LogLevel;
 import org.apache.solr.util.TimeOut;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -63,7 +62,6 @@ import java.util.regex.Pattern;
 
 @Nightly
 @SuppressCodecs({"SimpleText"})
-@LogLevel("org.apache.solr.handler.SnapShooter=DEBUG;org.apache.solr.core.IndexDeletionPolicyWrapper=DEBUG")
 public class TestStressThreadBackup extends SolrCloudTestCase {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
@@ -369,7 +367,8 @@ public class TestStressThreadBackup extends SolrCloudTestCase {
     public void makeBackup(final String backupName, final String snapName) throws Exception {
       ModifiableSolrParams p = params(CoreAdminParams.CORE, coreName,
                                       CoreAdminParams.NAME, backupName,
-                                      CoreAdminParams.BACKUP_LOCATION, backupDir.getAbsolutePath());
+                                      CoreAdminParams.BACKUP_LOCATION, backupDir.getAbsolutePath(),
+                                      CoreAdminParams.BACKUP_INCREMENTAL, "false");
       if (null != snapName) {
         p.add(CoreAdminParams.COMMIT_NAME, snapName);
       }
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java b/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
index a9b6e9b..ee6183b 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
@@ -18,7 +18,6 @@
 package org.apache.solr.handler.admin;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
@@ -28,17 +27,12 @@ import org.apache.lucene.util.IOUtils;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.GenericSolrRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.SimpleSolrResponse;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -97,33 +91,6 @@ public class AdminHandlersProxyTest extends SolrCloudTestCase {
     assertNotNull(((NamedList)nl.get(nl.getName(1))).get("metrics"));
   }
 
-  @Test
-  @BadApple(bugUrl = "https://issues.apache.org/jira/browse/SOLR-15011")
-  public void proxyLoggingHandlerAllNodes() throws IOException, SolrServerException {
-    CollectionAdminRequest.createCollection("collection", "conf", 2, 2).process(solrClient);
-    ModifiableSolrParams mparams = new ModifiableSolrParams();
-
-    mparams.set(CommonParams.QT, "/admin/logging");
-    mparams.set("nodes", "all");
-    mparams.set("set", "com.codahale.metrics.jmx.JmxReporter:WARN");
-    solrClient.query("collection", mparams, SolrRequest.METHOD.GET);
-
-    Set<String> nodes = solrClient.getClusterStateProvider().getLiveNodes();
-    nodes.forEach(node -> {
-      mparams.clear();
-      mparams.set(CommonParams.QT, "/admin/logging");
-      mparams.set("nodes", node);
-      QueryResponse rsp = null;
-      try {
-        rsp = solrClient.query("collection", mparams, SolrRequest.METHOD.GET);
-      } catch (Exception e) {
-        fail("Exception while proxying request to node " + node);
-      }
-      NamedList<Object> nl = rsp.getResponse();
-      assertEquals("WARN", ((SimpleOrderedMap) ((ArrayList)nl.get("loggers")).get(5)).get("level"));
-    });
-  }
-
   @Test(expected = SolrException.class)
   public void proxySystemInfoHandlerNonExistingNode() throws IOException, SolrServerException {
     MapSolrParams params = new MapSolrParams(Collections.singletonMap("nodes", "example.com:1234_solr"));
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java b/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
index f423f0f..cb3562c 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
@@ -84,7 +84,9 @@ public class TestCollectionAPIs extends SolrTestCaseJ4 {
     ApiBag apiBag;
     try (MockCollectionsHandler collectionsHandler = new MockCollectionsHandler()) {
       apiBag = new ApiBag(false);
+      final CollectionsAPI collectionsAPI = new CollectionsAPI(collectionsHandler);
       apiBag.registerObject(new CollectionsAPI(collectionsHandler));
+      apiBag.registerObject(collectionsAPI.collectionsCommands);
       Collection<Api> apis = collectionsHandler.getApis();
       for (Api api : apis) apiBag.register(api, Collections.emptyMap());
 
@@ -112,7 +114,7 @@ public class TestCollectionAPIs extends SolrTestCaseJ4 {
 
 
     compareOutput(apiBag, "/collections", POST,
-        "{create-alias:{name: aliasName , collections:[c1,c2] }}", null, "{operation : createalias, name: aliasName, collections:[c1,c2] }");
+        "{create-alias:{name: aliasName , collections:[c1,c2] }}", null, "{operation : createalias, name: aliasName, collections:\"c1,c2\" }");
 
     compareOutput(apiBag, "/collections", POST,
         "{delete-alias:{ name: aliasName}}", null, "{operation : deletealias, name: aliasName}");
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/V2CollectionsAPIMappingTest.java b/solr/core/src/test/org/apache/solr/handler/admin/V2CollectionsAPIMappingTest.java
new file mode 100644
index 0000000..689073c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/handler/admin/V2CollectionsAPIMappingTest.java
@@ -0,0 +1,293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.admin;
+
+import com.google.common.collect.Maps;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.api.Api;
+import org.apache.solr.api.ApiBag;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.api.collections.CategoryRoutedAlias;
+import org.apache.solr.cloud.api.collections.RoutedAlias;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.CommandOperation;
+import org.apache.solr.common.util.ContentStreamBase;
+import org.apache.solr.core.backup.BackupManager;
+import org.apache.solr.handler.CollectionsAPI;
+import org.apache.solr.request.LocalSolrQueryRequest;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.apache.solr.common.params.CommonParams.ACTION;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Unit tests for the API mappings found in {@link org.apache.solr.handler.CollectionsAPI}.
+ *
+ * This test bears many similarities to {@link TestCollectionAPIs} which appears to test the mappings indirectly by
+ * checking messages sent to the ZK overseer (which is similar, but not identical to the v1 param list).  If there's no
+ * particular benefit to testing the mappings in this way (there very well may be), then we should combine these two
+ * test classes at some point in the future using the simpler approach here.
+ *
+ * Note that the V2 requests made by these tests are not necessarily semantically valid.  They shouldn't be taken as
+ * examples. In several instances, mutually exclusive JSON parameters are provided.  This is done to exercise conversion
+ * of all parameters, even if particular combinations are never expected in the same request.
+ */
+public class V2CollectionsAPIMappingTest extends SolrTestCaseJ4 {
+
+    private ApiBag apiBag;
+
+    private ArgumentCaptor<SolrQueryRequest> queryRequestCaptor;
+    private CollectionsHandler mockCollectionsHandler;
+
+    @BeforeClass
+    public static void ensureWorkingMockito() {
+        assumeWorkingMockito();
+    }
+
+    @Before
+    public void setupApiBag() throws Exception {
+        mockCollectionsHandler = mock(CollectionsHandler.class);
+        queryRequestCaptor = ArgumentCaptor.forClass(SolrQueryRequest.class);
+
+        apiBag = new ApiBag(false);
+        final CollectionsAPI collectionsAPI = new CollectionsAPI(mockCollectionsHandler);
+        apiBag.registerObject(collectionsAPI);
+        apiBag.registerObject(collectionsAPI.collectionsCommands);
+    }
+
+    @Test
+    public void testCreateCollectionAllProperties() throws Exception {
+        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
+                "{'create': {" +
+                        "'name': 'techproducts', " +
+                        "'config':'_default', " +
+                        "'router': {'name': 'composite', 'field': 'routeField', 'foo': 'bar'}, " +
+                        "'shards': 'customShardName,anotherCustomShardName', " +
+                        "'replicationFactor': 3," +
+                        "'nrtReplicas': 1, " +
+                        "'tlogReplicas': 1, " +
+                        "'pullReplicas': 1, " +
+                        "'nodeSet': ['localhost:8983_solr', 'localhost:7574_solr']," +
+                        "'shuffleNodes': true," +
+                        "'properties': {'foo': 'bar', 'foo2': 'bar2'}, " +
+                        "'async': 'requestTrackingId', " +
+                        "'waitForFinalState': false, " +
+                        "'perReplicaState': false," +
+                        "'numShards': 1}}");
+
+        assertEquals(CollectionParams.CollectionAction.CREATE.lowerName, v1Params.get(ACTION));
+        assertEquals("techproducts", v1Params.get(CommonParams.NAME));
+        assertEquals("_default", v1Params.get(CollectionAdminParams.COLL_CONF));
+        assertEquals("composite", v1Params.get("router.name"));
+        assertEquals("routeField", v1Params.get("router.field"));
+        assertEquals("bar", v1Params.get("router.foo"));
+        assertEquals("customShardName,anotherCustomShardName", v1Params.get(ShardParams.SHARDS));
+        assertEquals(3, v1Params.getPrimitiveInt(ZkStateReader.REPLICATION_FACTOR));
+        assertEquals(1, v1Params.getPrimitiveInt(ZkStateReader.NRT_REPLICAS));
+        assertEquals(1, v1Params.getPrimitiveInt(ZkStateReader.TLOG_REPLICAS));
+        assertEquals(1, v1Params.getPrimitiveInt(ZkStateReader.PULL_REPLICAS));
+        assertEquals("localhost:8983_solr,localhost:7574_solr", v1Params.get(CollectionAdminParams.CREATE_NODE_SET_PARAM));
+        assertEquals(true, v1Params.getPrimitiveBool(CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM));
+        assertEquals("bar", v1Params.get("property.foo"));
+        assertEquals("bar2", v1Params.get("property.foo2"));
+        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
+        assertEquals(false, v1Params.getPrimitiveBool(CommonAdminParams.WAIT_FOR_FINAL_STATE));
+        assertEquals(false, v1Params.getPrimitiveBool(DocCollection.PER_REPLICA_STATE));
+        assertEquals(1, v1Params.getPrimitiveInt(CollectionAdminParams.NUM_SHARDS));
+    }
+
+    @Test
+    public void testCreateAliasAllProperties() throws Exception {
+        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
+                "{'create-alias': {" +
+                        "'name': 'aliasName', " +
+                        "'collections': ['techproducts1', 'techproducts2'], " +
+                        "'tz': 'someTimeZone', " +
+                        "'async': 'requestTrackingId', " +
+                        "'router': {" +
+                        "    'name': 'time', " +
+                        "    'field': 'date_dt', " +
+                        "    'interval': '+1HOUR', " +
+                        "     'maxFutureMs': 3600, " +
+                        "     'preemptiveCreateMath': 'somePreemptiveCreateMathString', " +
+                        "     'autoDeleteAge': 'someAutoDeleteAgeExpression', " +
+                        "     'maxCardinality': 36, " +
+                        "     'mustMatch': 'someRegex', " +
+                        "}, " +
+                        "'create-collection': {" +
+                        "     'numShards': 1, " +
+                        "     'properties': {'foo': 'bar', 'foo2': 'bar2'}, " +
+                        "     'replicationFactor': 3 " +
+                        "}" +
+                        "}}");
+
+        assertEquals(CollectionParams.CollectionAction.CREATEALIAS.lowerName, v1Params.get(ACTION));
+        assertEquals("aliasName", v1Params.get(CommonParams.NAME));
+        assertEquals("techproducts1,techproducts2", v1Params.get("collections"));
+        assertEquals("someTimeZone", v1Params.get(CommonParams.TZ.toLowerCase(Locale.ROOT)));
+        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
+        assertEquals("time", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_TYPE_NAME));
+        assertEquals("date_dt", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_FIELD));
+        assertEquals("+1HOUR", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_INTERVAL));
+        assertEquals(3600, v1Params.getPrimitiveInt(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_MAX_FUTURE));
+        assertEquals("somePreemptiveCreateMathString", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_PREEMPTIVE_CREATE_WINDOW));
+        assertEquals("someAutoDeleteAgeExpression", v1Params.get(CollectionAdminRequest.CreateTimeRoutedAlias.ROUTER_AUTO_DELETE_AGE));
+        assertEquals(36, v1Params.getPrimitiveInt(CategoryRoutedAlias.ROUTER_MAX_CARDINALITY));
+        assertEquals("someRegex", v1Params.get(CategoryRoutedAlias.ROUTER_MUST_MATCH));
+        assertEquals(1, v1Params.getPrimitiveInt(RoutedAlias.CREATE_COLLECTION_PREFIX + CollectionAdminParams.NUM_SHARDS));
+        assertEquals("bar", v1Params.get(RoutedAlias.CREATE_COLLECTION_PREFIX + "property.foo"));
+        assertEquals("bar2", v1Params.get(RoutedAlias.CREATE_COLLECTION_PREFIX + "property.foo2"));
+        assertEquals(3, v1Params.getPrimitiveInt(RoutedAlias.CREATE_COLLECTION_PREFIX + ZkStateReader.REPLICATION_FACTOR));
+    }
+
+    @Test
+    public void testDeleteAliasAllProperties() throws Exception {
+        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
+                "{'delete-alias': {" +
+                        "'name': 'aliasName', " +
+                        "'async': 'requestTrackingId'" +
+                        "}}");
+
+        assertEquals(CollectionParams.CollectionAction.DELETEALIAS.lowerName, v1Params.get(ACTION));
+        assertEquals("aliasName", v1Params.get(CommonParams.NAME));
+        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
+    }
+
+    @Test
+    public void testSetAliasAllProperties() throws Exception {
+        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
+                "{'set-alias-property': {" +
+                        "'name': 'aliasName', " +
+                        "'async': 'requestTrackingId', " +
+                        "'properties': {'foo':'bar', 'foo2':'bar2'}" +
+                        "}}");
+
+        assertEquals(CollectionParams.CollectionAction.ALIASPROP.lowerName, v1Params.get(ACTION));
+        assertEquals("aliasName", v1Params.get(CommonParams.NAME));
+        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
+        assertEquals("bar", v1Params.get("property.foo"));
+        assertEquals("bar2", v1Params.get("property.foo2"));
+    }
+
+    @Test
+    public void testBackupAllProperties() throws Exception {
+        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
+                "{'backup-collection': {" +
+                        "'name': 'backupName', " +
+                        "'collection': 'collectionName', " +
+                        "'location': '/some/location/uri', " +
+                        "'repository': 'someRepository', " +
+                        "'followAliases': true, " +
+                        "'indexBackup': 'copy-files', " +
+                        "'commitName': 'someSnapshotName', " +
+                        "'incremental': true, " +
+                        "'async': 'requestTrackingId' " +
+                        "}}");
+
+        assertEquals(CollectionParams.CollectionAction.BACKUP.lowerName, v1Params.get(ACTION));
+        assertEquals("backupName", v1Params.get(CommonParams.NAME));
+        assertEquals("collectionName", v1Params.get(BackupManager.COLLECTION_NAME_PROP));
+        assertEquals("/some/location/uri", v1Params.get(CoreAdminParams.BACKUP_LOCATION));
+        assertEquals("someRepository", v1Params.get(CoreAdminParams.BACKUP_REPOSITORY));
+        assertEquals(true, v1Params.getPrimitiveBool(CollectionAdminParams.FOLLOW_ALIASES));
+        assertEquals("copy-files", v1Params.get(CollectionAdminParams.INDEX_BACKUP_STRATEGY));
+        assertEquals("someSnapshotName", v1Params.get(CoreAdminParams.COMMIT_NAME));
+        assertEquals(true, v1Params.getPrimitiveBool(CoreAdminParams.BACKUP_INCREMENTAL));
+        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
+    }
+
+    @Test
+    public void testRestoreAllProperties() throws Exception {
+        final SolrParams v1Params = captureConvertedV1Params("/collections", "POST",
+                "{'restore-collection': {" +
+                        "'name': 'backupName', " +
+                        "'collection': 'collectionName', " +
+                        "'location': '/some/location/uri', " +
+                        "'repository': 'someRepository', " +
+                        "'backupId': 123, " +
+                        "'async': 'requestTrackingId', " +
+                        "'create-collection': {" +
+                        "     'numShards': 1, " +
+                        "     'properties': {'foo': 'bar', 'foo2': 'bar2'}, " +
+                        "     'replicationFactor': 3 " +
+                        "}" +
+                        "}}");
+
+        assertEquals(CollectionParams.CollectionAction.RESTORE.lowerName, v1Params.get(ACTION));
+        assertEquals("backupName", v1Params.get(CommonParams.NAME));
+        assertEquals("collectionName", v1Params.get(BackupManager.COLLECTION_NAME_PROP));
+        assertEquals("/some/location/uri", v1Params.get(CoreAdminParams.BACKUP_LOCATION));
+        assertEquals("someRepository", v1Params.get(CoreAdminParams.BACKUP_REPOSITORY));
+        assertEquals(123, v1Params.getPrimitiveInt(CoreAdminParams.BACKUP_ID));
+        assertEquals("requestTrackingId", v1Params.get(CommonAdminParams.ASYNC));
+        // NOTE: Unlike other v2 APIs that have a nested object for collection-creation params, restore's v1 equivalent
+        // for these properties doesn't have a "create-collection." prefix.
+        assertEquals(1, v1Params.getPrimitiveInt(CollectionAdminParams.NUM_SHARDS));
+        assertEquals("bar", v1Params.get("property.foo"));
+        assertEquals("bar2", v1Params.get("property.foo2"));
+        assertEquals(3, v1Params.getPrimitiveInt(ZkStateReader.REPLICATION_FACTOR));
+    }
+
+    private SolrParams captureConvertedV1Params(String path, String method, String v2RequestBody) throws Exception {
+        final HashMap<String, String> parts = new HashMap<>();
+        final Api api = apiBag.lookup(path, method, parts);
+        final SolrQueryResponse rsp = new SolrQueryResponse();
+        final LocalSolrQueryRequest req = new LocalSolrQueryRequest(null, Maps.newHashMap()) {
+            @Override
+            public List<CommandOperation> getCommands(boolean validateInput) {
+                if (v2RequestBody == null) return Collections.emptyList();
+                return ApiBag.getCommandOperations(new ContentStreamBase.StringStream(v2RequestBody), api.getCommandSchema(), true);
+            }
+
+            @Override
+            public Map<String, String> getPathTemplateValues() {
+                return parts;
+            }
+
+            @Override
+            public String getHttpMethod() {
+                return method;
+            }
+        };
+
+
+        api.call(req, rsp);
+        verify(mockCollectionsHandler).handleRequestBody(queryRequestCaptor.capture(), any());
+        return queryRequestCaptor.getValue().getParams();
+    }
+}
diff --git a/solr/solr-ref-guide/src/_layouts/default.html b/solr/solr-ref-guide/src/_layouts/default.html
index 5b929bc..75b66b2 100755
--- a/solr/solr-ref-guide/src/_layouts/default.html
+++ b/solr/solr-ref-guide/src/_layouts/default.html
@@ -1,5 +1,5 @@
 <!DOCTYPE html>
-{% comment %}NOTE: page_id is also definied in page.html{% endcomment %}
+{% comment %}NOTE: page_id is also defined in page.html{% endcomment %}
 {% assign page_id = page.url | split: '/' | last | remove: '.html' %}
 <head>
     {% include head.html %}
diff --git a/solr/solr-ref-guide/src/_layouts/home.html b/solr/solr-ref-guide/src/_layouts/home.html
index e5b7119..c1d9cb6 100644
--- a/solr/solr-ref-guide/src/_layouts/home.html
+++ b/solr/solr-ref-guide/src/_layouts/home.html
@@ -7,7 +7,7 @@ layout: default
      Its main difference is that it uses a different class for the content
      container in order to have freedom to customize it as needed. -->
 
-{% comment %}NOTE: page_id is also definied in default.html{% endcomment %}
+{% comment %}NOTE: page_id is also defined in default.html{% endcomment %}
 {% assign page_id = page.url | split: '/' | last | remove: '.html' %}
 
 <div class="homepage container">
diff --git a/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc b/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
index e554c15..5350fe5 100644
--- a/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
+++ b/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
@@ -164,6 +164,15 @@ See for example https://lucene.apache.org/solr/guide/8_5/cluster-node-management
 
 == Configuration and Default Parameter Changes in Solr 9
 
+*base_url removed from stored state*
+
+If you're able to upgrade SolrJ to 8.8.x for all of your client applications, then you can set `-Dsolr.storeBaseUrl=false` (introduced in Solr 8.8.1)
+to better align the stored state in ZooKeeper with future versions of Solr; as of Solr 9.x, the `base_url` will no longer be
+persisted in stored state. However, if you are not able to upgrade SolrJ to 8.8.x for all client applications, then you should
+set `-Dsolr.storeBaseUrl=true` so that Solr will continue to store the `base_url` in ZooKeeper. For background, see SOLR-12182 and SOLR-15145.
+
+Support for the `solr.storeBaseUrl` system property will be removed in Solr 10.x and `base_url` will no longer be stored.
+
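For illustration only (not part of this patch), here is a minimal Java sketch of how such an opt-in property is read; it mirrors the `ZkNodeProps` change further down in this commit, and the class name below is made up:

    // Sketch: Solr 9.x defaults solr.storeBaseUrl to false, so base_url is dropped from
    // stored state unless the operator explicitly opts back in with -Dsolr.storeBaseUrl=true.
    public class StoreBaseUrlFlagSketch {
      static final boolean STORE_BASE_URL =
          Boolean.parseBoolean(System.getProperty("solr.storeBaseUrl", "false"));

      public static void main(String[] args) {
        System.out.println("base_url kept in stored state: " + STORE_BASE_URL);
      }
    }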
 === Schema Changes in 9
 
 === Authentication & Security Changes in Solr 9
diff --git a/solr/solr-ref-guide/src/parallel-sql-interface.adoc b/solr/solr-ref-guide/src/parallel-sql-interface.adoc
index e1ddd4f..95c7584 100644
--- a/solr/solr-ref-guide/src/parallel-sql-interface.adoc
+++ b/solr/solr-ref-guide/src/parallel-sql-interface.adoc
@@ -421,7 +421,7 @@ A step-by-step guide for setting up https://www.dbvis.com/[DbVisualizer] is in t
 
 A step-by-step guide for setting up http://squirrel-sql.sourceforge.net[SQuirreL SQL] is in the section <<solr-jdbc-squirrel-sql.adoc#,Solr JDBC - SQuirreL SQL>>.
 
-=== Apache Zeppelin (incubating)
+=== Apache Zeppelin
 
 A step-by-step guide for setting up http://zeppelin.apache.org/[Apache Zeppelin] is in the section <<solr-jdbc-apache-zeppelin.adoc#,Solr JDBC - Apache Zeppelin>>.
 
diff --git a/solr/solr-ref-guide/src/solr-upgrade-notes.adoc b/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
index 3bfbf02..0c52226 100644
--- a/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
+++ b/solr/solr-ref-guide/src/solr-upgrade-notes.adoc
@@ -100,6 +100,16 @@ The default Prometheus Exporter configuration includes metrics like queries-per-
 Plugin developers using `SolrPaths.locateSolrHome()` or 'new `SolrResourceLoader`' should check deprecation warnings, as some existing functionality will be removed in 9.0.
 https://issues.apache.org/jira/browse/SOLR-14934[SOLR-14934] has more technical details about this change for those concerned.
 
+*base_url removed from stored state*
+
+As of Solr 8.8.0, the `base_url` property was removed from the stored state for replicas (SOLR-12182). If you're able to upgrade SolrJ to 8.8.x
+for all of your client applications, then you can set `-Dsolr.storeBaseUrl=false` (introduced in Solr 8.8.1) to better align the stored state
+in ZooKeeper with future versions of Solr. However, if you are not able to upgrade SolrJ to 8.8.x for all client applications,
+then leave the default `-Dsolr.storeBaseUrl=true` so that Solr will continue to store the `base_url` in ZooKeeper.
+
+You may also see NPEs in collection state updates during a rolling upgrade to 8.8.0 from a previous version of Solr. After upgrading all nodes in your cluster
+to 8.8.0, collections should fully recover. If any replicas do not recover after the upgrade, trigger another rolling restart to re-elect leaders.
+
 === Solr 8.7
 
 See the https://cwiki.apache.org/confluence/display/SOLR/ReleaseNote87[8.7 Release Notes^]
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index e09ebb3..4037609 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -16,25 +16,11 @@
  */
 package org.apache.solr.client.solrj.request;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.Set;
-import java.util.TimeZone;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
 import org.apache.solr.client.solrj.RoutedAliasTypes;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.V2RequestSupport;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.RequestStatusState;
 import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
@@ -54,26 +40,29 @@ import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TimeZone;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
 import static org.apache.solr.common.cloud.DocCollection.PER_REPLICA_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.READ_ONLY;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionAdminParams.ALIAS;
-import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
-import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_PARAM;
-import static org.apache.solr.common.params.CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
-import static org.apache.solr.common.params.CollectionAdminParams.ROUTER_PREFIX;
-import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT;
+import static org.apache.solr.common.cloud.ZkStateReader.*;
+import static org.apache.solr.common.params.CollectionAdminParams.*;
 
 /**
  * This class is experimental and subject to change.
  *
  * @since solr 4.5
  */
-public abstract class CollectionAdminRequest<T extends CollectionAdminResponse> extends SolrRequest<T> implements V2RequestSupport, MapWriter {
+public abstract class CollectionAdminRequest<T extends CollectionAdminResponse> extends SolrRequest<T> implements MapWriter {
 
   /**
    * The set of modifiable collection properties
@@ -98,14 +87,6 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
   }
 
   @Override
-  @SuppressWarnings({"rawtypes"})
-  public SolrRequest getV2Request() {
-    return usev2 ?
-        V1toV2ApiMapper.convert(this).useBinary(useBinaryV2).build() :
-        this;
-  }
-
-  @Override
   public SolrParams getParams() {
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set(CoreAdminParams.ACTION, action.toString());
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
index 9b106ab..a88a656 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
@@ -18,6 +18,13 @@
 package org.apache.solr.client.solrj.request;
 
 
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.request.beans.V2ApiConstants;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.util.CommandOperation;
+import org.apache.solr.common.util.Pair;
+import org.apache.solr.common.util.Utils;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -25,26 +32,11 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.util.CommandOperation;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.Utils;
-
 import static org.apache.solr.client.solrj.SolrRequest.METHOD.DELETE;
-import static org.apache.solr.client.solrj.SolrRequest.METHOD.GET;
-import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.CLUSTER_ALIASES;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.COLLECTIONS_COMMANDS;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.COLLECTION_STATE;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION_PER_SHARD_COMMANDS;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION_PER_SHARD_DELETE;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION_PER_SHARD_PER_REPLICA_DELETE;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.PER_COLLECTION_SHARDS_COMMANDS;
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.*;
+import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.*;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
 import static org.apache.solr.common.params.CommonParams.NAME;
 
@@ -57,17 +49,6 @@ public class CollectionApiMapping {
   public enum Meta implements CommandMeta {
     GET_A_COLLECTION(COLLECTION_STATE, GET, CLUSTERSTATUS),
     LIST_ALIASES(CLUSTER_ALIASES, GET, LISTALIASES),
-    CREATE_COLLECTION(COLLECTIONS_COMMANDS,
-        POST,
-        CREATE,
-        CREATE.toLower(),
-        Utils.makeMap(
-            "collection.configName", "config",
-            "createNodeSet.shuffle", "shuffleNodes",
-            "createNodeSet", "nodeSet"
-        ),
-        Utils.makeMap("property.", "properties.")),
-
     RELOAD_COLL(PER_COLLECTION,
         POST,
         RELOAD,
@@ -91,34 +72,11 @@ public class CollectionApiMapping {
         POST,
         REBALANCELEADERS,
         "rebalance-leaders", null),
-    CREATE_ALIAS(COLLECTIONS_COMMANDS,
-        POST,
-        CREATEALIAS,
-        "create-alias",
-        CREATE_COLLECTION.paramsToAttrs.entrySet().stream().collect(Collectors.toMap(
-            entry -> "create-collection." + entry.getKey(),
-            entry -> "create-collection." + entry.getValue()
-        )),
-        CREATE_COLLECTION.prefixParamsToAttrs.entrySet().stream().collect(Collectors.toMap(
-            entry -> "create-collection." + entry.getKey(),
-            entry -> "create-collection." + entry.getValue()
-        ))),
-    DELETE_ALIAS(COLLECTIONS_COMMANDS,
-        POST,
-        DELETEALIAS,
-        "delete-alias",
-        null),
-    ALIAS_PROP(COLLECTIONS_COMMANDS,
-        POST,
-        ALIASPROP,
-        "set-alias-property",
-        null,
-        Utils.makeMap("property.", "properties.")),
     CREATE_SHARD(PER_COLLECTION_SHARDS_COMMANDS,
         POST,
         CREATESHARD,
         "create",
-        Utils.makeMap("createNodeSet", "nodeSet"),
+        Utils.makeMap("createNodeSet", V2ApiConstants.NODE_SET),
         Utils.makeMap("property.", "coreProperties.")) {
       @Override
       public String getParamSubstitute(String param) {
@@ -169,17 +127,6 @@ public class CollectionApiMapping {
             NAME, "collection",
             "propertyName", "name",
             "propertyValue", "value")),
-    BACKUP_COLLECTION(COLLECTIONS_COMMANDS,
-        POST,
-        BACKUP,
-        "backup-collection", null
-    ),
-    RESTORE_COLLECTION(COLLECTIONS_COMMANDS,
-        POST,
-        RESTORE,
-        "restore-collection",
-        null
-    ),
     FORCE_LEADER(PER_COLLECTION_PER_SHARD_COMMANDS, POST, CollectionAction.FORCELEADER, "force-leader", null),
     BALANCE_SHARD_UNIQUE(PER_COLLECTION, POST, BALANCESHARDUNIQUE,"balance-shard-unique" , null)
     ;
@@ -304,7 +251,6 @@ public class CollectionApiMapping {
 
   public enum EndPoint implements V2EndPoint {
     CLUSTER_ALIASES("cluster.aliases"),
-    COLLECTIONS_COMMANDS("collections.Commands"),
     COLLECTION_STATE("collections.collection"),
     PER_COLLECTION("collections.collection.Commands"),
     PER_COLLECTION_SHARDS_COMMANDS("collections.collection.shards.Commands"),
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/BackupCollectionPayload.java
similarity index 58%
copy from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
copy to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/BackupCollectionPayload.java
index 5cd10b6..5d5e7ce 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/BackupCollectionPayload.java
@@ -16,17 +16,39 @@
  */
 package org.apache.solr.client.solrj.request.beans;
 
-import java.util.Map;
-
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class CreateConfigInfo implements ReflectMapWriter {
-  @JsonProperty(required = true)
-  public String name;
-  @JsonProperty
-  public String baseConfigSet;
-  @JsonProperty
-  public Map<String,Object> properties;
+/**
+ * V2 API POJO for the /v2/collections 'backup-collection' command.
+ *
+ * Analogous to the request parameters for the v1 /admin/collections?action=BACKUP API.
+ */
+public class BackupCollectionPayload implements ReflectMapWriter {
+    @JsonProperty(required = true)
+    public String collection;
+
+    @JsonProperty(required = true)
+    public String name;
+
+    @JsonProperty
+    public String location;
+
+    @JsonProperty
+    public String repository;
+
+    @JsonProperty
+    public Boolean followAliases;
+
+    @JsonProperty
+    public String indexBackup;
+
+    @JsonProperty
+    public String commitName;
+
+    @JsonProperty
+    public Boolean incremental;
 
+    @JsonProperty
+    public String async;
 }
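A hypothetical usage sketch for the new POJO above; the field values are invented, and only the field names come from `BackupCollectionPayload` itself:

    import org.apache.solr.client.solrj.request.beans.BackupCollectionPayload;

    public class BackupPayloadSketch {
      public static void main(String[] args) {
        BackupCollectionPayload backup = new BackupCollectionPayload();
        backup.collection = "techproducts";  // hypothetical collection to back up
        backup.name = "nightly";             // hypothetical backup name
        backup.location = "/backups";        // hypothetical shared-drive location
        backup.incremental = Boolean.TRUE;
        backup.async = "backup-req-1";       // optional request tracking id
        // The class implements ReflectMapWriter, so SolrJ can reflectively serialize these
        // public fields into the body of a /v2/collections 'backup-collection' request.
        System.out.println("backing up " + backup.collection + " as '" + backup.name + "'");
      }
    }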
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropPayload.java
similarity index 86%
copy from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
copy to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropPayload.java
index b8de08d..64b233a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropPayload.java
@@ -20,7 +20,7 @@ package org.apache.solr.client.solrj.request.beans;
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class ClusterPropInfo implements ReflectMapWriter {
+public class ClusterPropPayload implements ReflectMapWriter {
 
   @JsonProperty
   public String urlScheme;
@@ -31,7 +31,7 @@ public class ClusterPropInfo implements ReflectMapWriter {
   public String location;
 
   @JsonProperty
-  public DefaultsInfo defaults;
+  public Defaults defaults;
 
   @JsonProperty
   public CollectionDefaults collectionDefaults;
@@ -48,17 +48,17 @@ public class ClusterPropInfo implements ReflectMapWriter {
 
   }
 
-  public static class DefaultsInfo implements ReflectMapWriter {
+  public static class Defaults implements ReflectMapWriter {
 
     @JsonProperty
     public CollectionDefaults collection;
 
     @JsonProperty
-    public ClusterInfo cluster;
+    public Cluster cluster;
 
   }
 
-  public static class ClusterInfo implements ReflectMapWriter {
+  public static class Cluster implements ReflectMapWriter {
     @JsonProperty
     public Boolean useLegacyReplicaAssignment;
 
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateAliasPayload.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateAliasPayload.java
new file mode 100644
index 0000000..b84b49c
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateAliasPayload.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.request.beans;
+
+import org.apache.solr.common.annotation.JsonProperty;
+import org.apache.solr.common.util.ReflectMapWriter;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.solr.client.solrj.request.beans.V2ApiConstants.CREATE_COLLECTION_KEY;
+
+public class CreateAliasPayload implements ReflectMapWriter {
+    @JsonProperty(required = true)
+    public String name;
+
+    @JsonProperty
+    public List<String> collections;
+
+    @JsonProperty
+    public AliasRouter router;
+
+    @JsonProperty
+    public String tz;
+
+    @JsonProperty(CREATE_COLLECTION_KEY)
+    public Map<String, Object> createCollectionParams;
+
+    @JsonProperty
+    public String async;
+
+    public static class AliasRouter implements ReflectMapWriter {
+        @JsonProperty(required = true)
+        public String name;
+
+        @JsonProperty
+        public String field;
+
+        @JsonProperty
+        public String start;
+
+        @JsonProperty
+        public String interval;
+
+        @JsonProperty
+        public Integer maxFutureMs;
+
+        @JsonProperty
+        public String preemptiveCreateMath;
+
+        @JsonProperty
+        public String autoDeleteAge;
+
+        @JsonProperty
+        public Integer maxCardinality;
+
+        @JsonProperty
+        public String mustMatch;
+
+        @JsonProperty
+        public List<Map<String, Object>> routerList;
+    }
+}
+
+
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigPayload.java
similarity index 94%
copy from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
copy to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigPayload.java
index 5cd10b6..98b22b4 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigPayload.java
@@ -21,7 +21,7 @@ import java.util.Map;
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class CreateConfigInfo implements ReflectMapWriter {
+public class CreateConfigPayload implements ReflectMapWriter {
   @JsonProperty(required = true)
   public String name;
   @JsonProperty
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreatePayload.java
similarity index 64%
rename from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
rename to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreatePayload.java
index b8de08d..403af4d 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreatePayload.java
@@ -14,59 +14,57 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.solr.client.solrj.request.beans;
 
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class ClusterPropInfo implements ReflectMapWriter {
-
-  @JsonProperty
-  public String urlScheme;
+import java.util.List;
+import java.util.Map;
 
-  @JsonProperty
-  public Integer maxCoresPerNode;
-  @JsonProperty
-  public String location;
+public class CreatePayload implements ReflectMapWriter {
+    @JsonProperty(required = true)
+    public String name;
 
-  @JsonProperty
-  public DefaultsInfo defaults;
+    @JsonProperty
+    public String config;
 
-  @JsonProperty
-  public CollectionDefaults collectionDefaults;
+    @JsonProperty
+    public Map<String, Object> router;
 
-  public static class CollectionDefaults implements ReflectMapWriter {
     @JsonProperty
     public Integer numShards;
+
     @JsonProperty
-    public Integer tlogReplicas;
+    public String shards;
+
     @JsonProperty
-    public Integer pullReplicas;
+    public Integer replicationFactor;
+
     @JsonProperty
     public Integer nrtReplicas;
 
-  }
-
-  public static class DefaultsInfo implements ReflectMapWriter {
-
     @JsonProperty
-    public CollectionDefaults collection;
+    public Integer tlogReplicas;
 
     @JsonProperty
-    public ClusterInfo cluster;
-
-  }
+    public Integer pullReplicas;
 
-  public static class ClusterInfo implements ReflectMapWriter {
     @JsonProperty
-    public Boolean useLegacyReplicaAssignment;
+    public List<String> nodeSet;
 
+    @JsonProperty
+    public Boolean shuffleNodes;
 
     @JsonProperty
-    public CollectionDefaults collection;
+    public Map<String, Object> properties;
 
-  }
+    @JsonProperty
+    public String async;
 
+    @JsonProperty
+    public Boolean waitForFinalState;
 
+    @JsonProperty
+    public Boolean perReplicaState;
 }
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/DeleteAliasPayload.java
similarity index 80%
copy from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
copy to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/DeleteAliasPayload.java
index 5cd10b6..7565c3d 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/DeleteAliasPayload.java
@@ -16,17 +16,13 @@
  */
 package org.apache.solr.client.solrj.request.beans;
 
-import java.util.Map;
-
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class CreateConfigInfo implements ReflectMapWriter {
-  @JsonProperty(required = true)
-  public String name;
-  @JsonProperty
-  public String baseConfigSet;
-  @JsonProperty
-  public Map<String,Object> properties;
+public class DeleteAliasPayload implements ReflectMapWriter {
+    @JsonProperty(required = true)
+    public String name;
 
+    @JsonProperty
+    public String async;
 }
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterMeta.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterPayload.java
similarity index 89%
rename from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterMeta.java
rename to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterPayload.java
index 7cf70fd..42058bc 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterMeta.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RateLimiterPayload.java
@@ -25,7 +25,7 @@ import org.apache.solr.common.util.ReflectMapWriter;
 /**
  * POJO for Rate Limiter Metadata Configuration
  */
-public class RateLimiterMeta implements ReflectMapWriter {
+public class RateLimiterPayload implements ReflectMapWriter {
   @JsonProperty
   public Boolean enabled;
 
@@ -41,8 +41,8 @@ public class RateLimiterMeta implements ReflectMapWriter {
   @JsonProperty
   public Integer slotAcquisitionTimeoutInMS;
 
-  public RateLimiterMeta copy() {
-    RateLimiterMeta result = new RateLimiterMeta();
+  public RateLimiterPayload copy() {
+    RateLimiterPayload result = new RateLimiterPayload();
 
     result.enabled = enabled;
     result.guaranteedSlots = guaranteedSlots;
@@ -55,8 +55,8 @@ public class RateLimiterMeta implements ReflectMapWriter {
 
   @Override
   public boolean equals(Object obj) {
-    if (obj instanceof RateLimiterMeta) {
-      RateLimiterMeta that = (RateLimiterMeta) obj;
+    if (obj instanceof RateLimiterPayload) {
+      RateLimiterPayload that = (RateLimiterPayload) obj;
       return Objects.equals(this.enabled, that.enabled) &&
           Objects.equals(this.guaranteedSlots, that.guaranteedSlots) &&
           Objects.equals(this.allowedRequests, that.allowedRequests) &&
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RestoreCollectionPayload.java
similarity index 57%
copy from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
copy to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RestoreCollectionPayload.java
index 5cd10b6..2634802 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/RestoreCollectionPayload.java
@@ -16,17 +16,38 @@
  */
 package org.apache.solr.client.solrj.request.beans;
 
-import java.util.Map;
-
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class CreateConfigInfo implements ReflectMapWriter {
-  @JsonProperty(required = true)
-  public String name;
-  @JsonProperty
-  public String baseConfigSet;
-  @JsonProperty
-  public Map<String,Object> properties;
+import java.util.Map;
+
+import static org.apache.solr.client.solrj.request.beans.V2ApiConstants.CREATE_COLLECTION_KEY;
+
+/**
+ * V2 API POJO for the /v2/collections 'restore-collection' command.
+ *
+ * Analogous to the request parameters for the v1 /admin/collections?action=RESTORE API.
+ */
+public class RestoreCollectionPayload implements ReflectMapWriter {
+
+    @JsonProperty(required = true)
+    public String collection;
+
+    @JsonProperty(required = true)
+    public String name;
+
+    @JsonProperty
+    public String location;
+
+    @JsonProperty
+    public String repository;
+
+    @JsonProperty
+    public Integer backupId;
+
+    @JsonProperty(CREATE_COLLECTION_KEY)
+    public Map<String, Object> createCollectionParams;
 
+    @JsonProperty
+    public String async;
 }
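To tie this back to the restore test at the top of this section, here is a hypothetical payload showing the nested collection-creation parameters; the test asserts they are flattened to bare v1 params (`numShards`, `property.foo`, `replicationFactor`) rather than being prefixed with `create-collection.`:

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.solr.client.solrj.request.beans.RestoreCollectionPayload;

    public class RestorePayloadSketch {
      public static void main(String[] args) {
        RestoreCollectionPayload restore = new RestoreCollectionPayload();
        restore.collection = "restoredColl";   // hypothetical target collection
        restore.name = "nightly";              // hypothetical backup name
        restore.async = "requestTrackingId";

        // Nested create-collection params, keyed by V2ApiConstants.CREATE_COLLECTION_KEY.
        Map<String, Object> createParams = new LinkedHashMap<>();
        createParams.put("numShards", 1);
        createParams.put("property.foo", "bar");
        createParams.put("replicationFactor", 3);
        restore.createCollectionParams = createParams;
      }
    }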
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/SetAliasPropertyPayload.java
similarity index 81%
rename from solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
rename to solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/SetAliasPropertyPayload.java
index 5cd10b6..c3c8585 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateConfigInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/SetAliasPropertyPayload.java
@@ -16,17 +16,18 @@
  */
 package org.apache.solr.client.solrj.request.beans;
 
-import java.util.Map;
-
 import org.apache.solr.common.annotation.JsonProperty;
 import org.apache.solr.common.util.ReflectMapWriter;
 
-public class CreateConfigInfo implements ReflectMapWriter {
-  @JsonProperty(required = true)
-  public String name;
-  @JsonProperty
-  public String baseConfigSet;
-  @JsonProperty
-  public Map<String,Object> properties;
+import java.util.Map;
+
+public class SetAliasPropertyPayload implements ReflectMapWriter {
+    @JsonProperty(required = true)
+    public String name;
+
+    @JsonProperty
+    public String async;
 
+    @JsonProperty
+    public Map<String, Object> properties;
 }
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/V2ApiConstants.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/V2ApiConstants.java
new file mode 100644
index 0000000..174b8bf
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/V2ApiConstants.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.request.beans;
+
+public class V2ApiConstants {
+    private V2ApiConstants() { /* Private ctor prevents instantiation */ }
+
+    /**
+     * Parent key for collection or alias properties to set.
+     */
+    public static final String PROPERTIES_KEY = "properties";
+    /**
+     * Parent key for v2 params used to create a collection.
+     */
+    public static final String CREATE_COLLECTION_KEY = "create-collection";
+
+    /**
+     * Parent key holding alias-router parameters.
+     */
+    public static final String ROUTER_KEY = "router";
+
+    /**
+     * Parameter name for the configset used by a collection.
+     */
+    public static final String CONFIG = "config";
+
+    /**
+     * Property controlling whether 'nodeSet' should be shuffled before use.
+     */
+    public static final String SHUFFLE_NODES = "shuffleNodes";
+
+    /**
+     * The set of nodes to consider as potential locations for a new collection or its constituent components.
+     */
+    public static final String NODE_SET = "nodeSet";
+
+    /**
+     * The collections to be included in an alias.
+     */
+    public static final String COLLECTIONS = "collections";
+}
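A short, hypothetical sketch of how these constants are meant to be consumed; the pattern matches `CreateAliasPayload` and `RestoreCollectionPayload` elsewhere in this commit, and the class below exists only for illustration:

    import java.util.Map;

    import org.apache.solr.client.solrj.request.beans.V2ApiConstants;
    import org.apache.solr.common.annotation.JsonProperty;
    import org.apache.solr.common.util.ReflectMapWriter;

    // Hypothetical payload: referencing V2ApiConstants from @JsonProperty keeps nested v2
    // wire-format keys such as "create-collection" defined in exactly one place.
    public class ExamplePayload implements ReflectMapWriter {
      @JsonProperty(required = true)
      public String name;

      @JsonProperty(V2ApiConstants.CREATE_COLLECTION_KEY)  // i.e. "create-collection"
      public Map<String, Object> createCollectionParams;
    }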
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
index 6f893c9..357bda0 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java
@@ -35,6 +35,13 @@ import static org.apache.solr.common.util.Utils.toJSONString;
  */
 public class ZkNodeProps implements JSONWriter.Writable {
 
+  /**
+   * Feature flag to enable storing the 'base_url' property; base_url will not be stored as of Solr 9.x.
+   * Installations that use an older (pre-8.8) SolrJ against an 8.8.0 or newer server will need to set this system
+   * property to 'true' to avoid NPEs when reading cluster state from ZooKeeper; see SOLR-15145.
+   */
+  static final boolean STORE_BASE_URL = Boolean.parseBoolean(System.getProperty("solr.storeBaseUrl", "false"));
+
   protected final Map<String,Object> propMap;
 
   /**
@@ -45,7 +52,7 @@ public class ZkNodeProps implements JSONWriter.Writable {
 
     // don't store base_url if we have a node_name to recompute from when we read back from ZK
     // sub-classes that know they need a base_url (Replica) can eagerly compute in their ctor
-    if (this.propMap.containsKey(ZkStateReader.NODE_NAME_PROP)) {
+    if (!STORE_BASE_URL && this.propMap.containsKey(ZkStateReader.NODE_NAME_PROP)) {
       this.propMap.remove(ZkStateReader.BASE_URL_PROP);
     }
 
@@ -118,14 +125,9 @@ public class ZkNodeProps implements JSONWriter.Writable {
   @Override
   public void write(JSONWriter jsonWriter) {
     // don't write out the base_url if we have a node_name
-    if (propMap.containsKey(ZkStateReader.BASE_URL_PROP) && propMap.containsKey(ZkStateReader.NODE_NAME_PROP)) {
-      final Map<String,Object> filtered = new HashMap<>();
-      // stream / collect is no good here as the Collector doesn't like null values
-      propMap.forEach((key, value) -> {
-        if (!ZkStateReader.BASE_URL_PROP.equals(key)) {
-          filtered.put(key, value);
-        }
-      });
+    if (!STORE_BASE_URL && propMap.containsKey(ZkStateReader.BASE_URL_PROP) && propMap.containsKey(ZkStateReader.NODE_NAME_PROP)) {
+      final Map<String,Object> filtered = new HashMap<>(propMap);
+      filtered.remove(ZkStateReader.BASE_URL_PROP);
       jsonWriter.write(filtered);
     } else {
       jsonWriter.write(propMap);
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
index c38f397..d259f35 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
@@ -39,6 +39,11 @@ public interface CollectionAdminParams {
   String CREATE_NODE_SET_PARAM = "createNodeSet";
 
   /**
+   * The number of shards to create a particular collection with.
+   */
+  String NUM_SHARDS = "numShards";
+
+  /**
    * A parameter which specifies if the provided list of Solr nodes (via {@linkplain #CREATE_NODE_SET_PARAM})
    * should be shuffled before being used.
    */
diff --git a/solr/solrj/src/resources/apispec/collections.Commands.json b/solr/solrj/src/resources/apispec/collections.Commands.json
deleted file mode 100644
index 4d4a178..0000000
--- a/solr/solrj/src/resources/apispec/collections.Commands.json
+++ /dev/null
@@ -1,298 +0,0 @@
-{
-  "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#create",
-  "description": "Create collections and collection aliases, backup or restore collections, and delete collections and aliases.",
-  "methods": [
-    "POST"
-  ],
-  "url": {
-    "paths": [
-      "/collections",
-      "/c"
-    ]
-  },
-  "commands": {
-    "create": {
-      "type": "object",
-      "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#create",
-      "description": "Create a collection.",
-      "properties": {
-        "name": {
-          "type": "string",
-          "description": "The name of the collection to be created."
-        },
-        "config": {
-          "type": "string",
-          "description": "The name of the configuration set (which must already be stored in ZooKeeper) to use for this collection. If not provided, Solr will default to the collection name as the configuration set name."
-        },
-        "router": {
-          "type": "object",
-          "documentation": "https://lucene.apache.org/solr/guide/shards-and-indexing-data-in-solrcloud.html",
-          "description": "These properties define how to distribute documents across a collection's shards.",
-          "properties": {
-            "name": {
-              "type": "string",
-              "enum":["implicit","compositeId"],
-              "description": "The router implementation to use for this collection. There are two options: compositeId or implicit. The compositeId option has Solr decide how to distribute documents (with some possibilities for customization). The implicit option requires you define your own routing strategy, and puts the balancing of documents in shards entirely in your hands.",
-              "default": "compositeId"
-            },
-            "field": {
-              "type": "string",
-              "description": "A field to be used by Solr to identify the shard a document should be routed to. By default, the field defined as the unique ID for each document is used, but an alternative field can be defined with this parameter."
-            }
-          }
-        },
-        "numShards": {
-          "type": "integer",
-          "description": "The number of shards to be created as part of the collection. Shards are logical partitions of a single collection. Each shard has at least one replica, but more replicas for each shard can be defined with the replicationFactor property. This is a required parameter when using the 'compositeId' router."
-        },
-        "shards": {
-          "type": "string",
-          "description": "A comma-separated list of shard names, e.g., shard-x,shard-y,shard-z. This is a required parameter when using the 'implicit' router."
-        },
-        "replicationFactor": {
-          "type": "integer",
-          "description": "The number of NRT replicas to be created for each shard. Replicas are physical copies of each shard, acting as failover for the shard."
-        },
-        "nrtReplicas": {
-          "type": "integer",
-          "description": "The number of NRT replicas to be created for each shard. Replicas are physical copies of each shard, acting as failover for the shard. Replicas of type NRT will be updated with each document that is added to the cluster, and can use \"softCommits\" to get a new view of the index in Near Real Time. This parameter works in the same way as 'replicationFactor'"
-        },
-        "tlogReplicas": {
-          "type": "integer",
-          "description": "The number of TLOG replicas to be created for each shard. TLOG replicas update their transaction log for every update to the cluster, but only the shard leader updates the local index, other TLOG replicas will use segment replication and copy the latest index files from the leader."
-        },
-        "pullReplicas": {
-          "type": "integer",
-          "description": "The number of PULL replicas to be created for each shard. PULL replicas don't receive copies of the documents on update requests, they just replicate the latest segments periodically from the shard leader. PULL replicas can't become shard leaders, and need at least one active TLOG(recommended) or NRT replicas in the shard to replicate from."
-        },
-        "nodeSet": {
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "description": "Defines nodes to spread the new collection across. If not provided, the collection will be spread across all live Solr nodes. The names to use are the 'node_name', which can be found by a request to the cluster/nodes endpoint. A special value of EMPTY will create no shards or replicas for the new collection. In this case, shards and replicas can be added later with the add-replica command available on the /collections/{collection}/shards endpoint."
-        },
-        "shuffleNodes": {
-          "type": "boolean",
-          "description": "Controls whether or not the shard-replicas created for this collection will be assigned to the nodes specified by the nodeSet property in a sequential manner, or if the list of nodes should be shuffled prior to creating individual replicas. A 'false' value makes the results of a collection creation predictable and gives more exact control over the location of the individual shard-replicas, but 'true' can be a better choice for ensuring replicas are distributed e [...]
-        },
-        "properties": {
-          "type": "object",
-          "documentation": "https://lucene.apache.org/solr/guide/defining-core-properties.html",
-          "description": "Allows adding core.properties for the collection. Some examples of core properties you may want to modify include the config set, the node name, the data directory, among others.",
-          "additionalProperties": true
-        },
-        "async": {
-          "type": "string",
-          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
-        },
-        "waitForFinalState": {
-          "type": "boolean",
-          "description": "If true then request will complete only when all affected replicas become active.",
-          "default": false
-        },
-        "perReplicaState": {
-          "type": "boolean",
-          "description": "Use Per replica states",
-          "default": false
-        }
-      },
-      "required": [
-        "name"
-      ]
-    },
-    "create-alias": {
-      "documentation": "https://lucene.apache.org/solr/guide/collection-aliasing.html#createalias",
-      "description": "Allows one or more collections to be known by another name (to include time partitioned collections). If this command is used on an existing alias, the existing alias will be replaced with the new collection details.",
-      "type": "object",
-      "properties": {
-        "name": {
-          "type": "string",
-          "description": "The alias name to be created."
-        },
-        "collections": {
-          "type": "array",
-          "description": "The list of collections to be known as this alias. Incompatible with any of the routing parameters. Either this parameter or a complete set of routing parameters is required.",
-          "items": {
-            "type": "string"
-          }
-        },
-        "router" : {
-          "type":"object",
-          "documentation": "https://lucene.apache.org/solr/guide/collection-aliasing.html#createalias",
-          "description":"Routing specific properties to define a time routed alias.  Do not specify 'collections' when creating a time routed alias.",
-          "properties" : {
-            "name" : {
-              "type" : "string",
-              "description": "The type of routing to perform. Currently only 'time' is supported, and it's required."
-            },
-            "field" : {
-              "type": "string",
-              "description": "The date field name in incoming documents that is consulted to decide which collection the document should be routed to."
-            },
-            "start": {
-              "type": "string",
-              "description": "The earliest date/time in a document that may be indexed into this alias. Documents with values less than this will return an error. For time based routing this may be a date math expression."
-            },
-            "interval" : {
-              "type": "string",
-              "description": "A specification of the width of the interval for each partition collection. For time based routing this should be a date math expression fragment starting with the + character."
-            },
-            "maxFutureMs": {
-              "type": "integer",
-              "description":"How many milliseconds into the future to accept document. Documents with a value in router.field that is greater than now() + maxFutureMs will be rejected to avoid provisioning too much resources."
-            },
-            "preemptiveCreateMath":{
-              "type": "string",
-              "description": "If a document arrives with a timestamp that is after the end time of the most recent collection minus this interval, then the next collection will be created asynchronously. Without this setting, collections are created synchronously when required by the document time stamp and thus block the flow of documents until the collection is created (possibly several seconds). Preemptive creation reduces these hiccups. If set to enough time (perhaps an hour or more) [...]
-            },
-            "autoDeleteAge": {
-              "type": "string",
-              "description": "A date math expressions yielding a time in the past. Collections covering a period of time entirely before this age will be automatically deleted."
-            },
-            "maxCardinality": {
-              "type": "integer",
-              "description": "The maximum number of categories allowed for this alias."
-            },
-            "mustMatch": {
-              "type": "string",
-              "description": "A regular expression that the value of the field specified by `router.field` must match before a corresponding collection will be created."
-            },
-            "routerList": {
-              "type": "array",
-              "description": "A list of router property sets to be used with router type Dimensional[foo,bar] where foo and bar are valid router type names (i.e. time or category). The order must correspond to the type specification in [] in the Dimensional type, so Dimensional[category,time] would require the first set of router properties to be valid for a category routed alias, and the second set to be valid for a time routed alias. In these sets of properties, router.name will be ign [...]
-              "items": {
-                "type": "object",
-                "additionalProperties": true
-              }
-            }
-          }
-        },
-        "TZ": {
-          "type": "string",
-          "description": "Optional timezone for use with any date math that may exist in other parameters.  Defaults to UTC."
-        },
-        "create-collection": {
-          "type": "object",
-          "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#create",
-          "description": "The settings to use to create a collection for each new time partition. Most options from the collection create command are available, except for 'name', 'async' and 'waitForFinalState'.",
-          "additionalProperties": true
-        },
-        "async": {
-          "type": "string",
-          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
-        }
-      },
-      "required": [
-        "name"
-      ]
-    },
-    "delete-alias": {
-      "documentation": "https://lucene.apache.org/solr/guide/collection-aliasing.html#deletealias",
-      "description": "Deletes a collection alias",
-      "type": "object",
-      "properties": {
-        "name": {
-          "type": "string",
-          "description": "The name of the alias to delete."
-        },
-        "async": {
-          "type": "string",
-          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
-        }
-      },
-      "required": [
-        "name"
-      ]
-    },
-    "set-alias-property": {
-      "documentation": "https://lucene.apache.org/solr/guide/collection-aliasing.html#modifyalias",
-      "description": "Allows changing the properties on an alias. If a key is set with an empty string then it will be removed",
-      "type": "object",
-      "properties": {
-        "name": {
-          "type": "string",
-          "description": "The alias name on which to set properties."
-        },
-        "properties" : {
-          "type": "object",
-          "description": "A map of key/value pairs that will be associated with the alias as alias properties (metadata). An empty value will delete any existing value for a given key.",
-          "additionalProperties": true
-        },
-        "async": {
-          "type": "string",
-          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
-        }
-      },
-      "required": [
-        "name"
-      ]
-    },
-    "backup-collection": {
-      "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#backup",
-      "description": "Backup Solr indexes and configurations for a specific collection. One copy of the indexes will be taken from each shard, and the config set for the collection will also be copied.",
-      "type": "object",
-      "properties": {
-        "collection": {
-          "type": "string",
-          "description": "The name of the collection to back up."
-        },
-        "name": {
-          "type": "string",
-          "description": "The name of the backup."
-        },
-        "location": {
-          "type": "string",
-          "description": "A location on a shared drive for the backup-collection command to write to. Alternately, it can be set as a cluster property with the cluster endpoint, which also supports setting a location."
-        },
-        "followAliases": {
-          "type": "boolean",
-          "description": "Controls whether aliases are resolved when trying to back up the specified collection, or whether Solr should only backup the provided collection name if it matches a concrete collection."
-        },
-        "incremental": {
-          "type": "boolean",
-          "description": "An internal property that controls whether the backup should use the standard 'incremental' file format or the deprecated 'full-snapshot' based format."
-        },
-        "async": {
-          "type": "string",
-          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
-        }
-      },
-      "required": [
-        "collection",
-        "name"
-      ]
-    },
-    "restore-collection": {
-      "documentation": "https://lucene.apache.org/solr/guide/collection-management.html#restore",
-      "description": "Restore Solr indexes and configurations from a backup. You cannot restore into the same collection you took the backup from. The target collection must not exist before calling this command, as it will be created by the restore action. The new collection will have the same number of shards and replicas as the original collection, and all routing strategies will be retained.",
-      "type": "object",
-      "properties": {
-        "collection": {
-          "type": "string",
-          "description": "The name of the collection the backup will be restored to. This collection must not exist prior to this "
-        },
-        "name": {
-          "type": "string",
-          "description": "The name of the backup file."
-        },
-        "location": {
-          "type": "string",
-          "description": "The location on the shared drive for the restore-collection command to read from. Alternately, it can be set as a cluster property with the cluster endpoint, which also supports setting a location."
-        },
-        "backupId": {
-          "type": "integer",
-          "description": "The ID of the backup to restore, when the provided location and backup name hold multiple backups for the provided collection.  Defaults to the most recent backup if not specified."
-        },
-        "async": {
-          "type": "string",
-          "description": "Defines a request ID that can be used to track this action after it's submitted. The action will be processed asynchronously."
-        }
-      },
-      "required": [
-        "collection",
-        "name"
-      ]
-    }
-  }
-}
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 7d8a062..56769c2 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -16,9 +16,7 @@
  */
 package org.apache.solr.client.solrj.io.stream;
 
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.IOException;
+import java.io.*;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -30,6 +28,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+import java.util.zip.GZIPOutputStream;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
@@ -3484,6 +3483,28 @@ public class StreamExpressionTest extends SolrCloudTestCase {
   }
 
   @Test
+  public void testCatStreamSingleGzipFile() throws Exception {
+    final String catStream = "cat(\"topLevel1.txt.gz\")";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", catStream);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+FILESTREAM_COLLECTION;
+
+    SolrStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertEquals(4, tuples.size());
+
+    for (int i = 0; i < 4; i++) {
+      Tuple t = tuples.get(i);
+      assertEquals("topLevel1.txt.gz line " + String.valueOf(i+1), t.get("line"));
+      assertEquals("topLevel1.txt.gz", t.get("file"));
+    }
+  }
+
+  @Test
   public void testCatStreamEmptyFile() throws Exception {
     final String catStream = "cat(\"topLevel-empty.txt\")";
     ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
@@ -3648,6 +3669,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     Files.createDirectories(dataDir);
     Files.createDirectories(dataDir.resolve("directory1"));
 
+    populateFileWithGzipData(dataDir.resolve("topLevel1.txt.gz"));
     populateFileWithData(dataDir.resolve("topLevel1.txt"));
     populateFileWithData(dataDir.resolve("topLevel2.txt"));
     Files.createFile(dataDir.resolve("topLevel-empty.txt"));
@@ -3665,6 +3687,16 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     }
   }
 
+  private static void populateFileWithGzipData(Path dataFile) throws Exception {
+    Files.createFile(dataFile);
+    try (final BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(dataFile.toFile())), StandardCharsets.UTF_8))) {
+      for (int i = 1; i <=4; i++) {
+        writer.write(dataFile.getFileName() + " line " + i);
+        writer.newLine();
+      }
+    }
+  }
+
   protected List<Tuple> getTuples(TupleStream tupleStream) throws IOException {
     List<Tuple> tuples = new ArrayList<Tuple>();
 
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
index b144e40..a57d859 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV1toV2ApiMapper.java
@@ -17,36 +17,16 @@
 
 package org.apache.solr.client.solrj.request;
 
-import java.io.IOException;
-import java.util.Map;
-
-import com.google.common.collect.ImmutableMap;
 import org.apache.solr.SolrTestCase;
 import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
 import org.apache.solr.common.util.ContentStreamBase;
 import org.apache.solr.common.util.Utils;
 import org.junit.Test;
 
-public class TestV1toV2ApiMapper extends SolrTestCase {
+import java.io.IOException;
+import java.util.Map;
 
-  @Test
-  // commented out on: 24-Dec-2018   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
-  public void testCreate() throws IOException {
-    Create cmd = CollectionAdminRequest
-        .createCollection("mycoll", "conf1", 3, 2)
-        .setProperties(ImmutableMap.<String,String>builder()
-            .put("p1","v1")
-            .put("p2","v2")
-            .build());
-    V2Request v2r = V1toV2ApiMapper.convert(cmd).build();
-    Map<?,?> m = (Map<?,?>) Utils.fromJSON(ContentStreamBase.create(new BinaryRequestWriter(), v2r).getStream());
-    assertEquals("/c", v2r.getPath());
-    assertEquals("v1", Utils.getObjectByPath(m,true,"/create/properties/p1"));
-    assertEquals("v2", Utils.getObjectByPath(m,true,"/create/properties/p2"));
-    assertEquals("3", Utils.getObjectByPath(m,true,"/create/numShards"));
-    assertEquals("2", Utils.getObjectByPath(m,true,"/create/nrtReplicas"));
-  }
+public class TestV1toV2ApiMapper extends SolrTestCase {
 
   @Test
   // commented out on: 24-Dec-2018   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java b/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
index d539088..66aa39f 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
@@ -28,7 +28,6 @@ import static org.apache.solr.common.util.ValidatingJsonMap.NOT_NULL;
 public class JsonValidatorTest extends SolrTestCaseJ4  {
 
   public void testSchema() {
-    checkSchema("collections.Commands");
     checkSchema("collections.collection.Commands");
     checkSchema("collections.collection.shards.Commands");
     checkSchema("collections.collection.shards.shard.Commands");
@@ -43,38 +42,42 @@ public class JsonValidatorTest extends SolrTestCaseJ4  {
 
 
   public void testSchemaValidation() {
-    ValidatingJsonMap spec = Utils.getSpec("collections.Commands").getSpec();
-    @SuppressWarnings({"rawtypes"})
-    Map createSchema = spec.getMap("commands", NOT_NULL).getMap("create-alias", NOT_NULL);
-    JsonSchemaValidator validator = new JsonSchemaValidator(createSchema);
-    List<String> errs = validator.validateJson(Utils.fromJSONString("{name : x, collections: [ c1 , c2]}"));
+    // merge-indexes chosen to exercise string and array/list props.
+    ValidatingJsonMap spec = Utils.getSpec("cores.core.Commands").getSpec();
+    final Map<String, Object> mergeIndexesSchema = spec.getMap("commands", NOT_NULL).getMap("merge-indexes", NOT_NULL);
+    final JsonSchemaValidator mergeIndexesSchemaValidator = new JsonSchemaValidator(mergeIndexesSchema);
+
+    List<String> errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : x, indexDir: [ c1 , c2]}"));
     assertNull(toJSONString(errs), errs);
-    errs = validator.validateJson(Utils.fromJSONString("{name : x, collections: [c1] }"));
+    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : x, indexDir: [c1] }"));
     assertNull(toJSONString(errs), errs);
-    errs = validator.validateJson(Utils.fromJSONString("{name : x, x:y, collections: [ c1 , c2]}"));
+    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : x, x:y, indexDir: [ c1 , c2]}"));
     assertNotNull(toJSONString(errs), errs);
     assertTrue(toJSONString(errs), errs.get(0).contains("Unknown"));
-    errs = validator.validateJson(Utils.fromJSONString("{name : 123, collections: c1 }"));
+    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : 123, indexDir: c1 }"));
     assertNotNull(toJSONString(errs), errs);
     assertTrue(toJSONString(errs), errs.get(0).contains("expected"));
-    errs = validator.validateJson(Utils.fromJSONString("{x:y, collections: [ c1 , c2]}"));
+    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{x:y, indexDir: [ c1 , c2]}"));
     assertTrue(toJSONString(errs), StrUtils.join(errs, '|').contains("Unknown"));
-    errs = validator.validateJson(Utils.fromJSONString("{name : x, collections: [ 1 , 2]}"));
+    errs = mergeIndexesSchemaValidator.validateJson(Utils.fromJSONString("{async : x, indexDir: [ 1 , 2]}"));
     assertFalse(toJSONString(errs), errs.isEmpty());
     assertTrue(toJSONString(errs), errs.get(0).contains("expected"));
-    validator = new JsonSchemaValidator("{" +
+
+
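+    // inline person schema covering number, boolean and string property types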
+    final JsonSchemaValidator personSchemaValidator = new JsonSchemaValidator("{" +
         "  type:object," +
         "  properties: {" +
         "   age : {type: number}," +
         "   adult : {type: boolean}," +
         "   name: {type: string}}}");
-    errs = validator.validateJson(Utils.fromJSONString("{name:x, age:21, adult:true}"));
+    errs = personSchemaValidator.validateJson(Utils.fromJSONString("{name:x, age:21, adult:true}"));
     assertNull(errs);
-    errs = validator.validateJson(Utils.fromJSONString("{name:x, age:'21', adult:'true'}"));
+    errs = personSchemaValidator.validateJson(Utils.fromJSONString("{name:x, age:'21', adult:'true'}"));
     assertNotNull(errs);
-
-    errs = validator.validateJson(Utils.fromJSONString("{name:x, age:'x21', adult:'true'}"));
+    errs = personSchemaValidator.validateJson(Utils.fromJSONString("{name:x, age:'x21', adult:'true'}"));
     assertEquals(1, errs.size());
+
+
     Exception e = expectThrows(Exception.class, () -> {
       new JsonSchemaValidator("{" +
           "  type:object," +
@@ -106,16 +109,16 @@ public class JsonValidatorTest extends SolrTestCaseJ4  {
     });
     assertTrue(e.getMessage().contains("Unknown key : propertes"));
 
-    validator = new JsonSchemaValidator("{" +
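+    // person schema extended with an enum-constrained 'sex' property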
+    final JsonSchemaValidator personWithEnumValidator = new JsonSchemaValidator("{" +
         "  type:object," +
         "  properties: {" +
         "   age : {type: number}," +
         "   sex: {type: string, enum:[M, F]}," +
         "   adult : {type: boolean}," +
         "   name: {type: string}}}");
-    errs = validator.validateJson(Utils.fromJSONString("{name: 'Joe Average' , sex:M}"));
+    errs = personWithEnumValidator.validateJson(Utils.fromJSONString("{name: 'Joe Average' , sex:M}"));
     assertNull("errs are " + errs, errs);
-    errs = validator.validateJson(Utils.fromJSONString("{name: 'Joe Average' , sex:m}"));
+    errs = personWithEnumValidator.validateJson(Utils.fromJSONString("{name: 'Joe Average' , sex:m}"));
     assertEquals(1, errs.size());
     assertTrue(errs.get(0).contains("Value of enum"));
 
@@ -139,8 +142,8 @@ public class JsonValidatorTest extends SolrTestCaseJ4  {
         "\n" +
         "  }\n" +
         "}";
-    validator = new JsonSchemaValidator(schema);
-    validator.validateJson(Utils.fromJSONString("{\n" +
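+    // schema describing a nested array of 'links' objects; the result of validateJson is not asserted here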
+    final JsonSchemaValidator nestedObjectValidator = new JsonSchemaValidator(schema);
+    nestedObjectValidator.validateJson(Utils.fromJSONString("{\n" +
         "  'links': [\n" +
         "    {\n" +
         "        'rel': 'x',\n" +
@@ -161,11 +164,12 @@ public class JsonValidatorTest extends SolrTestCaseJ4  {
         "'type' : 'object',\n" +
         "'oneOf' : ['a', 'b']\n" +
         "}";
-    validator = new JsonSchemaValidator(schema);
-    errs = validator.validateJson(Utils.fromJSONString("" +
+
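+    // 'oneOf' schema: a document containing none of the listed keys must produce errors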
+    final JsonSchemaValidator mutuallyExclusivePropertiesValidator = new JsonSchemaValidator(schema);
+    errs = mutuallyExclusivePropertiesValidator.validateJson(Utils.fromJSONString("" +
         "{'c':'val'}"));
     assertNotNull(errs);
-    errs = validator.validateJson(Utils.fromJSONString("" +
+    errs = mutuallyExclusivePropertiesValidator.validateJson(Utils.fromJSONString("" +
         "{'a':'val'}"));
     assertNull(errs);