You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@solr.apache.org by ds...@apache.org on 2022/03/25 13:42:55 UTC

[solr] branch branch_9_0 updated: SOLR-15223: BaseCloudSolrClient is the new CloudSolrClient (#750)

This is an automated email from the ASF dual-hosted git repository.

dsmiley pushed a commit to branch branch_9_0
in repository https://gitbox.apache.org/repos/asf/solr.git


The following commit(s) were added to refs/heads/branch_9_0 by this push:
     new 963b47f  SOLR-15223: BaseCloudSolrClient is the new CloudSolrClient (#750)
963b47f is described below

commit 963b47fff4b0ac23bd98889b79abb6ef442549ff
Author: David Smiley <ds...@salesforce.com>
AuthorDate: Fri Mar 25 09:18:22 2022 -0400

    SOLR-15223: BaseCloudSolrClient is the new CloudSolrClient (#750)
    
    * Rename CloudSolrClient to CloudLegacySolrClient (deprecated)
    * Rename BaseCloudSolrClient to CloudSolrClient
    * Add CloudSolrClient.Builder via CloudHttp2SolrClient.Builder
    
    Note: all users of CloudSolrClient are now using CloudLegacySolrClient because this commit is only a refactoring.
    
    Co-authored-by: Jan Høydahl <ja...@users.noreply.github.com>
    Co-authored-by: Christine Poerschke <cp...@apache.org>
---
 solr/CHANGES.txt                                   |    6 +-
 .../src/java/org/apache/solr/cloud/Overseer.java   |    3 +-
 .../java/org/apache/solr/cloud/ZkController.java   |    6 +-
 .../designer/SchemaDesignerConfigSetHelper.java    |    8 +-
 .../src/java/org/apache/solr/util/ExportTool.java  |    3 +-
 .../src/java/org/apache/solr/util/SolrCLI.java     |   16 +-
 .../apache/solr/cloud/AliasIntegrationTest.java    |    3 +-
 ...aosMonkeyNothingIsSafeWithPullReplicasTest.java |    3 +-
 .../apache/solr/cloud/CreateRoutedAliasTest.java   |    7 +-
 .../org/apache/solr/cloud/HttpPartitionTest.java   |    5 +-
 .../test/org/apache/solr/cloud/OverseerTest.java   |    5 +-
 .../apache/solr/cloud/SolrCloudExampleTest.java    |    3 +-
 .../org/apache/solr/cloud/TestConfigSetsAPI.java   |    4 +-
 .../cloud/TestLeaderElectionWithEmptyReplica.java  |    6 +-
 .../solr/cloud/TestMiniSolrCloudClusterSSL.java    |    4 +-
 .../org/apache/solr/cloud/TestPullReplica.java     |   16 +-
 .../org/apache/solr/cloud/TestTlogReplica.java     |   16 +-
 .../solr/cloud/api/collections/ShardSplitTest.java |   26 +-
 .../org/apache/solr/handler/TestBlobHandler.java   |   18 +-
 .../org/apache/solr/handler/TestConfigReload.java  |    8 +-
 .../handler/TestSolrConfigHandlerConcurrent.java   |    3 +-
 .../solr/handler/admin/AdminHandlersProxyTest.java |    3 +-
 .../solr/security/BasicAuthIntegrationTest.java    |   10 +-
 .../security/PKIAuthenticationIntegrationTest.java |    3 +-
 .../solr/security/TestAuthorizationFramework.java  |    3 +-
 .../stream/AnalyticsShardRequestManager.java       |    6 +-
 .../hadoop/TestDelegationWithHadoopAuth.java       |    4 +-
 .../hadoop/TestSolrCloudWithDelegationTokens.java  |    4 +-
 .../solr/hdfs/snapshots/SolrSnapshotsTool.java     |    4 +-
 .../prometheus/exporter/SolrClientFactory.java     |    5 +-
 .../prometheus/scraper/SolrCloudScraperTest.java   |   11 +-
 .../pages/major-changes-in-solr-9.adoc             |    1 +
 .../client/solrj/impl/BaseCloudSolrClient.java     | 1381 -----------------
 .../client/solrj/impl/CloudHttp2SolrClient.java    |    2 +-
 ...dSolrClient.java => CloudLegacySolrClient.java} |   32 +-
 .../solr/client/solrj/impl/CloudSolrClient.java    | 1562 +++++++++++++++++---
 .../client/solrj/impl/SolrClientCloudManager.java  |    9 +-
 .../solrj/impl/SolrClientNodeStateProvider.java    |   10 +-
 .../solrj/impl/ZkClientClusterStateProvider.java   |    2 +-
 .../solr/client/solrj/io/SolrClientCache.java      |    5 +-
 .../solr/client/solrj/io/stream/Facet2DStream.java |    4 +-
 .../solr/client/solrj/io/stream/FacetStream.java   |    4 +-
 .../solr/client/solrj/io/stream/RandomStream.java  |    3 +-
 .../solr/client/solrj/io/stream/SearchStream.java  |    3 +-
 .../client/solrj/io/stream/TimeSeriesStream.java   |    4 +-
 .../solr/client/solrj/io/stream/TopicStream.java   |    4 +-
 .../solr/client/solrj/io/stream/UpdateStream.java  |    4 +-
 .../apache/solr/common/cloud/ZkStateReader.java    |    4 +-
 .../solrj/impl/CloudHttp2SolrClientRetryTest.java  |    2 +-
 .../solrj/impl/CloudHttp2SolrClientTest.java       |    4 +-
 .../solrj/impl/CloudSolrClientBuilderTest.java     |    4 +-
 .../impl/CloudSolrClientMultiConstructorTest.java  |    2 +-
 .../client/solrj/impl/CloudSolrClientTest.java     |    4 +-
 .../client/solrj/impl/HttpClusterStateSSLTest.java |    2 +-
 .../src/java/org/apache/solr/SolrTestCaseJ4.java   |    4 +-
 .../AbstractChaosMonkeyNothingIsSafeTestBase.java  |    3 +-
 .../solr/cloud/AbstractRecoveryZkTestBase.java     |    5 +-
 .../apache/solr/cloud/MiniSolrCloudCluster.java    |    3 +-
 .../org/apache/solr/cloud/SolrCloudTestCase.java   |    5 +-
 .../AbstractCloudBackupRestoreTestCase.java        |    5 +-
 60 files changed, 1544 insertions(+), 1755 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c2c62a7..5cb3bf8 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -600,7 +600,11 @@ Other Changes
 
 * SOLR-16039: Upgrade to Hadoop 3.3.2 (Kevin Risden)
 
-* SOLR-15223: Deprecate HttpSolrClient and friends, please use the Http2SolrClient equivalents (janhoy, David Smiley)
+* SOLR-15223: SolrJ: The project is migrating away from the Apache HttpClient dependency in favor of
+ Jetty's client (supporting HTTP/2), to occur over the 9x releases.  We deprecated HttpSolrClient
+ and friends in favor of Http2SolrClient and equivalents.  For SolrCloud, the former CloudSolrClient
+ was renamed CloudLegacySolrClient (deprecated), and instead BaseCloudSolrClient was renamed to
+ CloudSolrClient and given a Builder for the Jetty based HTTP/2 client. (janhoy, David Smiley)
 
 * SOLR-16061: CloudSolrClient refactoring: Removed ZK specific methods (Haythem Khiri, David Smiley, janhoy)
 
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index 9b0ae2f..1f76e48 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -35,6 +35,7 @@ import java.util.function.BiConsumer;
 import org.apache.lucene.util.Version;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
@@ -828,7 +829,7 @@ public class Overseer implements SolrCloseable {
       return;
     }
     try (CloudSolrClient client =
-        new CloudSolrClient.Builder(
+        new CloudLegacySolrClient.Builder(
                 Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
             .withSocketTimeout(30000)
             .withConnectionTimeout(15000)
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index 28dd7c7..a0c1e7e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -60,7 +60,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Supplier;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
 import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
@@ -181,7 +181,7 @@ public class ZkController implements Closeable {
   private final SolrZkClient zkClient;
   public final ZkStateReader zkStateReader;
   private SolrCloudManager cloudManager;
-  private CloudSolrClient cloudSolrClient;
+  private CloudLegacySolrClient cloudSolrClient;
 
   private final String zkServerAddress; // example: 127.0.0.1:54062/solr
 
@@ -828,7 +828,7 @@ public class ZkController implements Closeable {
         return cloudManager;
       }
       cloudSolrClient =
-          new CloudSolrClient.Builder(new ZkClientClusterStateProvider(zkStateReader))
+          new CloudLegacySolrClient.Builder(new ZkClientClusterStateProvider(zkStateReader))
               .withSocketTimeout(30000)
               .withConnectionTimeout(15000)
               .withHttpClient(cc.getUpdateShardHandler().getDefaultHttpClient())
diff --git a/solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerConfigSetHelper.java b/solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerConfigSetHelper.java
index 3adaf09..879ed52 100644
--- a/solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerConfigSetHelper.java
+++ b/solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerConfigSetHelper.java
@@ -69,6 +69,7 @@ import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.util.EntityUtils;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.schema.FieldTypeDefinition;
@@ -146,7 +147,7 @@ class SchemaDesignerConfigSetHelper implements SchemaDesignerConstants {
     httpPost.setHeader("Content-Type", "text/plain");
     httpPost.setEntity(new ByteArrayEntity(fieldText.getBytes(StandardCharsets.UTF_8)));
     try {
-      HttpResponse resp = cloudClient().getHttpClient().execute(httpPost);
+      HttpResponse resp = ((CloudLegacySolrClient) cloudClient()).getHttpClient().execute(httpPost);
       int statusCode = resp.getStatusLine().getStatusCode();
       if (statusCode != HttpStatus.SC_OK) {
         throw new SolrException(
@@ -539,7 +540,8 @@ class SchemaDesignerConfigSetHelper implements SchemaDesignerConstants {
 
     HttpGet httpGet = new HttpGet(uri);
     try {
-      HttpResponse entity = cloudClient().getHttpClient().execute(httpGet);
+      HttpResponse entity =
+          ((CloudLegacySolrClient) cloudClient()).getHttpClient().execute(httpGet);
       int statusCode = entity.getStatusLine().getStatusCode();
       if (statusCode == HttpStatus.SC_OK) {
         byte[] bytes = DefaultSampleDocumentsLoader.streamAsBytes(entity.getEntity().getContent());
@@ -581,7 +583,7 @@ class SchemaDesignerConfigSetHelper implements SchemaDesignerConstants {
     try {
       httpPost.setHeader("Content-Type", "application/octet-stream");
       httpPost.setEntity(new ByteArrayEntity(bytes));
-      HttpResponse resp = cloudClient.getHttpClient().execute(httpPost);
+      HttpResponse resp = ((CloudLegacySolrClient) cloudClient).getHttpClient().execute(httpPost);
       int statusCode = resp.getStatusLine().getStatusCode();
       if (statusCode != HttpStatus.SC_OK) {
         throw new SolrException(
diff --git a/solr/core/src/java/org/apache/solr/util/ExportTool.java b/solr/core/src/java/org/apache/solr/util/ExportTool.java
index 45972f4..7c5f5b9 100644
--- a/solr/core/src/java/org/apache/solr/util/ExportTool.java
+++ b/solr/core/src/java/org/apache/solr/util/ExportTool.java
@@ -60,6 +60,7 @@ import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.StreamingResponseCallback;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
@@ -145,7 +146,7 @@ public class ExportTool extends SolrCLI.ToolBase {
     abstract void exportDocs() throws Exception;
 
     void fetchUniqueKey() throws SolrServerException, IOException {
-      solrClient = new CloudSolrClient.Builder(Collections.singletonList(baseurl)).build();
+      solrClient = new CloudLegacySolrClient.Builder(Collections.singletonList(baseurl)).build();
       NamedList<Object> response =
           solrClient.request(
               new GenericSolrRequest(
diff --git a/solr/core/src/java/org/apache/solr/util/SolrCLI.java b/solr/core/src/java/org/apache/solr/util/SolrCLI.java
index 04caf7b..b9cbbc1 100755
--- a/solr/core/src/java/org/apache/solr/util/SolrCLI.java
+++ b/solr/core/src/java/org/apache/solr/util/SolrCLI.java
@@ -93,6 +93,7 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
@@ -203,8 +204,8 @@ public class SolrCLI implements CLIO {
       String zkHost = cli.getOptionValue("zkHost", ZK_HOST);
 
       log.debug("Connecting to Solr cluster: {}", zkHost);
-      try (CloudSolrClient cloudSolrClient =
-          new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
+      try (var cloudSolrClient =
+          new CloudLegacySolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
               .build()) {
 
         String collection = cli.getOptionValue("collection");
@@ -216,7 +217,7 @@ public class SolrCLI implements CLIO {
     }
 
     /** Runs a SolrCloud tool with CloudSolrClient initialized */
-    protected abstract void runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli)
+    protected abstract void runCloudTool(CloudLegacySolrClient cloudSolrClient, CommandLine cli)
         throws Exception;
   }
 
@@ -1203,7 +1204,8 @@ public class SolrCLI implements CLIO {
     }
 
     @Override
-    protected void runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli) throws Exception {
+    protected void runCloudTool(CloudLegacySolrClient cloudSolrClient, CommandLine cli)
+        throws Exception {
       raiseLogLevelUnlessVerbose(cli);
       String collection = cli.getOptionValue("collection");
       if (collection == null)
@@ -1407,7 +1409,7 @@ public class SolrCLI implements CLIO {
             "Must provide either the '-solrUrl' or '-zkHost' parameters!");
 
       try (CloudSolrClient cloudSolrClient =
-          new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
+          new CloudLegacySolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
               .build()) {
         cloudSolrClient.connect();
         Set<String> liveNodes = cloudSolrClient.getClusterState().getLiveNodes();
@@ -1536,7 +1538,7 @@ public class SolrCLI implements CLIO {
       }
 
       try (CloudSolrClient cloudSolrClient =
-          new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
+          new CloudLegacySolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
               .build()) {
         echoIfVerbose("\nConnecting to ZooKeeper at " + zkHost + " ...", cli);
         cloudSolrClient.connect();
@@ -2494,7 +2496,7 @@ public class SolrCLI implements CLIO {
     protected void deleteCollection(CommandLine cli) throws Exception {
       String zkHost = getZkHost(cli);
       try (CloudSolrClient cloudSolrClient =
-          new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
+          new CloudLegacySolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
               .withSocketTimeout(30000)
               .withConnectionTimeout(15000)
               .build()) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
index 591cab3..68c35fc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
@@ -38,6 +38,7 @@ import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
@@ -78,7 +79,7 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
   public void setUp() throws Exception {
     super.setUp();
     solrClient = getCloudSolrClient(cluster);
-    httpClient = (CloseableHttpClient) solrClient.getHttpClient();
+    httpClient = (CloseableHttpClient) ((CloudLegacySolrClient) solrClient).getHttpClient();
   }
 
   @After
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java
index 26a21b2..ce71002 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java
@@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.DocCollection;
@@ -218,7 +219,7 @@ public class ChaosMonkeyNothingIsSafeWithPullReplicasTest extends AbstractFullDi
       if (runFullThrottle) {
         ftIndexThread =
             new FullThrottleStoppableIndexingThread(
-                cloudClient.getHttpClient(),
+                ((CloudLegacySolrClient) cloudClient).getHttpClient(),
                 controlClient,
                 cloudClient,
                 clients,
diff --git a/solr/core/src/test/org/apache/solr/cloud/CreateRoutedAliasTest.java b/solr/core/src/test/org/apache/solr/cloud/CreateRoutedAliasTest.java
index ad4efdb..7f3a872 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CreateRoutedAliasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CreateRoutedAliasTest.java
@@ -36,6 +36,7 @@ import org.apache.http.util.EntityUtils;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.cloud.api.collections.TimeRoutedAlias;
@@ -418,7 +419,8 @@ public class CreateRoutedAliasTest extends SolrCloudTestCase {
   }
 
   private void assertSuccess(HttpUriRequest msg) throws IOException {
-    CloseableHttpClient httpClient = (CloseableHttpClient) solrClient.getHttpClient();
+    CloseableHttpClient httpClient =
+        (CloseableHttpClient) ((CloudLegacySolrClient) solrClient).getHttpClient();
     try (CloseableHttpResponse response = httpClient.execute(msg)) {
       if (200 != response.getStatusLine().getStatusCode()) {
         System.err.println(EntityUtils.toString(response.getEntity()));
@@ -428,7 +430,8 @@ public class CreateRoutedAliasTest extends SolrCloudTestCase {
   }
 
   private void assertFailure(HttpUriRequest msg, String expectedErrorSubstring) throws IOException {
-    CloseableHttpClient httpClient = (CloseableHttpClient) solrClient.getHttpClient();
+    CloseableHttpClient httpClient =
+        (CloseableHttpClient) ((CloudLegacySolrClient) solrClient).getHttpClient();
     try (CloseableHttpResponse response = httpClient.execute(msg)) {
       assertEquals(400, response.getStatusLine().getStatusCode());
       String entity = EntityUtils.toString(response.getEntity());
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 725088a..e2f8595 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -39,6 +39,7 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.cloud.SocketProxy;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
@@ -100,8 +101,8 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
   /** We need to turn off directUpdatesToLeadersOnly due to SOLR-9512 */
   @Override
   protected CloudSolrClient createCloudClient(String defaultCollection) {
-    CloudSolrClient client =
-        new CloudSolrClient.Builder(
+    var client =
+        new CloudLegacySolrClient.Builder(
                 Collections.singletonList(zkServer.getZkAddress()), Optional.empty())
             .sendDirectUpdatesToAnyShardReplica()
             .withConnectionTimeout(5000)
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index c2a46d3..8e8a9e7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -53,6 +53,7 @@ import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.cloud.DistributedQueue;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
 import org.apache.solr.cloud.overseer.NodeMutator;
@@ -1945,8 +1946,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
   private SolrCloudManager getCloudDataProvider(
       String zkAddress, SolrZkClient zkClient, ZkStateReader reader) {
-    CloudSolrClient client =
-        new CloudSolrClient.Builder(Collections.singletonList(zkAddress), Optional.empty())
+    var client =
+        new CloudLegacySolrClient.Builder(Collections.singletonList(zkAddress), Optional.empty())
             .withSocketTimeout(30000)
             .withConnectionTimeout(15000)
             .build();
diff --git a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
index a24de71..9d4bea1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
@@ -41,6 +41,7 @@ import org.apache.http.HttpEntity;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.util.EntityUtils;
 import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.StreamingUpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
@@ -319,7 +320,7 @@ public class SolrCloudExampleTest extends AbstractFullDistribZkTestBase {
     HttpGet get = new HttpGet(uri);
     HttpEntity entity = null;
     try {
-      entity = cloudClient.getLbClient().getHttpClient().execute(get).getEntity();
+      entity = ((CloudLegacySolrClient) cloudClient).getHttpClient().execute(get).getEntity();
       String response = EntityUtils.toString(entity, StandardCharsets.UTF_8);
       return (Map<?, ?>) fromJSONString(response);
     } finally {
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
index 3333d8e..b30ec3d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
@@ -69,6 +69,7 @@ import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
 import org.apache.solr.client.solrj.request.ConfigSetAdminRequest.Create;
@@ -1579,7 +1580,8 @@ public class TestConfigSetsAPI extends SolrCloudTestCase {
       httpRequest.setEntity(
           new ByteArrayEntity(bytarr.array(), bytarr.arrayOffset(), bytarr.limit()));
       log.info("Uploading configset with user {}", username);
-      entity = cloudClient.getLbClient().getHttpClient().execute(httpRequest).getEntity();
+      entity =
+          ((CloudLegacySolrClient) cloudClient).getHttpClient().execute(httpRequest).getEntity();
       try {
         response = EntityUtils.toString(entity, UTF_8);
         m = (Map<?, ?>) Utils.fromJSONString(response);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
index 951b470..6772cea 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
@@ -23,8 +23,10 @@ import java.util.concurrent.TimeUnit;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrInputDocument;
@@ -118,8 +120,8 @@ public class TestLeaderElectionWithEmptyReplica extends SolrCloudTestCase {
     int count = 0;
     for (Replica replica : shard.getReplicas()) {
       HttpSolrClient client =
-          new HttpSolrClient.Builder(replica.getCoreUrl())
-              .withHttpClient(cloudClient.getLbClient().getHttpClient())
+          new Builder(replica.getCoreUrl())
+              .withHttpClient(((CloudLegacySolrClient) cloudClient).getHttpClient())
               .build();
       QueryResponse response = client.query(new SolrQuery("q", "*:*", "distrib", "false"));
       //      log.info("Found numFound={} on replica: {}", response.getResults().getNumFound(),
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java
index d17b57d..1604c9b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java
@@ -37,6 +37,7 @@ import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettyConfig;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
@@ -320,7 +321,8 @@ public class TestMiniSolrCloudClusterSSL extends SolrTestCaseJ4 {
       // ensure it has the necessary protocols/credentials for each jetty server
       //
       // NOTE: we're not responsible for closing the cloud client
-      final HttpClient cloudClient = cluster.getSolrClient().getLbClient().getHttpClient();
+      final HttpClient cloudClient =
+          ((CloudLegacySolrClient) cluster.getSolrClient()).getHttpClient();
       try (HttpSolrClient client = getRandomizedHttpSolrClient(baseURL)) {
         assertEquals(0, CoreAdminRequest.getStatus(/* all */ null, client).getStatus());
       }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
index edfd6ad..8ff4607 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
@@ -37,6 +37,7 @@ import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
@@ -148,7 +149,7 @@ public class TestPullReplica extends SolrCloudTestCase {
           // These options should all mean the same
           url = url + pickRandom("", "&nrtReplicas=1", "&replicationFactor=1");
           HttpGet createCollectionGet = new HttpGet(url);
-          cluster.getSolrClient().getHttpClient().execute(createCollectionGet);
+          getHttpClient().execute(createCollectionGet);
           break;
         case 2:
           // Sometimes use V2 API
@@ -168,8 +169,7 @@ public class TestPullReplica extends SolrCloudTestCase {
           HttpPost createCollectionPost = new HttpPost(url);
           createCollectionPost.setHeader("Content-type", "application/json");
           createCollectionPost.setEntity(new StringEntity(requestBody));
-          HttpResponse httpResponse =
-              cluster.getSolrClient().getHttpClient().execute(createCollectionPost);
+          HttpResponse httpResponse = getHttpClient().execute(createCollectionPost);
           assertEquals(200, httpResponse.getStatusLine().getStatusCode());
           break;
       }
@@ -459,7 +459,7 @@ public class TestPullReplica extends SolrCloudTestCase {
         collectionName,
         activeReplicaCount(numReplicas, 0, numReplicas));
     DocCollection docCollection = assertNumberOfReplicas(numReplicas, 0, numReplicas, false, true);
-    HttpClient httpClient = cluster.getSolrClient().getHttpClient();
+    HttpClient httpClient = getHttpClient();
     int id = 0;
     Slice slice = docCollection.getSlice("shard1");
     List<String> ids = new ArrayList<>(slice.getReplicas().size());
@@ -858,7 +858,7 @@ public class TestPullReplica extends SolrCloudTestCase {
                 shardName,
                 type);
         HttpGet addReplicaGet = new HttpGet(url);
-        HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaGet);
+        HttpResponse httpResponse = getHttpClient().execute(addReplicaGet);
         assertEquals(200, httpResponse.getStatusLine().getStatusCode());
         break;
       case 2: // Add replica with V2 API
@@ -873,9 +873,13 @@ public class TestPullReplica extends SolrCloudTestCase {
         HttpPost addReplicaPost = new HttpPost(url);
         addReplicaPost.setHeader("Content-type", "application/json");
         addReplicaPost.setEntity(new StringEntity(requestBody));
-        httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaPost);
+        httpResponse = getHttpClient().execute(addReplicaPost);
         assertEquals(200, httpResponse.getStatusLine().getStatusCode());
         break;
     }
   }
+
+  private HttpClient getHttpClient() {
+    return ((CloudLegacySolrClient) cluster.getSolrClient()).getHttpClient();
+  }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
index 074d833..d751ccd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
@@ -43,6 +43,7 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -172,8 +173,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
                 2, // numShards
                 4); // tlogReplicas
         HttpGet createCollectionGet = new HttpGet(url);
-        HttpResponse httpResponse =
-            cluster.getSolrClient().getHttpClient().execute(createCollectionGet);
+        HttpResponse httpResponse = getHttpClient().execute(createCollectionGet);
         assertEquals(200, httpResponse.getStatusLine().getStatusCode());
         cluster.waitForActiveCollection(collectionName, 2, 8);
         break;
@@ -192,7 +192,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
         HttpPost createCollectionPost = new HttpPost(url);
         createCollectionPost.setHeader("Content-type", "application/json");
         createCollectionPost.setEntity(new StringEntity(requestBody));
-        httpResponse = cluster.getSolrClient().getHttpClient().execute(createCollectionPost);
+        httpResponse = getHttpClient().execute(createCollectionPost);
         assertEquals(200, httpResponse.getStatusLine().getStatusCode());
         cluster.waitForActiveCollection(collectionName, 2, 8);
         break;
@@ -248,6 +248,10 @@ public class TestTlogReplica extends SolrCloudTestCase {
     }
   }
 
+  private HttpClient getHttpClient() {
+    return ((CloudLegacySolrClient) cluster.getSolrClient()).getHttpClient();
+  }
+
   @SuppressWarnings("unchecked")
   public void testAddDocs() throws Exception {
     int numTlogReplicas = 1 + random().nextInt(3);
@@ -349,7 +353,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
                 shardName,
                 type);
         HttpGet addReplicaGet = new HttpGet(url);
-        HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaGet);
+        HttpResponse httpResponse = getHttpClient().execute(addReplicaGet);
         assertEquals(200, httpResponse.getStatusLine().getStatusCode());
         break;
       case 2: // Add replica with V2 API
@@ -364,7 +368,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
         HttpPost addReplicaPost = new HttpPost(url);
         addReplicaPost.setHeader("Content-type", "application/json");
         addReplicaPost.setEntity(new StringEntity(requestBody));
-        httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaPost);
+        httpResponse = getHttpClient().execute(addReplicaPost);
         assertEquals(200, httpResponse.getStatusLine().getStatusCode());
         break;
     }
@@ -392,7 +396,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
         activeReplicaCount(numNrtReplicas, numReplicas, 0));
     DocCollection docCollection =
         assertNumberOfReplicas(numNrtReplicas, numReplicas, 0, false, true);
-    HttpClient httpClient = cluster.getSolrClient().getHttpClient();
+    HttpClient httpClient = getHttpClient();
     int id = 0;
     Slice slice = docCollection.getSlice("shard1");
     List<String> ids = new ArrayList<>(slice.getReplicas().size());
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
index 4b5623b..acd66c6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -39,6 +39,7 @@ import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -46,9 +47,20 @@ import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.cloud.*;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.cloud.StoppableIndexingThread;
 import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.cloud.*;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.HashBasedRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -136,7 +148,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
     try (CloudSolrClient client =
         getCloudSolrClient(
-            zkServer.getZkAddress(), true, cloudClient.getLbClient().getHttpClient())) {
+            zkServer.getZkAddress(), true, ((CloudLegacySolrClient) cloudClient).getHttpClient())) {
       client.setDefaultCollection(collectionName);
       StoppableIndexingThread thread =
           new StoppableIndexingThread(controlClient, client, "i1", true);
@@ -218,9 +230,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
                   .getReplicas()
                   .get(0)
                   .getBaseUrl();
-          try (HttpSolrClient control =
+          try (var control =
               new HttpSolrClient.Builder(control_collection)
-                  .withHttpClient(client.getLbClient().getHttpClient())
+                  .withHttpClient(((CloudLegacySolrClient) client).getHttpClient())
                   .build()) {
             state = addReplica.processAndWait(control, 30);
           }
@@ -283,9 +295,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     long numFound = Long.MIN_VALUE;
     int count = 0;
     for (Replica replica : shard.getReplicas()) {
-      HttpSolrClient client =
+      var client =
           new HttpSolrClient.Builder(replica.getCoreUrl())
-              .withHttpClient(cloudClient.getLbClient().getHttpClient())
+              .withHttpClient(((CloudLegacySolrClient) cloudClient).getHttpClient())
               .build();
       QueryResponse response = client.query(new SolrQuery("q", "*:*", "distrib", "false"));
       if (log.isInfoEnabled()) {
diff --git a/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java b/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
index e92210b..1e2b51e 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
@@ -34,6 +34,7 @@ import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.util.EntityUtils;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -100,8 +101,12 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
     map = TestSolrConfigHandlerConcurrent.getAsMap(url, cloudClient);
     assertEquals("" + bytarr.length, map._getStr("response/docs[0]/size", null));
 
-    compareInputAndOutput(baseUrl + "/.system/blob/test?wt=filestream", bytarr2, cloudClient);
-    compareInputAndOutput(baseUrl + "/.system/blob/test/1?wt=filestream", bytarr, cloudClient);
+    compareInputAndOutput(
+        baseUrl + "/.system/blob/test?wt=filestream", bytarr2, (CloudLegacySolrClient) cloudClient);
+    compareInputAndOutput(
+        baseUrl + "/.system/blob/test/1?wt=filestream",
+        bytarr,
+        (CloudLegacySolrClient) cloudClient);
   }
 
   static void checkBlobPostMd5(String baseUrl, CloudSolrClient cloudClient) throws Exception {
@@ -157,7 +162,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
             i, count, timer.getTime(), map.toString()));
   }
 
-  static void compareInputAndOutput(String url, byte[] bytarr, CloudSolrClient cloudClient)
+  static void compareInputAndOutput(String url, byte[] bytarr, CloudLegacySolrClient cloudClient)
       throws IOException {
 
     HttpClient httpClient = cloudClient.getLbClient().getHttpClient();
@@ -185,7 +190,12 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
       httpPost = new HttpPost(baseUrl + "/.system/blob/" + blobName);
       httpPost.setHeader("Content-Type", "application/octet-stream");
       httpPost.setEntity(new ByteArrayEntity(bytarr.array(), bytarr.arrayOffset(), bytarr.limit()));
-      entity = cloudClient.getLbClient().getHttpClient().execute(httpPost).getEntity();
+      entity =
+          ((CloudLegacySolrClient) cloudClient)
+              .getLbClient()
+              .getHttpClient()
+              .execute(httpPost)
+              .getEntity();
       try {
         response = EntityUtils.toString(entity, StandardCharsets.UTF_8);
         Map<?, ?> m = (Map<?, ?>) fromJSONString(response);
diff --git a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java b/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
index 5e9e077..36ce1c0 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
@@ -28,6 +28,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.http.HttpEntity;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.util.EntityUtils;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
 import org.apache.solr.cloud.ZkConfigSetService;
 import org.apache.solr.common.LinkedHashMapWriter;
@@ -123,7 +124,12 @@ public class TestConfigReload extends AbstractFullDistribZkTestBase {
     HttpGet get = new HttpGet(uri);
     HttpEntity entity = null;
     try {
-      entity = cloudClient.getLbClient().getHttpClient().execute(get).getEntity();
+      entity =
+          ((CloudLegacySolrClient) cloudClient)
+              .getLbClient()
+              .getHttpClient()
+              .execute(get)
+              .getEntity();
       String response = EntityUtils.toString(entity, StandardCharsets.UTF_8);
       return (LinkedHashMapWriter)
           Utils.MAPWRITEROBJBUILDER.apply(Utils.getJSONParser(new StringReader(response))).getVal();
diff --git a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
index 538870e..a4bb6f1 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
@@ -31,6 +31,7 @@ import org.apache.http.HttpEntity;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.util.EntityUtils;
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
 import org.apache.solr.common.LinkedHashMapWriter;
@@ -194,7 +195,7 @@ public class TestSolrConfigHandlerConcurrent extends AbstractFullDistribZkTestBa
     HttpGet get = new HttpGet(uri);
     HttpEntity entity = null;
     try {
-      entity = cloudClient.getLbClient().getHttpClient().execute(get).getEntity();
+      entity = ((CloudLegacySolrClient) cloudClient).getHttpClient().execute(get).getEntity();
       String response = EntityUtils.toString(entity, StandardCharsets.UTF_8);
       try {
         return (LinkedHashMapWriter)
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java b/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
index 4b12370..d479a1b 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/AdminHandlersProxyTest.java
@@ -25,6 +25,7 @@ import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.lucene.util.IOUtils;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.GenericSolrRequest;
 import org.apache.solr.client.solrj.response.SimpleSolrResponse;
@@ -53,7 +54,7 @@ public class AdminHandlersProxyTest extends SolrCloudTestCase {
     super.setUp();
     solrClient = getCloudSolrClient(cluster);
     solrClient.connect(1000, TimeUnit.MILLISECONDS);
-    httpClient = (CloseableHttpClient) solrClient.getHttpClient();
+    httpClient = (CloseableHttpClient) ((CloudLegacySolrClient) solrClient).getHttpClient();
   }
 
   @After
diff --git a/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java b/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
index d8214d6..9706b4f 100644
--- a/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/security/BasicAuthIntegrationTest.java
@@ -42,6 +42,7 @@ import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -151,12 +152,9 @@ public class BasicAuthIntegrationTest extends SolrCloudAuthTestCase {
       }
 
       // avoid bad connection races due to shutdown
-      cluster.getSolrClient().getHttpClient().getConnectionManager().closeExpiredConnections();
-      cluster
-          .getSolrClient()
-          .getHttpClient()
-          .getConnectionManager()
-          .closeIdleConnections(1, TimeUnit.MILLISECONDS);
+      final var httpClient = ((CloudLegacySolrClient) cluster.getSolrClient()).getHttpClient();
+      httpClient.getConnectionManager().closeExpiredConnections();
+      httpClient.getConnectionManager().closeIdleConnections(1, TimeUnit.MILLISECONDS);
 
       BaseHttpSolrClient.RemoteSolrException exp =
           expectThrows(
diff --git a/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java b/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java
index 43f0c45..89fc9a4 100644
--- a/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/security/PKIAuthenticationIntegrationTest.java
@@ -25,6 +25,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import javax.servlet.http.HttpServletRequest;
 import org.apache.http.client.HttpClient;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.SolrCloudAuthTestCase;
@@ -64,7 +65,7 @@ public class PKIAuthenticationIntegrationTest extends SolrCloudAuthTestCase {
 
   @Test
   public void testPkiAuth() throws Exception {
-    HttpClient httpClient = cluster.getSolrClient().getHttpClient();
+    HttpClient httpClient = ((CloudLegacySolrClient) cluster.getSolrClient()).getHttpClient();
     for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
       String baseUrl = jetty.getBaseUrl().toString();
       verifySecurityStatus(
diff --git a/solr/core/src/test/org/apache/solr/security/TestAuthorizationFramework.java b/solr/core/src/test/org/apache/solr/security/TestAuthorizationFramework.java
index 74d0cbd..fe2767e 100644
--- a/solr/core/src/test/org/apache/solr/security/TestAuthorizationFramework.java
+++ b/solr/core/src/test/org/apache/solr/security/TestAuthorizationFramework.java
@@ -27,6 +27,7 @@ import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.util.EntityUtils;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -68,7 +69,7 @@ public class TestAuthorizationFramework extends AbstractFullDistribZkTestBase {
       waitForThingsToLevelOut(10, TimeUnit.SECONDS);
       String baseUrl = jettys.get(0).getBaseUrl().toString();
       verifySecurityStatus(
-          cloudClient.getLbClient().getHttpClient(),
+          ((CloudLegacySolrClient) cloudClient).getHttpClient(),
           baseUrl + "/admin/authorization",
           "authorization/class",
           MockAuthorizationPlugin.class.getName(),
diff --git a/solr/modules/analytics/src/java/org/apache/solr/analytics/stream/AnalyticsShardRequestManager.java b/solr/modules/analytics/src/java/org/apache/solr/analytics/stream/AnalyticsShardRequestManager.java
index d07e5a4..fb30454 100644
--- a/solr/modules/analytics/src/java/org/apache/solr/analytics/stream/AnalyticsShardRequestManager.java
+++ b/solr/modules/analytics/src/java/org/apache/solr/analytics/stream/AnalyticsShardRequestManager.java
@@ -32,8 +32,8 @@ import org.apache.solr.analytics.AnalyticsRequestManager;
 import org.apache.solr.analytics.AnalyticsRequestParser;
 import org.apache.solr.analytics.TimeExceededStubException;
 import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.SolrException;
@@ -83,7 +83,9 @@ public class AnalyticsShardRequestManager {
    */
   public void sendRequests(String collection, String zkHost) throws IOException {
     this.replicaUrls = new ArrayList<>();
-    this.cloudSolrClient = new Builder(Collections.singletonList(zkHost), Optional.empty()).build();
+    this.cloudSolrClient =
+        new CloudLegacySolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
+            .build();
     try {
       this.cloudSolrClient.connect();
       pickShards(collection);
diff --git a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
index 46c1bd6..6277f67 100644
--- a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
+++ b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
@@ -27,7 +27,7 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -174,7 +174,7 @@ public class TestDelegationWithHadoopAuth extends SolrCloudTestCase {
               .build();
     else
       delegationTokenClient =
-          new CloudSolrClient.Builder(
+          new CloudLegacySolrClient.Builder(
                   Collections.singletonList(cluster.getZkServer().getZkAddress()), Optional.empty())
               .withLBHttpSolrClientBuilder(
                   new LBHttpSolrClient.Builder()
diff --git a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithDelegationTokens.java b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithDelegationTokens.java
index 796beb9..6083500 100644
--- a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithDelegationTokens.java
+++ b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithDelegationTokens.java
@@ -31,7 +31,7 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest.ACTION;
@@ -201,7 +201,7 @@ public class TestSolrCloudWithDelegationTokens extends SolrTestCaseJ4 {
               .build();
     else
       delegationTokenClient =
-          new CloudSolrClient.Builder(
+          new CloudLegacySolrClient.Builder(
                   Collections.singletonList(miniCluster.getZkServer().getZkAddress()),
                   Optional.empty())
               .withLBHttpSolrClientBuilder(
diff --git a/solr/modules/hdfs/src/java/org/apache/solr/hdfs/snapshots/SolrSnapshotsTool.java b/solr/modules/hdfs/src/java/org/apache/solr/hdfs/snapshots/SolrSnapshotsTool.java
index 4bccc21..01d9391 100644
--- a/solr/modules/hdfs/src/java/org/apache/solr/hdfs/snapshots/SolrSnapshotsTool.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/hdfs/snapshots/SolrSnapshotsTool.java
@@ -46,6 +46,7 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
 import org.apache.hadoop.fs.Path;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
@@ -102,7 +103,8 @@ public class SolrSnapshotsTool implements Closeable, CLIO {
 
   public SolrSnapshotsTool(String solrZkEnsemble) {
     solrClient =
-        new CloudSolrClient.Builder(Collections.singletonList(solrZkEnsemble), Optional.empty())
+        new CloudLegacySolrClient.Builder(
+                Collections.singletonList(solrZkEnsemble), Optional.empty())
             .build();
   }
 
diff --git a/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/exporter/SolrClientFactory.java b/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/exporter/SolrClientFactory.java
index 2c7d2c3..877058b 100644
--- a/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/exporter/SolrClientFactory.java
+++ b/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/exporter/SolrClientFactory.java
@@ -19,6 +19,7 @@ package org.apache.solr.prometheus.exporter;
 
 import java.util.Optional;
 import java.util.stream.Collectors;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.impl.NoOpResponseParser;
@@ -56,8 +57,8 @@ public class SolrClientFactory {
 
     ConnectStringParser parser = new ConnectStringParser(zookeeperConnectionString);
 
-    CloudSolrClient.Builder cloudBuilder =
-        new CloudSolrClient.Builder(
+    var cloudBuilder =
+        new CloudLegacySolrClient.Builder(
             parser.getServerAddresses().stream()
                 .map(address -> address.getHostString() + ":" + address.getPort())
                 .collect(Collectors.toList()),
diff --git a/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/scraper/SolrCloudScraperTest.java b/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/scraper/SolrCloudScraperTest.java
index 947d533..0b56ba3 100644
--- a/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/scraper/SolrCloudScraperTest.java
+++ b/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/scraper/SolrCloudScraperTest.java
@@ -28,9 +28,12 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.stream.Collectors;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.NoOpResponseParser;
-import org.apache.solr.common.cloud.*;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.IOUtils;
 import org.apache.solr.common.util.SolrNamedThreadFactory;
@@ -52,8 +55,8 @@ public class SolrCloudScraperTest extends PrometheusExporterTestBase {
   private ExecutorService executor;
 
   private SolrCloudScraper createSolrCloudScraper() {
-    CloudSolrClient solrClient =
-        new CloudSolrClient.Builder(
+    var solrClient =
+        new CloudLegacySolrClient.Builder(
                 Collections.singletonList(cluster.getZkServer().getZkAddress()), Optional.empty())
             .build();
 
diff --git a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
index 296a5b8..e7f7e71 100644
--- a/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
+++ b/solr/solr-ref-guide/modules/upgrade-notes/pages/major-changes-in-solr-9.adoc
@@ -347,3 +347,4 @@ where they really belong: /admin/threads, /admin/properties, /admin/logging
 
 * Atomic/partial updates to nested documents now _require_ the `\_root_` field to clearly show the document isn't a root document.  Solr 8 would fallback on the `\_route_` param but no longer.
 
+* SolrJ: The project is migrating away from the Apache HttpClient dependency in favor of Jetty's client (supporting HTTP/2), to occur over the 9x releases.  We deprecated HttpSolrClient and friends in favor of Http2SolrClient and equivalents.  For SolrCloud, the former CloudSolrClient was renamed CloudLegacySolrClient (deprecated), and instead BaseCloudSolrClient was renamed to CloudSolrClient and given a Builder for the Jetty based HTTP/2 client.
\ No newline at end of file
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
deleted file mode 100644
index 999425f..0000000
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
+++ /dev/null
@@ -1,1381 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.client.solrj.impl;
-
-import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
-import static org.apache.solr.common.params.CommonParams.ID;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.ConnectException;
-import java.net.SocketException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.function.Predicate;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-import org.apache.solr.client.solrj.ResponseParser;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.V2RequestSupport;
-import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
-import org.apache.solr.client.solrj.request.IsUpdateRequest;
-import org.apache.solr.client.solrj.request.RequestWriter;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.request.V2Request;
-import org.apache.solr.client.solrj.routing.ReplicaListTransformer;
-import org.apache.solr.client.solrj.routing.RequestReplicaListTransformerGenerator;
-import org.apache.solr.client.solrj.util.ClientUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.ToleratedUpdateError;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CollectionStatePredicate;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocCollectionWatcher;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.Hash;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.SolrNamedThreadFactory;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-public abstract class BaseCloudSolrClient extends SolrClient {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private volatile String defaultCollection;
-  // no of times collection state to be reloaded if stale state error is received
-  private static final int MAX_STALE_RETRIES =
-      Integer.parseInt(System.getProperty("cloudSolrClientMaxStaleRetries", "5"));
-  private Random rand = new Random();
-
-  private final boolean updatesToLeaders;
-  private final boolean directUpdatesToLeadersOnly;
-  private final RequestReplicaListTransformerGenerator requestRLTGenerator;
-  boolean parallelUpdates; // TODO final
-  private ExecutorService threadPool =
-      ExecutorUtil.newMDCAwareCachedThreadPool(
-          new SolrNamedThreadFactory("CloudSolrClient ThreadPool"));
-
-  public static final String STATE_VERSION = "_stateVer_";
-  private long retryExpiryTime =
-      TimeUnit.NANOSECONDS.convert(3, TimeUnit.SECONDS); // 3 seconds or 3 million nanos
-  private final Set<String> NON_ROUTABLE_PARAMS;
-
-  {
-    NON_ROUTABLE_PARAMS = new HashSet<>();
-    NON_ROUTABLE_PARAMS.add(UpdateParams.EXPUNGE_DELETES);
-    NON_ROUTABLE_PARAMS.add(UpdateParams.MAX_OPTIMIZE_SEGMENTS);
-    NON_ROUTABLE_PARAMS.add(UpdateParams.COMMIT);
-    NON_ROUTABLE_PARAMS.add(UpdateParams.WAIT_SEARCHER);
-    NON_ROUTABLE_PARAMS.add(UpdateParams.OPEN_SEARCHER);
-
-    NON_ROUTABLE_PARAMS.add(UpdateParams.SOFT_COMMIT);
-    NON_ROUTABLE_PARAMS.add(UpdateParams.PREPARE_COMMIT);
-    NON_ROUTABLE_PARAMS.add(UpdateParams.OPTIMIZE);
-
-    // Not supported via SolrCloud
-    // NON_ROUTABLE_PARAMS.add(UpdateParams.ROLLBACK);
-
-  }
-
-  private volatile List<Object> locks = objectList(3);
-
-  static class StateCache extends ConcurrentHashMap<String, ExpiringCachedDocCollection> {
-    final AtomicLong puts = new AtomicLong();
-    final AtomicLong hits = new AtomicLong();
-    final Lock evictLock = new ReentrantLock(true);
-    protected volatile long timeToLive = 60 * 1000L;
-
-    @Override
-    public ExpiringCachedDocCollection get(Object key) {
-      ExpiringCachedDocCollection val = super.get(key);
-      if (val == null) {
-        // a new collection is likely to be added now.
-        // check if there are stale items and remove them
-        evictStale();
-        return null;
-      }
-      if (val.isExpired(timeToLive)) {
-        super.remove(key);
-        return null;
-      }
-      hits.incrementAndGet();
-      return val;
-    }
-
-    @Override
-    public ExpiringCachedDocCollection put(String key, ExpiringCachedDocCollection value) {
-      puts.incrementAndGet();
-      return super.put(key, value);
-    }
-
-    void evictStale() {
-      if (!evictLock.tryLock()) return;
-      try {
-        for (Entry<String, ExpiringCachedDocCollection> e : entrySet()) {
-          if (e.getValue().isExpired(timeToLive)) {
-            super.remove(e.getKey());
-          }
-        }
-      } finally {
-        evictLock.unlock();
-      }
-    }
-  }
-
-  /**
-   * This is the time to wait to refetch the state after getting the same state version from ZK
-   *
-   * <p>secs
-   */
-  public void setRetryExpiryTime(int secs) {
-    this.retryExpiryTime = TimeUnit.NANOSECONDS.convert(secs, TimeUnit.SECONDS);
-  }
-
-  protected final StateCache collectionStateCache = new StateCache();
-
-  class ExpiringCachedDocCollection {
-    final DocCollection cached;
-    final long cachedAt;
-    // This is the time at which the collection is retried and got the same old version
-    volatile long retriedAt = -1;
-    // flag that suggests that this is potentially to be rechecked
-    volatile boolean maybeStale = false;
-
-    ExpiringCachedDocCollection(DocCollection cached) {
-      this.cached = cached;
-      this.cachedAt = System.nanoTime();
-    }
-
-    boolean isExpired(long timeToLiveMs) {
-      return (System.nanoTime() - cachedAt)
-          > TimeUnit.NANOSECONDS.convert(timeToLiveMs, TimeUnit.MILLISECONDS);
-    }
-
-    boolean shouldRetry() {
-      if (maybeStale) { // we are not sure if it is stale so check with retry time
-        if ((retriedAt == -1 || (System.nanoTime() - retriedAt) > retryExpiryTime)) {
-          return true; // we retried a while back. and we could not get anything new.
-          // it's likely that it is not going to be available now also.
-        }
-      }
-      return false;
-    }
-
-    void setRetriedAt() {
-      retriedAt = System.nanoTime();
-    }
-  }
-
-  protected BaseCloudSolrClient(
-      boolean updatesToLeaders, boolean parallelUpdates, boolean directUpdatesToLeadersOnly) {
-    this.updatesToLeaders = updatesToLeaders;
-    this.parallelUpdates = parallelUpdates;
-    this.directUpdatesToLeadersOnly = directUpdatesToLeadersOnly;
-    this.requestRLTGenerator = new RequestReplicaListTransformerGenerator();
-  }
-
-  /**
-   * Sets the cache ttl for DocCollection Objects cached.
-   *
-   * @param seconds ttl value in seconds
-   */
-  public void setCollectionCacheTTl(int seconds) {
-    assert seconds > 0;
-    this.collectionStateCache.timeToLive = seconds * 1000L;
-  }
-
-  protected abstract LBSolrClient getLbClient();
-
-  public abstract ClusterStateProvider getClusterStateProvider();
-
-  public ClusterState getClusterState() {
-    return getClusterStateProvider().getClusterState();
-  }
-
-  protected abstract boolean wasCommError(Throwable t);
-
-  @Override
-  public void close() throws IOException {
-    if (this.threadPool != null && !this.threadPool.isShutdown()) {
-      this.threadPool.shutdown();
-    }
-  }
-
-  public ResponseParser getParser() {
-    return getLbClient().getParser();
-  }
-
-  /**
-   * Note: This setter method is <b>not thread-safe</b>.
-   *
-   * @param processor Default Response Parser chosen to parse the response if the parser were not
-   *     specified as part of the request.
-   * @see org.apache.solr.client.solrj.SolrRequest#getResponseParser()
-   */
-  public void setParser(ResponseParser processor) {
-    getLbClient().setParser(processor);
-  }
-
-  public RequestWriter getRequestWriter() {
-    return getLbClient().getRequestWriter();
-  }
-
-  public void setRequestWriter(RequestWriter requestWriter) {
-    getLbClient().setRequestWriter(requestWriter);
-  }
-
-  /** Sets the default collection for request */
-  public void setDefaultCollection(String collection) {
-    this.defaultCollection = collection;
-  }
-
-  /** Gets the default collection for request */
-  public String getDefaultCollection() {
-    return defaultCollection;
-  }
-
-  /** Gets whether direct updates are sent in parallel */
-  public boolean isParallelUpdates() {
-    return parallelUpdates;
-  }
-
-  /**
-   * Connect to the zookeeper ensemble. This is an optional method that may be used to force a
-   * connect before any other requests are sent.
-   */
-  public void connect() {
-    getClusterStateProvider().connect();
-  }
-
-  /**
-   * Connect to a cluster. If the cluster is not ready, retry connection up to a given timeout.
-   *
-   * @param duration the timeout
-   * @param timeUnit the units of the timeout
-   * @throws TimeoutException if the cluster is not ready after the timeout
-   * @throws InterruptedException if the wait is interrupted
-   */
-  public void connect(long duration, TimeUnit timeUnit)
-      throws TimeoutException, InterruptedException {
-    if (log.isInfoEnabled()) {
-      log.info(
-          "Waiting for {} {} for cluster at {} to be ready",
-          duration,
-          timeUnit,
-          getClusterStateProvider());
-    }
-    long timeout = System.nanoTime() + timeUnit.toNanos(duration);
-    while (System.nanoTime() < timeout) {
-      try {
-        connect();
-        if (log.isInfoEnabled()) {
-          log.info("Cluster at {} ready", getClusterStateProvider());
-        }
-        return;
-      } catch (RuntimeException e) {
-        // not ready yet, then...
-      }
-      TimeUnit.MILLISECONDS.sleep(250);
-    }
-    throw new TimeoutException("Timed out waiting for cluster");
-  }
-
-  private ZkClientClusterStateProvider assertZKStateProvider() {
-    if (getClusterStateProvider() instanceof ZkClientClusterStateProvider) {
-      return (ZkClientClusterStateProvider) getClusterStateProvider();
-    }
-    throw new IllegalArgumentException("This client does not use ZK");
-  }
-
-  /**
-   * Block until a CollectionStatePredicate returns true, or the wait times out
-   *
-   * <p>Note that the predicate may be called again even after it has returned true, so implementors
-   * should avoid changing state within the predicate call itself.
-   *
-   * <p>This implementation utilizes {@link CollectionStateWatcher} internally. Callers that don't
-   * care about liveNodes are encouraged to use a {@link DocCollection} {@link Predicate} instead
-   *
-   * @see #waitForState(String, long, TimeUnit, Predicate)
-   * @see #registerCollectionStateWatcher
-   * @param collection the collection to watch
-   * @param wait how long to wait
-   * @param unit the units of the wait parameter
-   * @param predicate a {@link CollectionStatePredicate} to check the collection state
-   * @throws InterruptedException on interrupt
-   * @throws TimeoutException on timeout
-   */
-  public void waitForState(
-      String collection, long wait, TimeUnit unit, CollectionStatePredicate predicate)
-      throws InterruptedException, TimeoutException {
-    getClusterStateProvider().connect();
-    assertZKStateProvider().zkStateReader.waitForState(collection, wait, unit, predicate);
-  }
-  /**
-   * Block until a Predicate returns true, or the wait times out
-   *
-   * <p>Note that the predicate may be called again even after it has returned true, so implementors
-   * should avoid changing state within the predicate call itself.
-   *
-   * @see #registerDocCollectionWatcher
-   * @param collection the collection to watch
-   * @param wait how long to wait
-   * @param unit the units of the wait parameter
-   * @param predicate a {@link Predicate} to test against the {@link DocCollection}
-   * @throws InterruptedException on interrupt
-   * @throws TimeoutException on timeout
-   */
-  public void waitForState(
-      String collection, long wait, TimeUnit unit, Predicate<DocCollection> predicate)
-      throws InterruptedException, TimeoutException {
-    getClusterStateProvider().connect();
-    assertZKStateProvider().zkStateReader.waitForState(collection, wait, unit, predicate);
-  }
-
-  /**
-   * Register a CollectionStateWatcher to be called when the cluster state for a collection changes
-   * <em>or</em> the set of live nodes changes.
-   *
-   * <p>The Watcher will automatically be removed when it's <code>onStateChanged</code> returns
-   * <code>true</code>
-   *
-   * <p>This implementation utilizes {@link ZkStateReader#registerCollectionStateWatcher}
-   * internally. Callers that don't care about liveNodes are encouraged to use a {@link
-   * DocCollectionWatcher} instead
-   *
-   * @see #registerDocCollectionWatcher(String, DocCollectionWatcher)
-   * @see ZkStateReader#registerCollectionStateWatcher
-   * @param collection the collection to watch
-   * @param watcher a watcher that will be called when the state changes
-   */
-  public void registerCollectionStateWatcher(String collection, CollectionStateWatcher watcher) {
-    getClusterStateProvider().connect();
-    assertZKStateProvider().zkStateReader.registerCollectionStateWatcher(collection, watcher);
-  }
-
-  /**
-   * Register a DocCollectionWatcher to be called when the cluster state for a collection changes.
-   *
-   * <p>The Watcher will automatically be removed when it's <code>onStateChanged</code> returns
-   * <code>true</code>
-   *
-   * @see ZkStateReader#registerDocCollectionWatcher
-   * @param collection the collection to watch
-   * @param watcher a watcher that will be called when the state changes
-   */
-  public void registerDocCollectionWatcher(String collection, DocCollectionWatcher watcher) {
-    getClusterStateProvider().connect();
-    assertZKStateProvider().zkStateReader.registerDocCollectionWatcher(collection, watcher);
-  }
-
-  @SuppressWarnings({"unchecked"})
-  private NamedList<Object> directUpdate(AbstractUpdateRequest request, String collection)
-      throws SolrServerException {
-    UpdateRequest updateRequest = (UpdateRequest) request;
-    SolrParams params = request.getParams();
-    ModifiableSolrParams routableParams = new ModifiableSolrParams();
-    ModifiableSolrParams nonRoutableParams = new ModifiableSolrParams();
-
-    if (params != null) {
-      nonRoutableParams.add(params);
-      routableParams.add(params);
-      for (String param : NON_ROUTABLE_PARAMS) {
-        routableParams.remove(param);
-      }
-    } else {
-      params = new ModifiableSolrParams();
-    }
-
-    if (collection == null) {
-      throw new SolrServerException(
-          "No collection param specified on request and no default collection has been set.");
-    }
-
-    // Check to see if the collection is an alias. Updates to multi-collection aliases are ok as
-    // long as they are routed aliases
-    List<String> aliasedCollections = getClusterStateProvider().resolveAlias(collection);
-    if (getClusterStateProvider().isRoutedAlias(collection) || aliasedCollections.size() == 1) {
-      collection = aliasedCollections.get(0); // pick 1st (consistent with HttpSolrCall behavior)
-    } else {
-      throw new SolrException(
-          SolrException.ErrorCode.BAD_REQUEST,
-          "Update request to non-routed multi-collection alias not supported: "
-              + collection
-              + " -> "
-              + aliasedCollections);
-    }
-
-    DocCollection col = getDocCollection(collection, null);
-
-    DocRouter router = col.getRouter();
-
-    if (router instanceof ImplicitDocRouter) {
-      // short circuit as optimization
-      return null;
-    }
-
-    ReplicaListTransformer replicaListTransformer =
-        requestRLTGenerator.getReplicaListTransformer(params);
-
-    // Create the URL map, which is keyed on slice name.
-    // The value is a list of URLs for each replica in the slice.
-    // The first value in the list is the leader for the slice.
-    final Map<String, List<String>> urlMap = buildUrlMap(col, replicaListTransformer);
-    String routeField =
-        (col.getRouter().getRouteField(col) == null) ? ID : col.getRouter().getRouteField(col);
-    final Map<String, ? extends LBSolrClient.Req> routes =
-        createRoutes(updateRequest, routableParams, col, router, urlMap, routeField);
-    if (routes == null) {
-      if (directUpdatesToLeadersOnly && hasInfoToFindLeaders(updateRequest, routeField)) {
-        // we have info (documents with ids and/or ids to delete) with
-        // which to find the leaders but we could not find (all of) them
-        throw new SolrException(
-            SolrException.ErrorCode.SERVICE_UNAVAILABLE,
-            "directUpdatesToLeadersOnly==true but could not find leader(s)");
-      } else {
-        // we could not find a leader or routes yet - use unoptimized general path
-        return null;
-      }
-    }
-
-    final NamedList<Throwable> exceptions = new NamedList<>();
-    final NamedList<NamedList<?>> shardResponses =
-        new NamedList<>(routes.size() + 1); // +1 for deleteQuery
-
-    long start = System.nanoTime();
-
-    if (parallelUpdates) {
-      final Map<String, Future<NamedList<?>>> responseFutures = new HashMap<>(routes.size());
-      for (final Map.Entry<String, ? extends LBSolrClient.Req> entry : routes.entrySet()) {
-        final String url = entry.getKey();
-        final LBSolrClient.Req lbRequest = entry.getValue();
-        try {
-          MDC.put("CloudSolrClient.url", url);
-          responseFutures.put(
-              url,
-              threadPool.submit(
-                  () -> {
-                    return getLbClient().request(lbRequest).getResponse();
-                  }));
-        } finally {
-          MDC.remove("CloudSolrClient.url");
-        }
-      }
-
-      for (final Map.Entry<String, Future<NamedList<?>>> entry : responseFutures.entrySet()) {
-        final String url = entry.getKey();
-        final Future<NamedList<?>> responseFuture = entry.getValue();
-        try {
-          shardResponses.add(url, responseFuture.get());
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new RuntimeException(e);
-        } catch (ExecutionException e) {
-          exceptions.add(url, e.getCause());
-        }
-      }
-
-      if (exceptions.size() > 0) {
-        Throwable firstException = exceptions.getVal(0);
-        if (firstException instanceof SolrException) {
-          SolrException e = (SolrException) firstException;
-          throw getRouteException(
-              SolrException.ErrorCode.getErrorCode(e.code()), exceptions, routes);
-        } else {
-          throw getRouteException(SolrException.ErrorCode.SERVER_ERROR, exceptions, routes);
-        }
-      }
-    } else {
-      for (Map.Entry<String, ? extends LBSolrClient.Req> entry : routes.entrySet()) {
-        String url = entry.getKey();
-        LBSolrClient.Req lbRequest = entry.getValue();
-        try {
-          NamedList<Object> rsp = getLbClient().request(lbRequest).getResponse();
-          shardResponses.add(url, rsp);
-        } catch (Exception e) {
-          if (e instanceof SolrException) {
-            throw (SolrException) e;
-          } else {
-            throw new SolrServerException(e);
-          }
-        }
-      }
-    }
-
-    UpdateRequest nonRoutableRequest = null;
-    List<String> deleteQuery = updateRequest.getDeleteQuery();
-    if (deleteQuery != null && deleteQuery.size() > 0) {
-      UpdateRequest deleteQueryRequest = new UpdateRequest();
-      deleteQueryRequest.setDeleteQuery(deleteQuery);
-      nonRoutableRequest = deleteQueryRequest;
-    }
-
-    Set<String> paramNames = nonRoutableParams.getParameterNames();
-
-    Set<String> intersection = new HashSet<>(paramNames);
-    intersection.retainAll(NON_ROUTABLE_PARAMS);
-
-    if (nonRoutableRequest != null || intersection.size() > 0) {
-      if (nonRoutableRequest == null) {
-        nonRoutableRequest = new UpdateRequest();
-      }
-      nonRoutableRequest.setParams(nonRoutableParams);
-      nonRoutableRequest.setBasicAuthCredentials(
-          request.getBasicAuthUser(), request.getBasicAuthPassword());
-      List<String> urlList = new ArrayList<>(routes.keySet());
-      Collections.shuffle(urlList, rand);
-      LBSolrClient.Req req = new LBSolrClient.Req(nonRoutableRequest, urlList);
-      try {
-        LBSolrClient.Rsp rsp = getLbClient().request(req);
-        shardResponses.add(urlList.get(0), rsp.getResponse());
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, urlList.get(0), e);
-      }
-    }
-
-    long end = System.nanoTime();
-
-    @SuppressWarnings({"rawtypes"})
-    RouteResponse rr =
-        condenseResponse(
-            shardResponses, (int) TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS));
-    rr.setRouteResponses(shardResponses);
-    rr.setRoutes(routes);
-    return rr;
-  }
-
-  protected RouteException getRouteException(
-      SolrException.ErrorCode serverError,
-      NamedList<Throwable> exceptions,
-      Map<String, ? extends LBSolrClient.Req> routes) {
-    return new RouteException(serverError, exceptions, routes);
-  }
-
-  protected Map<String, ? extends LBSolrClient.Req> createRoutes(
-      UpdateRequest updateRequest,
-      ModifiableSolrParams routableParams,
-      DocCollection col,
-      DocRouter router,
-      Map<String, List<String>> urlMap,
-      String routeField) {
-    return urlMap == null
-        ? null
-        : updateRequest.getRoutesToCollection(router, col, urlMap, routableParams, routeField);
-  }
-
-  private Map<String, List<String>> buildUrlMap(
-      DocCollection col, ReplicaListTransformer replicaListTransformer) {
-    Map<String, List<String>> urlMap = new HashMap<>();
-    Slice[] slices = col.getActiveSlicesArr();
-    for (Slice slice : slices) {
-      String name = slice.getName();
-      List<Replica> sortedReplicas = new ArrayList<>();
-      Replica leader = slice.getLeader();
-      if (directUpdatesToLeadersOnly && leader == null) {
-        for (Replica replica :
-            slice.getReplicas(
-                replica ->
-                    replica.isActive(getClusterStateProvider().getLiveNodes())
-                        && replica.getType() == Replica.Type.NRT)) {
-          leader = replica;
-          break;
-        }
-      }
-      if (leader == null) {
-        if (directUpdatesToLeadersOnly) {
-          continue;
-        }
-        // take unoptimized general path - we cannot find a leader yet
-        return null;
-      }
-
-      if (!directUpdatesToLeadersOnly) {
-        for (Replica replica : slice.getReplicas()) {
-          if (!replica.equals(leader)) {
-            sortedReplicas.add(replica);
-          }
-        }
-      }
-
-      // Sort the non-leader replicas according to the request parameters
-      replicaListTransformer.transform(sortedReplicas);
-
-      // put the leaderUrl first.
-      sortedReplicas.add(0, leader);
-
-      urlMap.put(
-          name, sortedReplicas.stream().map(Replica::getCoreUrl).collect(Collectors.toList()));
-    }
-    return urlMap;
-  }
-
-  protected <T extends RouteResponse<?>> T condenseResponse(
-      NamedList<?> response, int timeMillis, Supplier<T> supplier) {
-    T condensed = supplier.get();
-    int status = 0;
-    Integer rf = null;
-
-    // TolerantUpdateProcessor
-    List<SimpleOrderedMap<String>> toleratedErrors = null;
-    int maxToleratedErrors = Integer.MAX_VALUE;
-
-    // For "adds", "deletes", "deleteByQuery" etc.
-    Map<String, NamedList<Object>> versions = new HashMap<>();
-
-    for (int i = 0; i < response.size(); i++) {
-      NamedList<?> shardResponse = (NamedList<?>) response.getVal(i);
-      NamedList<?> header = (NamedList<?>) shardResponse.get("responseHeader");
-      Integer shardStatus = (Integer) header.get("status");
-      int s = shardStatus.intValue();
-      if (s > 0) {
-        status = s;
-      }
-      Object rfObj = header.get(UpdateRequest.REPFACT);
-      if (rfObj != null && rfObj instanceof Integer) {
-        Integer routeRf = (Integer) rfObj;
-        if (rf == null || routeRf < rf) rf = routeRf;
-      }
-
-      @SuppressWarnings("unchecked")
-      List<SimpleOrderedMap<String>> shardTolerantErrors =
-          (List<SimpleOrderedMap<String>>) header.get("errors");
-      if (null != shardTolerantErrors) {
-        Integer shardMaxToleratedErrors = (Integer) header.get("maxErrors");
-        assert null != shardMaxToleratedErrors
-            : "TolerantUpdateProcessor reported errors but not maxErrors";
-        // if we get into some weird state where the nodes disagree about the effective maxErrors,
-        // assume the min value seen to decide if we should fail.
-        maxToleratedErrors =
-            Math.min(
-                maxToleratedErrors,
-                ToleratedUpdateError.getEffectiveMaxErrors(shardMaxToleratedErrors.intValue()));
-
-        if (null == toleratedErrors) {
-          toleratedErrors = new ArrayList<SimpleOrderedMap<String>>(shardTolerantErrors.size());
-        }
-        for (SimpleOrderedMap<String> err : shardTolerantErrors) {
-          toleratedErrors.add(err);
-        }
-      }
-      for (String updateType : Arrays.asList("adds", "deletes", "deleteByQuery")) {
-        Object obj = shardResponse.get(updateType);
-        if (obj instanceof NamedList) {
-          NamedList<Object> versionsList =
-              versions.containsKey(updateType) ? versions.get(updateType) : new NamedList<>();
-          NamedList<?> nl = (NamedList<?>) obj;
-          versionsList.addAll(nl);
-          versions.put(updateType, versionsList);
-        }
-      }
-    }
-
-    NamedList<Object> cheader = new NamedList<>();
-    cheader.add("status", status);
-    cheader.add("QTime", timeMillis);
-    if (rf != null) cheader.add(UpdateRequest.REPFACT, rf);
-    if (null != toleratedErrors) {
-      cheader.add("maxErrors", ToleratedUpdateError.getUserFriendlyMaxErrors(maxToleratedErrors));
-      cheader.add("errors", toleratedErrors);
-      if (maxToleratedErrors < toleratedErrors.size()) {
-        // cumulative errors are too high, we need to throw a client exception w/correct metadata
-
-        // NOTE: it shouldn't be possible for 1 == toleratedErrors.size(), because if that were the
-        // case then at least one shard should have thrown a real error before this, so we don't
-        // worry about having a more "singular" exception msg for that situation
-        StringBuilder msgBuf =
-            new StringBuilder()
-                .append(toleratedErrors.size())
-                .append(" Async failures during distributed update: ");
-
-        NamedList<String> metadata = new NamedList<>();
-        for (SimpleOrderedMap<String> err : toleratedErrors) {
-          ToleratedUpdateError te = ToleratedUpdateError.parseMap(err);
-          metadata.add(te.getMetadataKey(), te.getMetadataValue());
-
-          msgBuf.append("\n").append(te.getMessage());
-        }
-
-        SolrException toThrow =
-            new SolrException(SolrException.ErrorCode.BAD_REQUEST, msgBuf.toString());
-        toThrow.setMetadata(metadata);
-        throw toThrow;
-      }
-    }
-    for (Map.Entry<String, NamedList<Object>> entry : versions.entrySet()) {
-      condensed.add(entry.getKey(), entry.getValue());
-    }
-    condensed.add("responseHeader", cheader);
-    return condensed;
-  }
-
-  @SuppressWarnings({"rawtypes"})
-  public RouteResponse condenseResponse(NamedList<?> response, int timeMillis) {
-    return condenseResponse(response, timeMillis, RouteResponse::new);
-  }
-
-  @SuppressWarnings({"rawtypes"})
-  public static class RouteResponse<T extends LBSolrClient.Req> extends NamedList<Object> {
-    private NamedList<NamedList<?>> routeResponses;
-    private Map<String, T> routes;
-
-    public void setRouteResponses(NamedList<NamedList<?>> routeResponses) {
-      this.routeResponses = routeResponses;
-    }
-
-    public NamedList<NamedList<?>> getRouteResponses() {
-      return routeResponses;
-    }
-
-    public void setRoutes(Map<String, T> routes) {
-      this.routes = routes;
-    }
-
-    public Map<String, T> getRoutes() {
-      return routes;
-    }
-  }
-
-  public static class RouteException extends SolrException {
-
-    private NamedList<Throwable> throwables;
-    private Map<String, ? extends LBSolrClient.Req> routes;
-
-    public RouteException(
-        ErrorCode errorCode,
-        NamedList<Throwable> throwables,
-        Map<String, ? extends LBSolrClient.Req> routes) {
-      super(errorCode, throwables.getVal(0).getMessage(), throwables.getVal(0));
-      this.throwables = throwables;
-      this.routes = routes;
-
-      // create a merged copy of the metadata from all wrapped exceptions
-      NamedList<String> metadata = new NamedList<String>();
-      for (int i = 0; i < throwables.size(); i++) {
-        Throwable t = throwables.getVal(i);
-        if (t instanceof SolrException) {
-          SolrException e = (SolrException) t;
-          NamedList<String> eMeta = e.getMetadata();
-          if (null != eMeta) {
-            metadata.addAll(eMeta);
-          }
-        }
-      }
-      if (0 < metadata.size()) {
-        this.setMetadata(metadata);
-      }
-    }
-
-    public NamedList<Throwable> getThrowables() {
-      return throwables;
-    }
-
-    public Map<String, ? extends LBSolrClient.Req> getRoutes() {
-      return this.routes;
-    }
-  }
-
-  @Override
-  public NamedList<Object> request(SolrRequest<?> request, String collection)
-      throws SolrServerException, IOException {
-    // the collection parameter of the request overrides that of the parameter to this method
-    String requestCollection = request.getCollection();
-    if (requestCollection != null) {
-      collection = requestCollection;
-    } else if (collection == null) {
-      collection = defaultCollection;
-    }
-    List<String> inputCollections =
-        collection == null ? Collections.emptyList() : StrUtils.splitSmart(collection, ",", true);
-    return requestWithRetryOnStaleState(request, 0, inputCollections);
-  }
-
-  /**
-   * As this class doesn't watch external collections on the client side, there's a chance that the
-   * request will fail due to cached stale state, which means the state must be refreshed from ZK
-   * and retried.
-   */
-  protected NamedList<Object> requestWithRetryOnStaleState(
-      SolrRequest<?> request, int retryCount, List<String> inputCollections)
-      throws SolrServerException, IOException {
-    connect(); // important to call this before you start working with the ZkStateReader
-
-    // build up a _stateVer_ param to pass to the server containing all of the
-    // external collection state versions involved in this request, which allows
-    // the server to notify us that our cached state for one or more of the external
-    // collections is stale and needs to be refreshed ... this code has no impact on internal
-    // collections
-    String stateVerParam = null;
-    List<DocCollection> requestedCollections = null;
-    boolean isCollectionRequestOfV2 = false;
-    if (request instanceof V2RequestSupport) {
-      request = ((V2RequestSupport) request).getV2Request();
-    }
-    if (request instanceof V2Request) {
-      isCollectionRequestOfV2 = ((V2Request) request).isPerCollectionRequest();
-    }
-    boolean isAdmin = ADMIN_PATHS.contains(request.getPath());
-    boolean isUpdate = (request instanceof IsUpdateRequest) && (request instanceof UpdateRequest);
-    if (!inputCollections.isEmpty()
-        && !isAdmin
-        && !isCollectionRequestOfV2) { // don't do _stateVer_ checking for admin, v2 api requests
-      Set<String> requestedCollectionNames = resolveAliases(inputCollections, isUpdate);
-
-      StringBuilder stateVerParamBuilder = null;
-      for (String requestedCollection : requestedCollectionNames) {
-        // track the version of state we're using on the client side using the _stateVer_ param
-        DocCollection coll = getDocCollection(requestedCollection, null);
-        if (coll == null) {
-          throw new SolrException(
-              SolrException.ErrorCode.BAD_REQUEST, "Collection not found: " + requestedCollection);
-        }
-        int collVer = coll.getZNodeVersion();
-        if (requestedCollections == null)
-          requestedCollections = new ArrayList<>(requestedCollectionNames.size());
-        requestedCollections.add(coll);
-
-        if (stateVerParamBuilder == null) {
-          stateVerParamBuilder = new StringBuilder();
-        } else {
-          stateVerParamBuilder.append(
-              "|"); // hopefully pipe is not an allowed char in a collection name
-        }
-
-        stateVerParamBuilder.append(coll.getName()).append(":").append(collVer);
-      }
-
-      if (stateVerParamBuilder != null) {
-        stateVerParam = stateVerParamBuilder.toString();
-      }
-    }
-
-    if (request.getParams() instanceof ModifiableSolrParams) {
-      ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
-      if (stateVerParam != null) {
-        params.set(STATE_VERSION, stateVerParam);
-      } else {
-        params.remove(STATE_VERSION);
-      }
-    } // else: ??? how to set this ???
-
-    NamedList<Object> resp = null;
-    try {
-      resp = sendRequest(request, inputCollections);
-      // to avoid an O(n) operation we always add STATE_VERSION to the last and try to read it from
-      // there
-      Object o = resp == null || resp.size() == 0 ? null : resp.get(STATE_VERSION, resp.size() - 1);
-      if (o != null && o instanceof Map) {
-        // remove this because no one else needs this and tests would fail if they are comparing
-        // responses
-        resp.remove(resp.size() - 1);
-        Map<?, ?> invalidStates = (Map<?, ?>) o;
-        for (Map.Entry<?, ?> e : invalidStates.entrySet()) {
-          getDocCollection((String) e.getKey(), (Integer) e.getValue());
-        }
-      }
-    } catch (Exception exc) {
-
-      Throwable rootCause = SolrException.getRootCause(exc);
-      // don't do retry support for admin requests
-      // or if the request doesn't have a collection specified
-      // or request is v2 api and its method is not GET
-      if (inputCollections.isEmpty()
-          || isAdmin
-          || (request instanceof V2Request && request.getMethod() != SolrRequest.METHOD.GET)) {
-        if (exc instanceof SolrServerException) {
-          throw (SolrServerException) exc;
-        } else if (exc instanceof IOException) {
-          throw (IOException) exc;
-        } else if (exc instanceof RuntimeException) {
-          throw (RuntimeException) exc;
-        } else {
-          throw new SolrServerException(rootCause);
-        }
-      }
-
-      int errorCode =
-          (rootCause instanceof SolrException)
-              ? ((SolrException) rootCause).code()
-              : SolrException.ErrorCode.UNKNOWN.code;
-
-      boolean wasCommError =
-          (rootCause instanceof ConnectException
-              || rootCause instanceof SocketException
-              || wasCommError(rootCause));
-
-      if (wasCommError
-          || (exc instanceof RouteException
-              && (errorCode == 503)) // 404 because the core does not exist 503 service unavailable
-      // TODO there are other reasons for 404. We need to change the solr response format from HTML
-      // to structured data to know that
-      ) {
-        // it was a communication error. it is likely that
-        // the node to which the request to be sent is down . So , expire the state
-        // so that the next attempt would fetch the fresh state
-        // just re-read state for all of them, if it has not been retried
-        // in retryExpiryTime time
-        if (requestedCollections != null) {
-          for (DocCollection ext : requestedCollections) {
-            ExpiringCachedDocCollection cacheEntry = collectionStateCache.get(ext.getName());
-            if (cacheEntry == null) continue;
-            cacheEntry.maybeStale = true;
-          }
-        }
-        if (retryCount < MAX_STALE_RETRIES) { // if it is a communication error , we must try again
-          // may be, we have a stale version of the collection state
-          // and we could not get any information from the server
-          // it is probably not worth trying again and again because
-          // the state would not have been updated
-          log.info(
-              "Request to collection {} failed due to ({}) {}, retry={} maxRetries={} commError={} errorCode={} - retrying",
-              inputCollections,
-              errorCode,
-              rootCause,
-              retryCount,
-              MAX_STALE_RETRIES,
-              wasCommError,
-              errorCode);
-          return requestWithRetryOnStaleState(request, retryCount + 1, inputCollections);
-        }
-      } else {
-        log.info("request was not communication error it seems");
-      }
-      log.info(
-          "Request to collection {} failed due to ({}) {}, retry={} maxRetries={} commError={} errorCode={} ",
-          inputCollections,
-          errorCode,
-          rootCause,
-          retryCount,
-          MAX_STALE_RETRIES,
-          wasCommError,
-          errorCode);
-
-      boolean stateWasStale = false;
-      if (retryCount < MAX_STALE_RETRIES
-          && requestedCollections != null
-          && !requestedCollections.isEmpty()
-          && (SolrException.ErrorCode.getErrorCode(errorCode)
-                  == SolrException.ErrorCode.INVALID_STATE
-              || errorCode == 404)) {
-        // cached state for one or more external collections was stale
-        // re-issue request using updated state
-        stateWasStale = true;
-
-        // just re-read state for all of them, which is a little heavy handed but hopefully a rare
-        // occurrence
-        for (DocCollection ext : requestedCollections) {
-          collectionStateCache.remove(ext.getName());
-        }
-      }
-
-      // if we experienced a communication error, it's worth checking the state
-      // with ZK just to make sure the node we're trying to hit is still part of the collection
-      if (retryCount < MAX_STALE_RETRIES
-          && !stateWasStale
-          && requestedCollections != null
-          && !requestedCollections.isEmpty()
-          && wasCommError) {
-        for (DocCollection ext : requestedCollections) {
-          DocCollection latestStateFromZk = getDocCollection(ext.getName(), null);
-          if (latestStateFromZk.getZNodeVersion() != ext.getZNodeVersion()) {
-            // looks like we couldn't reach the server because the state was stale == retry
-            stateWasStale = true;
-            // we just pulled state from ZK, so update the cache so that the retry uses it
-            collectionStateCache.put(
-                ext.getName(), new ExpiringCachedDocCollection(latestStateFromZk));
-          }
-        }
-      }
-
-      if (requestedCollections != null) {
-        requestedCollections.clear(); // done with this
-      }
-
-      // if the state was stale, then we retry the request once with new state pulled from Zk
-      if (stateWasStale) {
-        log.warn(
-            "Re-trying request to collection(s) {} after stale state error from server.",
-            inputCollections);
-        resp = requestWithRetryOnStaleState(request, retryCount + 1, inputCollections);
-      } else {
-        if (exc instanceof SolrException
-            || exc instanceof SolrServerException
-            || exc instanceof IOException) {
-          throw exc;
-        } else {
-          throw new SolrServerException(rootCause);
-        }
-      }
-    }
-
-    return resp;
-  }
-
-  protected NamedList<Object> sendRequest(SolrRequest<?> request, List<String> inputCollections)
-      throws SolrServerException, IOException {
-    connect();
-
-    boolean sendToLeaders = false;
-    boolean isUpdate = false;
-
-    if (request instanceof IsUpdateRequest) {
-      if (request instanceof UpdateRequest) {
-        isUpdate = true;
-        if (inputCollections.size() > 1) {
-          throw new SolrException(
-              SolrException.ErrorCode.BAD_REQUEST,
-              "Update request must be sent to a single collection "
-                  + "or an alias: "
-                  + inputCollections);
-        }
-        String collection =
-            inputCollections.isEmpty()
-                ? null
-                : inputCollections.get(0); // getting first mimics HttpSolrCall
-        NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, collection);
-        if (response != null) {
-          return response;
-        }
-      }
-      sendToLeaders = true;
-    }
-
-    SolrParams reqParams = request.getParams();
-    if (reqParams == null) { // TODO fix getParams to never return null!
-      reqParams = new ModifiableSolrParams();
-    }
-
-    ReplicaListTransformer replicaListTransformer =
-        requestRLTGenerator.getReplicaListTransformer(reqParams);
-
-    final ClusterStateProvider provider = getClusterStateProvider();
-    final String urlScheme = provider.getClusterProperty(ZkStateReader.URL_SCHEME, "http");
-    final Set<String> liveNodes = provider.getLiveNodes();
-
-    final List<String> theUrlList = new ArrayList<>(); // we populate this as follows...
-
-    if (request instanceof V2Request) {
-      if (!liveNodes.isEmpty()) {
-        List<String> liveNodesList = new ArrayList<>(liveNodes);
-        Collections.shuffle(liveNodesList, rand);
-        theUrlList.add(Utils.getBaseUrlForNodeName(liveNodesList.get(0), urlScheme));
-      }
-
-    } else if (ADMIN_PATHS.contains(request.getPath())) {
-      for (String liveNode : liveNodes) {
-        theUrlList.add(Utils.getBaseUrlForNodeName(liveNode, urlScheme));
-      }
-
-    } else { // Typical...
-      Set<String> collectionNames = resolveAliases(inputCollections, isUpdate);
-      if (collectionNames.isEmpty()) {
-        throw new SolrException(
-            SolrException.ErrorCode.BAD_REQUEST,
-            "No collection param specified on request and no default collection has been set: "
-                + inputCollections);
-      }
-
-      // TODO: not a big deal because of the caching, but we could avoid looking
-      //   at every shard when getting leaders if we tweaked some things
-
-      // Retrieve slices from the cloud state and, for each collection specified, add it to the Map
-      // of slices.
-      Map<String, Slice> slices = new HashMap<>();
-      String shardKeys = reqParams.get(ShardParams._ROUTE_);
-      for (String collectionName : collectionNames) {
-        DocCollection col = getDocCollection(collectionName, null);
-        if (col == null) {
-          throw new SolrException(
-              SolrException.ErrorCode.BAD_REQUEST, "Collection not found: " + collectionName);
-        }
-        Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams, col);
-        ClientUtils.addSlices(slices, collectionName, routeSlices, true);
-      }
-
-      // Gather URLs, grouped by leader or replica
-      List<Replica> sortedReplicas = new ArrayList<>();
-      List<Replica> replicas = new ArrayList<>();
-      for (Slice slice : slices.values()) {
-        Replica leader = slice.getLeader();
-        for (Replica replica : slice.getReplicas()) {
-          String node = replica.getNodeName();
-          if (!liveNodes.contains(node) // Must be a live node to continue
-              || replica.getState()
-                  != Replica.State.ACTIVE) // Must be an ACTIVE replica to continue
-          continue;
-          if (sendToLeaders && replica.equals(leader)) {
-            sortedReplicas.add(replica); // put leaders here eagerly (if sendToLeader mode)
-          } else {
-            replicas.add(replica); // replicas here
-          }
-        }
-      }
-
-      // Sort the leader replicas, if any, according to the request preferences    (none if
-      // !sendToLeaders)
-      replicaListTransformer.transform(sortedReplicas);
-
-      // Sort the replicas, if any, according to the request preferences and append to our list
-      replicaListTransformer.transform(replicas);
-
-      sortedReplicas.addAll(replicas);
-
-      String joinedInputCollections = StrUtils.join(inputCollections, ',');
-      Set<String> seenNodes = new HashSet<>();
-      sortedReplicas.forEach(
-          replica -> {
-            if (seenNodes.add(replica.getNodeName())) {
-              theUrlList.add(
-                  ZkCoreNodeProps.getCoreUrl(replica.getBaseUrl(), joinedInputCollections));
-            }
-          });
-
-      if (theUrlList.isEmpty()) {
-        collectionStateCache.keySet().removeAll(collectionNames);
-        throw new SolrException(
-            SolrException.ErrorCode.INVALID_STATE,
-            "Could not find a healthy node to handle the request.");
-      }
-    }
-
-    LBSolrClient.Req req = new LBSolrClient.Req(request, theUrlList);
-    LBSolrClient.Rsp rsp = getLbClient().request(req);
-    return rsp.getResponse();
-  }
-
-  /**
-   * Resolves the input collections to their possible aliased collections. Doesn't validate
-   * collection existence.
-   */
-  private Set<String> resolveAliases(List<String> inputCollections, boolean isUpdate) {
-    if (inputCollections.isEmpty()) {
-      return Collections.emptySet();
-    }
-    LinkedHashSet<String> uniqueNames = new LinkedHashSet<>(); // consistent ordering
-    for (String collectionName : inputCollections) {
-      if (getClusterStateProvider().getState(collectionName) == null) {
-        // perhaps it's an alias
-        uniqueNames.addAll(getClusterStateProvider().resolveAlias(collectionName));
-      } else {
-        uniqueNames.add(collectionName); // it's a collection
-      }
-    }
-    return uniqueNames;
-  }
-
-  public boolean isUpdatesToLeaders() {
-    return updatesToLeaders;
-  }
-
-  /** @return true if direct updates are sent to shard leaders only */
-  public boolean isDirectUpdatesToLeadersOnly() {
-    return directUpdatesToLeadersOnly;
-  }
-
-  /**
-   * If caches are expired they are refreshed after acquiring a lock. use this to set the number of
-   * locks
-   */
-  public void setParallelCacheRefreshes(int n) {
-    locks = objectList(n);
-  }
-
-  protected static ArrayList<Object> objectList(int n) {
-    ArrayList<Object> l = new ArrayList<>(n);
-    for (int i = 0; i < n; i++) l.add(new Object());
-    return l;
-  }
-
-  protected DocCollection getDocCollection(String collection, Integer expectedVersion)
-      throws SolrException {
-    if (expectedVersion == null) expectedVersion = -1;
-    if (collection == null) return null;
-    ExpiringCachedDocCollection cacheEntry = collectionStateCache.get(collection);
-    DocCollection col = cacheEntry == null ? null : cacheEntry.cached;
-    if (col != null) {
-      if (expectedVersion <= col.getZNodeVersion() && !cacheEntry.shouldRetry()) return col;
-    }
-
-    ClusterState.CollectionRef ref = getCollectionRef(collection);
-    if (ref == null) {
-      // no such collection exists
-      return null;
-    }
-    if (!ref.isLazilyLoaded()) {
-      // it is readily available just return it
-      return ref.get();
-    }
-    List<Object> locks = this.locks;
-    final Object lock =
-        locks.get(
-            Math.abs(
-                Hash.murmurhash3_x86_32(collection, 0, collection.length(), 0) % locks.size()));
-    DocCollection fetchedCol = null;
-    synchronized (lock) {
-      /*we have waited for sometime just check once again*/
-      cacheEntry = collectionStateCache.get(collection);
-      col = cacheEntry == null ? null : cacheEntry.cached;
-      if (col != null) {
-        if (expectedVersion <= col.getZNodeVersion() && !cacheEntry.shouldRetry()) return col;
-      }
-      // We are going to fetch a new version
-      // we MUST try to get a new version
-      fetchedCol = ref.get(); // this is a call to ZK
-      if (fetchedCol == null) return null; // this collection no more exists
-      if (col != null && fetchedCol.getZNodeVersion() == col.getZNodeVersion()) {
-        cacheEntry.setRetriedAt(); // we retried and found that it is the same version
-        cacheEntry.maybeStale = false;
-      } else {
-        collectionStateCache.put(collection, new ExpiringCachedDocCollection(fetchedCol));
-      }
-      return fetchedCol;
-    }
-  }
-
-  ClusterState.CollectionRef getCollectionRef(String collection) {
-    return getClusterStateProvider().getState(collection);
-  }
-
-  /**
-   * Useful for determining the minimum achieved replication factor across all shards involved in
-   * processing an update request, typically useful for gauging the replication factor of a batch.
-   */
-  public int getMinAchievedReplicationFactor(String collection, NamedList<?> resp) {
-    // it's probably already on the top-level header set by condense
-    NamedList<?> header = (NamedList<?>) resp.get("responseHeader");
-    Integer achRf = (Integer) header.get(UpdateRequest.REPFACT);
-    if (achRf != null) return achRf.intValue();
-
-    // not on the top-level header, walk the shard route tree
-    Map<String, Integer> shardRf = getShardReplicationFactor(collection, resp);
-    for (Integer rf : shardRf.values()) {
-      if (achRf == null || rf < achRf) {
-        achRf = rf;
-      }
-    }
-    return (achRf != null) ? achRf.intValue() : -1;
-  }
-
-  /**
-   * Walks the NamedList response after performing an update request looking for the replication
-   * factor that was achieved in each shard involved in the request. For single doc updates, there
-   * will be only one shard in the return value.
-   */
-  public Map<String, Integer> getShardReplicationFactor(String collection, NamedList<?> resp) {
-    connect();
-
-    Map<String, Integer> results = new HashMap<>();
-    if (resp instanceof RouteResponse) {
-      NamedList<NamedList<?>> routes = ((RouteResponse<?>) resp).getRouteResponses();
-      DocCollection coll = getDocCollection(collection, null);
-      Map<String, String> leaders = new HashMap<>();
-      for (Slice slice : coll.getActiveSlicesArr()) {
-        Replica leader = slice.getLeader();
-        if (leader != null) {
-          ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader);
-          String leaderUrl = zkProps.getBaseUrl() + "/" + zkProps.getCoreName();
-          leaders.put(leaderUrl, slice.getName());
-          String altLeaderUrl = zkProps.getBaseUrl() + "/" + collection;
-          leaders.put(altLeaderUrl, slice.getName());
-        }
-      }
-
-      Iterator<Map.Entry<String, NamedList<?>>> routeIter = routes.iterator();
-      while (routeIter.hasNext()) {
-        Map.Entry<String, NamedList<?>> next = routeIter.next();
-        String host = next.getKey();
-        NamedList<?> hostResp = next.getValue();
-        Integer rf =
-            (Integer) ((NamedList<?>) hostResp.get("responseHeader")).get(UpdateRequest.REPFACT);
-        if (rf != null) {
-          String shard = leaders.get(host);
-          if (shard == null) {
-            if (host.endsWith("/")) shard = leaders.get(host.substring(0, host.length() - 1));
-            if (shard == null) {
-              shard = host;
-            }
-          }
-          results.put(shard, rf);
-        }
-      }
-    }
-    return results;
-  }
-
-  private static boolean hasInfoToFindLeaders(UpdateRequest updateRequest, String idField) {
-    final Map<SolrInputDocument, Map<String, Object>> documents = updateRequest.getDocumentsMap();
-    final Map<String, Map<String, Object>> deleteById = updateRequest.getDeleteByIdMap();
-
-    final boolean hasNoDocuments = (documents == null || documents.isEmpty());
-    final boolean hasNoDeleteById = (deleteById == null || deleteById.isEmpty());
-    if (hasNoDocuments && hasNoDeleteById) {
-      // no documents and no delete-by-id, so no info to find leader(s)
-      return false;
-    }
-
-    if (documents != null) {
-      for (final Map.Entry<SolrInputDocument, Map<String, Object>> entry : documents.entrySet()) {
-        final SolrInputDocument doc = entry.getKey();
-        final Object fieldValue = doc.getFieldValue(idField);
-        if (fieldValue == null) {
-          // a document with no id field value, so can't find leader for it
-          return false;
-        }
-      }
-    }
-
-    return true;
-  }
-}
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudHttp2SolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudHttp2SolrClient.java
index 34c3315..acc9f76 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudHttp2SolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudHttp2SolrClient.java
@@ -34,7 +34,7 @@ import org.apache.solr.common.SolrException;
  * @since solr 8.0
  */
 @SuppressWarnings("serial")
-public class CloudHttp2SolrClient extends BaseCloudSolrClient {
+public class CloudHttp2SolrClient extends CloudSolrClient {
 
   private final ClusterStateProvider stateProvider;
   private final LBHttp2SolrClient lbClient;
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudLegacySolrClient.java
similarity index 93%
copy from solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
copy to solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudLegacySolrClient.java
index 54b2c6d..8d237f1 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudLegacySolrClient.java
@@ -37,10 +37,10 @@ import org.apache.solr.common.util.NamedList;
  * Zookeeper to discover Solr endpoints for SolrCloud collections, and then use the {@link
  * LBHttpSolrClient} to issue requests.
  *
- * @deprecated Please use {@link CloudHttp2SolrClient}
+ * @deprecated Please use {@link CloudSolrClient}
  */
 @Deprecated(since = "9.0")
-public class CloudSolrClient extends BaseCloudSolrClient {
+public class CloudLegacySolrClient extends CloudSolrClient {
 
   private final ClusterStateProvider stateProvider;
   private final LBHttpSolrClient lbClient;
@@ -48,7 +48,7 @@ public class CloudSolrClient extends BaseCloudSolrClient {
   private HttpClient myClient;
   private final boolean clientIsInternal;
 
-  public static final String STATE_VERSION = BaseCloudSolrClient.STATE_VERSION;
+  public static final String STATE_VERSION = CloudSolrClient.STATE_VERSION;
 
   /**
    * Create a new client object that connects to Zookeeper and is always aware of the SolrCloud
@@ -56,9 +56,9 @@ public class CloudSolrClient extends BaseCloudSolrClient {
    * every shard in a collection, there is no single point of failure. Updates will be sent to shard
    * leaders by default.
    *
-   * @param builder a {@link CloudSolrClient.Builder} with the options used to create the client.
+   * @param builder a {@link Builder} with the options used to create the client.
    */
-  protected CloudSolrClient(Builder builder) {
+  protected CloudLegacySolrClient(Builder builder) {
     super(builder.shardLeadersOnly, builder.parallelUpdates, builder.directUpdatesToLeadersOnly);
     if (builder.stateProvider == null) {
       if (builder.zkHosts != null && builder.solrUrls != null) {
@@ -180,7 +180,7 @@ public class CloudSolrClient extends BaseCloudSolrClient {
     return lbClient;
   }
 
-  /** Constructs {@link CloudSolrClient} instances from provided configuration. */
+  /** Constructs {@link CloudLegacySolrClient} instances from provided configuration. */
   public static class Builder extends SolrClientBuilder<Builder> {
     protected Collection<String> zkHosts = new ArrayList<>();
     protected List<String> solrUrls = new ArrayList<>();
@@ -196,9 +196,9 @@ public class CloudSolrClient extends BaseCloudSolrClient {
     protected Builder() {}
 
     /**
-     * Provide a series of Solr URLs to be used when configuring {@link CloudSolrClient} instances.
-     * The solr client will use these urls to understand the cluster topology, which solr nodes are
-     * active etc.
+     * Provide a series of Solr URLs to be used when configuring {@link CloudLegacySolrClient}
+     * instances. The solr client will use these urls to understand the cluster topology, which solr
+     * nodes are active etc.
      *
      * <p>Provided Solr URLs are expected to point to the root Solr path
      * ("http://hostname:8983/solr"); they should not include any collections, cores, or other path
@@ -222,8 +222,8 @@ public class CloudSolrClient extends BaseCloudSolrClient {
     }
 
     /**
-     * Provide a series of ZK hosts which will be used when configuring {@link CloudSolrClient}
-     * instances.
+     * Provide a series of ZK hosts which will be used when configuring {@link
+     * CloudLegacySolrClient} instances.
      *
      * <p>Usage example when Solr stores data at the ZooKeeper root ('/'):
      *
@@ -309,8 +309,8 @@ public class CloudSolrClient extends BaseCloudSolrClient {
      * Tells {@link Builder} whether created clients should send shard updates serially or in
      * parallel
      *
-     * <p>When an {@link UpdateRequest} affects multiple shards, {@link CloudSolrClient} splits it
-     * up and sends a request to each affected shard. This setting chooses whether those
+     * <p>When an {@link UpdateRequest} affects multiple shards, {@link CloudLegacySolrClient}
+     * splits it up and sends a request to each affected shard. This setting chooses whether those
      * sub-requests are sent serially or in parallel.
      *
      * <p>If not set, this defaults to 'true' and sends sub-requests in parallel.
@@ -320,8 +320,8 @@ public class CloudSolrClient extends BaseCloudSolrClient {
       return this;
     }
 
-    /** Create a {@link CloudSolrClient} based on the provided configuration. */
-    public CloudSolrClient build() {
+    /** Create a {@link CloudLegacySolrClient} based on the provided configuration. */
+    public CloudLegacySolrClient build() {
       if (stateProvider == null) {
         if (!zkHosts.isEmpty()) {
           stateProvider = new ZkClientClusterStateProvider(zkHosts, zkChroot);
@@ -340,7 +340,7 @@ public class CloudSolrClient extends BaseCloudSolrClient {
           throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null.");
         }
       }
-      return new CloudSolrClient(this);
+      return new CloudLegacySolrClient(this);
     }
 
     @Override
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
index 54b2c6d..fdd3c61 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
@@ -14,186 +14,127 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.solr.client.solrj.impl;
 
+import static org.apache.solr.common.params.CommonParams.ADMIN_PATHS;
+import static org.apache.solr.common.params.CommonParams.ID;
+
 import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.ConnectException;
+import java.net.SocketException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import org.apache.http.NoHttpResponseException;
-import org.apache.http.client.HttpClient;
-import org.apache.http.conn.ConnectTimeoutException;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import org.apache.solr.client.solrj.ResponseParser;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.V2RequestSupport;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
+import org.apache.solr.client.solrj.request.IsUpdateRequest;
+import org.apache.solr.client.solrj.request.RequestWriter;
 import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.client.solrj.routing.ReplicaListTransformer;
+import org.apache.solr.client.solrj.routing.RequestReplicaListTransformerGenerator;
+import org.apache.solr.client.solrj.util.ClientUtils;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.ToleratedUpdateError;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CollectionStatePredicate;
+import org.apache.solr.common.cloud.CollectionStateWatcher;
 import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocCollectionWatcher;
 import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.params.UpdateParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.Hash;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
 
-/**
- * SolrJ client class to communicate with SolrCloud. Instances of this class communicate with
- * Zookeeper to discover Solr endpoints for SolrCloud collections, and then use the {@link
- * LBHttpSolrClient} to issue requests.
- *
- * @deprecated Please use {@link CloudHttp2SolrClient}
- */
-@Deprecated(since = "9.0")
-public class CloudSolrClient extends BaseCloudSolrClient {
-
-  private final ClusterStateProvider stateProvider;
-  private final LBHttpSolrClient lbClient;
-  private final boolean shutdownLBHttpSolrServer;
-  private HttpClient myClient;
-  private final boolean clientIsInternal;
-
-  public static final String STATE_VERSION = BaseCloudSolrClient.STATE_VERSION;
-
-  /**
-   * Create a new client object that connects to Zookeeper and is always aware of the SolrCloud
-   * state. If there is a fully redundant Zookeeper quorum and SolrCloud has enough replicas for
-   * every shard in a collection, there is no single point of failure. Updates will be sent to shard
-   * leaders by default.
-   *
-   * @param builder a {@link CloudSolrClient.Builder} with the options used to create the client.
-   */
-  protected CloudSolrClient(Builder builder) {
-    super(builder.shardLeadersOnly, builder.parallelUpdates, builder.directUpdatesToLeadersOnly);
-    if (builder.stateProvider == null) {
-      if (builder.zkHosts != null && builder.solrUrls != null) {
-        throw new IllegalArgumentException(
-            "Both zkHost(s) & solrUrl(s) have been specified. Only specify one.");
-      }
-      if (builder.zkHosts != null) {
-        this.stateProvider = new ZkClientClusterStateProvider(builder.zkHosts, builder.zkChroot);
-      } else if (builder.solrUrls != null && !builder.solrUrls.isEmpty()) {
-        try {
-          this.stateProvider = new HttpClusterStateProvider(builder.solrUrls, builder.httpClient);
-        } catch (Exception e) {
-          throw new RuntimeException(
-              "Couldn't initialize a HttpClusterStateProvider (is/are the "
-                  + "Solr server(s), "
-                  + builder.solrUrls
-                  + ", down?)",
-              e);
-        }
-      } else {
-        throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null.");
-      }
-    } else {
-      this.stateProvider = builder.stateProvider;
-    }
-    this.clientIsInternal = builder.httpClient == null;
-    this.shutdownLBHttpSolrServer = builder.loadBalancedSolrClient == null;
-    if (builder.lbClientBuilder != null) {
-      propagateLBClientConfigOptions(builder);
-      builder.loadBalancedSolrClient = builder.lbClientBuilder.build();
-    }
-    if (builder.loadBalancedSolrClient != null)
-      builder.httpClient = builder.loadBalancedSolrClient.getHttpClient();
-    this.myClient =
-        (builder.httpClient == null) ? HttpClientUtil.createClient(null) : builder.httpClient;
-    if (builder.loadBalancedSolrClient == null)
-      builder.loadBalancedSolrClient = createLBHttpSolrClient(builder, myClient);
-    this.lbClient = builder.loadBalancedSolrClient;
-  }
-
-  private void propagateLBClientConfigOptions(Builder builder) {
-    final LBHttpSolrClient.Builder lbBuilder = builder.lbClientBuilder;
-
-    if (builder.connectionTimeoutMillis != null) {
-      lbBuilder.withConnectionTimeout(builder.connectionTimeoutMillis);
-    }
-
-    if (builder.socketTimeoutMillis != null) {
-      lbBuilder.withSocketTimeout(builder.socketTimeoutMillis);
-    }
-  }
-
-  protected Map<String, LBSolrClient.Req> createRoutes(
-      UpdateRequest updateRequest,
-      ModifiableSolrParams routableParams,
-      DocCollection col,
-      DocRouter router,
-      Map<String, List<String>> urlMap,
-      String idField) {
-    return urlMap == null
-        ? null
-        : updateRequest.getRoutesToCollection(router, col, urlMap, routableParams, idField);
-  }
-
-  protected RouteException getRouteException(
-      SolrException.ErrorCode serverError,
-      NamedList<Throwable> exceptions,
-      Map<String, ? extends LBSolrClient.Req> routes) {
-    return new RouteException(serverError, exceptions, routes);
-  }
+public abstract class CloudSolrClient extends SolrClient {
 
-  @Override
-  public void close() throws IOException {
-    stateProvider.close();
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-    if (shutdownLBHttpSolrServer) {
-      lbClient.close();
-    }
+  private volatile String defaultCollection;
+  // number of times the collection state will be reloaded if a stale-state error is received
+  private static final int MAX_STALE_RETRIES =
+      Integer.parseInt(System.getProperty("cloudSolrClientMaxStaleRetries", "5"));
+  private Random rand = new Random();
 
-    if (clientIsInternal && myClient != null) {
-      HttpClientUtil.close(myClient);
-    }
+  private final boolean updatesToLeaders;
+  private final boolean directUpdatesToLeadersOnly;
+  private final RequestReplicaListTransformerGenerator requestRLTGenerator;
+  boolean parallelUpdates; // TODO final
+  private ExecutorService threadPool =
+      ExecutorUtil.newMDCAwareCachedThreadPool(
+          new SolrNamedThreadFactory("CloudSolrClient ThreadPool"));
 
-    super.close();
-  }
+  public static final String STATE_VERSION = "_stateVer_";
+  private long retryExpiryTime =
+      TimeUnit.NANOSECONDS.convert(3, TimeUnit.SECONDS); // 3 seconds, i.e. 3 billion nanos
+  private final Set<String> NON_ROUTABLE_PARAMS;
 
-  public LBHttpSolrClient getLbClient() {
-    return lbClient;
-  }
+  {
+    NON_ROUTABLE_PARAMS = new HashSet<>();
+    NON_ROUTABLE_PARAMS.add(UpdateParams.EXPUNGE_DELETES);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.MAX_OPTIMIZE_SEGMENTS);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.COMMIT);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.WAIT_SEARCHER);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.OPEN_SEARCHER);
 
-  public HttpClient getHttpClient() {
-    return myClient;
-  }
+    NON_ROUTABLE_PARAMS.add(UpdateParams.SOFT_COMMIT);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.PREPARE_COMMIT);
+    NON_ROUTABLE_PARAMS.add(UpdateParams.OPTIMIZE);
 
-  public ClusterStateProvider getClusterStateProvider() {
-    return stateProvider;
-  }
+    // Not supported via SolrCloud
+    // NON_ROUTABLE_PARAMS.add(UpdateParams.ROLLBACK);
 
-  @Override
-  protected boolean wasCommError(Throwable rootCause) {
-    return rootCause instanceof ConnectTimeoutException
-        || rootCause instanceof NoHttpResponseException;
   }
 
-  private static LBHttpSolrClient createLBHttpSolrClient(
-      Builder cloudSolrClientBuilder, HttpClient httpClient) {
-    final LBHttpSolrClient.Builder lbBuilder = new LBHttpSolrClient.Builder();
-    lbBuilder.withHttpClient(httpClient);
-    if (cloudSolrClientBuilder.connectionTimeoutMillis != null) {
-      lbBuilder.withConnectionTimeout(cloudSolrClientBuilder.connectionTimeoutMillis);
-    }
-    if (cloudSolrClientBuilder.socketTimeoutMillis != null) {
-      lbBuilder.withSocketTimeout(cloudSolrClientBuilder.socketTimeoutMillis);
-    }
-    final LBHttpSolrClient lbClient = lbBuilder.build();
-    lbClient.setRequestWriter(new BinaryRequestWriter());
-    lbClient.setParser(new BinaryResponseParser());
-
-    return lbClient;
-  }
+  private volatile List<Object> locks = objectList(3);
 
   /** Constructs {@link CloudSolrClient} instances from provided configuration. */
-  public static class Builder extends SolrClientBuilder<Builder> {
-    protected Collection<String> zkHosts = new ArrayList<>();
-    protected List<String> solrUrls = new ArrayList<>();
-    protected String zkChroot;
-    protected LBHttpSolrClient loadBalancedSolrClient;
-    protected LBHttpSolrClient.Builder lbClientBuilder;
-    protected boolean shardLeadersOnly = true;
-    protected boolean directUpdatesToLeadersOnly = false;
-    protected boolean parallelUpdates = true;
-    protected ClusterStateProvider stateProvider;
-
-    /** Constructor for use by subclasses. This constructor was public prior to version 9.0 */
-    protected Builder() {}
+  public static class Builder extends CloudHttp2SolrClient.Builder {
 
     /**
      * Provide a series of Solr URLs to be used when configuring {@link CloudSolrClient} instances.
@@ -213,12 +154,7 @@ public class CloudSolrClient extends BaseCloudSolrClient {
      * </pre>
      */
     public Builder(List<String> solrUrls) {
-      this.solrUrls = solrUrls;
-    }
-
-    /** Provide an already created {@link ClusterStateProvider} instance */
-    public Builder(ClusterStateProvider stateProvider) {
-      this.stateProvider = stateProvider;
+      super(solrUrls);
     }
 
     /**
@@ -246,106 +182,1254 @@ public class CloudSolrClient extends BaseCloudSolrClient {
      *     java.util.Optional.empty()} if no ZK chroot is used.
      */
     public Builder(List<String> zkHosts, Optional<String> zkChroot) {
-      this.zkHosts = zkHosts;
-      if (zkChroot.isPresent()) this.zkChroot = zkChroot.get();
+      super(zkHosts, zkChroot);
     }
+  }
+
+  static class StateCache extends ConcurrentHashMap<String, ExpiringCachedDocCollection> {
+    final AtomicLong puts = new AtomicLong();
+    final AtomicLong hits = new AtomicLong();
+    final Lock evictLock = new ReentrantLock(true);
+    protected volatile long timeToLive = 60 * 1000L;
 
-    /** Provides a {@link HttpClient} for the builder to use when creating clients. */
-    public Builder withLBHttpSolrClientBuilder(LBHttpSolrClient.Builder lbHttpSolrClientBuilder) {
-      this.lbClientBuilder = lbHttpSolrClientBuilder;
-      return this;
+    @Override
+    public ExpiringCachedDocCollection get(Object key) {
+      ExpiringCachedDocCollection val = super.get(key);
+      if (val == null) {
+        // a new collection is likely to be added now.
+        // check if there are stale items and remove them
+        evictStale();
+        return null;
+      }
+      if (val.isExpired(timeToLive)) {
+        super.remove(key);
+        return null;
+      }
+      hits.incrementAndGet();
+      return val;
     }
 
-    /** Provides a {@link LBHttpSolrClient} for the builder to use when creating clients. */
-    public Builder withLBHttpSolrClient(LBHttpSolrClient loadBalancedSolrClient) {
-      this.loadBalancedSolrClient = loadBalancedSolrClient;
-      return this;
+    @Override
+    public ExpiringCachedDocCollection put(String key, ExpiringCachedDocCollection value) {
+      puts.incrementAndGet();
+      return super.put(key, value);
     }
 
-    /**
-     * Tells {@link Builder} that created clients should send updates only to shard leaders.
-     *
-     * <p>WARNING: This method currently has no effect. See SOLR-6312 for more information.
-     */
-    public Builder sendUpdatesOnlyToShardLeaders() {
-      shardLeadersOnly = true;
-      return this;
+    void evictStale() {
+      if (!evictLock.tryLock()) return;
+      try {
+        for (Entry<String, ExpiringCachedDocCollection> e : entrySet()) {
+          if (e.getValue().isExpired(timeToLive)) {
+            super.remove(e.getKey());
+          }
+        }
+      } finally {
+        evictLock.unlock();
+      }
     }
+  }
 
-    /**
-     * Tells {@link Builder} that created clients should send updates to all replicas for a shard.
-     *
-     * <p>WARNING: This method currently has no effect. See SOLR-6312 for more information.
-     */
-    public Builder sendUpdatesToAllReplicasInShard() {
-      shardLeadersOnly = false;
-      return this;
+  /**
+   * This is the time to wait to refetch the state after getting the same state version from ZK
+   *
+   * @param secs the wait time in seconds
+   */
+  public void setRetryExpiryTime(int secs) {
+    this.retryExpiryTime = TimeUnit.NANOSECONDS.convert(secs, TimeUnit.SECONDS);
+  }
+
+  protected final StateCache collectionStateCache = new StateCache();
+
+  class ExpiringCachedDocCollection {
+    final DocCollection cached;
+    final long cachedAt;
+    // Time at which the collection state was last re-fetched and found to be the same old version
+    volatile long retriedAt = -1;
+    // flag that suggests that this is potentially to be rechecked
+    volatile boolean maybeStale = false;
+
+    ExpiringCachedDocCollection(DocCollection cached) {
+      this.cached = cached;
+      this.cachedAt = System.nanoTime();
     }
 
-    /**
-     * Tells {@link Builder} that created clients should send direct updates to shard leaders only.
-     *
-     * <p>UpdateRequests whose leaders cannot be found will "fail fast" on the client side with a
-     * {@link SolrException}
-     */
-    public Builder sendDirectUpdatesToShardLeadersOnly() {
-      directUpdatesToLeadersOnly = true;
-      return this;
+    boolean isExpired(long timeToLiveMs) {
+      return (System.nanoTime() - cachedAt)
+          > TimeUnit.NANOSECONDS.convert(timeToLiveMs, TimeUnit.MILLISECONDS);
     }
 
-    /**
-     * Tells {@link Builder} that created clients can send updates to any shard replica (shard
-     * leaders and non-leaders).
-     *
-     * <p>Shard leaders are still preferred, but the created clients will fallback to using other
-     * replicas if a leader cannot be found.
-     */
-    public Builder sendDirectUpdatesToAnyShardReplica() {
-      directUpdatesToLeadersOnly = false;
-      return this;
+    boolean shouldRetry() {
+      if (maybeStale) { // we are not sure if it is stale so check with retry time
+        if ((retriedAt == -1 || (System.nanoTime() - retriedAt) > retryExpiryTime)) {
+          return true; // we retried a while back. and we could not get anything new.
+          // it's likely that it is not going to be available now also.
+        }
+      }
+      return false;
     }
 
-    /**
-     * Tells {@link Builder} whether created clients should send shard updates serially or in
-     * parallel
-     *
-     * <p>When an {@link UpdateRequest} affects multiple shards, {@link CloudSolrClient} splits it
-     * up and sends a request to each affected shard. This setting chooses whether those
-     * sub-requests are sent serially or in parallel.
-     *
-     * <p>If not set, this defaults to 'true' and sends sub-requests in parallel.
-     */
-    public Builder withParallelUpdates(boolean parallelUpdates) {
-      this.parallelUpdates = parallelUpdates;
-      return this;
-    }
-
-    /** Create a {@link CloudSolrClient} based on the provided configuration. */
-    public CloudSolrClient build() {
-      if (stateProvider == null) {
-        if (!zkHosts.isEmpty()) {
-          stateProvider = new ZkClientClusterStateProvider(zkHosts, zkChroot);
-        } else if (!this.solrUrls.isEmpty()) {
-          try {
-            stateProvider = new HttpClusterStateProvider(solrUrls, httpClient);
-          } catch (Exception e) {
-            throw new RuntimeException(
-                "Couldn't initialize a HttpClusterStateProvider (is/are the "
-                    + "Solr server(s), "
-                    + solrUrls
-                    + ", down?)",
-                e);
+    void setRetriedAt() {
+      retriedAt = System.nanoTime();
+    }
+  }
+
+  protected CloudSolrClient(
+      boolean updatesToLeaders, boolean parallelUpdates, boolean directUpdatesToLeadersOnly) {
+    this.updatesToLeaders = updatesToLeaders;
+    this.parallelUpdates = parallelUpdates;
+    this.directUpdatesToLeadersOnly = directUpdatesToLeadersOnly;
+    this.requestRLTGenerator = new RequestReplicaListTransformerGenerator();
+  }
+
+  /**
+   * Sets the cache ttl for DocCollection Objects cached.
+   *
+   * @param seconds ttl value in seconds
+   */
+  public void setCollectionCacheTTl(int seconds) {
+    assert seconds > 0;
+    this.collectionStateCache.timeToLive = seconds * 1000L;
+  }
+
+  protected abstract LBSolrClient getLbClient();
+
+  public abstract ClusterStateProvider getClusterStateProvider();
+
+  public ClusterState getClusterState() {
+    return getClusterStateProvider().getClusterState();
+  }
+
+  protected abstract boolean wasCommError(Throwable t);
+
+  @Override
+  public void close() throws IOException {
+    if (this.threadPool != null && !this.threadPool.isShutdown()) {
+      this.threadPool.shutdown();
+    }
+  }
+
+  public ResponseParser getParser() {
+    return getLbClient().getParser();
+  }
+
+  /**
+   * Note: This setter method is <b>not thread-safe</b>.
+   *
+   * @param processor Default Response Parser chosen to parse the response if the parser were not
+   *     specified as part of the request.
+   * @see org.apache.solr.client.solrj.SolrRequest#getResponseParser()
+   */
+  public void setParser(ResponseParser processor) {
+    getLbClient().setParser(processor);
+  }
+
+  public RequestWriter getRequestWriter() {
+    return getLbClient().getRequestWriter();
+  }
+
+  public void setRequestWriter(RequestWriter requestWriter) {
+    getLbClient().setRequestWriter(requestWriter);
+  }
+
+  /** Sets the default collection for request */
+  public void setDefaultCollection(String collection) {
+    this.defaultCollection = collection;
+  }
+
+  /** Gets the default collection for request */
+  public String getDefaultCollection() {
+    return defaultCollection;
+  }
+
+  /** Gets whether direct updates are sent in parallel */
+  public boolean isParallelUpdates() {
+    return parallelUpdates;
+  }
+
+  /**
+   * Connect to the zookeeper ensemble. This is an optional method that may be used to force a
+   * connect before any other requests are sent.
+   */
+  public void connect() {
+    getClusterStateProvider().connect();
+  }
+
+  /**
+   * Connect to a cluster. If the cluster is not ready, retry connection up to a given timeout.
+   *
+   * @param duration the timeout
+   * @param timeUnit the units of the timeout
+   * @throws TimeoutException if the cluster is not ready after the timeout
+   * @throws InterruptedException if the wait is interrupted
+   */
+  public void connect(long duration, TimeUnit timeUnit)
+      throws TimeoutException, InterruptedException {
+    if (log.isInfoEnabled()) {
+      log.info(
+          "Waiting for {} {} for cluster at {} to be ready",
+          duration,
+          timeUnit,
+          getClusterStateProvider());
+    }
+    long timeout = System.nanoTime() + timeUnit.toNanos(duration);
+    while (System.nanoTime() < timeout) {
+      try {
+        connect();
+        if (log.isInfoEnabled()) {
+          log.info("Cluster at {} ready", getClusterStateProvider());
+        }
+        return;
+      } catch (RuntimeException e) {
+        // not ready yet, then...
+      }
+      TimeUnit.MILLISECONDS.sleep(250);
+    }
+    throw new TimeoutException("Timed out waiting for cluster");
+  }
+
+  private ZkClientClusterStateProvider assertZKStateProvider() {
+    if (getClusterStateProvider() instanceof ZkClientClusterStateProvider) {
+      return (ZkClientClusterStateProvider) getClusterStateProvider();
+    }
+    throw new IllegalArgumentException("This client does not use ZK");
+  }
+
+  /**
+   * Block until a CollectionStatePredicate returns true, or the wait times out
+   *
+   * <p>Note that the predicate may be called again even after it has returned true, so implementors
+   * should avoid changing state within the predicate call itself.
+   *
+   * <p>This implementation utilizes {@link CollectionStateWatcher} internally. Callers that don't
+   * care about liveNodes are encouraged to use a {@link DocCollection} {@link Predicate} instead
+   *
+   * @see #waitForState(String, long, TimeUnit, Predicate)
+   * @see #registerCollectionStateWatcher
+   * @param collection the collection to watch
+   * @param wait how long to wait
+   * @param unit the units of the wait parameter
+   * @param predicate a {@link CollectionStatePredicate} to check the collection state
+   * @throws InterruptedException on interrupt
+   * @throws TimeoutException on timeout
+   */
+  public void waitForState(
+      String collection, long wait, TimeUnit unit, CollectionStatePredicate predicate)
+      throws InterruptedException, TimeoutException {
+    getClusterStateProvider().connect();
+    assertZKStateProvider().zkStateReader.waitForState(collection, wait, unit, predicate);
+  }
+  /**
+   * Block until a Predicate returns true, or the wait times out
+   *
+   * <p>Note that the predicate may be called again even after it has returned true, so implementors
+   * should avoid changing state within the predicate call itself.
+   *
+   * @see #registerDocCollectionWatcher
+   * @param collection the collection to watch
+   * @param wait how long to wait
+   * @param unit the units of the wait parameter
+   * @param predicate a {@link Predicate} to test against the {@link DocCollection}
+   * @throws InterruptedException on interrupt
+   * @throws TimeoutException on timeout
+   */
+  public void waitForState(
+      String collection, long wait, TimeUnit unit, Predicate<DocCollection> predicate)
+      throws InterruptedException, TimeoutException {
+    getClusterStateProvider().connect();
+    assertZKStateProvider().zkStateReader.waitForState(collection, wait, unit, predicate);
+  }
+
+  /**
+   * Register a CollectionStateWatcher to be called when the cluster state for a collection changes
+   * <em>or</em> the set of live nodes changes.
+   *
+   * <p>The Watcher will automatically be removed when it's <code>onStateChanged</code> returns
+   * <code>true</code>
+   *
+   * <p>This implementation utilizes {@link ZkStateReader#registerCollectionStateWatcher}
+   * internally. Callers that don't care about liveNodes are encouraged to use a {@link
+   * DocCollectionWatcher} instead
+   *
+   * @see #registerDocCollectionWatcher(String, DocCollectionWatcher)
+   * @see ZkStateReader#registerCollectionStateWatcher
+   * @param collection the collection to watch
+   * @param watcher a watcher that will be called when the state changes
+   */
+  public void registerCollectionStateWatcher(String collection, CollectionStateWatcher watcher) {
+    getClusterStateProvider().connect();
+    assertZKStateProvider().zkStateReader.registerCollectionStateWatcher(collection, watcher);
+  }
+
+  /**
+   * Register a DocCollectionWatcher to be called when the cluster state for a collection changes.
+   *
+   * <p>The Watcher will automatically be removed when it's <code>onStateChanged</code> returns
+   * <code>true</code>
+   *
+   * @see ZkStateReader#registerDocCollectionWatcher
+   * @param collection the collection to watch
+   * @param watcher a watcher that will be called when the state changes
+   */
+  public void registerDocCollectionWatcher(String collection, DocCollectionWatcher watcher) {
+    getClusterStateProvider().connect();
+    assertZKStateProvider().zkStateReader.registerDocCollectionWatcher(collection, watcher);
+  }
+
+  @SuppressWarnings({"unchecked"})
+  private NamedList<Object> directUpdate(AbstractUpdateRequest request, String collection)
+      throws SolrServerException {
+    UpdateRequest updateRequest = (UpdateRequest) request;
+    SolrParams params = request.getParams();
+    ModifiableSolrParams routableParams = new ModifiableSolrParams();
+    ModifiableSolrParams nonRoutableParams = new ModifiableSolrParams();
+
+    if (params != null) {
+      nonRoutableParams.add(params);
+      routableParams.add(params);
+      for (String param : NON_ROUTABLE_PARAMS) {
+        routableParams.remove(param);
+      }
+    } else {
+      params = new ModifiableSolrParams();
+    }
+
+    if (collection == null) {
+      throw new SolrServerException(
+          "No collection param specified on request and no default collection has been set.");
+    }
+
+    // Check to see if the collection is an alias. Updates to multi-collection aliases are ok as
+    // long as they are routed aliases
+    List<String> aliasedCollections = getClusterStateProvider().resolveAlias(collection);
+    if (getClusterStateProvider().isRoutedAlias(collection) || aliasedCollections.size() == 1) {
+      collection = aliasedCollections.get(0); // pick 1st (consistent with HttpSolrCall behavior)
+    } else {
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "Update request to non-routed multi-collection alias not supported: "
+              + collection
+              + " -> "
+              + aliasedCollections);
+    }
+
+    DocCollection col = getDocCollection(collection, null);
+
+    DocRouter router = col.getRouter();
+
+    if (router instanceof ImplicitDocRouter) {
+      // short circuit as optimization
+      return null;
+    }
+
+    ReplicaListTransformer replicaListTransformer =
+        requestRLTGenerator.getReplicaListTransformer(params);
+
+    // Create the URL map, which is keyed on slice name.
+    // The value is a list of URLs for each replica in the slice.
+    // The first value in the list is the leader for the slice.
+    final Map<String, List<String>> urlMap = buildUrlMap(col, replicaListTransformer);
+    String routeField =
+        (col.getRouter().getRouteField(col) == null) ? ID : col.getRouter().getRouteField(col);
+    final Map<String, ? extends LBSolrClient.Req> routes =
+        createRoutes(updateRequest, routableParams, col, router, urlMap, routeField);
+    if (routes == null) {
+      if (directUpdatesToLeadersOnly && hasInfoToFindLeaders(updateRequest, routeField)) {
+        // we have info (documents with ids and/or ids to delete) with
+        // which to find the leaders but we could not find (all of) them
+        throw new SolrException(
+            SolrException.ErrorCode.SERVICE_UNAVAILABLE,
+            "directUpdatesToLeadersOnly==true but could not find leader(s)");
+      } else {
+        // we could not find a leader or routes yet - use unoptimized general path
+        return null;
+      }
+    }
+
+    final NamedList<Throwable> exceptions = new NamedList<>();
+    final NamedList<NamedList<?>> shardResponses =
+        new NamedList<>(routes.size() + 1); // +1 for deleteQuery
+
+    long start = System.nanoTime();
+
+    if (parallelUpdates) {
+      final Map<String, Future<NamedList<?>>> responseFutures = new HashMap<>(routes.size());
+      for (final Map.Entry<String, ? extends LBSolrClient.Req> entry : routes.entrySet()) {
+        final String url = entry.getKey();
+        final LBSolrClient.Req lbRequest = entry.getValue();
+        try {
+          MDC.put("CloudSolrClient.url", url);
+          responseFutures.put(
+              url,
+              threadPool.submit(
+                  () -> {
+                    return getLbClient().request(lbRequest).getResponse();
+                  }));
+        } finally {
+          MDC.remove("CloudSolrClient.url");
+        }
+      }
+
+      for (final Map.Entry<String, Future<NamedList<?>>> entry : responseFutures.entrySet()) {
+        final String url = entry.getKey();
+        final Future<NamedList<?>> responseFuture = entry.getValue();
+        try {
+          shardResponses.add(url, responseFuture.get());
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+          throw new RuntimeException(e);
+        } catch (ExecutionException e) {
+          exceptions.add(url, e.getCause());
+        }
+      }
+
+      if (exceptions.size() > 0) {
+        Throwable firstException = exceptions.getVal(0);
+        if (firstException instanceof SolrException) {
+          SolrException e = (SolrException) firstException;
+          throw getRouteException(
+              SolrException.ErrorCode.getErrorCode(e.code()), exceptions, routes);
+        } else {
+          throw getRouteException(SolrException.ErrorCode.SERVER_ERROR, exceptions, routes);
+        }
+      }
+    } else {
+      for (Map.Entry<String, ? extends LBSolrClient.Req> entry : routes.entrySet()) {
+        String url = entry.getKey();
+        LBSolrClient.Req lbRequest = entry.getValue();
+        try {
+          NamedList<Object> rsp = getLbClient().request(lbRequest).getResponse();
+          shardResponses.add(url, rsp);
+        } catch (Exception e) {
+          if (e instanceof SolrException) {
+            throw (SolrException) e;
+          } else {
+            throw new SolrServerException(e);
           }
+        }
+      }
+    }
+
+    UpdateRequest nonRoutableRequest = null;
+    List<String> deleteQuery = updateRequest.getDeleteQuery();
+    if (deleteQuery != null && deleteQuery.size() > 0) {
+      UpdateRequest deleteQueryRequest = new UpdateRequest();
+      deleteQueryRequest.setDeleteQuery(deleteQuery);
+      nonRoutableRequest = deleteQueryRequest;
+    }
+
+    Set<String> paramNames = nonRoutableParams.getParameterNames();
+
+    Set<String> intersection = new HashSet<>(paramNames);
+    intersection.retainAll(NON_ROUTABLE_PARAMS);
+
+    if (nonRoutableRequest != null || intersection.size() > 0) {
+      if (nonRoutableRequest == null) {
+        nonRoutableRequest = new UpdateRequest();
+      }
+      nonRoutableRequest.setParams(nonRoutableParams);
+      nonRoutableRequest.setBasicAuthCredentials(
+          request.getBasicAuthUser(), request.getBasicAuthPassword());
+      List<String> urlList = new ArrayList<>(routes.keySet());
+      Collections.shuffle(urlList, rand);
+      LBSolrClient.Req req = new LBSolrClient.Req(nonRoutableRequest, urlList);
+      try {
+        LBSolrClient.Rsp rsp = getLbClient().request(req);
+        shardResponses.add(urlList.get(0), rsp.getResponse());
+      } catch (Exception e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, urlList.get(0), e);
+      }
+    }
+
+    long end = System.nanoTime();
+
+    @SuppressWarnings({"rawtypes"})
+    RouteResponse rr =
+        condenseResponse(
+            shardResponses, (int) TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS));
+    rr.setRouteResponses(shardResponses);
+    rr.setRoutes(routes);
+    return rr;
+  }
+
+  protected RouteException getRouteException(
+      SolrException.ErrorCode serverError,
+      NamedList<Throwable> exceptions,
+      Map<String, ? extends LBSolrClient.Req> routes) {
+    return new RouteException(serverError, exceptions, routes);
+  }
+
+  protected Map<String, ? extends LBSolrClient.Req> createRoutes(
+      UpdateRequest updateRequest,
+      ModifiableSolrParams routableParams,
+      DocCollection col,
+      DocRouter router,
+      Map<String, List<String>> urlMap,
+      String routeField) {
+    return urlMap == null
+        ? null
+        : updateRequest.getRoutesToCollection(router, col, urlMap, routableParams, routeField);
+  }
+
+  private Map<String, List<String>> buildUrlMap(
+      DocCollection col, ReplicaListTransformer replicaListTransformer) {
+    Map<String, List<String>> urlMap = new HashMap<>();
+    Slice[] slices = col.getActiveSlicesArr();
+    for (Slice slice : slices) {
+      String name = slice.getName();
+      List<Replica> sortedReplicas = new ArrayList<>();
+      Replica leader = slice.getLeader();
+      if (directUpdatesToLeadersOnly && leader == null) {
+        for (Replica replica :
+            slice.getReplicas(
+                replica ->
+                    replica.isActive(getClusterStateProvider().getLiveNodes())
+                        && replica.getType() == Replica.Type.NRT)) {
+          leader = replica;
+          break;
+        }
+      }
+      if (leader == null) {
+        if (directUpdatesToLeadersOnly) {
+          continue;
+        }
+        // take unoptimized general path - we cannot find a leader yet
+        return null;
+      }
+
+      if (!directUpdatesToLeadersOnly) {
+        for (Replica replica : slice.getReplicas()) {
+          if (!replica.equals(leader)) {
+            sortedReplicas.add(replica);
+          }
+        }
+      }
+
+      // Sort the non-leader replicas according to the request parameters
+      replicaListTransformer.transform(sortedReplicas);
+
+      // put the leaderUrl first.
+      sortedReplicas.add(0, leader);
+
+      urlMap.put(
+          name, sortedReplicas.stream().map(Replica::getCoreUrl).collect(Collectors.toList()));
+    }
+    return urlMap;
+  }
+
+  protected <T extends RouteResponse<?>> T condenseResponse(
+      NamedList<?> response, int timeMillis, Supplier<T> supplier) {
+    T condensed = supplier.get();
+    int status = 0;
+    Integer rf = null;
+
+    // TolerantUpdateProcessor
+    List<SimpleOrderedMap<String>> toleratedErrors = null;
+    int maxToleratedErrors = Integer.MAX_VALUE;
+
+    // For "adds", "deletes", "deleteByQuery" etc.
+    Map<String, NamedList<Object>> versions = new HashMap<>();
+
+    for (int i = 0; i < response.size(); i++) {
+      NamedList<?> shardResponse = (NamedList<?>) response.getVal(i);
+      NamedList<?> header = (NamedList<?>) shardResponse.get("responseHeader");
+      Integer shardStatus = (Integer) header.get("status");
+      int s = shardStatus.intValue();
+      if (s > 0) {
+        status = s;
+      }
+      Object rfObj = header.get(UpdateRequest.REPFACT);
+      if (rfObj != null && rfObj instanceof Integer) {
+        Integer routeRf = (Integer) rfObj;
+        if (rf == null || routeRf < rf) rf = routeRf;
+      }
+
+      @SuppressWarnings("unchecked")
+      List<SimpleOrderedMap<String>> shardTolerantErrors =
+          (List<SimpleOrderedMap<String>>) header.get("errors");
+      if (null != shardTolerantErrors) {
+        Integer shardMaxToleratedErrors = (Integer) header.get("maxErrors");
+        assert null != shardMaxToleratedErrors
+            : "TolerantUpdateProcessor reported errors but not maxErrors";
+        // if we get into some weird state where the nodes disagree about the effective maxErrors,
+        // assume the min value seen to decide if we should fail.
+        maxToleratedErrors =
+            Math.min(
+                maxToleratedErrors,
+                ToleratedUpdateError.getEffectiveMaxErrors(shardMaxToleratedErrors.intValue()));
+
+        if (null == toleratedErrors) {
+          toleratedErrors = new ArrayList<SimpleOrderedMap<String>>(shardTolerantErrors.size());
+        }
+        for (SimpleOrderedMap<String> err : shardTolerantErrors) {
+          toleratedErrors.add(err);
+        }
+      }
+      for (String updateType : Arrays.asList("adds", "deletes", "deleteByQuery")) {
+        Object obj = shardResponse.get(updateType);
+        if (obj instanceof NamedList) {
+          NamedList<Object> versionsList =
+              versions.containsKey(updateType) ? versions.get(updateType) : new NamedList<>();
+          NamedList<?> nl = (NamedList<?>) obj;
+          versionsList.addAll(nl);
+          versions.put(updateType, versionsList);
+        }
+      }
+    }
+
+    NamedList<Object> cheader = new NamedList<>();
+    cheader.add("status", status);
+    cheader.add("QTime", timeMillis);
+    if (rf != null) cheader.add(UpdateRequest.REPFACT, rf);
+    if (null != toleratedErrors) {
+      cheader.add("maxErrors", ToleratedUpdateError.getUserFriendlyMaxErrors(maxToleratedErrors));
+      cheader.add("errors", toleratedErrors);
+      if (maxToleratedErrors < toleratedErrors.size()) {
+        // cumulative errors are too high, we need to throw a client exception w/correct metadata
+
+        // NOTE: it shouldn't be possible for 1 == toleratedErrors.size(), because if that were the
+        // case then at least one shard should have thrown a real error before this, so we don't
+        // worry about having a more "singular" exception msg for that situation
+        StringBuilder msgBuf =
+            new StringBuilder()
+                .append(toleratedErrors.size())
+                .append(" Async failures during distributed update: ");
+
+        NamedList<String> metadata = new NamedList<>();
+        for (SimpleOrderedMap<String> err : toleratedErrors) {
+          ToleratedUpdateError te = ToleratedUpdateError.parseMap(err);
+          metadata.add(te.getMetadataKey(), te.getMetadataValue());
+
+          msgBuf.append("\n").append(te.getMessage());
+        }
+
+        SolrException toThrow =
+            new SolrException(SolrException.ErrorCode.BAD_REQUEST, msgBuf.toString());
+        toThrow.setMetadata(metadata);
+        throw toThrow;
+      }
+    }
+    for (Map.Entry<String, NamedList<Object>> entry : versions.entrySet()) {
+      condensed.add(entry.getKey(), entry.getValue());
+    }
+    condensed.add("responseHeader", cheader);
+    return condensed;
+  }
+
+  @SuppressWarnings({"rawtypes"})
+  public RouteResponse condenseResponse(NamedList<?> response, int timeMillis) {
+    return condenseResponse(response, timeMillis, RouteResponse::new);
+  }
+
+  @SuppressWarnings({"rawtypes"})
+  public static class RouteResponse<T extends LBSolrClient.Req> extends NamedList<Object> {
+    private NamedList<NamedList<?>> routeResponses;
+    private Map<String, T> routes;
+
+    public void setRouteResponses(NamedList<NamedList<?>> routeResponses) {
+      this.routeResponses = routeResponses;
+    }
+
+    public NamedList<NamedList<?>> getRouteResponses() {
+      return routeResponses;
+    }
+
+    public void setRoutes(Map<String, T> routes) {
+      this.routes = routes;
+    }
+
+    public Map<String, T> getRoutes() {
+      return routes;
+    }
+  }
+
+  public static class RouteException extends SolrException {
+
+    private NamedList<Throwable> throwables;
+    private Map<String, ? extends LBSolrClient.Req> routes;
+
+    public RouteException(
+        ErrorCode errorCode,
+        NamedList<Throwable> throwables,
+        Map<String, ? extends LBSolrClient.Req> routes) {
+      super(errorCode, throwables.getVal(0).getMessage(), throwables.getVal(0));
+      this.throwables = throwables;
+      this.routes = routes;
+
+      // create a merged copy of the metadata from all wrapped exceptions
+      NamedList<String> metadata = new NamedList<String>();
+      for (int i = 0; i < throwables.size(); i++) {
+        Throwable t = throwables.getVal(i);
+        if (t instanceof SolrException) {
+          SolrException e = (SolrException) t;
+          NamedList<String> eMeta = e.getMetadata();
+          if (null != eMeta) {
+            metadata.addAll(eMeta);
+          }
+        }
+      }
+      if (0 < metadata.size()) {
+        this.setMetadata(metadata);
+      }
+    }
+
+    public NamedList<Throwable> getThrowables() {
+      return throwables;
+    }
+
+    public Map<String, ? extends LBSolrClient.Req> getRoutes() {
+      return this.routes;
+    }
+  }
+
+  @Override
+  public NamedList<Object> request(SolrRequest<?> request, String collection)
+      throws SolrServerException, IOException {
+    // the collection parameter of the request overrides that of the parameter to this method
+    String requestCollection = request.getCollection();
+    if (requestCollection != null) {
+      collection = requestCollection;
+    } else if (collection == null) {
+      collection = defaultCollection;
+    }
+    List<String> inputCollections =
+        collection == null ? Collections.emptyList() : StrUtils.splitSmart(collection, ",", true);
+    return requestWithRetryOnStaleState(request, 0, inputCollections);
+  }
+
+  /**
+   * As this class doesn't watch external collections on the client side, there's a chance that the
+   * request will fail due to cached stale state, which means the state must be refreshed from ZK
+   * and retried.
+   */
+  protected NamedList<Object> requestWithRetryOnStaleState(
+      SolrRequest<?> request, int retryCount, List<String> inputCollections)
+      throws SolrServerException, IOException {
+    connect(); // important to call this before you start working with the ZkStateReader
+
+    // build up a _stateVer_ param to pass to the server containing all of the
+    // external collection state versions involved in this request, which allows
+    // the server to notify us that our cached state for one or more of the external
+    // collections is stale and needs to be refreshed ... this code has no impact on internal
+    // collections
+    String stateVerParam = null;
+    List<DocCollection> requestedCollections = null;
+    boolean isCollectionRequestOfV2 = false;
+    if (request instanceof V2RequestSupport) {
+      request = ((V2RequestSupport) request).getV2Request();
+    }
+    if (request instanceof V2Request) {
+      isCollectionRequestOfV2 = ((V2Request) request).isPerCollectionRequest();
+    }
+    boolean isAdmin = ADMIN_PATHS.contains(request.getPath());
+    boolean isUpdate = (request instanceof IsUpdateRequest) && (request instanceof UpdateRequest);
+    if (!inputCollections.isEmpty()
+        && !isAdmin
+        && !isCollectionRequestOfV2) { // don't do _stateVer_ checking for admin, v2 api requests
+      Set<String> requestedCollectionNames = resolveAliases(inputCollections, isUpdate);
+
+      StringBuilder stateVerParamBuilder = null;
+      for (String requestedCollection : requestedCollectionNames) {
+        // track the version of state we're using on the client side using the _stateVer_ param
+        DocCollection coll = getDocCollection(requestedCollection, null);
+        if (coll == null) {
+          throw new SolrException(
+              SolrException.ErrorCode.BAD_REQUEST, "Collection not found: " + requestedCollection);
+        }
+        int collVer = coll.getZNodeVersion();
+        if (requestedCollections == null)
+          requestedCollections = new ArrayList<>(requestedCollectionNames.size());
+        requestedCollections.add(coll);
+
+        if (stateVerParamBuilder == null) {
+          stateVerParamBuilder = new StringBuilder();
         } else {
-          throw new IllegalArgumentException("Both zkHosts and solrUrl cannot be null.");
+          stateVerParamBuilder.append(
+              "|"); // hopefully pipe is not an allowed char in a collection name
         }
+
+        stateVerParamBuilder.append(coll.getName()).append(":").append(collVer);
+      }
+
+      if (stateVerParamBuilder != null) {
+        stateVerParam = stateVerParamBuilder.toString();
       }
-      return new CloudSolrClient(this);
     }
 
-    @Override
-    public Builder getThis() {
-      return this;
+    if (request.getParams() instanceof ModifiableSolrParams) {
+      ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
+      if (stateVerParam != null) {
+        params.set(STATE_VERSION, stateVerParam);
+      } else {
+        params.remove(STATE_VERSION);
+      }
+    } // else: ??? how to set this ???
+
+    NamedList<Object> resp = null;
+    try {
+      resp = sendRequest(request, inputCollections);
+      // to avoid an O(n) operation we always add STATE_VERSION to the last and try to read it from
+      // there
+      Object o = resp == null || resp.size() == 0 ? null : resp.get(STATE_VERSION, resp.size() - 1);
+      if (o != null && o instanceof Map) {
+        // remove this because no one else needs this and tests would fail if they are comparing
+        // responses
+        resp.remove(resp.size() - 1);
+        Map<?, ?> invalidStates = (Map<?, ?>) o;
+        for (Map.Entry<?, ?> e : invalidStates.entrySet()) {
+          getDocCollection((String) e.getKey(), (Integer) e.getValue());
+        }
+      }
+    } catch (Exception exc) {
+
+      Throwable rootCause = SolrException.getRootCause(exc);
+      // don't do retry support for admin requests
+      // or if the request doesn't have a collection specified
+      // or request is v2 api and its method is not GET
+      if (inputCollections.isEmpty()
+          || isAdmin
+          || (request instanceof V2Request && request.getMethod() != SolrRequest.METHOD.GET)) {
+        if (exc instanceof SolrServerException) {
+          throw (SolrServerException) exc;
+        } else if (exc instanceof IOException) {
+          throw (IOException) exc;
+        } else if (exc instanceof RuntimeException) {
+          throw (RuntimeException) exc;
+        } else {
+          throw new SolrServerException(rootCause);
+        }
+      }
+
+      int errorCode =
+          (rootCause instanceof SolrException)
+              ? ((SolrException) rootCause).code()
+              : SolrException.ErrorCode.UNKNOWN.code;
+
+      boolean wasCommError =
+          (rootCause instanceof ConnectException
+              || rootCause instanceof SocketException
+              || wasCommError(rootCause));
+
+      if (wasCommError
+          || (exc instanceof RouteException
+              && (errorCode == 503)) // 404 because the core does not exist 503 service unavailable
+      // TODO there are other reasons for 404. We need to change the solr response format from HTML
+      // to structured data to know that
+      ) {
+        // it was a communication error. it is likely that
+        // the node to which the request to be sent is down . So , expire the state
+        // so that the next attempt would fetch the fresh state
+        // just re-read state for all of them, if it has not been retried
+        // in retryExpiryTime time
+        if (requestedCollections != null) {
+          for (DocCollection ext : requestedCollections) {
+            ExpiringCachedDocCollection cacheEntry = collectionStateCache.get(ext.getName());
+            if (cacheEntry == null) continue;
+            cacheEntry.maybeStale = true;
+          }
+        }
+        if (retryCount < MAX_STALE_RETRIES) { // if it is a communication error , we must try again
+          // may be, we have a stale version of the collection state
+          // and we could not get any information from the server
+          // it is probably not worth trying again and again because
+          // the state would not have been updated
+          log.info(
+              "Request to collection {} failed due to ({}) {}, retry={} maxRetries={} commError={} errorCode={} - retrying",
+              inputCollections,
+              errorCode,
+              rootCause,
+              retryCount,
+              MAX_STALE_RETRIES,
+              wasCommError,
+              errorCode);
+          return requestWithRetryOnStaleState(request, retryCount + 1, inputCollections);
+        }
+      } else {
+        log.info("request was not communication error it seems");
+      }
+      log.info(
+          "Request to collection {} failed due to ({}) {}, retry={} maxRetries={} commError={} errorCode={} ",
+          inputCollections,
+          errorCode,
+          rootCause,
+          retryCount,
+          MAX_STALE_RETRIES,
+          wasCommError,
+          errorCode);
+
+      boolean stateWasStale = false;
+      if (retryCount < MAX_STALE_RETRIES
+          && requestedCollections != null
+          && !requestedCollections.isEmpty()
+          && (SolrException.ErrorCode.getErrorCode(errorCode)
+                  == SolrException.ErrorCode.INVALID_STATE
+              || errorCode == 404)) {
+        // cached state for one or more external collections was stale
+        // re-issue request using updated state
+        stateWasStale = true;
+
+        // just re-read state for all of them, which is a little heavy handed but hopefully a rare
+        // occurrence
+        for (DocCollection ext : requestedCollections) {
+          collectionStateCache.remove(ext.getName());
+        }
+      }
+
+      // if we experienced a communication error, it's worth checking the state
+      // with ZK just to make sure the node we're trying to hit is still part of the collection
+      if (retryCount < MAX_STALE_RETRIES
+          && !stateWasStale
+          && requestedCollections != null
+          && !requestedCollections.isEmpty()
+          && wasCommError) {
+        for (DocCollection ext : requestedCollections) {
+          DocCollection latestStateFromZk = getDocCollection(ext.getName(), null);
+          if (latestStateFromZk.getZNodeVersion() != ext.getZNodeVersion()) {
+            // looks like we couldn't reach the server because the state was stale == retry
+            stateWasStale = true;
+            // we just pulled state from ZK, so update the cache so that the retry uses it
+            collectionStateCache.put(
+                ext.getName(), new ExpiringCachedDocCollection(latestStateFromZk));
+          }
+        }
+      }
+
+      if (requestedCollections != null) {
+        requestedCollections.clear(); // done with this
+      }
+
+      // if the state was stale, then we retry the request once with new state pulled from Zk
+      if (stateWasStale) {
+        log.warn(
+            "Re-trying request to collection(s) {} after stale state error from server.",
+            inputCollections);
+        resp = requestWithRetryOnStaleState(request, retryCount + 1, inputCollections);
+      } else {
+        if (exc instanceof SolrException
+            || exc instanceof SolrServerException
+            || exc instanceof IOException) {
+          throw exc;
+        } else {
+          throw new SolrServerException(rootCause);
+        }
+      }
+    }
+
+    return resp;
+  }
+
+  protected NamedList<Object> sendRequest(SolrRequest<?> request, List<String> inputCollections)
+      throws SolrServerException, IOException {
+    connect();
+
+    boolean sendToLeaders = false;
+    boolean isUpdate = false;
+
+    if (request instanceof IsUpdateRequest) {
+      if (request instanceof UpdateRequest) {
+        isUpdate = true;
+        if (inputCollections.size() > 1) {
+          throw new SolrException(
+              SolrException.ErrorCode.BAD_REQUEST,
+              "Update request must be sent to a single collection "
+                  + "or an alias: "
+                  + inputCollections);
+        }
+        String collection =
+            inputCollections.isEmpty()
+                ? null
+                : inputCollections.get(0); // getting first mimics HttpSolrCall
+        NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, collection);
+        if (response != null) {
+          return response;
+        }
+      }
+      sendToLeaders = true;
+    }
+
+    SolrParams reqParams = request.getParams();
+    if (reqParams == null) { // TODO fix getParams to never return null!
+      reqParams = new ModifiableSolrParams();
+    }
+
+    ReplicaListTransformer replicaListTransformer =
+        requestRLTGenerator.getReplicaListTransformer(reqParams);
+
+    final ClusterStateProvider provider = getClusterStateProvider();
+    final String urlScheme = provider.getClusterProperty(ZkStateReader.URL_SCHEME, "http");
+    final Set<String> liveNodes = provider.getLiveNodes();
+
+    final List<String> theUrlList = new ArrayList<>(); // we populate this as follows...
+
+    if (request instanceof V2Request) {
+      if (!liveNodes.isEmpty()) {
+        List<String> liveNodesList = new ArrayList<>(liveNodes);
+        Collections.shuffle(liveNodesList, rand);
+        theUrlList.add(Utils.getBaseUrlForNodeName(liveNodesList.get(0), urlScheme));
+      }
+
+    } else if (ADMIN_PATHS.contains(request.getPath())) {
+      for (String liveNode : liveNodes) {
+        theUrlList.add(Utils.getBaseUrlForNodeName(liveNode, urlScheme));
+      }
+
+    } else { // Typical...
+      Set<String> collectionNames = resolveAliases(inputCollections, isUpdate);
+      if (collectionNames.isEmpty()) {
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "No collection param specified on request and no default collection has been set: "
+                + inputCollections);
+      }
+
+      // TODO: not a big deal because of the caching, but we could avoid looking
+      //   at every shard when getting leaders if we tweaked some things
+
+      // Retrieve slices from the cloud state and, for each collection specified, add it to the Map
+      // of slices.
+      Map<String, Slice> slices = new HashMap<>();
+      String shardKeys = reqParams.get(ShardParams._ROUTE_);
+      for (String collectionName : collectionNames) {
+        DocCollection col = getDocCollection(collectionName, null);
+        if (col == null) {
+          throw new SolrException(
+              SolrException.ErrorCode.BAD_REQUEST, "Collection not found: " + collectionName);
+        }
+        Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams, col);
+        ClientUtils.addSlices(slices, collectionName, routeSlices, true);
+      }
+
+      // Gather URLs, grouped by leader or replica
+      List<Replica> sortedReplicas = new ArrayList<>();
+      List<Replica> replicas = new ArrayList<>();
+      for (Slice slice : slices.values()) {
+        Replica leader = slice.getLeader();
+        for (Replica replica : slice.getReplicas()) {
+          String node = replica.getNodeName();
+          if (!liveNodes.contains(node) // Must be a live node to continue
+              || replica.getState()
+                  != Replica.State.ACTIVE) // Must be an ACTIVE replica to continue
+          continue;
+          if (sendToLeaders && replica.equals(leader)) {
+            sortedReplicas.add(replica); // put leaders here eagerly (if sendToLeader mode)
+          } else {
+            replicas.add(replica); // replicas here
+          }
+        }
+      }
+
+      // Sort the leader replicas, if any, according to the request preferences    (none if
+      // !sendToLeaders)
+      replicaListTransformer.transform(sortedReplicas);
+
+      // Sort the replicas, if any, according to the request preferences and append to our list
+      replicaListTransformer.transform(replicas);
+
+      sortedReplicas.addAll(replicas);
+
+      String joinedInputCollections = StrUtils.join(inputCollections, ',');
+      Set<String> seenNodes = new HashSet<>();
+      sortedReplicas.forEach(
+          replica -> {
+            if (seenNodes.add(replica.getNodeName())) {
+              theUrlList.add(
+                  ZkCoreNodeProps.getCoreUrl(replica.getBaseUrl(), joinedInputCollections));
+            }
+          });
+
+      if (theUrlList.isEmpty()) {
+        collectionStateCache.keySet().removeAll(collectionNames);
+        throw new SolrException(
+            SolrException.ErrorCode.INVALID_STATE,
+            "Could not find a healthy node to handle the request.");
+      }
+    }
+
+    LBSolrClient.Req req = new LBSolrClient.Req(request, theUrlList);
+    LBSolrClient.Rsp rsp = getLbClient().request(req);
+    return rsp.getResponse();
+  }
+
+  /**
+   * Resolves the input collections to their possible aliased collections. Doesn't validate
+   * collection existence.
+   */
+  private Set<String> resolveAliases(List<String> inputCollections, boolean isUpdate) {
+    if (inputCollections.isEmpty()) {
+      return Collections.emptySet();
+    }
+    LinkedHashSet<String> uniqueNames = new LinkedHashSet<>(); // consistent ordering
+    for (String collectionName : inputCollections) {
+      if (getClusterStateProvider().getState(collectionName) == null) {
+        // perhaps it's an alias
+        uniqueNames.addAll(getClusterStateProvider().resolveAlias(collectionName));
+      } else {
+        uniqueNames.add(collectionName); // it's a collection
+      }
+    }
+    return uniqueNames;
+  }
+
+  public boolean isUpdatesToLeaders() {
+    return updatesToLeaders;
+  }
+
+  /** @return true if direct updates are sent to shard leaders only */
+  public boolean isDirectUpdatesToLeadersOnly() {
+    return directUpdatesToLeadersOnly;
+  }
+
+  /**
+   * If caches are expired they are refreshed after acquiring a lock. use this to set the number of
+   * locks
+   */
+  public void setParallelCacheRefreshes(int n) {
+    locks = objectList(n);
+  }
+
+  protected static ArrayList<Object> objectList(int n) {
+    ArrayList<Object> l = new ArrayList<>(n);
+    for (int i = 0; i < n; i++) l.add(new Object());
+    return l;
+  }
+
+  protected DocCollection getDocCollection(String collection, Integer expectedVersion)
+      throws SolrException {
+    if (expectedVersion == null) expectedVersion = -1;
+    if (collection == null) return null;
+    ExpiringCachedDocCollection cacheEntry = collectionStateCache.get(collection);
+    DocCollection col = cacheEntry == null ? null : cacheEntry.cached;
+    if (col != null) {
+      if (expectedVersion <= col.getZNodeVersion() && !cacheEntry.shouldRetry()) return col;
+    }
+
+    ClusterState.CollectionRef ref = getCollectionRef(collection);
+    if (ref == null) {
+      // no such collection exists
+      return null;
+    }
+    if (!ref.isLazilyLoaded()) {
+      // it is readily available just return it
+      return ref.get();
     }
+    List<Object> locks = this.locks;
+    final Object lock =
+        locks.get(
+            Math.abs(
+                Hash.murmurhash3_x86_32(collection, 0, collection.length(), 0) % locks.size()));
+    DocCollection fetchedCol = null;
+    synchronized (lock) {
+      /*we have waited for sometime just check once again*/
+      cacheEntry = collectionStateCache.get(collection);
+      col = cacheEntry == null ? null : cacheEntry.cached;
+      if (col != null) {
+        if (expectedVersion <= col.getZNodeVersion() && !cacheEntry.shouldRetry()) return col;
+      }
+      // We are going to fetch a new version
+      // we MUST try to get a new version
+      fetchedCol = ref.get(); // this is a call to ZK
+      if (fetchedCol == null) return null; // this collection no more exists
+      if (col != null && fetchedCol.getZNodeVersion() == col.getZNodeVersion()) {
+        cacheEntry.setRetriedAt(); // we retried and found that it is the same version
+        cacheEntry.maybeStale = false;
+      } else {
+        collectionStateCache.put(collection, new ExpiringCachedDocCollection(fetchedCol));
+      }
+      return fetchedCol;
+    }
+  }
+
+  ClusterState.CollectionRef getCollectionRef(String collection) {
+    return getClusterStateProvider().getState(collection);
+  }
+
+  /**
+   * Useful for determining the minimum achieved replication factor across all shards involved in
+   * processing an update request, typically useful for gauging the replication factor of a batch.
+   */
+  public int getMinAchievedReplicationFactor(String collection, NamedList<?> resp) {
+    // it's probably already on the top-level header set by condense
+    NamedList<?> header = (NamedList<?>) resp.get("responseHeader");
+    Integer achRf = (Integer) header.get(UpdateRequest.REPFACT);
+    if (achRf != null) return achRf.intValue();
+
+    // not on the top-level header, walk the shard route tree
+    Map<String, Integer> shardRf = getShardReplicationFactor(collection, resp);
+    for (Integer rf : shardRf.values()) {
+      if (achRf == null || rf < achRf) {
+        achRf = rf;
+      }
+    }
+    return (achRf != null) ? achRf.intValue() : -1;
+  }
+
+  /**
+   * Walks the NamedList response after performing an update request looking for the replication
+   * factor that was achieved in each shard involved in the request. For single doc updates, there
+   * will be only one shard in the return value.
+   */
+  public Map<String, Integer> getShardReplicationFactor(String collection, NamedList<?> resp) {
+    connect();
+
+    Map<String, Integer> results = new HashMap<>();
+    if (resp instanceof RouteResponse) {
+      NamedList<NamedList<?>> routes = ((RouteResponse<?>) resp).getRouteResponses();
+      DocCollection coll = getDocCollection(collection, null);
+      Map<String, String> leaders = new HashMap<>();
+      for (Slice slice : coll.getActiveSlicesArr()) {
+        Replica leader = slice.getLeader();
+        if (leader != null) {
+          ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader);
+          String leaderUrl = zkProps.getBaseUrl() + "/" + zkProps.getCoreName();
+          leaders.put(leaderUrl, slice.getName());
+          String altLeaderUrl = zkProps.getBaseUrl() + "/" + collection;
+          leaders.put(altLeaderUrl, slice.getName());
+        }
+      }
+
+      Iterator<Map.Entry<String, NamedList<?>>> routeIter = routes.iterator();
+      while (routeIter.hasNext()) {
+        Map.Entry<String, NamedList<?>> next = routeIter.next();
+        String host = next.getKey();
+        NamedList<?> hostResp = next.getValue();
+        Integer rf =
+            (Integer) ((NamedList<?>) hostResp.get("responseHeader")).get(UpdateRequest.REPFACT);
+        if (rf != null) {
+          String shard = leaders.get(host);
+          if (shard == null) {
+            if (host.endsWith("/")) shard = leaders.get(host.substring(0, host.length() - 1));
+            if (shard == null) {
+              shard = host;
+            }
+          }
+          results.put(shard, rf);
+        }
+      }
+    }
+    return results;
+  }
+
+  private static boolean hasInfoToFindLeaders(UpdateRequest updateRequest, String idField) {
+    final Map<SolrInputDocument, Map<String, Object>> documents = updateRequest.getDocumentsMap();
+    final Map<String, Map<String, Object>> deleteById = updateRequest.getDeleteByIdMap();
+
+    final boolean hasNoDocuments = (documents == null || documents.isEmpty());
+    final boolean hasNoDeleteById = (deleteById == null || deleteById.isEmpty());
+    if (hasNoDocuments && hasNoDeleteById) {
+      // no documents and no delete-by-id, so no info to find leader(s)
+      return false;
+    }
+
+    if (documents != null) {
+      for (final Map.Entry<SolrInputDocument, Map<String, Object>> entry : documents.entrySet()) {
+        final SolrInputDocument doc = entry.getKey();
+        final Object fieldValue = doc.getFieldValue(idField);
+        if (fieldValue == null) {
+          // a document with no id field value, so can't find leader for it
+          return false;
+        }
+      }
+    }
+
+    return true;
   }
 }
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientCloudManager.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientCloudManager.java
index 02649a7..7a58a80 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientCloudManager.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientCloudManager.java
@@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory;
 public class SolrClientCloudManager implements SolrCloudManager {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  protected final CloudSolrClient solrClient;
+  protected final CloudLegacySolrClient solrClient;
   private final ZkDistribStateManager stateManager;
   private final DistributedQueueFactory queueFactory;
   private final ZkStateReader zkStateReader;
@@ -60,12 +60,15 @@ public class SolrClientCloudManager implements SolrCloudManager {
   private final boolean closeObjectCache;
   private volatile boolean isClosed;
 
-  public SolrClientCloudManager(DistributedQueueFactory queueFactory, CloudSolrClient solrClient) {
+  public SolrClientCloudManager(
+      DistributedQueueFactory queueFactory, CloudLegacySolrClient solrClient) {
     this(queueFactory, solrClient, null);
   }
 
   public SolrClientCloudManager(
-      DistributedQueueFactory queueFactory, CloudSolrClient solrClient, ObjectCache objectCache) {
+      DistributedQueueFactory queueFactory,
+      CloudLegacySolrClient solrClient,
+      ObjectCache objectCache) {
     this.queueFactory = queueFactory;
     this.solrClient = solrClient;
     this.zkStateReader = ZkStateReader.from(solrClient);
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java
index 07bb2c8..4c26154 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java
@@ -61,7 +61,7 @@ public class SolrClientNodeStateProvider implements NodeStateProvider, MapWriter
   public static final String METRICS_PREFIX = "metrics:";
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  private final CloudSolrClient solrClient;
+  private final CloudLegacySolrClient solrClient;
   protected final Map<String, Map<String, Map<String, List<Replica>>>>
       nodeVsCollectionVsShardVsReplicaInfo = new HashMap<>();
   private Map<String, Object> snitchSession = new HashMap<>();
@@ -69,7 +69,7 @@ public class SolrClientNodeStateProvider implements NodeStateProvider, MapWriter
   @SuppressWarnings({"rawtypes"})
   private Map<String, Map> nodeVsTags = new HashMap<>();
 
-  public SolrClientNodeStateProvider(CloudSolrClient solrClient) {
+  public SolrClientNodeStateProvider(CloudLegacySolrClient solrClient) {
     this.solrClient = solrClient;
     try {
       readReplicaDetails();
@@ -324,7 +324,7 @@ public class SolrClientNodeStateProvider implements NodeStateProvider, MapWriter
     private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
     ZkClientClusterStateProvider zkClientClusterStateProvider;
-    CloudSolrClient solrClient;
+    CloudLegacySolrClient solrClient;
 
     public boolean isNodeAlive(String node) {
       if (zkClientClusterStateProvider != null) {
@@ -337,7 +337,7 @@ public class SolrClientNodeStateProvider implements NodeStateProvider, MapWriter
         SnitchInfo perSnitch,
         String node,
         Map<String, Object> session,
-        CloudSolrClient solrClient) {
+        CloudLegacySolrClient solrClient) {
       super(perSnitch, node, session);
       this.solrClient = solrClient;
       this.zkClientClusterStateProvider =
@@ -395,7 +395,7 @@ public class SolrClientNodeStateProvider implements NodeStateProvider, MapWriter
       String url = zkClientClusterStateProvider.getZkStateReader().getBaseUrlForNodeName(solrNode);
 
       GenericSolrRequest request = new GenericSolrRequest(SolrRequest.METHOD.POST, path, params);
-      try (HttpSolrClient client =
+      try (var client =
           new HttpSolrClient.Builder()
               .withHttpClient(solrClient.getHttpClient())
               .withBaseSolrUrl(url)
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
index 5fdc5f1..555fed9 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
@@ -46,7 +46,7 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {
   private volatile boolean isClosed = false;
 
   /** Extracts this from the client, or throws an exception if of the wrong type. */
-  public static ZkClientClusterStateProvider from(BaseCloudSolrClient client) {
+  public static ZkClientClusterStateProvider from(CloudSolrClient client) {
     if (client.getClusterStateProvider() instanceof ZkClientClusterStateProvider) {
       return (ZkClientClusterStateProvider) client.getClusterStateProvider();
     }
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java
index 28d0d36..d07e549 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/SolrClientCache.java
@@ -27,6 +27,7 @@ import java.util.Objects;
 import java.util.Optional;
 import org.apache.http.client.HttpClient;
 import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
@@ -82,8 +83,8 @@ public class SolrClientCache implements Serializable {
     } else {
       final List<String> hosts = new ArrayList<String>();
       hosts.add(zkHost);
-      CloudSolrClient.Builder builder =
-          new CloudSolrClient.Builder(hosts, Optional.empty())
+      var builder =
+          new CloudLegacySolrClient.Builder(hosts, Optional.empty())
               .withSocketTimeout(socketTimeout)
               .withConnectionTimeout(conTimeout);
       if (httpClient != null) {
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/Facet2DStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/Facet2DStream.java
index 5657a33..344ffcf 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/Facet2DStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/Facet2DStream.java
@@ -27,8 +27,8 @@ import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.stream.Collectors;
 import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder;
 import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.comp.ComparatorOrder;
@@ -323,7 +323,7 @@ public class Facet2DStream extends TupleStream implements Expressible {
       final List<String> hosts = new ArrayList<>();
       hosts.add(zkHost);
       cloudSolrClient =
-          new Builder(hosts, Optional.empty())
+          new CloudLegacySolrClient.Builder(hosts, Optional.empty())
               .withSocketTimeout(30000)
               .withConnectionTimeout(15000)
               .build();
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
index f8c17a7..948f60a 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
@@ -28,8 +28,8 @@ import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.stream.Collectors;
 import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
@@ -663,7 +663,7 @@ public class FacetStream extends TupleStream implements Expressible, ParallelMet
       final List<String> hosts = new ArrayList<>();
       hosts.add(zkHost);
       cloudSolrClient =
-          new Builder(hosts, Optional.empty())
+          new CloudLegacySolrClient.Builder(hosts, Optional.empty())
               .withSocketTimeout(30000)
               .withConnectionTimeout(15000)
               .build();
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java
index c750d1b..7f12106 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java
@@ -31,6 +31,7 @@ import java.util.Optional;
 import java.util.Random;
 import java.util.stream.Collectors;
 import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
@@ -212,7 +213,7 @@ public class RandomStream extends TupleStream implements Expressible {
       final List<String> hosts = new ArrayList<>();
       hosts.add(zkHost);
       cloudSolrClient =
-          new CloudSolrClient.Builder(hosts, Optional.empty())
+          new CloudLegacySolrClient.Builder(hosts, Optional.empty())
               .withSocketTimeout(30000)
               .withConnectionTimeout(15000)
               .build();
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SearchStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SearchStream.java
index 211c133..87e8d6c 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SearchStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SearchStream.java
@@ -26,6 +26,7 @@ import java.util.Locale;
 import java.util.Map.Entry;
 import java.util.Optional;
 import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
@@ -196,7 +197,7 @@ public class SearchStream extends TupleStream implements Expressible {
     } else {
       final List<String> hosts = new ArrayList<>();
       hosts.add(zkHost);
-      cloudSolrClient = new CloudSolrClient.Builder(hosts, Optional.empty()).build();
+      cloudSolrClient = new CloudLegacySolrClient.Builder(hosts, Optional.empty()).build();
     }
 
     QueryRequest request = new QueryRequest(params, SolrRequest.METHOD.POST);
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TimeSeriesStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TimeSeriesStream.java
index 2e20006..a104c19 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TimeSeriesStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TimeSeriesStream.java
@@ -28,8 +28,8 @@ import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.stream.Collectors;
 import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder;
 import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.comp.StreamComparator;
@@ -348,7 +348,7 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
     } else {
       final List<String> hosts = new ArrayList<>();
       hosts.add(zkHost);
-      cloudSolrClient = new Builder(hosts, Optional.empty()).build();
+      cloudSolrClient = new CloudLegacySolrClient.Builder(hosts, Optional.empty()).build();
     }
 
     String json = getJsonFacetString(field, metrics, start, end, gap);
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
index 7db4d1b..ae98707 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
@@ -38,7 +38,7 @@ import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.stream.Collectors;
-import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.comp.ComparatorOrder;
@@ -318,7 +318,7 @@ public class TopicStream extends CloudSolrStream implements Expressible {
     } else {
       final List<String> hosts = new ArrayList<String>();
       hosts.add(zkHost);
-      cloudSolrClient = new Builder(hosts, Optional.empty()).build();
+      cloudSolrClient = new CloudLegacySolrClient.Builder(hosts, Optional.empty()).build();
       this.cloudSolrClient.connect();
     }
 
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/UpdateStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/UpdateStream.java
index 308260d..362ea2c 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/UpdateStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/UpdateStream.java
@@ -23,8 +23,8 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Optional;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.CloudSolrClient.Builder;
 import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.comp.StreamComparator;
@@ -326,7 +326,7 @@ public class UpdateStream extends TupleStream implements Expressible {
     } else {
       final List<String> hosts = new ArrayList<>();
       hosts.add(zkHost);
-      this.cloudSolrClient = new Builder(hosts, Optional.empty()).build();
+      this.cloudSolrClient = new CloudLegacySolrClient.Builder(hosts, Optional.empty()).build();
       this.cloudSolrClient.connect();
     }
   }
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index c44e220..21a201d 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -47,7 +47,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Predicate;
 import java.util.function.UnaryOperator;
 import java.util.stream.Collectors;
-import org.apache.solr.client.solrj.impl.BaseCloudSolrClient;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
 import org.apache.solr.common.AlreadyClosedException;
 import org.apache.solr.common.Callable;
@@ -228,7 +228,7 @@ public class ZkStateReader implements SolrCloseable {
    *
    * @throws IllegalArgumentException if solrClient isn't ZK based.
    */
-  public static ZkStateReader from(BaseCloudSolrClient solrClient) {
+  public static ZkStateReader from(CloudSolrClient solrClient) {
     try {
       var provider = (ZkClientClusterStateProvider) solrClient.getClusterStateProvider();
       return provider.getZkStateReader();
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientRetryTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientRetryTest.java
index fe6e19d..ff255cb 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientRetryTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientRetryTest.java
@@ -76,7 +76,7 @@ public class CloudHttp2SolrClientRetryTest extends SolrCloudTestCase {
       TestInjection.failUpdateRequests = "true:100";
       try {
         expectThrows(
-            BaseCloudSolrClient.RouteException.class,
+            CloudSolrClient.RouteException.class,
             "Expected an exception on the client when failure is injected during updates",
             () -> {
               solrClient.add(collectionName, new SolrInputDocument("id", "2"));
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
index c727c93..e54f3e8 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
@@ -16,7 +16,7 @@
  */
 package org.apache.solr.client.solrj.impl;
 
-import static org.apache.solr.client.solrj.impl.BaseCloudSolrClient.RouteResponse;
+import static org.apache.solr.client.solrj.impl.CloudSolrClient.RouteResponse;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -854,7 +854,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
     try (CloudSolrClient solrClient =
         getCloudSolrClient(cluster.getZkServer().getZkAddress(), client)) {
 
-      assertTrue(solrClient.getLbClient().getHttpClient() == client);
+      assertSame(((CloudLegacySolrClient) solrClient).getLbClient().getHttpClient(), client);
 
     } finally {
       HttpClientUtil.close(client);
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
index d45dcf1..60e74ff 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
@@ -83,8 +83,8 @@ public class CloudSolrClientBuilderTest extends SolrTestCase {
   @Test
   @SuppressWarnings({"try"})
   public void test0Timeouts() throws IOException {
-    try (CloudSolrClient createdClient =
-        new Builder(Collections.singletonList(ANY_ZK_HOST), Optional.empty())
+    try (var createdClient =
+        new CloudLegacySolrClient.Builder(Collections.singletonList(ANY_ZK_HOST), Optional.empty())
             .withSocketTimeout(0)
             .withConnectionTimeout(0)
             .build()) {}
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
index 919563b..37abe20 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
@@ -98,7 +98,7 @@ public class CloudSolrClientMultiConstructorTest extends SolrTestCase {
 
     final Optional<String> chrootOption =
         withChroot == false ? Optional.empty() : Optional.of(chroot);
-    try (CloudSolrClient client = new CloudSolrClient.Builder(hosts, chrootOption).build()) {
+    try (var client = new CloudLegacySolrClient.Builder(hosts, chrootOption).build()) {
       assertEquals(sb.toString(), ZkClientClusterStateProvider.from(client).getZkHost());
     }
   }
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index a67df6d..d0318b9 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -121,7 +121,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
 
     final List<String> solrUrls = new ArrayList<>();
     solrUrls.add(cluster.getJettySolrRunner(0).getBaseUrl().toString());
-    httpBasedCloudSolrClient = new CloudSolrClient.Builder(solrUrls).build();
+    httpBasedCloudSolrClient = new CloudLegacySolrClient.Builder(solrUrls).build();
   }
 
   @After
@@ -857,7 +857,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
     try (CloudSolrClient solrClient =
         getCloudSolrClient(cluster.getZkServer().getZkAddress(), client)) {
 
-      assertTrue(solrClient.getLbClient().getHttpClient() == client);
+      assertSame(((CloudLegacySolrClient) solrClient).getLbClient().getHttpClient(), client);
 
     } finally {
       HttpClientUtil.close(client);
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClusterStateSSLTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClusterStateSSLTest.java
index 9b12452..1cff762 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClusterStateSSLTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/HttpClusterStateSSLTest.java
@@ -88,7 +88,7 @@ public class HttpClusterStateSSLTest extends SolrCloudTestCase {
     try (CloudSolrClient httpBasedCloudSolrClient =
         new CloudSolrClient.Builder(Collections.singletonList(url0.toExternalForm())).build()) {
       ClusterStateProvider csp = httpBasedCloudSolrClient.getClusterStateProvider();
-      assertTrue(csp instanceof HttpClusterStateProvider);
+      assertTrue(csp instanceof Http2ClusterStateProvider);
       verifyUrlSchemeInClusterState(csp.getClusterState(), collectionId, expectedReplicas);
     }
 
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index 2f57c8d..9ca5bbc 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -95,6 +95,7 @@ import org.apache.solr.client.solrj.ResponseParser;
 import org.apache.solr.client.solrj.embedded.JettyConfig;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
@@ -2614,7 +2615,8 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
    * A variant of {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} that will
    * randomize some internal settings.
    */
-  public static class CloudSolrClientBuilder extends CloudSolrClient.Builder {
+  @Deprecated
+  public static class CloudSolrClientBuilder extends CloudLegacySolrClient.Builder {
 
     public CloudSolrClientBuilder(List<String> zkHosts, Optional<String> zkChroot) {
       super(zkHosts, zkChroot);
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java
index b0e4b4f..3655765 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java
@@ -22,6 +22,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -179,7 +180,7 @@ public abstract class AbstractChaosMonkeyNothingIsSafeTestBase
       if (runFullThrottle) {
         ftIndexThread =
             new FullThrottleStoppableIndexingThread(
-                cloudClient.getHttpClient(),
+                ((CloudLegacySolrClient) cloudClient).getHttpClient(),
                 controlClient,
                 cloudClient,
                 clients,
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
index 1a45e99..348d8a4 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -134,9 +135,9 @@ public abstract class AbstractRecoveryZkTestBase extends SolrCloudTestCase {
     long[] numCounts = new long[replicas.size()];
     int i = 0;
     for (Replica replica : replicas) {
-      try (HttpSolrClient client =
+      try (var client =
           new HttpSolrClient.Builder(replica.getCoreUrl())
-              .withHttpClient(cluster.getSolrClient().getHttpClient())
+              .withHttpClient(((CloudLegacySolrClient) cluster.getSolrClient()).getHttpClient())
               .build()) {
         numCounts[i] =
             client.query(new SolrQuery("*:*").add("distrib", "false")).getResults().getNumFound();
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 3e43547..12569f3 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -57,6 +57,7 @@ import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.embedded.JettyConfig;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.embedded.SSLConfig;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
@@ -741,7 +742,7 @@ public class MiniSolrCloudCluster {
   }
 
   protected CloudSolrClient buildSolrClient() {
-    return new CloudSolrClient.Builder(
+    return new CloudLegacySolrClient.Builder(
             Collections.singletonList(getZkServer().getZkAddress()), Optional.empty())
         .withSocketTimeout(90000)
         .withConnectionTimeout(15000)
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
index 9caff80..1611177 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
@@ -34,6 +34,7 @@ import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.client.solrj.request.CoreStatus;
@@ -291,7 +292,9 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
       throws IOException, SolrServerException {
     JettySolrRunner jetty = cluster.getReplicaJetty(replica);
     try (HttpSolrClient client =
-        getHttpSolrClient(jetty.getBaseUrl().toString(), cluster.getSolrClient().getHttpClient())) {
+        getHttpSolrClient(
+            jetty.getBaseUrl().toString(),
+            ((CloudLegacySolrClient) cluster.getSolrClient()).getHttpClient())) {
       return CoreAdminRequest.getCoreStatus(replica.getCoreName(), client);
     }
   }
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
index d3294e9..3a7bf20 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
@@ -31,6 +31,7 @@ import java.util.TreeMap;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudLegacySolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -483,9 +484,9 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
     Map<String, Integer> shardToDocCount = new TreeMap<>();
     for (Slice slice : docCollection.getActiveSlices()) {
       String shardName = slice.getName();
-      try (HttpSolrClient leaderClient =
+      try (var leaderClient =
           new HttpSolrClient.Builder(slice.getLeader().getCoreUrl())
-              .withHttpClient(client.getHttpClient())
+              .withHttpClient(((CloudLegacySolrClient) client).getHttpClient())
               .build()) {
         long docsInShard =
             leaderClient