Posted to commits@solr.apache.org by ds...@apache.org on 2022/03/15 03:59:31 UTC

[solr] branch main updated: SOLR-16061: CloudSolrClient changes, hide ZooKeeper (#708)

This is an automated email from the ASF dual-hosted git repository.

dsmiley pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git


The following commit(s) were added to refs/heads/main by this push:
     new f81161b  SOLR-16061: CloudSolrClient changes, hide ZooKeeper (#708)
f81161b is described below

commit f81161b93a37ebe9ac6454a82f34b5981da412cb
Author: Haythem Khiri <ha...@yahoo.fr>
AuthorDate: Tue Mar 15 04:59:24 2022 +0100

    SOLR-16061: CloudSolrClient changes, hide ZooKeeper (#708)
    
    BaseCloudSolrClient:
    * new getClusterState() convenience via getClusterStateProvider()
    * removed getZkStateReader; instead use the new ZkStateReader.from(client)
    * removed getZkHost, setZkConnectTimeout, setZkClientTimeout; instead use ZkClientClusterStateProvider.from(client) and the equivalent methods there.
    
    SolrCloudManager:
    * new getClusterState() convenience via getClusterStateProvider()
    
    MiniSolrCloudCluster:
    * new getZkStateReader convenience via the solrClient
    
    Co-authored-by: Haythem Khiri <hk...@salesforce.com>
    Co-authored-by: David Smiley <ds...@salesforce.com>
    Co-authored-by: Jan Høydahl <ja...@users.noreply.github.com>
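
The commit message above describes an API migration away from ZooKeeper-specific methods on the client. The following minimal Java sketch is an editorial illustration, not part of the commit: the ZooKeeper address and the class name are assumptions, while the calls it uses (the builder, connect(), getClusterState(), ZkStateReader.from(client), ZkClientClusterStateProvider.from(client), getQuorumHosts(), getBaseUrlForNodeName()) are taken from the commit message and the diff below.

import java.util.Collections;
import java.util.Optional;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkStateReader;

public class HideZkMigrationSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: a SolrCloud cluster reachable via this ZooKeeper address.
    String zkHost = "localhost:9983";
    try (CloudSolrClient client =
        new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
            .build()) {
      client.connect();

      // Before this commit: client.getZkStateReader().getClusterState()
      // After: the cluster state is reachable without touching ZooKeeper APIs.
      ClusterState clusterState = client.getClusterState();
      System.out.println("Live nodes: " + clusterState.getLiveNodes());

      // Callers that still need ZooKeeper-level access unwrap it explicitly
      // with the new static helper instead of a getter on the client itself.
      ZkStateReader zkStateReader = ZkStateReader.from(client);
      String firstLiveNode = clusterState.getLiveNodes().iterator().next();
      System.out.println("Base URL: " + zkStateReader.getBaseUrlForNodeName(firstLiveNode));

      // Replacement for the removed getZkHost(): ask the cluster state provider
      // (the commit message also points the removed setZk*Timeout callers here).
      ZkClientClusterStateProvider zkProvider = ZkClientClusterStateProvider.from(client);
      System.out.println("ZK quorum: " + zkProvider.getQuorumHosts());
    }
  }
}

In tests, the same idea applies through MiniSolrCloudCluster, which now exposes getZkStateReader() directly, so the test changes in the diff call cluster.getZkStateReader() rather than cluster.getSolrClient().getZkStateReader().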
---
 solr/CHANGES.txt                                   |   2 +
 .../src/java/org/apache/solr/cloud/CloudUtil.java  |   2 +-
 .../cloud/api/collections/CreateCollectionCmd.java |  13 +-
 ...istributedCollectionConfigSetCommandRunner.java |   5 +-
 .../OverseerCollectionMessageHandler.java          |   2 +-
 .../api/collections/ReindexCollectionCmd.java      |  10 +-
 .../impl/CollectionsRepairEventListener.java       |   2 +-
 .../impl/SimpleClusterAbstractionsImpl.java        |   2 +-
 .../src/java/org/apache/solr/util/SolrCLI.java     |  27 ++--
 .../client/solrj/impl/ConnectionReuseTest.java     |   2 +-
 .../test/org/apache/solr/cloud/AddReplicaTest.java |  23 ++--
 .../apache/solr/cloud/AliasIntegrationTest.java    |  61 +++------
 ...aosMonkeyNothingIsSafeWithPullReplicasTest.java |   7 +-
 .../ChaosMonkeySafeLeaderWithPullReplicasTest.java |   8 +-
 .../solr/cloud/ChaosMonkeyShardSplitTest.java      |   4 +-
 .../org/apache/solr/cloud/CleanupOldIndexTest.java |   2 +-
 .../cloud/CloudExitableDirectoryReaderTest.java    |   2 +-
 .../org/apache/solr/cloud/CollectionPropsTest.java |  26 ++--
 .../apache/solr/cloud/CollectionsAPISolrJTest.java |  78 +++---------
 .../solr/cloud/CreateCollectionCleanupTest.java    |  12 +-
 .../apache/solr/cloud/CreateRoutedAliasTest.java   |   6 +-
 .../test/org/apache/solr/cloud/DeleteNodeTest.java |   7 +-
 .../org/apache/solr/cloud/DeleteReplicaTest.java   |  10 +-
 .../DistribDocExpirationUpdateProcessorTest.java   |   6 +-
 .../solr/cloud/DistribJoinFromCollectionTest.java  |   2 +-
 .../solr/cloud/DistributedVersionInfoTest.java     |   2 +-
 .../org/apache/solr/cloud/ForceLeaderTest.java     |  21 ++--
 .../solr/cloud/FullSolrCloudDistribCmdsTest.java   |  48 +++----
 .../solr/cloud/HttpPartitionOnCommitTest.java      |  18 +--
 .../org/apache/solr/cloud/HttpPartitionTest.java   |  24 ++--
 .../solr/cloud/LeaderElectionContextKeyTest.java   |   6 +-
 .../solr/cloud/LeaderElectionIntegrationTest.java  |   3 +-
 .../cloud/LeaderFailoverAfterPartitionTest.java    |   5 +-
 .../cloud/LeaderFailureAfterFreshStartTest.java    |   2 +-
 .../apache/solr/cloud/LeaderTragicEventTest.java   |   2 +-
 .../solr/cloud/LeaderVoteWaitTimeoutTest.java      |   9 +-
 .../org/apache/solr/cloud/MigrateRouteKeyTest.java |   2 +-
 .../solr/cloud/NestedShardedAtomicUpdateTest.java  |  26 +---
 .../OverseerCollectionConfigSetProcessorTest.java  |   1 +
 .../solr/cloud/ParallelCommitExecutionTest.java    |   3 +-
 .../apache/solr/cloud/PeerSyncReplicationTest.java |   2 +-
 .../apache/solr/cloud/ReindexCollectionTest.java   |  12 +-
 .../org/apache/solr/cloud/ReplaceNodeTest.java     |  23 ++--
 .../apache/solr/cloud/ReplicationFactorTest.java   |   5 +-
 .../org/apache/solr/cloud/RollingRestartTest.java  |  45 +++----
 .../test/org/apache/solr/cloud/RouteFieldTest.java |   6 +-
 .../org/apache/solr/cloud/SSLMigrationTest.java    |   8 +-
 .../org/apache/solr/cloud/ShardRoutingTest.java    |   8 +-
 .../apache/solr/cloud/SolrCloudExampleTest.java    |  14 +--
 .../test/org/apache/solr/cloud/SplitShardTest.java |   8 +-
 .../apache/solr/cloud/TestCloudConsistency.java    |  49 ++++----
 .../apache/solr/cloud/TestCloudDeleteByQuery.java  |   2 +-
 .../TestCloudPhrasesIdentificationComponent.java   |   3 +-
 .../solr/cloud/TestCloudPseudoReturnFields.java    |   3 +-
 .../org/apache/solr/cloud/TestCloudRecovery.java   |  13 +-
 .../org/apache/solr/cloud/TestCloudRecovery2.java  |   2 +-
 .../solr/cloud/TestCloudSearcherWarming.java       |  40 ++----
 .../cloud/TestDeleteCollectionOnDownNodes.java     |   6 +-
 .../solr/cloud/TestDistribDocBasedVersion.java     |   8 +-
 .../org/apache/solr/cloud/TestLazySolrCluster.java |   4 +-
 .../cloud/TestLeaderElectionWithEmptyReplica.java  |  18 +--
 .../org/apache/solr/cloud/TestPrepRecovery.java    |   2 +-
 .../org/apache/solr/cloud/TestPullReplica.java     |  27 ++--
 .../solr/cloud/TestPullReplicaErrorHandling.java   |  13 +-
 .../apache/solr/cloud/TestRandomFlRTGCloud.java    |   8 +-
 .../solr/cloud/TestRandomRequestDistribution.java  |  15 +--
 .../apache/solr/cloud/TestRebalanceLeaders.java    |  81 ++++--------
 .../apache/solr/cloud/TestRequestForwarding.java   |   4 +-
 .../org/apache/solr/cloud/TestSegmentSorting.java  |   3 -
 .../solr/cloud/TestShortCircuitedRequests.java     |  13 +-
 .../solr/cloud/TestSkipOverseerOperations.java     |   6 +-
 .../cloud/TestStressCloudBlindAtomicUpdates.java   |   5 +-
 .../solr/cloud/TestStressInPlaceUpdates.java       |   6 +-
 .../org/apache/solr/cloud/TestStressLiveNodes.java |   4 +-
 .../solr/cloud/TestTlogReplayVsRecovery.java       |  37 +++---
 .../org/apache/solr/cloud/TestTlogReplica.java     |  43 +++----
 .../cloud/TestTolerantUpdateProcessorCloud.java    |   2 +-
 .../cloud/TestWaitForStateWithJettyShutdowns.java  |   9 +-
 .../test/org/apache/solr/cloud/ZkFailoverTest.java |   2 +-
 .../api/collections/CollectionReloadTest.java      |   5 +-
 .../CollectionsAPIAsyncDistributedZkTest.java      |  13 +-
 .../api/collections/ReplicaPropertiesBase.java     |  11 +-
 .../solr/cloud/api/collections/ShardSplitTest.java | 120 ++++++++----------
 .../SimpleCollectionCreateDeleteTest.java          |   7 +-
 .../cloud/api/collections/SplitByPrefixTest.java   |  14 +--
 .../cloud/api/collections/TestCollectionAPI.java   |  40 +++---
 .../TestCollectionsAPIViaSolrCloudCluster.java     |  11 +-
 .../api/collections/TestReplicaProperties.java     |   7 +-
 .../overseer/ZkCollectionPropsCachingTest.java     |  10 +-
 .../impl/PlacementPluginIntegrationTest.java       |  11 +-
 .../impl/SimpleClusterAbstractionsTest.java        |   2 +-
 .../apache/solr/core/BlobRepositoryCloudTest.java  |   2 +-
 .../core/snapshots/TestSolrCloudSnapshots.java     |  22 ++--
 .../solr/core/snapshots/TestSolrCoreSnapshots.java |   6 +-
 .../org/apache/solr/handler/TestBlobHandler.java   |   3 +-
 .../org/apache/solr/handler/TestConfigReload.java  |   9 +-
 .../org/apache/solr/handler/TestReqParamsAPI.java  |   4 +-
 .../solr/handler/TestSolrConfigHandlerCloud.java   |   9 +-
 .../handler/TestSolrConfigHandlerConcurrent.java   |   3 +-
 .../solr/handler/TestStressIncrementalBackup.java  |   1 -
 .../solr/handler/TestStressThreadBackup.java       |   1 -
 .../solr/handler/TestSystemCollAutoCreate.java     |   3 -
 .../component/CustomHighlightComponentTest.java    |   2 +-
 .../DistributedQueryComponentOptimizationTest.java |   4 +-
 .../solr/handler/component/SearchHandlerTest.java  |  18 +--
 .../handler/component/ShardsAllowListTest.java     |   1 -
 .../component/TestTrackingShardHandlerFactory.java |   9 +-
 .../solr/handler/component/UpdateLogCloudTest.java |   6 +-
 .../transform/TestSubQueryTransformerDistrib.java  |   2 +-
 .../schema/ManagedSchemaRoundRobinCloudTest.java   |   2 +-
 .../PreAnalyzedFieldManagedSchemaCloudTest.java    |   2 +-
 .../apache/solr/schema/SchemaApiFailureTest.java   |   2 +-
 .../apache/solr/schema/TestCloudSchemaless.java    |   8 +-
 .../search/facet/TestCloudJSONFacetJoinDomain.java |   3 +-
 .../solr/search/facet/TestCloudJSONFacetSKG.java   |   3 +-
 .../search/facet/TestCloudJSONFacetSKGEquiv.java   |   3 +-
 .../search/join/CrossCollectionJoinQueryTest.java  |   2 +-
 .../solr/search/join/TestCloudNestedDocsSort.java  |   3 +-
 .../apache/solr/servlet/HttpSolrCallCloudTest.java |   2 +-
 .../solr/update/DeleteByIdWithRouterFieldTest.java |  13 +-
 .../solr/update/TestInPlaceUpdatesDistrib.java     |  27 ++--
 .../processor/RoutedAliasUpdateProcessorTest.java  |  18 +--
 .../processor/TemplateUpdateProcessorTest.java     |   3 +-
 .../TimeRoutedAliasUpdateProcessorTest.java        |   2 +-
 .../stream/AnalyticsShardRequestManager.java       |   2 +-
 .../solr/analytics/SolrAnalyticsTestCase.java      |   2 +-
 solr/modules/hadoop-auth/build.gradle              |   2 +
 .../TestRuleBasedAuthorizationWithKerberos.java    |   3 +-
 .../hadoop/TestSolrCloudWithHadoopAuthPlugin.java  |   3 +-
 .../hadoop/TestSolrCloudWithKerberosAlt.java       |   3 +-
 .../security/hadoop/TestZkAclsWithHadoopAuth.java  |   2 +-
 .../solr/hdfs/snapshots/SolrSnapshotsTool.java     |   3 +-
 .../hdfs/cloud/MoveReplicaHdfsFailoverTest.java    |  38 ++----
 .../SharedFileSystemAutoReplicaFailoverTest.java   |  28 +++--
 .../org/apache/solr/hdfs/cloud/StressHdfsTest.java |  18 +--
 .../collections/TestHdfsCloudBackupRestore.java    |   2 +-
 .../hdfs/handler/TestHdfsBackupRestoreCore.java    |   3 +-
 .../org/apache/solr/ltr/TestLTROnSolrCloud.java    |   4 +-
 .../org/apache/solr/handler/sql/SolrSchema.java    |   2 +-
 .../solr/prometheus/scraper/SolrCloudScraper.java  |  12 +-
 .../solr/prometheus/scraper/SolrScraper.java       |   2 +-
 .../prometheus/PrometheusExporterTestBase.java     |   2 +-
 .../prometheus/scraper/SolrCloudScraperTest.java   |   7 +-
 .../cloud/DelegatingClusterStateProvider.java      |  10 +-
 .../solr/client/solrj/cloud/SolrCloudManager.java  |   5 +
 .../client/solrj/impl/BaseCloudSolrClient.java     |  31 +----
 .../solrj/impl/BaseHttpClusterStateProvider.java   |  10 +-
 .../client/solrj/impl/ClusterStateProvider.java    |   4 +-
 .../client/solrj/impl/SolrClientCloudManager.java  |   2 +-
 .../solrj/impl/ZkClientClusterStateProvider.java   |  47 ++++++-
 .../solr/client/solrj/io/sql/ConnectionImpl.java   |   2 +-
 .../client/solrj/io/sql/DatabaseMetaDataImpl.java  |   5 +-
 .../solr/client/solrj/io/sql/StatementImpl.java    |   2 +-
 .../solrj/io/stream/FeaturesSelectionStream.java   |   2 +-
 .../client/solrj/io/stream/TextLogitStream.java    |   2 +-
 .../solr/client/solrj/io/stream/TopicStream.java   |   6 +-
 .../solr/client/solrj/io/stream/TupleStream.java   |   2 +-
 .../apache/solr/common/cloud/ZkStateReader.java    |  20 ++-
 .../impl/CloudHttp2SolrClientBuilderTest.java      |   6 +-
 .../CloudHttp2SolrClientMultiConstructorTest.java  |   2 +-
 .../solrj/impl/CloudHttp2SolrClientTest.java       |  21 ++--
 .../solrj/impl/CloudSolrClientBuilderTest.java     |   9 +-
 .../impl/CloudSolrClientMultiConstructorTest.java  |   4 +-
 .../client/solrj/impl/CloudSolrClientTest.java     |  39 +++---
 .../client/solrj/io/graph/GraphExpressionTest.java |  11 +-
 .../apache/solr/client/solrj/io/sql/JdbcTest.java  |   4 +-
 .../solrj/io/stream/CloudAuthStreamTest.java       |  18 +--
 .../client/solrj/io/stream/JDBCStreamTest.java     |   5 +-
 .../client/solrj/io/stream/MathExpressionTest.java |  11 +-
 .../solrj/io/stream/SelectWithEvaluatorsTest.java  |   5 +-
 .../solrj/io/stream/StreamDecoratorTest.java       |  15 ++-
 .../solr/client/solrj/io/stream/StreamingTest.java |  11 +-
 .../solr/client/solrj/request/TestV2Request.java   |   5 +-
 .../cloud/PerReplicaStatesIntegrationTest.java     |  12 +-
 .../apache/solr/common/cloud/SolrZkClientTest.java |  12 +-
 .../cloud/TestCloudCollectionsListeners.java       |  27 ++--
 .../common/cloud/TestCollectionStateWatchers.java  | 140 ++++++++++++---------
 .../common/cloud/TestDocCollectionWatcher.java     | 113 +++++++++--------
 .../solr/common/cloud/TestPerReplicaStates.java    |   2 +-
 .../cloud/AbstractBasicDistributedZk2TestBase.java |  13 +-
 .../cloud/AbstractBasicDistributedZkTestBase.java  |  27 ++--
 .../AbstractChaosMonkeyNothingIsSafeTestBase.java  |   2 +-
 .../solr/cloud/AbstractDistribZkTestBase.java      |   2 +-
 .../solr/cloud/AbstractFullDistribZkTestBase.java  |  81 ++++++------
 .../solr/cloud/AbstractMoveReplicaTestBase.java    |  18 ++-
 .../solr/cloud/AbstractRecoveryZkTestBase.java     |   2 +-
 .../AbstractRestartWhileUpdatingTestBase.java      |   3 +-
 .../solr/cloud/AbstractSyncSliceTestBase.java      |   2 +-
 ...actTlogReplayBufferedWhileIndexingTestBase.java |   3 +-
 .../cloud/AbstractUnloadDistributedZkTestBase.java |  12 +-
 .../apache/solr/cloud/MiniSolrCloudCluster.java    |  67 ++++++----
 .../apache/solr/cloud/MultiSolrCloudTestCase.java  |   6 +-
 .../org/apache/solr/cloud/SolrCloudTestCase.java   |  12 +-
 .../AbstractCloudBackupRestoreTestCase.java        |  18 +--
 ...bstractCollectionsAPIDistributedZkTestBase.java |  14 +--
 .../collections/AbstractIncrementalBackupTest.java |  13 +-
 196 files changed, 1126 insertions(+), 1430 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index cf7bc78..17221f7 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -639,6 +639,8 @@ and each individual module's jar will be included in its directory's lib/ folder
 
 * SOLR-15223: Deprecate HttpSolrClient and friends, please use the Http2SolrClient equivalents (janhoy, David Smiley)
 
+* SOLR-16061: CloudSolrClient refactoring: Removed ZK specific methods (Haythem Khiri, David Smiley, janhoy)
+
 Bug Fixes
 ---------------------
 * SOLR-15849: Fix the connection reset problem caused by the incorrect use of 4LW with \n when monitoring zooKeeper status (Fa Ming).
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
index 8594213..7461d6e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
@@ -226,7 +226,7 @@ public class CloudUtil {
     ClusterState state = null;
     DocCollection coll = null;
     while (!timeout.hasTimedOut()) {
-      state = cloudManager.getClusterStateProvider().getClusterState();
+      state = cloudManager.getClusterState();
       coll = state.getCollectionOrNull(collection);
       // due to the way we manage collections in SimClusterStateProvider a null here
       // can mean that a collection is still being created but has no replicas
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 23c3bd1..fb0f1c6 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -207,11 +207,7 @@ public class CreateCollectionCmd implements CollApiCmds.CollectionApiCommand {
         boolean created = false;
         while (!waitUntil.hasTimedOut()) {
           waitUntil.sleep(100);
-          created =
-              ccc.getSolrCloudManager()
-                  .getClusterStateProvider()
-                  .getClusterState()
-                  .hasCollection(collectionName);
+          created = ccc.getSolrCloudManager().getClusterState().hasCollection(collectionName);
           if (created) break;
         }
         if (!created) {
@@ -222,7 +218,7 @@ public class CreateCollectionCmd implements CollApiCmds.CollectionApiCommand {
 
         // refresh cluster state (value read below comes from Zookeeper watch firing following the
         // update done previously, be it by Overseer or by this thread when updates are distributed)
-        clusterState = ccc.getSolrCloudManager().getClusterStateProvider().getClusterState();
+        clusterState = ccc.getSolrCloudManager().getClusterState();
         newColl = clusterState.getCollection(collectionName);
       }
 
@@ -280,10 +276,7 @@ public class CreateCollectionCmd implements CollApiCmds.CollectionApiCommand {
             Assign.buildSolrCoreName(
                 ccc.getSolrCloudManager().getDistribStateManager(),
                 collectionName,
-                ccc.getSolrCloudManager()
-                    .getClusterStateProvider()
-                    .getClusterState()
-                    .getCollectionOrNull(collectionName),
+                ccc.getSolrCloudManager().getClusterState().getCollectionOrNull(collectionName),
                 replicaPosition.shard,
                 replicaPosition.type,
                 true);
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
index 1cae0948e..32a0faa 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
@@ -431,10 +431,7 @@ public class DistributedCollectionConfigSetCommandRunner {
 
           CollApiCmds.CollectionApiCommand command = commandMapper.getActionCommand(action);
           if (command != null) {
-            command.call(
-                ccc.getSolrCloudManager().getClusterStateProvider().getClusterState(),
-                message,
-                results);
+            command.call(ccc.getSolrCloudManager().getClusterState(), message, results);
           } else {
             asyncTaskTracker.cancelAsyncId(asyncId);
             // Seeing this is a bug, not bad user data
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
index f5b3b8f..de1f96e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -121,7 +121,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
       CollectionAction action = getCollectionAction(operation);
       CollApiCmds.CollectionApiCommand command = commandMapper.getActionCommand(action);
       if (command != null) {
-        command.call(cloudManager.getClusterStateProvider().getClusterState(), message, results);
+        command.call(cloudManager.getClusterState(), message, results);
       } else {
         throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:" + operation);
       }
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReindexCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReindexCollectionCmd.java
index 7b314f5..868e239 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReindexCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReindexCollectionCmd.java
@@ -376,7 +376,7 @@ public class ReindexCollectionCmd implements CollApiCmds.CollectionApiCommand {
       while (!waitUntil.hasTimedOut()) {
         waitUntil.sleep(100);
         // this also refreshes our local var clusterState
-        clusterState = ccc.getSolrCloudManager().getClusterStateProvider().getClusterState();
+        clusterState = ccc.getSolrCloudManager().getClusterState();
         created =
             clusterState.hasCollection(targetCollection)
                 && clusterState.hasCollection(chkCollection);
@@ -661,10 +661,7 @@ public class ReindexCollectionCmd implements CollApiCmds.CollectionApiCommand {
 
   private boolean maybeAbort(String collection) throws Exception {
     DocCollection coll =
-        ccc.getSolrCloudManager()
-            .getClusterStateProvider()
-            .getClusterState()
-            .getCollectionOrNull(collection);
+        ccc.getSolrCloudManager().getClusterState().getCollectionOrNull(collection);
     if (coll == null) {
       // collection no longer present - abort
       log.info("## Aborting - collection {} no longer present.", collection);
@@ -915,8 +912,7 @@ public class ReindexCollectionCmd implements CollApiCmds.CollectionApiCommand {
     if (daemonUrl != null) {
       killDaemon(daemonName, daemonUrl);
     }
-    ClusterState clusterState =
-        ccc.getSolrCloudManager().getClusterStateProvider().getClusterState();
+    ClusterState clusterState = ccc.getSolrCloudManager().getClusterState();
     NamedList<Object> cmdResults = new NamedList<>();
     if (createdTarget
         && !collection.equals(targetCollection)
diff --git a/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java b/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
index c32637e..2c621f0 100644
--- a/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
+++ b/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
@@ -168,7 +168,7 @@ public class CollectionsRepairEventListener
     // collection / positions
     Map<String, List<ReplicaPosition>> newPositions = new HashMap<>();
     try {
-      ClusterState clusterState = solrCloudManager.getClusterStateProvider().getClusterState();
+      ClusterState clusterState = solrCloudManager.getClusterState();
       clusterState.forEachCollection(
           coll -> {
             // shard / type / count
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsImpl.java
index 1ce53bb..9f85922 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsImpl.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsImpl.java
@@ -65,7 +65,7 @@ class SimpleClusterAbstractionsImpl {
           liveNodesWithData.size() == liveNodes.size()
               ? this.liveNodes
               : NodeImpl.getNodes(liveNodesWithData);
-      clusterState = solrCloudManager.getClusterStateProvider().getClusterState();
+      clusterState = solrCloudManager.getClusterState();
     }
 
     @Override
diff --git a/solr/core/src/java/org/apache/solr/util/SolrCLI.java b/solr/core/src/java/org/apache/solr/util/SolrCLI.java
index 9448d66..766dac7 100755
--- a/solr/core/src/java/org/apache/solr/util/SolrCLI.java
+++ b/solr/core/src/java/org/apache/solr/util/SolrCLI.java
@@ -1216,7 +1216,7 @@ public class SolrCLI implements CLIO {
 
       log.debug("Running healthcheck for {}", collection);
 
-      ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
 
       ClusterState clusterState = zkStateReader.getClusterState();
       Set<String> liveNodes = clusterState.getLiveNodes();
@@ -1414,13 +1414,13 @@ public class SolrCLI implements CLIO {
           new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
               .build()) {
         cloudSolrClient.connect();
-        Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
+        Set<String> liveNodes = cloudSolrClient.getClusterState().getLiveNodes();
         if (liveNodes.isEmpty())
           throw new IllegalStateException(
               "No live nodes found! Cannot determine 'solrUrl' from ZooKeeper: " + zkHost);
 
         String firstLiveNode = liveNodes.iterator().next();
-        solrUrl = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(firstLiveNode);
+        solrUrl = ZkStateReader.from(cloudSolrClient).getBaseUrlForNodeName(firstLiveNode);
       }
     }
     return solrUrl;
@@ -1550,7 +1550,7 @@ public class SolrCLI implements CLIO {
 
     protected void runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli) throws Exception {
 
-      Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
+      Set<String> liveNodes = cloudSolrClient.getClusterState().getLiveNodes();
       if (liveNodes.isEmpty())
         throw new IllegalStateException(
             "No live nodes found! Cannot create a collection until "
@@ -1559,7 +1559,7 @@ public class SolrCLI implements CLIO {
       String baseUrl = cli.getOptionValue("solrUrl");
       if (baseUrl == null) {
         String firstLiveNode = liveNodes.iterator().next();
-        baseUrl = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(firstLiveNode);
+        baseUrl = ZkStateReader.from(cloudSolrClient).getBaseUrlForNodeName(firstLiveNode);
       }
 
       String collectionName = cli.getOptionValue(NAME);
@@ -1575,8 +1575,7 @@ public class SolrCLI implements CLIO {
       boolean configExistsInZk =
           confname != null
               && !"".equals(confname.trim())
-              && cloudSolrClient
-                  .getZkStateReader()
+              && ZkStateReader.from(cloudSolrClient)
                   .getZkClient()
                   .exists("/configs/" + confname, true);
 
@@ -1592,14 +1591,14 @@ public class SolrCLI implements CLIO {
 
         echoIfVerbose(
             "Uploading "
-                + confPath.toAbsolutePath().toString()
+                + confPath.toAbsolutePath()
                 + " for config "
                 + confname
                 + " to ZooKeeper at "
-                + cloudSolrClient.getZkHost(),
+                + cloudSolrClient.getClusterStateProvider().getQuorumHosts(),
             cli);
         ZkMaintenanceUtils.uploadToZK(
-            cloudSolrClient.getZkStateReader().getZkClient(),
+            ZkStateReader.from(cloudSolrClient).getZkClient(),
             confPath,
             ZkMaintenanceUtils.CONFIGS_ZKNODE + "/" + confname,
             ZkMaintenanceUtils.UPLOAD_FILENAME_EXCLUDE_PATTERN);
@@ -2511,14 +2510,14 @@ public class SolrCLI implements CLIO {
 
     protected void deleteCollection(CloudSolrClient cloudSolrClient, CommandLine cli)
         throws Exception {
-      Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
+      Set<String> liveNodes = cloudSolrClient.getClusterState().getLiveNodes();
       if (liveNodes.isEmpty())
         throw new IllegalStateException(
             "No live nodes found! Cannot delete a collection until "
                 + "there is at least 1 live node in the cluster.");
 
       String firstLiveNode = liveNodes.iterator().next();
-      ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
       String baseUrl = zkStateReader.getBaseUrlForNodeName(firstLiveNode);
       String collectionName = cli.getOptionValue(NAME);
       if (!zkStateReader.getClusterState().hasCollection(collectionName)) {
@@ -3254,7 +3253,7 @@ public class SolrCLI implements CLIO {
             new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
                 .build();
         cloudClient.connect();
-        Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+        Set<String> liveNodes = cloudClient.getClusterState().getLiveNodes();
         int numLiveNodes = (liveNodes != null) ? liveNodes.size() : 0;
         long timeout =
             System.nanoTime() + TimeUnit.NANOSECONDS.convert(maxWaitSecs, TimeUnit.SECONDS);
@@ -3270,7 +3269,7 @@ public class SolrCLI implements CLIO {
           } catch (InterruptedException ie) {
             Thread.interrupted();
           }
-          liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+          liveNodes = cloudClient.getClusterState().getLiveNodes();
           numLiveNodes = (liveNodes != null) ? liveNodes.size() : 0;
         }
         if (numLiveNodes < numNodes) {
diff --git a/solr/core/src/test/org/apache/solr/client/solrj/impl/ConnectionReuseTest.java b/solr/core/src/test/org/apache/solr/client/solrj/impl/ConnectionReuseTest.java
index a4050e7..8431686 100644
--- a/solr/core/src/test/org/apache/solr/client/solrj/impl/ConnectionReuseTest.java
+++ b/solr/core/src/test/org/apache/solr/client/solrj/impl/ConnectionReuseTest.java
@@ -65,7 +65,7 @@ public class ConnectionReuseTest extends SolrCloudTestCase {
         .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
 
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             COLLECTION,
             DEFAULT_TIMEOUT,
diff --git a/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
index 9f45d94..b628091 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AddReplicaTest.java
@@ -73,16 +73,14 @@ public class AddReplicaTest extends SolrCloudTestCase {
 
     cluster.waitForActiveCollection(collection, 1, 4);
 
-    DocCollection docCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
+    DocCollection docCollection = cloudClient.getClusterState().getCollectionOrNull(collection);
     assertNotNull(docCollection);
     assertEquals(4, docCollection.getReplicas().size());
     assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
     assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
     assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
 
-    docCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
+    docCollection = cloudClient.getClusterState().getCollectionOrNull(collection);
     assertNotNull(docCollection);
     // sanity check that everything is as before
     assertEquals(4, docCollection.getReplicas().size());
@@ -107,8 +105,7 @@ public class AddReplicaTest extends SolrCloudTestCase {
     status = addReplica.processAndWait(collection + "_xyz1", cloudClient, 120);
     assertEquals(COMPLETED, status);
     waitForState("Timedout wait for collection to be created", collection, clusterShape(1, 9));
-    docCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
+    docCollection = cloudClient.getClusterState().getCollectionOrNull(collection);
     assertNotNull(docCollection);
     // sanity check that everything is as before
     assertEquals(9, docCollection.getReplicas().size());
@@ -130,7 +127,7 @@ public class AddReplicaTest extends SolrCloudTestCase {
 
     cluster.waitForActiveCollection(collection, 2, 2);
 
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ClusterState clusterState = cloudClient.getClusterState();
     DocCollection coll = clusterState.getCollection(collection);
     String sliceName = coll.getSlices().iterator().next().getName();
     Collection<Replica> replicas = coll.getSlice(sliceName).getReplicas();
@@ -156,12 +153,7 @@ public class AddReplicaTest extends SolrCloudTestCase {
     assertTrue(success);
 
     Collection<Replica> replicas2 =
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(collection)
-            .getSlice(sliceName)
-            .getReplicas();
+        cloudClient.getClusterState().getCollection(collection).getSlice(sliceName).getReplicas();
     replicas2.removeAll(replicas);
     assertEquals(1, replicas2.size());
 
@@ -185,7 +177,7 @@ public class AddReplicaTest extends SolrCloudTestCase {
     assertTrue(success);
     // let the client watch fire
     Thread.sleep(1000);
-    clusterState = cloudClient.getZkStateReader().getClusterState();
+    clusterState = cloudClient.getClusterState();
     coll = clusterState.getCollection(collection);
     Collection<Replica> replicas3 = coll.getSlice(sliceName).getReplicas();
     replicas3.removeAll(replicas);
@@ -195,8 +187,7 @@ public class AddReplicaTest extends SolrCloudTestCase {
       if (replica.getName().equals(replica2)) {
         continue; // may be still recovering
       }
-      assertSame(
-          coll.toString() + "\n" + replica.toString(), replica.getState(), Replica.State.ACTIVE);
+      assertSame(coll + "\n" + replica, replica.getState(), Replica.State.ACTIVE);
     }
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
index 8259fb9..591cab3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
@@ -109,7 +109,7 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
         "Expected collection2 to be created with 1 shard and 1 replica",
         "collection2meta",
         clusterShape(1, 1));
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     zkStateReader.createClusterStateWatchersAndUpdate();
     List<String> aliases = zkStateReader.getAliases().resolveAliases("meta1");
     assertEquals(1, aliases.size());
@@ -436,7 +436,7 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
         "Expected collection2 to be created with 1 shard and 1 replica",
         "collection2meta",
         clusterShape(1, 1));
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     zkStateReader.createClusterStateWatchersAndUpdate();
     List<String> aliases = zkStateReader.getAliases().resolveAliases(aliasName);
     assertEquals(1, aliases.size());
@@ -514,7 +514,7 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
     QueryResponse res = cluster.getSolrClient().query("collection_old", new SolrQuery("*:*"));
     assertEquals(3, res.getResults().getNumFound());
 
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     int lastVersion = zkStateReader.aliasesManager.getAliases().getZNodeVersion();
     // Let's insure we have a "handle" to the old collection
     CollectionAdminRequest.createAlias("collection_old_reserve", "collection_old")
@@ -548,11 +548,7 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
     // assure ourselves that the old colletion is, indeed, still there.
     assertNotNull(
         "collection_old should exist!",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .getCollectionOrNull("collection_old"));
+        cluster.getSolrClient().getClusterState().getCollectionOrNull("collection_old"));
 
     // Now we should still succeed using the alias collection_old which points to collection_new
     // aliase: collection_old -> collection_new, collection_old_reserve -> collection_old ->
@@ -561,7 +557,7 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
     res = cluster.getSolrClient().query("collection_old", new SolrQuery("*:*"));
     assertEquals(1, res.getResults().getNumFound());
 
-    Aliases aliases = cluster.getSolrClient().getZkStateReader().getAliases();
+    Aliases aliases = cluster.getZkStateReader().getAliases();
     assertTrue(
         "collection_old should point to collection_new",
         aliases.resolveAliases("collection_old").contains("collection_new"));
@@ -585,35 +581,21 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
     assertNull(
         "collection_old_reserve should be gone",
         cluster
-            .getSolrClient()
             .getZkStateReader()
             .getAliases()
             .getCollectionAliasMap()
             .get("collection_old_reserve"));
     assertNull(
         "collection_old should be gone",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getAliases()
-            .getCollectionAliasMap()
-            .get("collection_old"));
+        cluster.getZkStateReader().getAliases().getCollectionAliasMap().get("collection_old"));
 
     assertFalse(
         "collection_new should be gone",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .hasCollection("collection_new"));
+        cluster.getSolrClient().getClusterState().hasCollection("collection_new"));
 
     assertFalse(
         "collection_old should be gone",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .hasCollection("collection_old"));
+        cluster.getSolrClient().getClusterState().hasCollection("collection_old"));
   }
 
   // While writing the above test I wondered what happens when an alias points to two collections
@@ -646,7 +628,7 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
         .add("id", "11", "a_t", "humpty dumpy sat on a low wall")
         .commit(cluster.getSolrClient(), "collection_two");
 
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     int lastVersion = zkStateReader.aliasesManager.getAliases().getZNodeVersion();
 
     // Create an alias pointing to both
@@ -732,28 +714,19 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
 
     assertNull(
         "collection_alias_pair should be gone",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getAliases()
-            .getCollectionAliasMap()
-            .get("collection_alias_pair"));
+        zkStateReader.getAliases().getCollectionAliasMap().get("collection_alias_pair"));
 
     assertFalse(
         "collection_one should be gone",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .hasCollection("collection_one"));
+        zkStateReader.getClusterState().hasCollection("collection_one"));
+
+    assertFalse(
+        "collection_one should be gone",
+        zkStateReader.getClusterState().hasCollection("collection_one"));
 
     assertFalse(
         "collection_two should be gone",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .hasCollection("collection_two"));
+        zkStateReader.getClusterState().hasCollection("collection_two"));
   }
 
   @Test
@@ -788,7 +761,7 @@ public class AliasIntegrationTest extends SolrCloudTestCase {
 
     ///////////////
     // make sure there's only one level of alias
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     int lastVersion = zkStateReader.aliasesManager.getAliases().getZNodeVersion();
 
     CollectionAdminRequest.deleteAlias("collection1").process(cluster.getSolrClient());
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java
index cadf6e2..26a21b2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java
@@ -155,8 +155,7 @@ public class ChaosMonkeyNothingIsSafeWithPullReplicasTest extends AbstractFullDi
     // Using this low timeout will also help us catch index stalling.
     clientSoTimeout = 8000;
 
-    DocCollection docCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
+    DocCollection docCollection = cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION);
     assertEquals(this.sliceCount, docCollection.getSlices().size());
     Slice s = docCollection.getSlice("shard1");
     assertNotNull(s);
@@ -179,7 +178,7 @@ public class ChaosMonkeyNothingIsSafeWithPullReplicasTest extends AbstractFullDi
     try {
       handle.clear();
       handle.put("timestamp", SKIPVAL);
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       // make sure we have leaders for each shard
       for (int j = 1; j < sliceCount; j++) {
         zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 10000);
@@ -349,7 +348,7 @@ public class ChaosMonkeyNothingIsSafeWithPullReplicasTest extends AbstractFullDi
       testSuccessful = true;
     } finally {
       if (!testSuccessful) {
-        logReplicaTypesReplicationInfo(DEFAULT_COLLECTION, cloudClient.getZkStateReader());
+        logReplicaTypesReplicationInfo(DEFAULT_COLLECTION, ZkStateReader.from(cloudClient));
         printLayout();
       }
     }
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPullReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPullReplicasTest.java
index e09db9c..cac8ae9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPullReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderWithPullReplicasTest.java
@@ -30,6 +30,7 @@ import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.util.TestInjection;
 import org.apache.solr.util.TimeOut;
@@ -115,8 +116,7 @@ public class ChaosMonkeySafeLeaderWithPullReplicasTest extends AbstractFullDistr
 
   @Test
   public void test() throws Exception {
-    DocCollection docCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
+    DocCollection docCollection = cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION);
     assertEquals(this.sliceCount, docCollection.getSlices().size());
     Slice s = docCollection.getSlice("shard1");
     assertNotNull(s);
@@ -191,7 +191,7 @@ public class ChaosMonkeySafeLeaderWithPullReplicasTest extends AbstractFullDistr
         runLength = runTimes[random().nextInt(runTimes.length - 1)];
       }
 
-      ChaosMonkey.wait(runLength, DEFAULT_COLLECTION, cloudClient.getZkStateReader());
+      ChaosMonkey.wait(runLength, DEFAULT_COLLECTION, ZkStateReader.from(cloudClient));
     } finally {
       chaosMonkey.stopTheMonkey();
     }
@@ -233,7 +233,7 @@ public class ChaosMonkeySafeLeaderWithPullReplicasTest extends AbstractFullDistr
 
     waitForReplicationFromReplicas(
         DEFAULT_COLLECTION,
-        cloudClient.getZkStateReader(),
+        ZkStateReader.from(cloudClient),
         new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME));
     //    waitForAllWarmingSearchers();
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
index 1dba0e1..8c38e40 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
@@ -68,7 +68,7 @@ public class ChaosMonkeyShardSplitTest extends ShardSplitTest {
   public void test() throws Exception {
     waitForThingsToLevelOut(15, TimeUnit.SECONDS);
 
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ClusterState clusterState = cloudClient.getClusterState();
     final DocRouter router =
         clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
     Slice shard1 =
@@ -219,7 +219,7 @@ public class ChaosMonkeyShardSplitTest extends ShardSplitTest {
   private void waitTillRecovered() throws Exception {
     for (int i = 0; i < 30; i++) {
       Thread.sleep(3000);
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       zkStateReader.forceUpdateCollection("collection1");
       ClusterState clusterState = zkStateReader.getClusterState();
       DocCollection collection1 = clusterState.getCollection("collection1");
diff --git a/solr/core/src/test/org/apache/solr/cloud/CleanupOldIndexTest.java b/solr/core/src/test/org/apache/solr/cloud/CleanupOldIndexTest.java
index 52cf837..6a6bb67 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CleanupOldIndexTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CleanupOldIndexTest.java
@@ -116,7 +116,7 @@ public class CleanupOldIndexTest extends SolrCloudTestCase {
     indexThread.join();
 
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             COLLECTION,
             DEFAULT_TIMEOUT,
diff --git a/solr/core/src/test/org/apache/solr/cloud/CloudExitableDirectoryReaderTest.java b/solr/core/src/test/org/apache/solr/cloud/CloudExitableDirectoryReaderTest.java
index d48b6cb..c08b455 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CloudExitableDirectoryReaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CloudExitableDirectoryReaderTest.java
@@ -90,7 +90,7 @@ public class CloudExitableDirectoryReaderTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
         .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             COLLECTION,
             DEFAULT_TIMEOUT,
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionPropsTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionPropsTest.java
index 2dd7ef0..87db1a0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionPropsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionPropsTest.java
@@ -114,8 +114,7 @@ public class CollectionPropsTest extends SolrCloudTestCase {
           }
         };
 
-    cluster.getSolrClient().getZkStateReader().registerCollectionPropsWatcher(collectionName, w);
-
+    cluster.getZkStateReader().registerCollectionPropsWatcher(collectionName, w);
     collectionProps.setCollectionProperty(collectionName, "property1", "value1");
     collectionProps.setCollectionProperty(collectionName, "property2", "value2");
     waitForValue("property1", "value1", 5000);
@@ -142,8 +141,7 @@ public class CollectionPropsTest extends SolrCloudTestCase {
     assertTrue(
         "Gave up waitng an excessive amount of time for watcher to see final expected props",
         sawExpectedProps.tryAcquire(1, 120, TimeUnit.SECONDS));
-    cluster.getSolrClient().getZkStateReader().removeCollectionPropsWatcher(collectionName, w);
-
+    cluster.getZkStateReader().removeCollectionPropsWatcher(collectionName, w);
     collectionProps.setCollectionProperty(collectionName, "property1", "value1");
     // Should be no cache, so the change should take effect immediately
     checkValue("property1", "value1");
@@ -151,17 +149,13 @@ public class CollectionPropsTest extends SolrCloudTestCase {
 
   private void checkValue(String propertyName, String expectedValue) throws InterruptedException {
     final Object value =
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getCollectionProperties(collectionName)
-            .get(propertyName);
+        cluster.getZkStateReader().getCollectionProperties(collectionName).get(propertyName);
     assertEquals("Unexpected value for collection property: " + propertyName, expectedValue, value);
   }
 
   private void waitForValue(String propertyName, String expectedValue, int timeout)
       throws InterruptedException {
-    final ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    final ZkStateReader zkStateReader = cluster.getZkStateReader();
 
     Object lastValueSeen = null;
     for (int i = 0; i < timeout; i += 10) {
@@ -188,11 +182,7 @@ public class CollectionPropsTest extends SolrCloudTestCase {
     }
 
     String propertiesInZkReader =
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getCollectionProperties(collectionName)
-            .toString();
+        cluster.getZkStateReader().getCollectionProperties(collectionName).toString();
 
     fail(
         String.format(
@@ -209,7 +199,7 @@ public class CollectionPropsTest extends SolrCloudTestCase {
 
   @Test
   public void testWatcher() throws KeeperException, InterruptedException, IOException {
-    final ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    final ZkStateReader zkStateReader = cluster.getZkStateReader();
     CollectionProperties collectionProps = new CollectionProperties(zkClient());
 
     // Add a watcher to collection props
@@ -252,7 +242,7 @@ public class CollectionPropsTest extends SolrCloudTestCase {
 
   @Test
   public void testMultipleWatchers() throws InterruptedException, IOException {
-    final ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    final ZkStateReader zkStateReader = cluster.getZkStateReader();
     CollectionProperties collectionProps = new CollectionProperties(zkClient());
 
     // Register the core with ZkStateReader
@@ -316,7 +306,7 @@ public class CollectionPropsTest extends SolrCloudTestCase {
     public boolean onStateChanged(Map<String, String> collectionProperties) {
       log.info("{}: state changed...", name);
       if (forceReadPropsFromZk) {
-        final ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+        final ZkStateReader zkStateReader = cluster.getZkStateReader();
         props = Map.copyOf(zkStateReader.getCollectionProperties(collectionName));
         log.info("{}: Setting props from zk={}", name, props);
       } else {
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
index 810e6bc..cafba44 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
@@ -148,20 +148,17 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
               .process(cluster.getSolrClient());
 
       for (int i = 0; i < 300; i++) {
-        Map<?, ?> m =
-            cluster.getSolrClient().getZkStateReader().getClusterProperty(COLLECTION_DEF, null);
+        Map<?, ?> m = cluster.getZkStateReader().getClusterProperty(COLLECTION_DEF, null);
         if (m != null) break;
         Thread.sleep(10);
       }
       Object clusterProperty =
           cluster
-              .getSolrClient()
               .getZkStateReader()
               .getClusterProperty(ImmutableList.of(DEFAULTS, COLLECTION, NUM_SHARDS_PROP), null);
       assertEquals("2", String.valueOf(clusterProperty));
       clusterProperty =
           cluster
-              .getSolrClient()
               .getZkStateReader()
               .getClusterProperty(ImmutableList.of(DEFAULTS, COLLECTION, NRT_REPLICAS), null);
       assertEquals("2", String.valueOf(clusterProperty));
@@ -173,12 +170,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
 
       cluster.waitForActiveCollection(COLL_NAME, 2, 4);
 
-      DocCollection coll =
-          cluster
-              .getSolrClient()
-              .getClusterStateProvider()
-              .getClusterState()
-              .getCollection(COLL_NAME);
+      DocCollection coll = cluster.getSolrClient().getClusterState().getCollection(COLL_NAME);
       Map<String, Slice> slices = coll.getSlicesMap();
       assertEquals(2, slices.size());
       for (Slice slice : slices.values()) {
@@ -207,7 +199,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
       while (!timeOut.hasTimedOut()) {
         clusterProperty =
             cluster
-                .getSolrClient()
                 .getZkStateReader()
                 .getClusterProperty(ImmutableList.of(DEFAULTS, COLLECTION, NRT_REPLICAS), null);
         if (clusterProperty == null) break;
@@ -215,7 +206,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
       assertNull(clusterProperty);
       clusterProperty =
           cluster
-              .getSolrClient()
               .getZkStateReader()
               .getClusterProperty(ImmutableList.of(COLLECTION_DEF, NRT_REPLICAS), null);
       assertNull(clusterProperty);
@@ -232,7 +222,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
       while (!timeOut.hasTimedOut()) {
         clusterProperty =
             cluster
-                .getSolrClient()
                 .getZkStateReader()
                 .getClusterProperty(ImmutableList.of(DEFAULTS, COLLECTION, NUM_SHARDS_PROP), null);
         if (clusterProperty == null) break;
@@ -240,7 +229,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
       assertNull(clusterProperty);
       clusterProperty =
           cluster
-              .getSolrClient()
               .getZkStateReader()
               .getClusterProperty(ImmutableList.of(COLLECTION_DEF, NUM_SHARDS_PROP), null);
       assertNull(clusterProperty);
@@ -268,20 +256,17 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
               .process(cluster.getSolrClient());
 
       for (int i = 0; i < 300; i++) {
-        Map<?, ?> m =
-            cluster.getSolrClient().getZkStateReader().getClusterProperty(COLLECTION_DEF, null);
+        Map<?, ?> m = cluster.getZkStateReader().getClusterProperty(COLLECTION_DEF, null);
         if (m != null) break;
         Thread.sleep(10);
       }
       Object clusterProperty =
           cluster
-              .getSolrClient()
               .getZkStateReader()
               .getClusterProperty(ImmutableList.of(DEFAULTS, COLLECTION, NUM_SHARDS_PROP), null);
       assertEquals("2", String.valueOf(clusterProperty));
       clusterProperty =
           cluster
-              .getSolrClient()
               .getZkStateReader()
               .getClusterProperty(ImmutableList.of(DEFAULTS, COLLECTION, NRT_REPLICAS), null);
       assertEquals("2", String.valueOf(clusterProperty));
@@ -293,12 +278,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
       assertTrue(response.isSuccess());
       cluster.waitForActiveCollection(COLL_NAME, 2, 4);
 
-      DocCollection coll =
-          cluster
-              .getSolrClient()
-              .getClusterStateProvider()
-              .getClusterState()
-              .getCollection(COLL_NAME);
+      DocCollection coll = cluster.getSolrClient().getClusterState().getCollection(COLL_NAME);
       Map<String, Slice> slices = coll.getSlicesMap();
       assertEquals(2, slices.size());
       for (Slice slice : slices.values()) {
@@ -328,7 +308,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
       while (!timeOut.hasTimedOut()) {
         clusterProperty =
             cluster
-                .getSolrClient()
                 .getZkStateReader()
                 .getClusterProperty(ImmutableList.of(DEFAULTS, COLLECTION, NRT_REPLICAS), null);
         if (clusterProperty == null) break;
@@ -346,7 +325,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
       while (!timeOut.hasTimedOut()) {
         clusterProperty =
             cluster
-                .getSolrClient()
                 .getZkStateReader()
                 .getClusterProperty(ImmutableList.of(DEFAULTS, COLLECTION, NUM_SHARDS_PROP), null);
         if (clusterProperty == null) break;
@@ -354,7 +332,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
       assertNull(clusterProperty);
       clusterProperty =
           cluster
-              .getSolrClient()
               .getZkStateReader()
               .getClusterProperty(ImmutableList.of(COLLECTION_DEF, NUM_SHARDS_PROP), null);
       assertNull(clusterProperty);
@@ -429,8 +406,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     String corename = (String) response._get(asList("success", nodeName, "core"), null);
 
     try (HttpSolrClient coreclient =
-        getHttpSolrClient(
-            cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeName))) {
+        getHttpSolrClient(cluster.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
       CoreAdminResponse status = CoreAdminRequest.getStatus(corename, coreclient);
       assertEquals(
           collectionName, status._get(asList("status", corename, "cloud", "collection"), null));
@@ -466,13 +442,12 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     assertTrue(response.isSuccess());
 
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             collectionName,
             30,
             TimeUnit.SECONDS,
             (l, c) -> c != null && c.getSlice("shardC") != null);
-
     coresStatus = response.getCollectionCoresStatus();
     assertEquals(3, coresStatus.size());
     int replicaTlog = 0;
@@ -613,8 +588,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     cluster.waitForActiveCollection(collectionName, 1, 2);
 
     ArrayList<String> nodeList =
-        new ArrayList<>(
-            cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+        new ArrayList<>(cluster.getSolrClient().getClusterState().getLiveNodes());
     Collections.shuffle(nodeList, random());
     final String node = nodeList.get(0);
 
@@ -716,11 +690,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     while (!timeout.hasTimedOut()) {
       Thread.sleep(10);
       if (Objects.equals(
-          cluster
-              .getSolrClient()
-              .getZkStateReader()
-              .getCollectionProperties(collection)
-              .get(propertyName),
+          cluster.getZkStateReader().getCollectionProperties(collection).get(propertyName),
           propertyValue)) {
         return;
       }
@@ -786,12 +756,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     assertNotNull(
         Utils.toJSONString(rsp), segInfos.findRecursive("segments", "_0", "fields", "id", "flags"));
     // test for replicas not active - SOLR-13882
-    DocCollection coll =
-        cluster
-            .getSolrClient()
-            .getClusterStateProvider()
-            .getClusterState()
-            .getCollection(collectionName);
+    DocCollection coll = cluster.getSolrClient().getClusterState().getCollection(collectionName);
     Replica firstReplica = coll.getSlice("shard1").getReplicas().iterator().next();
     String firstNode = firstReplica.getNodeName();
     for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
@@ -895,7 +860,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     solrClient.add(docs);
 
     Replica leader =
-        solrClient.getZkStateReader().getLeaderRetry(collectionName, "shard1", DEFAULT_TIMEOUT);
+        ZkStateReader.from(solrClient).getLeaderRetry(collectionName, "shard1", DEFAULT_TIMEOUT);
 
     final AtomicReference<Long> coreStartTime =
         new AtomicReference<>(getCoreStatus(leader).getCoreStartTime().getTime());
@@ -905,8 +870,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
             collectionName, Collections.singletonMap(ZkStateReader.READ_ONLY, "true"))
         .process(solrClient);
 
-    DocCollection coll =
-        solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection coll = solrClient.getClusterState().getCollection(collectionName);
     assertNotNull(coll.toString(), coll.getProperties().get(ZkStateReader.READ_ONLY));
     assertEquals(
         coll.toString(), coll.getProperties().get(ZkStateReader.READ_ONLY).toString(), "true");
@@ -979,8 +943,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     CollectionAdminRequest.modifyCollection(
             collectionName, Collections.singletonMap(ZkStateReader.READ_ONLY, ""))
         .process(cluster.getSolrClient());
-    coll =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName);
+    coll = solrClient.getClusterState().getCollection(collectionName);
     assertNull(coll.toString(), coll.getProperties().get(ZkStateReader.READ_ONLY));
 
     // wait for the expected collection reload
@@ -1021,7 +984,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     CollectionAdminRequest.deleteAlias("simpleAlias").process(cluster.getSolrClient());
     CollectionAdminRequest.deleteAlias("catAlias").process(cluster.getSolrClient());
     CollectionAdminRequest.deleteAlias("compoundAlias").process(cluster.getSolrClient());
-    cluster.getSolrClient().getZkStateReader().aliasesManager.update();
+    cluster.getZkStateReader().aliasesManager.update();
     doTestRenameCollection(false);
   }
 
@@ -1059,7 +1022,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
 
     CollectionAdminRequest.Rename rename = CollectionAdminRequest.renameCollection("col1", "foo");
     rename.setFollowAliases(followAliases);
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     Aliases aliases;
     if (!followAliases) {
       Exception e = assertThrows(Exception.class, () -> rename.process(cluster.getSolrClient()));
@@ -1175,9 +1138,8 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
         TimeUnit.MILLISECONDS,
         () -> {
           try {
-            solrClient.getZkStateReader().aliasesManager.update();
-            return solrClient
-                .getZkStateReader()
+            ZkStateReader.from(solrClient).aliasesManager.update();
+            return ZkStateReader.from(solrClient)
                 .getAliases()
                 .resolveSimpleAlias(collectionName1)
                 .equals(collectionName2);
@@ -1195,7 +1157,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     CollectionAdminRequest.Delete delete = CollectionAdminRequest.deleteCollection(collectionName1);
     delete.setFollowAliases(false);
     delete.process(solrClient);
-    ClusterState state = solrClient.getClusterStateProvider().getClusterState();
+    ClusterState state = solrClient.getClusterState();
     assertFalse(state.getCollectionsMap().toString(), state.hasCollection(collectionName1));
     // search should still work, returning results from collection 2
     assertDoc(solrClient, collectionName1, "2"); // aliased
@@ -1215,7 +1177,7 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
     delete.setFollowAliases(true);
     delete.process(solrClient);
 
-    state = solrClient.getClusterStateProvider().getClusterState();
+    state = solrClient.getClusterState();
     // the collection is gone
     assertFalse(state.getCollectionsMap().toString(), state.hasCollection(collectionName2));
 
@@ -1227,8 +1189,8 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
         TimeUnit.MILLISECONDS,
         () -> {
           try {
-            solrClient.getZkStateReader().aliasesManager.update();
-            return !solrClient.getZkStateReader().getAliases().hasAlias(collectionName1);
+            ZkStateReader.from(solrClient).aliasesManager.update();
+            return !ZkStateReader.from(solrClient).getAliases().hasAlias(collectionName1);
           } catch (Exception e) {
             fail("exception caught refreshing aliases: " + e);
             return false;
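
    For readers skimming the hunks above, a minimal sketch of the accessor style these tests now follow
    (a fragment assuming the usual SolrCloudTestCase `cluster` field; "myCollection" and "myAlias" are
    placeholders, not names from the patch):

        CloudSolrClient solrClient = cluster.getSolrClient();
        // cluster state is now one hop away from the client
        DocCollection coll = solrClient.getClusterState().getCollection("myCollection");
        // ZooKeeper-backed helpers go through the static factory rather than a client getter
        ZkStateReader zkStateReader = ZkStateReader.from(solrClient);
        zkStateReader.aliasesManager.update();
        String resolved = zkStateReader.getAliases().resolveSimpleAlias("myAlias");
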
diff --git a/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java b/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
index 7e41981..5671a8f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
@@ -96,11 +96,7 @@ public class CreateCollectionCleanupTest extends SolrCloudTestCase {
     // Confirm using LIST that the collection does not exist
     assertThat(
         "Failed collection is still in the clusterstate: "
-            + cluster
-                .getSolrClient()
-                .getClusterStateProvider()
-                .getClusterState()
-                .getCollectionOrNull(collectionName),
+            + cluster.getSolrClient().getClusterState().getCollectionOrNull(collectionName),
         CollectionAdminRequest.listCollections(cloudClient),
         not(hasItem(collectionName)));
   }
@@ -132,11 +128,7 @@ public class CreateCollectionCleanupTest extends SolrCloudTestCase {
     // Confirm using LIST that the collection does not exist
     assertThat(
         "Failed collection is still in the clusterstate: "
-            + cluster
-                .getSolrClient()
-                .getClusterStateProvider()
-                .getClusterState()
-                .getCollectionOrNull(collectionName),
+            + cluster.getSolrClient().getClusterState().getCollectionOrNull(collectionName),
         CollectionAdminRequest.listCollections(cloudClient),
         not(hasItem(collectionName)));
   }
diff --git a/solr/core/src/test/org/apache/solr/cloud/CreateRoutedAliasTest.java b/solr/core/src/test/org/apache/solr/cloud/CreateRoutedAliasTest.java
index 783c8e2..ad4efdb 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CreateRoutedAliasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CreateRoutedAliasTest.java
@@ -172,7 +172,7 @@ public class CreateRoutedAliasTest extends SolrCloudTestCase {
             .allMatch(createNode::equals));
 
     // Test Alias metadata:
-    Aliases aliases = cluster.getSolrClient().getZkStateReader().getAliases();
+    Aliases aliases = cluster.getZkStateReader().getAliases();
     Map<String, String> collectionAliasMap = aliases.getCollectionAliasMap();
     assertEquals(initialCollectionName, collectionAliasMap.get(aliasName));
     Map<String, String> meta = aliases.getCollectionAliasProperties(aliasName);
@@ -222,7 +222,7 @@ public class CreateRoutedAliasTest extends SolrCloudTestCase {
     assertEquals(2, coll.getReplicationFactor().intValue()); // num replicas
 
     // Test Alias metadata
-    Aliases aliases = cluster.getSolrClient().getZkStateReader().getAliases();
+    Aliases aliases = cluster.getZkStateReader().getAliases();
     Map<String, String> collectionAliasMap = aliases.getCollectionAliasMap();
     String alias = collectionAliasMap.get(aliasName);
     assertNotNull(alias);
@@ -269,7 +269,7 @@ public class CreateRoutedAliasTest extends SolrCloudTestCase {
         "Expected collection2 to be created with 1 shard and 1 replica",
         "collection2meta",
         clusterShape(1, 1));
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     zkStateReader.createClusterStateWatchersAndUpdate();
 
     final String baseUrl = cluster.getRandomJetty(random()).getBaseUrl().toString();
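
    A compact fragment of the alias-metadata lookups above, using a placeholder alias name and the
    cluster's own ZkStateReader:

        Aliases aliases = cluster.getZkStateReader().getAliases();
        String targetCollection = aliases.getCollectionAliasMap().get("myAlias");
        Map<String, String> aliasProps = aliases.getCollectionAliasProperties("myAlias");
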
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteNodeTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteNodeTest.java
index fd41bdf..7b21875 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteNodeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteNodeTest.java
@@ -55,7 +55,7 @@ public class DeleteNodeTest extends SolrCloudTestCase {
   public void test() throws Exception {
     CloudSolrClient cloudClient = cluster.getSolrClient();
     String coll = "deletenodetest_coll";
-    ClusterState state = cloudClient.getZkStateReader().getClusterState();
+    ClusterState state = cloudClient.getClusterState();
     Set<String> liveNodes = state.getLiveNodes();
     ArrayList<String> l = new ArrayList<>(liveNodes);
     Collections.shuffle(l, random());
@@ -69,7 +69,7 @@ public class DeleteNodeTest extends SolrCloudTestCase {
             CollectionAdminRequest.createCollection(coll, "conf1", 5, 0, 1, 0));
     create.setCreateNodeSet(StrUtils.join(l, ','));
     cloudClient.request(create);
-    state = cloudClient.getZkStateReader().getClusterState();
+    state = cloudClient.getClusterState();
     String node2bdecommissioned = l.get(0);
     // check what replicas are on the node, and whether the call should fail
     boolean shouldFail = false;
@@ -110,8 +110,7 @@ public class DeleteNodeTest extends SolrCloudTestCase {
     }
     if (log.isInfoEnabled()) {
       log.info(
-          "####### DocCollection after: {}",
-          cloudClient.getZkStateReader().getClusterState().getCollection(coll));
+          "####### DocCollection after: {}", cloudClient.getClusterState().getCollection(coll));
     }
     if (shouldFail) {
       assertTrue(String.valueOf(rsp), rsp.getRequestStatus() == RequestStatusState.FAILED);
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
index 5c533c6..44d4267 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
@@ -169,8 +169,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(collectionName, "conf", 1, 2)
         .process(cluster.getSolrClient());
 
-    Replica leader =
-        cluster.getSolrClient().getZkStateReader().getLeaderRetry(collectionName, "shard1");
+    Replica leader = cluster.getZkStateReader().getLeaderRetry(collectionName, "shard1");
 
     // Confirm that the instance and data directory exist
     CoreStatus coreStatus = getCoreStatus(leader);
@@ -183,8 +182,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
     CollectionAdminRequest.deleteReplica(collectionName, "shard1", leader.getName())
         .process(cluster.getSolrClient());
 
-    Replica newLeader =
-        cluster.getSolrClient().getZkStateReader().getLeaderRetry(collectionName, "shard1");
+    Replica newLeader = cluster.getZkStateReader().getLeaderRetry(collectionName, "shard1");
 
     assertFalse(leader.equals(newLeader));
 
@@ -498,7 +496,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
   }
 
   private void waitForNodeLeave(String lostNodeName) throws InterruptedException {
-    ZkStateReader reader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader reader = cluster.getZkStateReader();
     TimeOut timeOut = new TimeOut(20, TimeUnit.SECONDS, TimeSource.NANO_TIME);
     while (reader.getClusterState().getLiveNodes().contains(lostNodeName)) {
       Thread.sleep(100);
@@ -545,7 +543,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
 
     try {
       cluster
-          .getSolrClient()
+          .getZkStateReader()
           .waitForState(
               collectionName,
               20,
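
    A fragment of the leader lookup and shard wait in the new style (collection and shard names are
    placeholders; `cluster` is the SolrCloudTestCase field):

        Replica leader = cluster.getZkStateReader().getLeaderRetry("myCollection", "shard1");
        cluster
            .getZkStateReader()
            .waitForState(
                "myCollection",
                20,
                TimeUnit.SECONDS,
                (liveNodes, coll) -> coll != null && coll.getSlice("shard1") != null);
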
diff --git a/solr/core/src/test/org/apache/solr/cloud/DistribDocExpirationUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/DistribDocExpirationUpdateProcessorTest.java
index 01242f0..64378b9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DistribDocExpirationUpdateProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DistribDocExpirationUpdateProcessorTest.java
@@ -121,7 +121,7 @@ public class DistribDocExpirationUpdateProcessorTest extends SolrCloudTestCase {
         .process(cluster.getSolrClient());
 
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             COLLECTION,
             DEFAULT_TIMEOUT,
@@ -251,7 +251,7 @@ public class DistribDocExpirationUpdateProcessorTest extends SolrCloudTestCase {
     int coresCompared = 0;
     int totalDocsOnAllShards = 0;
     final DocCollection collectionState =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION);
     for (Slice shard : collectionState) {
       boolean firstReplica = true;
       for (Replica replica : shard) {
@@ -307,7 +307,7 @@ public class DistribDocExpirationUpdateProcessorTest extends SolrCloudTestCase {
     Map<String, ReplicaData> results = new HashMap<>();
 
     DocCollection collectionState =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION);
 
     for (Replica replica : collectionState.getReplicas()) {
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/DistribJoinFromCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/DistribJoinFromCollectionTest.java
index 2fdb079..eed3acc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DistribJoinFromCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DistribJoinFromCollectionTest.java
@@ -81,7 +81,7 @@ public class DistribJoinFromCollectionTest extends SolrCloudTestCase {
 
     // get the set of nodes where replicas for the "to" collection exist
     Set<String> nodeSet = new HashSet<>();
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     ClusterState cs = zkStateReader.getClusterState();
     for (Slice slice : cs.getCollection(toColl).getActiveSlices())
       for (Replica replica : slice.getReplicas()) nodeSet.add(replica.getNodeName());
diff --git a/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java b/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
index e0b9d4b..f4a8447 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DistributedVersionInfoTest.java
@@ -79,7 +79,7 @@ public class DistributedVersionInfoTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, "conf", 1, 3)
         .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
 
-    final ZkStateReader stateReader = cluster.getSolrClient().getZkStateReader();
+    final ZkStateReader stateReader = cluster.getZkStateReader();
     stateReader.waitForState(
         COLLECTION,
         DEFAULT_TIMEOUT,
diff --git a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
index d3933b5..6184182 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
@@ -33,6 +33,7 @@ import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Replica.State;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.util.TimeOut;
@@ -87,7 +88,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
           2,
           notLeaders.size());
 
-      Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, SHARD1);
+      Replica leader = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, SHARD1);
       JettySolrRunner notLeader0 = getJettyOnPort(getReplicaPort(notLeaders.get(0)));
       ZkController zkController = notLeader0.getCoreContainer().getZkController();
 
@@ -101,8 +102,8 @@ public class ForceLeaderTest extends HttpPartitionTest {
         waitForState(testCollectionName, replica.getName(), State.DOWN, 60000);
       }
       waitForState(testCollectionName, leader.getName(), State.DOWN, 60000);
-      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+      ZkStateReader.from(cloudClient).forceUpdateCollection(testCollectionName);
+      ClusterState clusterState = cloudClient.getClusterState();
       int numActiveReplicas = getNumberOfActiveReplicas(clusterState, testCollectionName, SHARD1);
       assertEquals(
           "Expected only 0 active replica but found "
@@ -135,10 +136,10 @@ public class ForceLeaderTest extends HttpPartitionTest {
       doForceLeader(testCollectionName, SHARD1);
 
       // By now we have an active leader. Wait for recoveries to begin
-      waitForRecoveriesToFinish(testCollectionName, cloudClient.getZkStateReader(), true);
+      waitForRecoveriesToFinish(testCollectionName, ZkStateReader.from(cloudClient), true);
 
-      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
-      clusterState = cloudClient.getZkStateReader().getClusterState();
+      ZkStateReader.from(cloudClient).forceUpdateCollection(testCollectionName);
+      clusterState = cloudClient.getClusterState();
       if (log.isInfoEnabled()) {
         log.info(
             "After forcing leader: {}",
@@ -253,7 +254,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
     for (SocketProxy proxy : nonLeaderProxies) proxy.reopen();
 
     try (ZkShardTerms zkShardTerms =
-        new ZkShardTerms(collectionName, shard, cloudClient.getZkStateReader().getZkClient())) {
+        new ZkShardTerms(collectionName, shard, ZkStateReader.from(cloudClient).getZkClient())) {
       for (Replica notLeader : notLeaders) {
         assertTrue(
             zkShardTerms.getTerm(leader.getName()) > zkShardTerms.getTerm(notLeader.getName()));
@@ -276,9 +277,9 @@ public class ForceLeaderTest extends HttpPartitionTest {
     JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(leader));
     getProxyForReplica(leader).reopen();
     leaderJetty.start();
-    waitForRecoveriesToFinish(collection, cloudClient.getZkStateReader(), true);
-    cloudClient.getZkStateReader().forceUpdateCollection(collection);
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    waitForRecoveriesToFinish(collection, ZkStateReader.from(cloudClient), true);
+    ZkStateReader.from(cloudClient).forceUpdateCollection(collection);
+    ClusterState clusterState = cloudClient.getClusterState();
     if (log.isInfoEnabled()) {
       log.info(
           "After bringing back leader: {}",
diff --git a/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java b/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
index d057cd8..54b0d58 100644
--- a/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
@@ -43,9 +43,7 @@ import org.apache.solr.client.solrj.response.UpdateResponse;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.*;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.junit.After;
@@ -86,8 +84,12 @@ public class FullSolrCloudDistribCmdsTest extends SolrCloudTestCase {
         RequestStatusState.COMPLETED,
         CollectionAdminRequest.createCollection(name, "_default", 2, 2)
             .processAndWait(cloudClient, DEFAULT_TIMEOUT));
-    cloudClient.waitForState(
-        name, DEFAULT_TIMEOUT, TimeUnit.SECONDS, (n, c) -> DocCollection.isFullyActive(n, c, 2, 2));
+    ZkStateReader.from(cloudClient)
+        .waitForState(
+            name,
+            DEFAULT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n, c) -> DocCollection.isFullyActive(n, c, 2, 2));
     cloudClient.setDefaultCollection(name);
     return name;
   }
@@ -146,12 +148,15 @@ public class FullSolrCloudDistribCmdsTest extends SolrCloudTestCase {
         CollectionAdminRequest.createCollectionWithImplicitRouter(
                 name, "_default", "shard1,shard2", 2)
             .processAndWait(cloudClient, DEFAULT_TIMEOUT));
-    cloudClient.waitForState(
-        name, DEFAULT_TIMEOUT, TimeUnit.SECONDS, (n, c) -> DocCollection.isFullyActive(n, c, 2, 2));
+    ZkStateReader.from(cloudClient)
+        .waitForState(
+            name,
+            DEFAULT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n, c1) -> DocCollection.isFullyActive(n, c1, 2, 2));
     cloudClient.setDefaultCollection(name);
 
-    final DocCollection docCol =
-        cloudClient.getZkStateReader().getClusterState().getCollection(name);
+    final DocCollection docCol = cloudClient.getClusterState().getCollection(name);
     try (SolrClient shard1 = getHttpSolrClient(docCol.getSlice("shard1").getLeader().getCoreUrl());
         SolrClient shard2 = getHttpSolrClient(docCol.getSlice("shard2").getLeader().getCoreUrl())) {
 
@@ -261,12 +266,15 @@ public class FullSolrCloudDistribCmdsTest extends SolrCloudTestCase {
             .setRouterField("routefield_s")
             .setShards("shard1,shard2")
             .processAndWait(cloudClient, DEFAULT_TIMEOUT));
-    cloudClient.waitForState(
-        name, DEFAULT_TIMEOUT, TimeUnit.SECONDS, (n, c) -> DocCollection.isFullyActive(n, c, 2, 2));
+    ZkStateReader.from(cloudClient)
+        .waitForState(
+            name,
+            DEFAULT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n, c1) -> DocCollection.isFullyActive(n, c1, 2, 2));
     cloudClient.setDefaultCollection(name);
 
-    final DocCollection docCol =
-        cloudClient.getZkStateReader().getClusterState().getCollection(name);
+    final DocCollection docCol = cloudClient.getClusterState().getCollection(name);
     try (SolrClient shard1 = getHttpSolrClient(docCol.getSlice("shard1").getLeader().getCoreUrl());
         SolrClient shard2 = getHttpSolrClient(docCol.getSlice("shard2").getLeader().getCoreUrl())) {
 
@@ -398,21 +406,20 @@ public class FullSolrCloudDistribCmdsTest extends SolrCloudTestCase {
                 .setCreateNodeSet(leaderToPartition.getNodeName() + "," + otherLeader.getNodeName())
                 .processAndWait(cloudClient, DEFAULT_TIMEOUT));
 
-        cloudClient.waitForState(
-            collectionName,
-            DEFAULT_TIMEOUT,
-            TimeUnit.SECONDS,
-            (n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
+        ZkStateReader.from(cloudClient)
+            .waitForState(
+                collectionName,
+                DEFAULT_TIMEOUT,
+                TimeUnit.SECONDS,
+                (n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
 
         { // HACK: Check the leaderProps for the shard hosted on the node we're going to kill...
           final Replica leaderProps =
               cloudClient
-                  .getZkStateReader()
                   .getClusterState()
                   .getCollection(collectionName)
                   .getLeaderReplicas(leaderToPartition.getNodeName())
                   .get(0);
-
           // No point in this test if these aren't true...
           assertNotNull(
               "Sanity check: leaderProps isn't a leader?: " + leaderProps.toString(),
@@ -734,7 +741,6 @@ public class FullSolrCloudDistribCmdsTest extends SolrCloudTestCase {
     final DocCollection collection =
         cluster
             .getSolrClient()
-            .getZkStateReader()
             .getClusterState()
             .getCollection(cluster.getSolrClient().getDefaultCollection());
     log.info("Checking shard consistency via: {}", perReplicaParams);
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
index 2e7b753..f9da7c5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
@@ -25,6 +25,7 @@ import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.util.RTimer;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -92,7 +93,7 @@ public class HttpPartitionOnCommitTest extends BasicDistributedZkTest {
     }
 
     // let's put the leader in its own partition, no replicas can contact it now
-    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
+    Replica leader = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1");
     if (log.isInfoEnabled()) {
       log.info("Creating partition to leader at {}", leader.getCoreUrl());
     }
@@ -101,15 +102,14 @@ public class HttpPartitionOnCommitTest extends BasicDistributedZkTest {
 
     // let's find the leader of shard2 and ask him to commit
     Replica shard2Leader =
-        cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard2");
+        ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard2");
     sendCommitWithRetry(shard2Leader);
 
     Thread.sleep(sleepMsBeforeHealPartition);
 
-    cloudClient
-        .getZkStateReader()
+    ZkStateReader.from(cloudClient)
         .forceUpdateCollection(testCollectionName); // get the latest state
-    leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
+    leader = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1");
     assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState());
 
     if (log.isInfoEnabled()) {
@@ -145,7 +145,7 @@ public class HttpPartitionOnCommitTest extends BasicDistributedZkTest {
     log.info("All replicas active for {}", testCollectionName);
 
     // let's put the leader in its own partition, no replicas can contact it now
-    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
+    Replica leader = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1");
     if (log.isInfoEnabled()) {
       log.info("Creating partition to leader at {}", leader.getCoreUrl());
     }
@@ -157,9 +157,9 @@ public class HttpPartitionOnCommitTest extends BasicDistributedZkTest {
     sendCommitWithRetry(replica);
     Thread.sleep(sleepMsBeforeHealPartition);
 
-    // get the latest state
-    cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
-    leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
+    ZkStateReader.from(cloudClient)
+        .forceUpdateCollection(testCollectionName); // get the latest state
+    leader = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1");
     assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState());
 
     if (log.isInfoEnabled()) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index da0051f..725088a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -182,7 +182,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
       assertEquals("Unexpected achieved replication factor", 1, achievedRf);
       try (ZkShardTerms zkShardTerms =
           new ZkShardTerms(
-              testCollectionName, "shard1", cloudClient.getZkStateReader().getZkClient())) {
+              testCollectionName, "shard1", ZkStateReader.from(cloudClient).getZkClient())) {
         assertFalse(zkShardTerms.canBecomeLeader(notLeaders.get(0).getName()));
       }
       waitForState(testCollectionName, notLeaders.get(0).getName(), DOWN, 10000);
@@ -198,8 +198,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
       String notLeaderNodeName = notLeaderJetty.getNodeName();
       notLeaderJetty.stop();
 
-      cloudClient
-          .getZkStateReader()
+      ZkStateReader.from(cloudClient)
           .waitForLiveNodes(
               15, TimeUnit.SECONDS, SolrCloudTestCase.missingLiveNode(notLeaderNodeName));
 
@@ -333,7 +332,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
     TimeOut timeOut = new TimeOut(ms, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME);
     Replica.State replicaState = Replica.State.ACTIVE;
     while (!timeOut.hasTimedOut()) {
-      ZkStateReader zkr = cloudClient.getZkStateReader();
+      ZkStateReader zkr = ZkStateReader.from(cloudClient);
       zkr.forceUpdateCollection(collection); // force the state to be fresh
       ClusterState cs = zkr.getClusterState();
       Collection<Slice> slices = cs.getCollection(collection).getActiveSlices();
@@ -434,7 +433,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
             + printClusterStateInfo(testCollectionName),
         notLeaders.size() == 1);
 
-    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
+    Replica leader = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1");
     String leaderNode = leader.getNodeName();
     assertNotNull(
         "Could not find leader for shard1 of "
@@ -457,7 +456,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
       String currentLeaderName = null;
       try {
         Replica currentLeader =
-            cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
+            ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1");
         currentLeaderName = currentLeader.getName();
       } catch (Exception exc) {
       }
@@ -469,7 +468,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
     }
 
     Replica currentLeader =
-        cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
+        ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1");
     assertEquals(expectedNewLeaderCoreNodeName, currentLeader.getName());
 
     // TODO: This test logic seems to be timing dependent and fails on Jenkins
@@ -510,8 +509,8 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
 
   protected List<Replica> getActiveOrRecoveringReplicas(String testCollectionName, String shardId)
       throws Exception {
-    Map<String, Replica> activeReplicas = new HashMap<String, Replica>();
-    ZkStateReader zkr = cloudClient.getZkStateReader();
+    Map<String, Replica> activeReplicas = new HashMap<>();
+    ZkStateReader zkr = ZkStateReader.from(cloudClient);
     ClusterState cs = zkr.getClusterState();
     assertNotNull(cs);
     for (Slice shard : cs.getCollection(testCollectionName).getActiveSlices()) {
@@ -538,7 +537,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
       List<Replica> notLeaders, String testCollectionName, int firstDocId, int lastDocId)
       throws Exception {
     Replica leader =
-        cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 10000);
+        ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1", 10000);
     HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName);
     List<HttpSolrClient> replicas = new ArrayList<HttpSolrClient>(notLeaders.size());
 
@@ -647,14 +646,13 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
       throws Exception {
     final RTimer timer = new RTimer();
 
-    ZkStateReader zkr = cloudClient.getZkStateReader();
+    ZkStateReader zkr = ZkStateReader.from(cloudClient);
     zkr.forceUpdateCollection(testCollectionName);
-    ClusterState cs = zkr.getClusterState();
     boolean allReplicasUp = false;
     long waitMs = 0L;
     long maxWaitMs = maxWaitSecs * 1000L;
     while (waitMs < maxWaitMs && !allReplicasUp) {
-      cs = cloudClient.getZkStateReader().getClusterState();
+      ClusterState cs = zkr.getClusterState();
       assertNotNull(cs);
       final DocCollection docCollection = cs.getCollectionOrNull(testCollectionName);
       assertNotNull(docCollection);
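
    A fragment of the refresh-then-read idiom these hunks converge on (the collection name is a
    placeholder):

        ZkStateReader zkr = ZkStateReader.from(cloudClient);
        zkr.forceUpdateCollection("myCollection"); // force a fresh view before asserting on it
        ClusterState cs = zkr.getClusterState();
        Collection<Slice> activeSlices = cs.getCollection("myCollection").getActiveSlices();
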
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
index 9b7d9ce..06deacc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
@@ -60,15 +60,15 @@ public class LeaderElectionContextKeyTest extends SolrCloudTestCase {
     }
 
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        "testCollection1", cluster.getSolrClient().getZkStateReader(), false, true, 30);
+        "testCollection1", cluster.getZkStateReader(), false, true, 30);
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        "testCollection2", cluster.getSolrClient().getZkStateReader(), false, true, 30);
+        "testCollection2", cluster.getZkStateReader(), false, true, 30);
   }
 
   @Test
   public void test()
       throws KeeperException, InterruptedException, IOException, SolrServerException {
-    ZkStateReader stateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader stateReader = cluster.getZkStateReader();
     stateReader.forceUpdateCollection(TEST_COLLECTION_1);
     ClusterState clusterState = stateReader.getClusterState();
     // The test assume that TEST_COLLECTION_1 and TEST_COLLECTION_2 will have identical layout
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
index 6e31054..d2df697 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
@@ -147,8 +147,7 @@ public class LeaderElectionIntegrationTest extends SolrCloudTestCase {
 
   private String getLeader(String collection) throws InterruptedException {
 
-    ZkNodeProps props =
-        cluster.getSolrClient().getZkStateReader().getLeaderRetry(collection, "shard1", 30000);
+    ZkNodeProps props = cluster.getZkStateReader().getLeaderRetry(collection, "shard1", 30000);
     String leader = props.getStr(ZkStateReader.NODE_NAME_PROP);
 
     return leader;
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java
index a09fca2..5f4b4b1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderFailoverAfterPartitionTest.java
@@ -29,6 +29,7 @@ import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -106,7 +107,7 @@ public class LeaderFailoverAfterPartitionTest extends HttpPartitionTest {
 
     assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 4);
 
-    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
+    Replica leader = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1");
     String leaderNode = leader.getNodeName();
     assertNotNull(
         "Could not find leader for shard1 of "
@@ -153,7 +154,7 @@ public class LeaderFailoverAfterPartitionTest extends HttpPartitionTest {
     Thread.sleep(10000); // give chance for new leader to be elected.
 
     Replica newLeader =
-        cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 60000);
+        ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, "shard1", 60000);
 
     assertNotNull(
         "No new leader was elected after 60 seconds; clusterState: "
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java
index 7172670..da91ad2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java
@@ -212,7 +212,7 @@ public class LeaderFailureAfterFreshStartTest extends AbstractFullDistribZkTestB
   private void waitTillNodesActive() throws Exception {
     for (int i = 0; i < 60; i++) {
       Thread.sleep(3000);
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       ClusterState clusterState = zkStateReader.getClusterState();
       DocCollection collection1 = clusterState.getCollection("collection1");
       Slice slice = collection1.getSlice("shard1");
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderTragicEventTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderTragicEventTest.java
index 249f4e2..0ba7e2d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderTragicEventTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderTragicEventTest.java
@@ -98,7 +98,7 @@ public class LeaderTragicEventTest extends SolrCloudTestCase {
           return true;
         });
     ClusterStateUtil.waitForAllActiveAndLiveReplicas(
-        cluster.getSolrClient().getZkStateReader(), collection, 120000);
+        cluster.getZkStateReader(), collection, 120000);
     Slice shard = getCollectionState(collection).getSlice("shard1");
     assertNotEquals(
         "Old leader should not be leader again",
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderVoteWaitTimeoutTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderVoteWaitTimeoutTest.java
index 95cd25e..1649dc2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderVoteWaitTimeoutTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderVoteWaitTimeoutTest.java
@@ -140,7 +140,6 @@ public class LeaderVoteWaitTimeoutTest extends SolrCloudTestCase {
     cluster.waitForJettyToStop(j);
 
     cluster
-        .getSolrClient()
         .getZkStateReader()
         .waitForState(
             collectionName,
@@ -301,13 +300,9 @@ public class LeaderVoteWaitTimeoutTest extends SolrCloudTestCase {
   private void assertDocsExistInAllReplicas(
       List<Replica> notLeaders, String testCollectionName, int firstDocId, int lastDocId)
       throws Exception {
-    Replica leader =
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getLeaderRetry(testCollectionName, "shard1", 10000);
+    Replica leader = cluster.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 10000);
     HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName);
-    List<HttpSolrClient> replicas = new ArrayList<HttpSolrClient>(notLeaders.size());
+    List<HttpSolrClient> replicas = new ArrayList<>(notLeaders.size());
 
     for (Replica r : notLeaders) {
       replicas.add(getHttpSolrClient(r, testCollectionName));
diff --git a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
index 2a451a8..fad6649 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
@@ -59,7 +59,7 @@ public class MigrateRouteKeyTest extends SolrCloudTestCase {
     boolean ruleRemoved = false;
     long expiryTime = finishTime + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);
     while (System.nanoTime() < expiryTime) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
+      cluster.getZkStateReader().forceUpdateCollection(collection);
       state = getCollectionState(collection);
       slice = state.getSlice(shard);
       Map<String, RoutingRule> routingRules = slice.getRoutingRules();
diff --git a/solr/core/src/test/org/apache/solr/cloud/NestedShardedAtomicUpdateTest.java b/solr/core/src/test/org/apache/solr/cloud/NestedShardedAtomicUpdateTest.java
index e385754..990f760 100644
--- a/solr/core/src/test/org/apache/solr/cloud/NestedShardedAtomicUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/NestedShardedAtomicUpdateTest.java
@@ -56,7 +56,7 @@ public class NestedShardedAtomicUpdateTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(DEFAULT_COLLECTION, 4, 1).process(cloudClient);
 
     clients = new ArrayList<>();
-    ClusterState clusterState = cloudClient.getClusterStateProvider().getClusterState();
+    ClusterState clusterState = cloudClient.getClusterState();
     for (Replica replica : clusterState.getCollection(DEFAULT_COLLECTION).getReplicas()) {
       clients.add(getHttpSolrClient(replica.getCoreUrl()));
     }
@@ -70,13 +70,7 @@ public class NestedShardedAtomicUpdateTest extends SolrCloudTestCase {
   @Test
   public void doRootShardRoutingTest() throws Exception {
     assertEquals(
-        4,
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(DEFAULT_COLLECTION)
-            .getSlices()
-            .size());
+        4, cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION).getSlices().size());
     final String[] ids = {"3", "4", "5", "6"};
 
     assertEquals(
@@ -155,13 +149,7 @@ public class NestedShardedAtomicUpdateTest extends SolrCloudTestCase {
   @Test
   public void doNestedInplaceUpdateTest() throws Exception {
     assertEquals(
-        4,
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(DEFAULT_COLLECTION)
-            .getSlices()
-            .size());
+        4, cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION).getSlices().size());
     final String[] ids = {"3", "4", "5", "6"};
 
     assertEquals(
@@ -281,13 +269,7 @@ public class NestedShardedAtomicUpdateTest extends SolrCloudTestCase {
   @Test
   public void sendWrongRouteParam() throws Exception {
     assertEquals(
-        4,
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(DEFAULT_COLLECTION)
-            .getSlices()
-            .size());
+        4, cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION).getSlices().size());
     final String rootId = "1";
 
     SolrInputDocument doc = sdoc("id", rootId, "level_s", "root");
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 74325c9..488386a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -454,6 +454,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
     when(clusterStateProviderMock.getLiveNodes()).thenReturn(liveNodes);
     when(cloudDataProviderMock.getDistribStateManager()).thenReturn(stateManagerMock);
     when(cloudManagerMock.getDistribStateManager()).thenReturn(distribStateManagerMock);
+    when(cloudManagerMock.getClusterState()).thenReturn(clusterStateMock);
 
     Mockito.doAnswer(
             new Answer<Void>() {
diff --git a/solr/core/src/test/org/apache/solr/cloud/ParallelCommitExecutionTest.java b/solr/core/src/test/org/apache/solr/cloud/ParallelCommitExecutionTest.java
index 55b67cf..f84baff 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ParallelCommitExecutionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ParallelCommitExecutionTest.java
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.update.CommitUpdateCommand;
@@ -103,7 +104,7 @@ public class ParallelCommitExecutionTest extends SolrCloudTestCase {
   public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
     assert null != client.getDefaultCollection();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        client.getDefaultCollection(), client.getZkStateReader(), true, true, 330);
+        client.getDefaultCollection(), ZkStateReader.from(client), true, true, 330);
   }
 
   public static class CheckFactory extends UpdateRequestProcessorFactory {
diff --git a/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java b/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
index e21aedd..7d13cc9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
@@ -341,7 +341,7 @@ public class PeerSyncReplicationTest extends AbstractFullDistribZkTestBase {
   private void waitTillNodesActive() throws Exception {
     for (int i = 0; i < 60; i++) {
       Thread.sleep(3000);
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       ClusterState clusterState = zkStateReader.getClusterState();
       DocCollection collection1 = clusterState.getCollection("collection1");
       Slice slice = collection1.getSlice("shard1");
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReindexCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/ReindexCollectionTest.java
index 23446a6..ade4f5c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ReindexCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ReindexCollectionTest.java
@@ -187,8 +187,7 @@ public class ReindexCollectionTest extends SolrCloudTestCase {
     String prefix = ReindexCollectionCmd.TARGET_COL_PREFIX + targetCollection;
     while (!timeOut.hasTimedOut()) {
       timeOut.sleep(500);
-      for (String name :
-          cloudManager.getClusterStateProvider().getClusterState().getCollectionsMap().keySet()) {
+      for (String name : cloudManager.getClusterState().getCollectionsMap().keySet()) {
         if (name.startsWith(prefix)) {
           realTargetCollection = name;
           break;
@@ -209,11 +208,11 @@ public class ReindexCollectionTest extends SolrCloudTestCase {
               ReindexCollectionCmd.State.get(coll.getStr(ReindexCollectionCmd.REINDEXING_STATE));
           return ReindexCollectionCmd.State.FINISHED == state;
         });
-    solrClient.getZkStateReader().aliasesManager.update();
+    ZkStateReader.from(solrClient).aliasesManager.update();
     // verify the target docs exist
     QueryResponse rsp = solrClient.query(targetCollection, params(CommonParams.Q, "*:*"));
     assertEquals("copied num docs", NUM_DOCS, rsp.getResults().getNumFound());
-    ClusterState state = solrClient.getClusterStateProvider().getClusterState();
+    ClusterState state = solrClient.getClusterState();
     if (sourceRemove) {
       assertFalse("source collection still present", state.hasCollection(sourceCollection));
     }
@@ -308,9 +307,9 @@ public class ReindexCollectionTest extends SolrCloudTestCase {
     }
 
     // check the shape of the new collection
-    ClusterState clusterState = solrClient.getClusterStateProvider().getClusterState();
+    ClusterState clusterState = solrClient.getClusterState();
     List<String> aliases =
-        solrClient.getZkStateReader().getAliases().resolveAliases(targetCollection);
+        ZkStateReader.from(solrClient).getAliases().resolveAliases(targetCollection);
     assertFalse(aliases.isEmpty());
     String realTargetCollection = aliases.get(0);
     DocCollection coll = clusterState.getCollection(realTargetCollection);
@@ -365,7 +364,6 @@ public class ReindexCollectionTest extends SolrCloudTestCase {
 
     // verify that the target and checkpoint collections don't exist
     cloudManager
-        .getClusterStateProvider()
         .getClusterState()
         .forEachCollection(
             coll -> {
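
    A fragment of the equivalent SolrCloudManager-side access (the target-collection suffix is a
    placeholder):

        for (String name : cloudManager.getClusterState().getCollectionsMap().keySet()) {
          if (name.startsWith(ReindexCollectionCmd.TARGET_COL_PREFIX + "myTarget")) {
            // found a collection created by the reindex command
          }
        }
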
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java b/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java
index 46272bb..ef1a414 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeTest.java
@@ -37,6 +37,7 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
@@ -79,7 +80,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
     }
 
     CloudSolrClient cloudClient = cluster.getSolrClient();
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cloudClient.getClusterState().getLiveNodes();
     ArrayList<String> l = new ArrayList<>(liveNodes);
     Collections.shuffle(l, random());
     String emptyNode = l.remove(0);
@@ -113,20 +114,20 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
                 + create.getNumPullReplicas()
                 + create.getNumTlogReplicas()));
 
-    DocCollection collection = cloudClient.getZkStateReader().getClusterState().getCollection(coll);
+    DocCollection collection = cloudClient.getClusterState().getCollection(coll);
     log.debug("### Before decommission: {}", collection);
     log.info("excluded_node : {}  ", emptyNode);
     createReplaceNodeRequest(node2bdecommissioned, emptyNode, null)
         .processAndWait("000", cloudClient, 15);
     try (HttpSolrClient coreclient =
         getHttpSolrClient(
-            cloudClient.getZkStateReader().getBaseUrlForNodeName(node2bdecommissioned))) {
+            ZkStateReader.from(cloudClient).getBaseUrlForNodeName(node2bdecommissioned))) {
       CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
       assertEquals(0, status.getCoreStatus().size());
     }
 
     Thread.sleep(5000);
-    collection = cloudClient.getZkStateReader().getClusterState().getCollection(coll);
+    collection = cloudClient.getClusterState().getCollection(coll);
     log.debug("### After decommission: {}", collection);
     // check what are replica states on the decommissioned node
     List<Replica> replicas = collection.getReplicas(node2bdecommissioned);
@@ -142,7 +143,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
     replaceNodeRequest.processAndWait("001", cloudClient, 10);
 
     try (HttpSolrClient coreclient =
-        getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(emptyNode))) {
+        getHttpSolrClient(ZkStateReader.from(cloudClient).getBaseUrlForNodeName(emptyNode))) {
       CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
       assertEquals(
           "Expecting no cores but found some: " + status.getCoreStatus(),
@@ -150,7 +151,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
           status.getCoreStatus().size());
     }
 
-    collection = cloudClient.getZkStateReader().getClusterState().getCollection(coll);
+    collection = cloudClient.getClusterState().getCollection(coll);
     assertEquals(create.getNumShards().intValue(), collection.getSlices().size());
     for (Slice s : collection.getSlices()) {
       assertEquals(
@@ -229,7 +230,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
     }
 
     CloudSolrClient cloudClient = cluster.getSolrClient();
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cloudClient.getClusterState().getLiveNodes();
     List<String> l = new ArrayList<>(liveNodes);
     Collections.shuffle(l, random());
     List<String> emptyNodes = l.subList(0, 2);
@@ -251,8 +252,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
                 + create.getNumPullReplicas()
                 + create.getNumTlogReplicas()));
 
-    DocCollection initialCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollection(coll);
+    DocCollection initialCollection = cloudClient.getClusterState().getCollection(coll);
     log.debug("### Before decommission: {}", initialCollection);
     log.info("excluded_nodes : {}  ", emptyNodes);
     List<Integer> initialReplicaCounts =
@@ -262,7 +262,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
     createReplaceNodeRequest(node2bdecommissioned, null, true)
         .processAndWait("000", cloudClient, 15);
 
-    DocCollection collection = cloudClient.getZkStateReader().getClusterState().getCollection(coll);
+    DocCollection collection = cloudClient.getClusterState().getCollection(coll);
     log.debug("### After decommission: {}", collection);
     // check what are replica states on the decommissioned node
     List<Replica> replicas = collection.getReplicas(node2bdecommissioned);
@@ -305,8 +305,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
 
     cluster.waitForActiveCollection(coll, 5, 5);
 
-    String liveNode =
-        cloudClient.getZkStateReader().getClusterState().getLiveNodes().iterator().next();
+    String liveNode = cloudClient.getClusterState().getLiveNodes().iterator().next();
     expectThrows(
         SolrException.class,
         () -> createReplaceNodeRequest(liveNode, null, null).process(cloudClient));
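
The hunks above, and most of those that follow, apply the same two substitutions. A minimal sketch of the resulting call pattern (illustrative only, not part of the patch; cluster is the test cluster the tests already hold):

    import java.util.Set;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.ZkStateReader;

    CloudSolrClient cloudClient = cluster.getSolrClient();

    // cluster state is read directly from the client rather than via getZkStateReader()
    ClusterState clusterState = cloudClient.getClusterState();
    Set<String> liveNodes = clusterState.getLiveNodes();

    // ZooKeeper-backed helpers are reached through ZkStateReader.from(...)
    String baseUrl =
        ZkStateReader.from(cloudClient).getBaseUrlForNodeName(liveNodes.iterator().next());
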
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
index c23fb2b..65e2787 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ReplicationFactorTest.java
@@ -40,6 +40,7 @@ import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -99,7 +100,7 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
     if (log.isInfoEnabled()) {
       log.info(
           "replication factor testing complete! final clusterState is: {}",
-          cloudClient.getZkStateReader().getClusterState());
+          cloudClient.getClusterState());
     }
   }
 
@@ -130,7 +131,7 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
     UpdateRequest up = new UpdateRequest();
     up.add(batch);
 
-    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, shardId);
+    Replica leader = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, shardId);
 
     sendNonDirectUpdateRequestReplicaWithRetry(leader, up, 2, testCollectionName);
     sendNonDirectUpdateRequestReplicaWithRetry(replicas.get(0), up, 2, testCollectionName);
diff --git a/solr/core/src/test/org/apache/solr/cloud/RollingRestartTest.java b/solr/core/src/test/org/apache/solr/cloud/RollingRestartTest.java
index 1b71333..75214c5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RollingRestartTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RollingRestartTest.java
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.collections4.CollectionUtils;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.zookeeper.KeeperException;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -61,13 +62,12 @@ public class RollingRestartTest extends AbstractFullDistribZkTestBase {
   }
 
   public void restartWithRolesTest() throws Exception {
-    String leader =
-        OverseerCollectionConfigSetProcessor.getLeaderNode(
-            cloudClient.getZkStateReader().getZkClient());
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
+    String leader = OverseerCollectionConfigSetProcessor.getLeaderNode(zkStateReader.getZkClient());
     assertNotNull(leader);
     log.info("Current overseer leader = {}", leader);
 
-    cloudClient.getZkStateReader().getZkClient().printLayoutToStream(System.out);
+    zkStateReader.getZkClient().printLayoutToStream(System.out);
 
     int numDesignateOverseers = TEST_NIGHTLY ? 16 : 2;
     numDesignateOverseers = Math.max(getShardCount(), numDesignateOverseers);
@@ -82,10 +82,9 @@ public class RollingRestartTest extends AbstractFullDistribZkTestBase {
       designateJettys.add(cloudJettys.get(n));
     }
 
-    waitUntilOverseerDesignateIsLeader(
-        cloudClient.getZkStateReader().getZkClient(), designates, MAX_WAIT_TIME);
+    waitUntilOverseerDesignateIsLeader(zkStateReader.getZkClient(), designates, MAX_WAIT_TIME);
 
-    cloudClient.getZkStateReader().getZkClient().printLayoutToStream(System.out);
+    zkStateReader.getZkClient().printLayoutToStream(System.out);
 
     boolean sawLiveDesignate = false;
     int numRestarts = 1 + random().nextInt(TEST_NIGHTLY ? 12 : 2);
@@ -94,49 +93,43 @@ public class RollingRestartTest extends AbstractFullDistribZkTestBase {
       for (CloudJettyRunner cloudJetty : designateJettys) {
         log.info("Restarting {}", cloudJetty);
         chaosMonkey.stopJetty(cloudJetty);
-        cloudClient.getZkStateReader().updateLiveNodes();
+        zkStateReader.updateLiveNodes();
         boolean liveDesignates =
-            CollectionUtils.intersection(
-                        cloudClient.getZkStateReader().getClusterState().getLiveNodes(), designates)
+            CollectionUtils.intersection(zkStateReader.getClusterState().getLiveNodes(), designates)
                     .size()
                 > 0;
         if (liveDesignates) {
-          sawLiveDesignate = true;
           boolean success =
               waitUntilOverseerDesignateIsLeader(
-                  cloudClient.getZkStateReader().getZkClient(), designates, MAX_WAIT_TIME);
+                  zkStateReader.getZkClient(), designates, MAX_WAIT_TIME);
           if (!success) {
             leader =
-                OverseerCollectionConfigSetProcessor.getLeaderNode(
-                    cloudClient.getZkStateReader().getZkClient());
+                OverseerCollectionConfigSetProcessor.getLeaderNode(zkStateReader.getZkClient());
             if (leader == null)
               log.error(
                   "NOOVERSEER election queue is : {}",
                   OverseerCollectionConfigSetProcessor.getSortedElectionNodes(
-                      cloudClient.getZkStateReader().getZkClient(), "/overseer_elect/election"));
+                      zkStateReader.getZkClient(), "/overseer_elect/election"));
             fail("No overseer designate as leader found after restart #" + (i + 1) + ": " + leader);
           }
         }
         cloudJetty.jetty.start();
         boolean success =
             waitUntilOverseerDesignateIsLeader(
-                cloudClient.getZkStateReader().getZkClient(), designates, MAX_WAIT_TIME);
+                zkStateReader.getZkClient(), designates, MAX_WAIT_TIME);
         if (!success) {
-          leader =
-              OverseerCollectionConfigSetProcessor.getLeaderNode(
-                  cloudClient.getZkStateReader().getZkClient());
+          leader = OverseerCollectionConfigSetProcessor.getLeaderNode(zkStateReader.getZkClient());
           if (leader == null)
             log.error(
                 "NOOVERSEER election queue is :{}",
                 OverseerCollectionConfigSetProcessor.getSortedElectionNodes(
-                    cloudClient.getZkStateReader().getZkClient(), "/overseer_elect/election"));
+                    zkStateReader.getZkClient(), "/overseer_elect/election"));
           fail("No overseer leader found after restart #" + (i + 1) + ": " + leader);
         }
 
-        cloudClient.getZkStateReader().updateLiveNodes();
+        zkStateReader.updateLiveNodes();
         sawLiveDesignate =
-            CollectionUtils.intersection(
-                        cloudClient.getZkStateReader().getClusterState().getLiveNodes(), designates)
+            CollectionUtils.intersection(zkStateReader.getClusterState().getLiveNodes(), designates)
                     .size()
                 > 0;
       }
@@ -144,13 +137,11 @@ public class RollingRestartTest extends AbstractFullDistribZkTestBase {
 
     assertTrue("Test may not be working if we never saw a live designate", sawLiveDesignate);
 
-    leader =
-        OverseerCollectionConfigSetProcessor.getLeaderNode(
-            cloudClient.getZkStateReader().getZkClient());
+    leader = OverseerCollectionConfigSetProcessor.getLeaderNode(zkStateReader.getZkClient());
     assertNotNull(leader);
     log.info("Current overseer leader (after restart) = {}", leader);
 
-    cloudClient.getZkStateReader().getZkClient().printLayoutToStream(System.out);
+    zkStateReader.getZkClient().printLayoutToStream(System.out);
   }
 
   static boolean waitUntilOverseerDesignateIsLeader(
diff --git a/solr/core/src/test/org/apache/solr/cloud/RouteFieldTest.java b/solr/core/src/test/org/apache/solr/cloud/RouteFieldTest.java
index 5049c9b..f2693a1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RouteFieldTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RouteFieldTest.java
@@ -34,6 +34,7 @@ import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.junit.BeforeClass;
@@ -121,15 +122,16 @@ public class RouteFieldTest extends SolrCloudTestCase {
   private void checkShardsHaveSameDocs() throws IOException, SolrServerException {
 
     CloudSolrClient client = cluster.getSolrClient();
+    ZkStateReader zkStateReader = ZkStateReader.from(client);
 
-    DocCollection docColl = client.getZkStateReader().getClusterState().getCollection(COLL_ROUTE);
+    DocCollection docColl = zkStateReader.getClusterState().getCollection(COLL_ROUTE);
     List<Replica> reps = new ArrayList<>(docColl.getSlice("shard1").getReplicas());
     String urlRouteShard1 = reps.get(0).get("base_url") + "/" + reps.get(0).get("core");
 
     reps = new ArrayList<>(docColl.getSlice("shard2").getReplicas());
     String urlRouteShard2 = reps.get(0).get("base_url") + "/" + reps.get(0).get("core");
 
-    docColl = client.getZkStateReader().getClusterState().getCollection(COLL_ID);
+    docColl = zkStateReader.getClusterState().getCollection(COLL_ID);
     reps = new ArrayList<>(docColl.getSlice("shard1").getReplicas());
     String urlIdShard1 = reps.get(0).get("base_url") + "/" + reps.get(0).get("core");
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java b/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java
index fff489d..496ca4e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SSLMigrationTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.solr.cloud;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
@@ -102,11 +103,10 @@ public class SSLMigrationTest extends AbstractFullDistribZkTestBase {
     }
   }
 
-  private List<Replica> getReplicas() {
-    List<Replica> replicas = new ArrayList<Replica>();
+  private List<Replica> getReplicas() throws IOException {
+    List<Replica> replicas = new ArrayList<>();
 
-    DocCollection collection =
-        this.cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
+    DocCollection collection = cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION);
     for (Slice slice : collection.getSlices()) {
       replicas.addAll(slice.getReplicas());
     }
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java
index 5b7a2a2..8cc54a9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java
@@ -117,13 +117,7 @@ public class ShardRoutingTest extends AbstractFullDistribZkTestBase {
   private void doHashingTest() throws Exception {
     log.info("### STARTING doHashingTest");
     assertEquals(
-        4,
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(DEFAULT_COLLECTION)
-            .getSlices()
-            .size());
+        4, cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION).getSlices().size());
     String shardKeys = ShardParams._ROUTE_;
     // for now,  we know how ranges will be distributed to shards.
     // may have to look it up in clusterstate if that assumption changes.
diff --git a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
index 4e4acd6..a24de71 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SolrCloudExampleTest.java
@@ -79,12 +79,12 @@ public class SolrCloudExampleTest extends AbstractFullDistribZkTestBase {
     File defaultConfigs = new File(ExternalPaths.DEFAULT_CONFIGSET);
     assertTrue(defaultConfigs.getAbsolutePath() + " not found!", defaultConfigs.isDirectory());
 
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cloudClient.getClusterState().getLiveNodes();
     if (liveNodes.isEmpty())
       fail(
           "No live nodes found! Cannot create a collection until there is at least 1 live node in the cluster.");
     String firstLiveNode = liveNodes.iterator().next();
-    String solrUrl = cloudClient.getZkStateReader().getBaseUrlForNodeName(firstLiveNode);
+    String solrUrl = ZkStateReader.from(cloudClient).getBaseUrlForNodeName(firstLiveNode);
 
     // create the gettingstarted collection just like the bin/solr script would do
     String[] args =
@@ -115,7 +115,7 @@ public class SolrCloudExampleTest extends AbstractFullDistribZkTestBase {
     tool.runTool(cli);
     assertTrue(
         "Collection '" + testCollectionName + "' doesn't exist after trying to create it!",
-        cloudClient.getZkStateReader().getClusterState().hasCollection(testCollectionName));
+        cloudClient.getClusterState().hasCollection(testCollectionName));
 
     // verify the collection is usable ...
     ensureAllReplicasAreActive(testCollectionName, "shard1", 2, 2, 20);
@@ -181,7 +181,7 @@ public class SolrCloudExampleTest extends AbstractFullDistribZkTestBase {
     doTestConfigUpdate(testCollectionName, solrUrl);
 
     log.info("Running healthcheck for {}", testCollectionName);
-    doTestHealthcheck(testCollectionName, cloudClient.getZkHost());
+    doTestHealthcheck(testCollectionName, cloudClient.getClusterStateProvider().getQuorumHosts());
 
     // verify the delete action works too
     log.info("Running delete for {}", testCollectionName);
@@ -267,9 +267,7 @@ public class SolrCloudExampleTest extends AbstractFullDistribZkTestBase {
         SolrCLI.atPath("/config/requestHandler/\\/query/defaults/echoParams", configJson));
 
     if (log.isInfoEnabled()) {
-      log.info(
-          "live_nodes_count :  {}",
-          cloudClient.getZkStateReader().getClusterState().getLiveNodes());
+      log.info("live_nodes_count :  {}", cloudClient.getClusterState().getLiveNodes());
     }
 
     // Since it takes some time for this command to complete we need to make sure all the reloads
@@ -297,7 +295,7 @@ public class SolrCloudExampleTest extends AbstractFullDistribZkTestBase {
   // Collect all of the autoSoftCommit intervals.
   private Map<String, Long> getSoftAutocommitInterval(String collection) throws Exception {
     Map<String, Long> ret = new HashMap<>();
-    DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection(collection);
+    DocCollection coll = cloudClient.getClusterState().getCollection(collection);
     for (Slice slice : coll.getActiveSlices()) {
       for (Replica replica : slice.getReplicas()) {
         String uri =
diff --git a/solr/core/src/test/org/apache/solr/cloud/SplitShardTest.java b/solr/core/src/test/org/apache/solr/cloud/SplitShardTest.java
index de3181c..9402335 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SplitShardTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SplitShardTest.java
@@ -86,7 +86,6 @@ public class SplitShardTest extends SolrCloudTestCase {
         "Timed out waiting for sub shards to be active. Number of active shards="
             + cluster
                 .getSolrClient()
-                .getZkStateReader()
                 .getClusterState()
                 .getCollection(COLLECTION_NAME)
                 .getActiveSlices()
@@ -154,15 +153,13 @@ public class SplitShardTest extends SolrCloudTestCase {
         "Timed out waiting for sub shards to be active. Number of active shards="
             + cluster
                 .getSolrClient()
-                .getZkStateReader()
                 .getClusterState()
                 .getCollection(collectionName)
                 .getActiveSlices()
                 .size(),
         collectionName,
         activeClusterShape(3, 4));
-    DocCollection coll =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection coll = cluster.getSolrClient().getClusterState().getCollection(collectionName);
     Slice s1_0 = coll.getSlice("shard1_0");
     Slice s1_1 = coll.getSlice("shard1_1");
     long fuzz = ((long) Integer.MAX_VALUE >> 3) + 1L;
@@ -188,8 +185,7 @@ public class SplitShardTest extends SolrCloudTestCase {
 
   long getNumDocs(CloudSolrClient client) throws Exception {
     String collectionName = client.getDefaultCollection();
-    DocCollection collection =
-        client.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection collection = client.getClusterState().getCollection(collectionName);
     Collection<Slice> slices = collection.getSlices();
 
     long totCount = 0;
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
index cd3db58..8a2d4d5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
@@ -183,25 +183,24 @@ public class TestCloudConsistency extends SolrCloudTestCase {
     expectThrows(
         TimeoutException.class,
         "Did not time out waiting for new leader, out of sync replica became leader",
-        () -> {
-          cluster
-              .getSolrClient()
-              .waitForState(
-                  collection,
-                  10,
-                  TimeUnit.SECONDS,
-                  (state) -> {
-                    Replica newLeader = state.getSlice("shard1").getLeader();
-                    if (newLeader != null
-                        && !newLeader.getName().equals(leader.getName())
-                        && newLeader.getState() == Replica.State.ACTIVE) {
-                      // this is is the bad case, our "bad" state was found before timeout
-                      log.error("WTF: New Leader={}", newLeader);
-                      return true;
-                    }
-                    return false; // still no bad state, wait for timeout
-                  });
-        });
+        () ->
+            cluster
+                .getZkStateReader()
+                .waitForState(
+                    collection,
+                    10,
+                    TimeUnit.SECONDS,
+                    (state) -> {
+                      Replica newLeader = state.getSlice("shard1").getLeader();
+                      if (newLeader != null
+                          && !newLeader.getName().equals(leader.getName())
+                          && newLeader.getState() == Replica.State.ACTIVE) {
+                        // this is the bad case, our "bad" state was found before timeout
+                        log.error("WTF: New Leader={}", newLeader);
+                        return true;
+                      }
+                      return false; // still no bad state, wait for timeout
+                    }));
 
     JettySolrRunner j0 = cluster.getJettySolrRunner(0);
     j0.start();
@@ -249,8 +248,10 @@ public class TestCloudConsistency extends SolrCloudTestCase {
         TimeoutException.class,
         "Did not time out waiting for new leader, out of sync replica became leader",
         () -> {
+          // this is the bad case, our "bad" state was found before timeout
+          // still no bad state, wait for timeout
           cluster
-              .getSolrClient()
+              .getZkStateReader()
               .waitForState(
                   collection,
                   10,
@@ -309,13 +310,9 @@ public class TestCloudConsistency extends SolrCloudTestCase {
   private void assertDocsExistInAllReplicas(
       List<Replica> notLeaders, String testCollectionName, int firstDocId, int lastDocId)
       throws Exception {
-    Replica leader =
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getLeaderRetry(testCollectionName, "shard1", 10000);
+    Replica leader = cluster.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 10000);
     HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName);
-    List<HttpSolrClient> replicas = new ArrayList<HttpSolrClient>(notLeaders.size());
+    List<HttpSolrClient> replicas = new ArrayList<>(notLeaders.size());
 
     for (Replica r : notLeaders) {
       replicas.add(getHttpSolrClient(r, testCollectionName));
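
Where the tests used to wait on state or look up leaders through the SolrJ client, they now go through the cluster's ZkStateReader, as in the hunk above. A minimal sketch of that pattern (the collection and shard names and the timeout are illustrative, not taken from the patch):

    import java.util.concurrent.TimeUnit;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.ZkStateReader;

    ZkStateReader zkStateReader = cluster.getZkStateReader();

    // block until shard1 of the (illustrative) collection reports a leader
    zkStateReader.waitForState(
        "collection1", 10, TimeUnit.SECONDS,
        (state) -> state != null
            && state.getSlice("shard1") != null
            && state.getSlice("shard1").getLeader() != null);

    // leader lookups likewise move off the client and onto the reader
    Replica leader = zkStateReader.getLeaderRetry("collection1", "shard1");
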
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudDeleteByQuery.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudDeleteByQuery.java
index 3ad3c02..872189b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudDeleteByQuery.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudDeleteByQuery.java
@@ -128,7 +128,7 @@ public class TestCloudDeleteByQuery extends SolrCloudTestCase {
     CLOUD_CLIENT = cluster.getSolrClient();
     CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
 
-    ZkStateReader zkStateReader = CLOUD_CLIENT.getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
 
     // really hackish way to get a URL for specific nodes based on shard/replica hosting
     // inspired by TestMiniSolrCloudCluster
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudPhrasesIdentificationComponent.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudPhrasesIdentificationComponent.java
index bef999b..e0413e8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudPhrasesIdentificationComponent.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudPhrasesIdentificationComponent.java
@@ -33,6 +33,7 @@ import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.junit.AfterClass;
@@ -208,6 +209,6 @@ public class TestCloudPhrasesIdentificationComponent extends SolrCloudTestCase {
   public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
     assert null != client.getDefaultCollection();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        client.getDefaultCollection(), client.getZkStateReader(), true, true, 330);
+        client.getDefaultCollection(), ZkStateReader.from(client), true, true, 330);
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudPseudoReturnFields.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudPseudoReturnFields.java
index ae2d281..7bbb1a7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudPseudoReturnFields.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudPseudoReturnFields.java
@@ -37,6 +37,7 @@ import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.schema.SchemaResponse.FieldResponse;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.search.TestPseudoReturnFields;
@@ -992,6 +993,6 @@ public class TestCloudPseudoReturnFields extends SolrCloudTestCase {
   public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
     assert null != client.getDefaultCollection();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        client.getDefaultCollection(), client.getZkStateReader(), true, true, 330);
+        client.getDefaultCollection(), ZkStateReader.from(client), true, true, 330);
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
index a834502..ee105fc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
@@ -35,6 +35,7 @@ import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.cloud.ClusterStateUtil;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.metrics.SolrMetricManager;
@@ -117,7 +118,7 @@ public class TestCloudRecovery extends SolrCloudTestCase {
     }
     assertTrue(
         "Timeout waiting for all not live",
-        ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
+        ClusterStateUtil.waitForAllReplicasNotLive(ZkStateReader.from(cloudClient), 45000));
     ChaosMonkey.start(cluster.getJettySolrRunners());
 
     cluster.waitForAllNodes(30);
@@ -125,7 +126,7 @@ public class TestCloudRecovery extends SolrCloudTestCase {
     assertTrue(
         "Timeout waiting for all live and active",
         ClusterStateUtil.waitForAllActiveAndLiveReplicas(
-            cloudClient.getZkStateReader(), COLLECTION, 120000));
+            ZkStateReader.from(cloudClient), COLLECTION, 120000));
 
     resp = cloudClient.query(COLLECTION, params);
     assertEquals(4, resp.getResults().getNumFound());
@@ -207,7 +208,7 @@ public class TestCloudRecovery extends SolrCloudTestCase {
 
     assertTrue(
         "Timeout waiting for all not live",
-        ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
+        ClusterStateUtil.waitForAllReplicasNotLive(ZkStateReader.from(cloudClient), 45000));
 
     for (Map.Entry<String, byte[]> entry : contentFiles.entrySet()) {
       byte[] tlogBytes = entry.getValue();
@@ -229,12 +230,10 @@ public class TestCloudRecovery extends SolrCloudTestCase {
     assertTrue(
         "Timeout waiting for all live and active",
         ClusterStateUtil.waitForAllActiveAndLiveReplicas(
-            cloudClient.getZkStateReader(), COLLECTION, 120000));
-
+            ZkStateReader.from(cloudClient), COLLECTION, 120000));
     cluster.waitForActiveCollection(COLLECTION, 2, 2 * (nrtReplicas + tlogReplicas));
 
-    cloudClient.getZkStateReader().forceUpdateCollection(COLLECTION);
-
+    ZkStateReader.from(cloudClient).forceUpdateCollection(COLLECTION);
     resp = cloudClient.query(COLLECTION, params);
     // Make sure cluster still healthy
     // TODO: AwaitsFix - this will fail under test beasting
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery2.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery2.java
index e9a28ac..32a1158 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery2.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery2.java
@@ -46,7 +46,7 @@ public class TestCloudRecovery2 extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, "config", 1, 2)
         .process(cluster.getSolrClient());
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, 30);
+        COLLECTION, cluster.getZkStateReader(), false, true, 30);
   }
 
   @Test
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudSearcherWarming.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudSearcherWarming.java
index 5c403f4..cf4bb49 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudSearcherWarming.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudSearcherWarming.java
@@ -27,10 +27,7 @@ import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.CollectionStatePredicate;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.*;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.SolrEventListener;
 import org.apache.solr.search.SolrIndexSearcher;
@@ -126,27 +123,20 @@ public class TestCloudSearcherWarming extends SolrCloudTestCase {
     cluster.waitForJettyToStop(runner);
     // check waitForState only after we are sure the node has shutdown and have forced an update to
     // liveNodes ie: workaround SOLR-13490
-    cluster.getSolrClient().getZkStateReader().updateLiveNodes();
+    ZkStateReader.from(solrClient).updateLiveNodes();
     waitForState(
         "jetty count:" + cluster.getJettySolrRunners().size(), collectionName, clusterShape(1, 0));
-
     // restart
     sleepTime.set(1000);
     runner.start();
     cluster.waitForAllNodes(30);
-    cluster
-        .getSolrClient()
-        .getZkStateReader()
-        .registerCollectionStateWatcher(collectionName, stateWatcher);
+    ZkStateReader.from(solrClient).registerCollectionStateWatcher(collectionName, stateWatcher);
     cluster.waitForActiveCollection(collectionName, 1, 1);
     assertNull(
         "No replica should have been active without registering a searcher, found: "
             + failingCoreNodeName.get(),
         failingCoreNodeName.get());
-    cluster
-        .getSolrClient()
-        .getZkStateReader()
-        .removeCollectionStateWatcher(collectionName, stateWatcher);
+    ZkStateReader.from(solrClient).removeCollectionStateWatcher(collectionName, stateWatcher);
   }
 
   @Test
@@ -195,10 +185,7 @@ public class TestCloudSearcherWarming extends SolrCloudTestCase {
 
     CollectionStateWatcher stateWatcher =
         createActiveReplicaSearcherWatcher(expectedDocs, failingCoreNodeName);
-    cluster
-        .getSolrClient()
-        .getZkStateReader()
-        .registerCollectionStateWatcher(collectionName, stateWatcher);
+    ZkStateReader.from(solrClient).registerCollectionStateWatcher(collectionName, stateWatcher);
 
     JettySolrRunner newNode = cluster.startJettySolrRunner();
     cluster.waitForAllNodes(30);
@@ -235,7 +222,7 @@ public class TestCloudSearcherWarming extends SolrCloudTestCase {
           return false;
         };
     waitForState("", collectionName, collectionStatePredicate);
-    assertNotNull(solrClient.getZkStateReader().getLeaderRetry(collectionName, "shard1"));
+    assertNotNull(ZkStateReader.from(solrClient).getLeaderRetry(collectionName, "shard1"));
 
     // reset
     coreNameRef.set(null);
@@ -251,10 +238,7 @@ public class TestCloudSearcherWarming extends SolrCloudTestCase {
     waitForState("", collectionName, clusterShape(1, 2));
     // invoke statewatcher explicitly to avoid race condition where the assert happens before the
     // state watcher is invoked by ZkStateReader
-    cluster
-        .getSolrClient()
-        .getZkStateReader()
-        .registerCollectionStateWatcher(collectionName, stateWatcher);
+    ZkStateReader.from(solrClient).registerCollectionStateWatcher(collectionName, stateWatcher);
     assertNull(
         "No replica should have been active without registering a searcher, found: "
             + failingCoreNodeName.get(),
@@ -281,18 +265,12 @@ public class TestCloudSearcherWarming extends SolrCloudTestCase {
     waitForState("", collectionName, clusterShape(1, 2));
     // invoke statewatcher explicitly to avoid race condition where the assert happens before the
     // state watcher is invoked by ZkStateReader
-    cluster
-        .getSolrClient()
-        .getZkStateReader()
-        .registerCollectionStateWatcher(collectionName, stateWatcher);
+    ZkStateReader.from(solrClient).registerCollectionStateWatcher(collectionName, stateWatcher);
     assertNull(
         "No replica should have been active without registering a searcher, found: "
             + failingCoreNodeName.get(),
         failingCoreNodeName.get());
-    cluster
-        .getSolrClient()
-        .getZkStateReader()
-        .removeCollectionStateWatcher(collectionName, stateWatcher);
+    ZkStateReader.from(solrClient).removeCollectionStateWatcher(collectionName, stateWatcher);
   }
 
   private CollectionStateWatcher createActiveReplicaSearcherWatcher(
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java b/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
index 578cf46..8e932d8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestDeleteCollectionOnDownNodes.java
@@ -64,10 +64,6 @@ public class TestDeleteCollectionOnDownNodes extends SolrCloudTestCase {
 
     assertFalse(
         "Still found collection that should be gone",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .hasCollection("halfdeletedcollection2"));
+        cluster.getSolrClient().getClusterState().hasCollection("halfdeletedcollection2"));
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java b/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java
index 7d386ed..a67711d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java
@@ -136,13 +136,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
   private void doTestDocVersions() throws Exception {
     log.info("### STARTING doTestDocVersions");
     assertEquals(
-        2,
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(DEFAULT_COLLECTION)
-            .getSlices()
-            .size());
+        2, cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION).getSlices().size());
 
     solrClient = cloudClient;
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLazySolrCluster.java b/solr/core/src/test/org/apache/solr/cloud/TestLazySolrCluster.java
index 7c04857..7d7f702 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestLazySolrCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestLazySolrCluster.java
@@ -48,7 +48,7 @@ public class TestLazySolrCluster extends SolrCloudTestCase {
     cloudClient.request(CollectionAdminRequest.createCollection(collection, "conf1", 2, 2));
     cluster.waitForActiveCollection(collection, 2, 4);
 
-    LazySolrCluster solrCluster = new LazySolrCluster(cluster.getSolrClient().getZkStateReader());
+    LazySolrCluster solrCluster = new LazySolrCluster(ZkStateReader.from(cloudClient));
     SimpleMap<SolrCollection> colls = solrCluster.collections();
 
     SolrCollection c = colls.get("testLazyCluster1");
@@ -70,7 +70,7 @@ public class TestLazySolrCluster extends SolrCloudTestCase {
     assertEquals(4, count[0]);
 
     assertEquals(5, solrCluster.nodes().size());
-    SolrZkClient zkClient = cloudClient.getZkStateReader().getZkClient();
+    SolrZkClient zkClient = ZkStateReader.from(cloudClient).getZkClient();
     zkClient.create(ZkStateReader.CONFIGS_ZKNODE + "/conf1/a", null, CreateMode.PERSISTENT, true);
     zkClient.create(
         ZkStateReader.CONFIGS_ZKNODE + "/conf1/a/aa1", new byte[1024], CreateMode.PERSISTENT, true);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
index b5c0344..951b470 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestLeaderElectionWithEmptyReplica.java
@@ -64,7 +64,7 @@ public class TestLeaderElectionWithEmptyReplica extends SolrCloudTestCase {
     solrClient.commit();
 
     // find the leader node
-    Replica replica = solrClient.getZkStateReader().getLeaderRetry(COLLECTION_NAME, "shard1");
+    Replica replica = cluster.getZkStateReader().getLeaderRetry(COLLECTION_NAME, "shard1");
     JettySolrRunner replicaJetty = null;
     List<JettySolrRunner> jettySolrRunners = cluster.getJettySolrRunners();
     for (JettySolrRunner jettySolrRunner : jettySolrRunners) {
@@ -90,17 +90,19 @@ public class TestLeaderElectionWithEmptyReplica extends SolrCloudTestCase {
     replicaJetty.start();
 
     // wait until everyone is active
-    solrClient.waitForState(
-        COLLECTION_NAME,
-        DEFAULT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+    cluster
+        .getZkStateReader()
+        .waitForState(
+            COLLECTION_NAME,
+            DEFAULT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
 
     // now query each replica and check for consistency
     assertConsistentReplicas(
         solrClient,
-        solrClient
-            .getZkStateReader()
+        cluster
+            .getSolrClient()
             .getClusterState()
             .getCollection(COLLECTION_NAME)
             .getSlice("shard1"));
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPrepRecovery.java b/solr/core/src/test/org/apache/solr/cloud/TestPrepRecovery.java
index 2f84888..b34f3f4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPrepRecovery.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPrepRecovery.java
@@ -72,7 +72,7 @@ public class TestPrepRecovery extends SolrCloudTestCase {
         .process(solrClient);
 
     // now delete the leader
-    Replica leader = solrClient.getZkStateReader().getLeaderRetry(collectionName, "shard1");
+    Replica leader = cluster.getZkStateReader().getLeaderRetry(collectionName, "shard1");
     CollectionAdminRequest.deleteReplica(collectionName, "shard1", leader.getName())
         .process(solrClient);
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
index 9563c65..edfd6ad 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
@@ -115,12 +115,7 @@ public class TestPullReplica extends SolrCloudTestCase {
         jetty.start();
       }
     }
-    if (cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .getCollectionOrNull(collectionName)
-        != null) {
+    if (cluster.getSolrClient().getClusterState().getCollectionOrNull(collectionName) != null) {
       log.info("tearDown deleting collection");
       CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
       log.info("Collection deleted");
@@ -413,7 +408,7 @@ public class TestPullReplica extends SolrCloudTestCase {
     addDocs(500);
     List<Replica.State> statesSeen = new ArrayList<>(3);
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .registerCollectionStateWatcher(
             collectionName,
             (liveNodes, collectionState) -> {
@@ -545,15 +540,12 @@ public class TestPullReplica extends SolrCloudTestCase {
     Replica leader = docCollection.getSlice("shard1").getLeader();
     assertTrue(
         leader == null
-            || !leader.isActive(
-                cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()));
+            || !leader.isActive(cluster.getSolrClient().getClusterState().getLiveNodes()));
 
     // Pull replica on the other hand should be active
     Replica pullReplica =
         docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.PULL)).get(0);
-    assertTrue(
-        pullReplica.isActive(
-            cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()));
+    assertTrue(pullReplica.isActive(cluster.getSolrClient().getClusterState().getLiveNodes()));
 
     long highestTerm = 0L;
     try (ZkShardTerms zkShardTerms = new ZkShardTerms(collectionName, "shard1", zkClient())) {
@@ -605,13 +597,12 @@ public class TestPullReplica extends SolrCloudTestCase {
     unIgnoreException("No registered leader was found"); // Should have a leader from now on
 
     // Validate that the new nrt replica is the leader now
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
+    cluster.getZkStateReader().forceUpdateCollection(collectionName);
     docCollection = getCollectionState(collectionName);
     leader = docCollection.getSlice("shard1").getLeader();
     assertTrue(
         leader != null
-            && leader.isActive(
-                cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()));
+            && leader.isActive(cluster.getSolrClient().getClusterState().getLiveNodes()));
 
     // If jetty is restarted, the replication is not forced, and replica doesn't replicate from
     // leader until new docs are added. Is this the correct behavior? Why should these two cases be
@@ -725,14 +716,14 @@ public class TestPullReplica extends SolrCloudTestCase {
 
   static void waitForDeletion(String collection) throws InterruptedException, KeeperException {
     TimeOut t = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
+    while (cluster.getSolrClient().getClusterState().hasCollection(collection)) {
       log.info("Collection not yet deleted");
       try {
         Thread.sleep(100);
         if (t.hasTimedOut()) {
           fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
+        cluster.getZkStateReader().forceUpdateCollection(collection);
       } catch (SolrException e) {
         return;
       }
@@ -764,7 +755,7 @@ public class TestPullReplica extends SolrCloudTestCase {
       boolean activeOnly)
       throws KeeperException, InterruptedException {
     if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(coll);
+      cluster.getZkStateReader().forceUpdateCollection(coll);
     }
     DocCollection docCollection = getCollectionState(coll);
     assertNotNull(docCollection);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
index 6341e1c..50e2c27 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
@@ -117,12 +117,7 @@ public class TestPullReplicaErrorHandling extends SolrCloudTestCase {
 
   @Override
   public void tearDown() throws Exception {
-    if (cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .getCollectionOrNull(collectionName)
-        != null) {
+    if (cluster.getSolrClient().getClusterState().getCollectionOrNull(collectionName) != null) {
       log.info("tearDown deleting collection");
       CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
       log.info("Collection deleted");
@@ -303,7 +298,7 @@ public class TestPullReplicaErrorHandling extends SolrCloudTestCase {
       int numWriter, int numActive, int numPassive, boolean updateCollection, boolean activeOnly)
       throws KeeperException, InterruptedException {
     if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
+      cluster.getZkStateReader().forceUpdateCollection(collectionName);
     }
     DocCollection docCollection = getCollectionState(collectionName);
     assertNotNull(docCollection);
@@ -354,14 +349,14 @@ public class TestPullReplicaErrorHandling extends SolrCloudTestCase {
 
   private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
     TimeOut t = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
+    while (cluster.getSolrClient().getClusterState().hasCollection(collection)) {
       log.info("Collection not yet deleted");
       try {
         Thread.sleep(100);
         if (t.hasTimedOut()) {
           fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
+        cluster.getZkStateReader().forceUpdateCollection(collection);
       } catch (SolrException e) {
         return;
       }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java
index 5ae7ac8..3efd579 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomFlRTGCloud.java
@@ -40,16 +40,14 @@ import org.apache.solr.client.solrj.ResponseParser;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.NoOpResponseParser;
-import org.apache.solr.client.solrj.impl.XMLResponseParser;
+import org.apache.solr.client.solrj.impl.*;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
@@ -644,7 +642,7 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
   public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
     assert null != client.getDefaultCollection();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        client.getDefaultCollection(), client.getZkStateReader(), true, true, 330);
+        client.getDefaultCollection(), ZkStateReader.from(client), true, true, 330);
   }
 
   /**
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
index 86be73e..9eb53fa 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
@@ -82,7 +82,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
     waitForRecoveriesToFinish("a1x2", true);
     waitForRecoveriesToFinish("b1x1", true);
 
-    cloudClient.getZkStateReader().forceUpdateCollection("b1x1");
+    ZkStateReader.from(cloudClient).forceUpdateCollection("b1x1");
 
     // get direct access to the metrics counters for each core/replica we're interested to monitor
     // them
@@ -103,7 +103,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
     assertEquals("Sanity Check: we know there should be 2 replicas", 2, counters.size());
 
     // send queries to the node that doesn't host any core/replica and see where it routes them
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ClusterState clusterState = cloudClient.getClusterState();
     DocCollection b1x1 = clusterState.getCollection("b1x1");
     Collection<Replica> replicas = b1x1.getSlice("shard1").getReplicas();
     assertEquals(1, replicas.size());
@@ -153,18 +153,13 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
 
     waitForRecoveriesToFinish("football", true);
 
-    cloudClient.getZkStateReader().forceUpdateCollection("football");
+    ZkStateReader.from(cloudClient).forceUpdateCollection("football");
 
     Replica leader = null;
     Replica notLeader = null;
 
     Collection<Replica> replicas =
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection("football")
-            .getSlice("shard1")
-            .getReplicas();
+        cloudClient.getClusterState().getCollection("football").getSlice("shard1").getReplicas();
     for (Replica replica : replicas) {
       if (replica.getStr(ZkStateReader.LEADER_PROP) != null) {
         leader = replica;
@@ -213,7 +208,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
     }
 
     verifyReplicaStatus(
-        cloudClient.getZkStateReader(),
+        ZkStateReader.from(cloudClient),
         "football",
         "shard1",
         notLeader.getName(),
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java b/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java
index 0238f00..2f7bc18 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java
@@ -18,7 +18,15 @@ package org.apache.solr.cloud;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import org.apache.lucene.util.LuceneTestCase;
@@ -76,7 +84,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
   public void removeAllProperties() throws KeeperException, InterruptedException {
     forceUpdateCollectionStatus();
     DocCollection docCollection =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
     for (Slice slice : docCollection.getSlices()) {
       for (Replica rep : slice.getReplicas()) {
         rep.getProperties()
@@ -158,7 +166,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
     // First set the property in some replica in some slice
     forceUpdateCollectionStatus();
     DocCollection docCollection =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
 
     Slice[] slices = docCollection.getSlices().toArray(new Slice[0]);
     Slice slice = slices[random().nextInt(slices.length)];
@@ -179,12 +187,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
       // ensure that no other replica in that slice has the property when we return.
       while (timeout.hasTimedOut() == false) {
         forceUpdateCollectionStatus();
-        modColl =
-            cluster
-                .getSolrClient()
-                .getZkStateReader()
-                .getClusterState()
-                .getCollection(COLLECTION_NAME);
+        modColl = cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
         modSlice = modColl.getSlice(slice.getName());
         rightRep =
             modSlice
@@ -231,11 +234,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
 
     log.error(
         "Leaders are not all preferres {}",
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(COLLECTION_NAME));
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME));
     // Show the errors
     checkPreferredsAreLeaders(true);
   }
@@ -246,9 +245,8 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
   private void checkElectionQueues() throws KeeperException, InterruptedException {
 
     DocCollection docCollection =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
-    Set<String> liveNodes =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
+    Set<String> liveNodes = cluster.getSolrClient().getClusterState().getLiveNodes();
 
     for (Slice slice : docCollection.getSlices()) {
       Set<Replica> liveReplicas = new HashSet<>();
@@ -270,7 +268,6 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
 
     List<String> leaderQueue =
         cluster
-            .getSolrClient()
             .getZkStateReader()
             .getZkClient()
             .getChildren(
@@ -308,7 +305,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
       throws KeeperException, InterruptedException {
     forceUpdateCollectionStatus();
     DocCollection docCollection =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
     for (Slice slice : docCollection.getSlices()) {
       for (Replica rep : slice.getReplicas()) {
         if (rep.getBool("property.preferredleader", false)) {
@@ -363,12 +360,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
     DocCollection docCollection = null;
     while (timeout.hasTimedOut() == false) {
       forceUpdateCollectionStatus();
-      docCollection =
-          cluster
-              .getSolrClient()
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(COLLECTION_NAME);
+      docCollection = cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
       int maxPropCount = Integer.MAX_VALUE;
       int minPropCount = Integer.MIN_VALUE;
       for (Slice slice : docCollection.getSlices()) {
@@ -399,12 +391,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
     DocCollection docCollection = null;
     while (timeout.hasTimedOut() == false) {
       forceUpdateCollectionStatus();
-      docCollection =
-          cluster
-              .getSolrClient()
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(COLLECTION_NAME);
+      docCollection = cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
       failure = false;
       for (Map.Entry<String, String> ent : expectedShardReplicaMap.entrySet()) {
         Replica rep = docCollection.getSlice(ent.getKey()).getReplica(ent.getValue());
@@ -638,13 +625,8 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
     }
     while (timeout.hasTimedOut() == false) {
       forceUpdateCollectionStatus();
-      docCollection =
-          cluster
-              .getSolrClient()
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(COLLECTION_NAME);
-      liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+      docCollection = cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
+      liveNodes = cluster.getSolrClient().getClusterState().getLiveNodes();
       boolean expectedInactive = true;
 
       for (Slice slice : docCollection.getSlices()) {
@@ -677,13 +659,8 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
     while (timeout.hasTimedOut() == false) {
       forceUpdateCollectionStatus();
       DocCollection docCollection =
-          cluster
-              .getSolrClient()
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(COLLECTION_NAME);
-      Set<String> liveNodes =
-          cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+          cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
+      Set<String> liveNodes = cluster.getSolrClient().getClusterState().getLiveNodes();
       boolean allActive = true;
       for (Slice slice : docCollection.getSlices()) {
         for (Replica rep : slice.getReplicas()) {
@@ -707,14 +684,13 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
     // find all the live nodes for each slice, assign the leader to the first replica that is in the
     // lowest position on live_nodes
     List<String> liveNodes =
-        new ArrayList<>(
-            cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+        new ArrayList<>(cluster.getSolrClient().getClusterState().getLiveNodes());
     Collections.shuffle(liveNodes, random());
 
     Map<String, String> uniquePropMap = new TreeMap<>();
     forceUpdateCollectionStatus();
     DocCollection docCollection =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
     for (Slice slice : docCollection.getSlices()) {
       Replica changedRep = null;
       int livePos = Integer.MAX_VALUE;
@@ -755,12 +731,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
         "There should be exactly one replica with value "
             + prop
             + " set to true per shard: "
-            + cluster
-                .getSolrClient()
-                .getZkStateReader()
-                .getClusterState()
-                .getCollection(COLLECTION_NAME)
-                .toString());
+            + cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME).toString());
     return null; // keeps IDE happy.
   }
 
@@ -769,7 +740,7 @@ public class TestRebalanceLeaders extends SolrCloudTestCase {
       throws KeeperException, InterruptedException {
     forceUpdateCollectionStatus();
     DocCollection docCollection =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+        cluster.getSolrClient().getClusterState().getCollection(COLLECTION_NAME);
 
     for (Slice slice : docCollection.getSlices()) {
       int propfCount = 0;
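
In the TestRebalanceLeaders hunks above, the getZkStateReader().getClusterState()
chains are replaced by getClusterState() called directly on the CloudSolrClient.
A minimal sketch of the resulting call pattern, assuming the usual
SolrCloudTestCase cluster field and a hypothetical collection name:

    private int sliceCountSketch(MiniSolrCloudCluster cluster) throws Exception {
      // ClusterState now comes straight from the CloudSolrClient.
      ClusterState clusterState = cluster.getSolrClient().getClusterState();
      DocCollection docCollection = clusterState.getCollection("myCollection"); // hypothetical name
      return docCollection.getSlices().size();
    }
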
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRequestForwarding.java b/solr/core/src/test/org/apache/solr/cloud/TestRequestForwarding.java
index 85db70c..2c811f2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRequestForwarding.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRequestForwarding.java
@@ -22,7 +22,6 @@ import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.common.cloud.ZkStateReader;
 import org.junit.Test;
 
 @SuppressSSL
@@ -76,9 +75,8 @@ public class TestRequestForwarding extends SolrTestCaseJ4 {
     response = create.process(solrCluster.getSolrClient());
 
     if (response.getStatus() != 0 || response.getErrorMessages() != null) {
-      fail("Could not create collection. Response" + response.toString());
+      fail("Could not create collection. Response" + response);
     }
-    ZkStateReader zkStateReader = solrCluster.getSolrClient().getZkStateReader();
     solrCluster.waitForActiveCollection(name, 2, 2);
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java b/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java
index 7f817bc..9316101 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java
@@ -26,7 +26,6 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.schema.SchemaRequest.Field;
 import org.apache.solr.client.solrj.response.RequestStatusState;
 import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.CoreDescriptor;
 import org.junit.After;
 import org.junit.Before;
@@ -81,8 +80,6 @@ public class TestSegmentSorting extends SolrCloudTestCase {
     } else { // async
       assertEquals(RequestStatusState.COMPLETED, cmd.processAndWait(cloudSolrClient, 30));
     }
-
-    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
     cluster.waitForActiveCollection(collectionName, NUM_SHARDS, NUM_SHARDS * REPLICATION_FACTOR);
 
     cloudSolrClient.setDefaultCollection(collectionName);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java b/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java
index 73b5351..c34007e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestShortCircuitedRequests.java
@@ -37,12 +37,7 @@ public class TestShortCircuitedRequests extends AbstractFullDistribZkTestBase {
     waitForRecoveriesToFinish(false);
     assertEquals(
         4,
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(DEFAULT_COLLECTION)
-            .getSlices()
-            .size());
+        cloudClient.getClusterStateProvider().getCollection(DEFAULT_COLLECTION).getSlices().size());
     index("id", "a!doc1"); // shard3
     index("id", "b!doc1"); // shard1
     index("id", "c!doc1"); // shard2
@@ -53,11 +48,7 @@ public class TestShortCircuitedRequests extends AbstractFullDistribZkTestBase {
 
     // query shard3 directly with _route_=a! so that we trigger the short circuited request path
     Replica shard3 =
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(DEFAULT_COLLECTION)
-            .getLeader("shard3");
+        cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION).getLeader("shard3");
     String nodeName = shard3.getNodeName();
     SolrClient shard3Client = getClient(nodeName);
     QueryResponse response =
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java b/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java
index 5b0f1a9..1869ced 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSkipOverseerOperations.java
@@ -86,8 +86,7 @@ public class TestSkipOverseerOperations extends SolrCloudTestCase {
         .process(cluster.getSolrClient());
     cluster.waitForActiveCollection("collection1", 2, 2);
 
-    ZkStateReader reader = cluster.getSolrClient().getZkStateReader();
-
+    ZkStateReader reader = cluster.getZkStateReader();
     List<String> nodes = new ArrayList<>();
     for (JettySolrRunner solrRunner : notOverseerNodes) {
       nodes.add(solrRunner.getNodeName());
@@ -173,8 +172,7 @@ public class TestSkipOverseerOperations extends SolrCloudTestCase {
 
     cluster.waitForActiveCollection(collection, 2, 4);
 
-    ZkStateReader reader = cluster.getSolrClient().getZkStateReader();
-
+    ZkStateReader reader = cluster.getZkStateReader();
     List<String> nodes = new ArrayList<>();
     for (JettySolrRunner solrRunner : notOverseerNodes) {
       nodes.add(solrRunner.getNodeName());
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestStressCloudBlindAtomicUpdates.java b/solr/core/src/test/org/apache/solr/cloud/TestStressCloudBlindAtomicUpdates.java
index b5e1dc9..306c536 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestStressCloudBlindAtomicUpdates.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestStressCloudBlindAtomicUpdates.java
@@ -47,6 +47,7 @@ import org.apache.solr.client.solrj.response.schema.SchemaResponse.FieldTypeResp
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.SolrInputField;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ExecutorUtil;
@@ -524,9 +525,9 @@ public class TestStressCloudBlindAtomicUpdates extends SolrCloudTestCase {
 
   public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
     assert null != client.getDefaultCollection();
-    client.getZkStateReader().forceUpdateCollection(client.getDefaultCollection());
+    ZkStateReader.from(client).forceUpdateCollection(client.getDefaultCollection());
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        client.getDefaultCollection(), client.getZkStateReader(), true, true, 330);
+        client.getDefaultCollection(), ZkStateReader.from(client), true, true, 330);
   }
 
   /**
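
Where a test still needs the ZkStateReader itself (here, to force a collection
refresh), it is now obtained through ZkStateReader.from(client) rather than from
a getter on the client. A rough sketch of that pattern; the wrapper method is
illustrative only:

    private void forceRefreshSketch(CloudSolrClient client, String collectionName) throws Exception {
      // ZK-level operations hang off the reader returned by ZkStateReader.from(...).
      ZkStateReader zkStateReader = ZkStateReader.from(client);
      zkStateReader.forceUpdateCollection(collectionName);
    }
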
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java b/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
index c1950e5..1052241 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
@@ -674,9 +674,9 @@ public class TestStressInPlaceUpdates extends AbstractFullDistribZkTestBase {
    * SOLR-8733.
    */
   public SolrClient getClientForLeader() throws KeeperException, InterruptedException {
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
+    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
+    ClusterState clusterState = zkStateReader.getClusterState();
     Replica leader = null;
     Slice shard1 = clusterState.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1);
     leader = shard1.getLeader();
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestStressLiveNodes.java b/solr/core/src/test/org/apache/solr/cloud/TestStressLiveNodes.java
index 8feb6cf..00d57f3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestStressLiveNodes.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestStressLiveNodes.java
@@ -109,7 +109,7 @@ public class TestStressLiveNodes extends SolrCloudTestCase {
     ArrayList<String> result = null;
 
     for (int i = 0; i < 10; i++) {
-      result = new ArrayList<>(CLOUD_CLIENT.getZkStateReader().getClusterState().getLiveNodes());
+      result = new ArrayList<>(cluster.getSolrClient().getClusterState().getLiveNodes());
       if (expectedCount != result.size()) {
         if (log.isInfoEnabled()) {
           log.info(
@@ -145,7 +145,7 @@ public class TestStressLiveNodes extends SolrCloudTestCase {
 
       // only here do we forcibly update the cached live nodes so we don't have to wait for it to
       // catch up with all the ephemeral nodes that vanished after the last iteration
-      CLOUD_CLIENT.getZkStateReader().updateLiveNodes();
+      cluster.getZkStateReader().updateLiveNodes();
 
       // sanity check that our Cloud Client's local state knows about the 1 (real) live node in our
       // cluster
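
The TestStressLiveNodes hunks above also take the ZkStateReader from the test
cluster itself, so refreshing the cached live nodes no longer goes through the
solr client. A small sketch of that combination; the helper name is made up:

    private int liveNodeCountSketch(MiniSolrCloudCluster cluster) throws Exception {
      // Force a refresh of the cached live nodes via the cluster's reader, then
      // read them back through the client's cluster state.
      cluster.getZkStateReader().updateLiveNodes();
      return cluster.getSolrClient().getClusterState().getLiveNodes().size();
    }
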
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplayVsRecovery.java b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplayVsRecovery.java
index 6a79f64..29df9d3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplayVsRecovery.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplayVsRecovery.java
@@ -183,25 +183,24 @@ public class TestTlogReplayVsRecovery extends SolrCloudTestCase {
     expectThrows(
         TimeoutException.class,
         "Did not time out waiting for new leader, out of sync replica became leader",
-        () -> {
-          cluster
-              .getSolrClient()
-              .waitForState(
-                  COLLECTION,
-                  10,
-                  TimeUnit.SECONDS,
-                  (state) -> {
-                    Replica newLeader = state.getSlice("shard1").getLeader();
-                    if (newLeader != null
-                        && !newLeader.getName().equals(leader.getName())
-                        && newLeader.getState() == Replica.State.ACTIVE) {
-                      // this is is the bad case, our "bad" state was found before timeout
-                      log.error("WTF: New Leader={}", newLeader);
-                      return true;
-                    }
-                    return false; // still no bad state, wait for timeout
-                  });
-        });
+        () ->
+            cluster
+                .getZkStateReader()
+                .waitForState(
+                    COLLECTION,
+                    10,
+                    TimeUnit.SECONDS,
+                    (state) -> {
+                      Replica newLeader = state.getSlice("shard1").getLeader();
+                      if (newLeader != null
+                          && !newLeader.getName().equals(leader.getName())
+                          && newLeader.getState() == Replica.State.ACTIVE) {
+                        // this is is the bad case, our "bad" state was found before timeout
+                        log.error("WTF: New Leader={}", newLeader);
+                        return true;
+                      }
+                      return false; // still no bad state, wait for timeout
+                    }));
 
     log.info("Enabling TestInjection.updateLogReplayRandomPause");
     TestInjection.updateLogReplayRandomPause = "true:100";
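
In the TestTlogReplayVsRecovery hunk above, waitForState moves from the solr
client to the cluster's ZkStateReader, with the predicate still receiving the
DocCollection. A condensed sketch of that call shape; collection and shard names
are hypothetical:

    private void waitForLeaderSketch(MiniSolrCloudCluster cluster) throws Exception {
      // Wait until shard1 of the (hypothetical) collection reports a leader.
      cluster
          .getZkStateReader()
          .waitForState(
              "myCollection",
              10,
              TimeUnit.SECONDS,
              (state) ->
                  state != null
                      && state.getSlice("shard1") != null
                      && state.getSlice("shard1").getLeader() != null);
    }
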
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
index 6319e8a..074d833 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
@@ -121,12 +121,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
         jetty.start();
       }
     }
-    if (cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .getCollectionOrNull(collectionName)
-        != null) {
+    if (cluster.getSolrClient().getClusterState().getCollectionOrNull(collectionName) != null) {
       log.info("tearDown deleting collection");
       CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
       waitForDeletion(collectionName);
@@ -773,8 +768,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
     new UpdateRequest().deleteByQuery("*:*").commit(cluster.getSolrClient(), collectionName);
 
     // Find a replica which isn't leader
-    DocCollection docCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection docCollection = cloudClient.getClusterState().getCollection(collectionName);
     Slice slice = docCollection.getSlices().iterator().next();
     Replica newLeader = null;
     for (Replica replica : slice.getReplicas()) {
@@ -800,11 +794,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
     TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
     while (!timeout.hasTimedOut()) {
       Map<String, Slice> slices =
-          cloudClient
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(collectionName)
-              .getSlicesMap();
+          cloudClient.getClusterState().getCollection(collectionName).getSlicesMap();
       Replica me = slices.get(slice.getName()).getReplica(newLeader.getName());
       if (me.getBool("property.preferredleader", false)) {
         break;
@@ -829,8 +819,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
       Replica leader = docCollection.getSlice(slice.getName()).getLeader();
       if (leader != null
           && leader.getName().equals(newLeader.getName())
-          && leader.isActive(
-              cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes())) {
+          && leader.isActive(cloudClient.getClusterState().getLiveNodes())) {
         break;
       }
       Thread.sleep(100);
@@ -855,8 +844,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
         (liveNodes, collectionState) -> {
           Replica leader = collectionState.getLeader(shardName);
           if (leader == null
-              || !leader.isActive(
-                  cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes())) {
+              || !leader.isActive(cluster.getSolrClient().getClusterState().getLiveNodes())) {
             return false;
           }
           return oldLeaderJetty == null
@@ -940,7 +928,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
 
   private String getBaseUrl() {
     DocCollection collection =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName);
+        cluster.getSolrClient().getClusterState().getCollection(collectionName);
     Slice slice = collection.getSlice("shard1");
     return slice.getLeader().getCoreUrl();
   }
@@ -994,8 +982,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
       throws IOException, SolrServerException, InterruptedException {
     TimeOut t = new TimeOut(timeout, TimeUnit.SECONDS, TimeSource.NANO_TIME);
     for (Replica r : replicas) {
-      if (!r.isActive(
-          cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes())) {
+      if (!r.isActive(cluster.getSolrClient().getClusterState().getLiveNodes())) {
         continue;
       }
       try (HttpSolrClient replicaClient = getHttpSolrClient(r.getCoreUrl())) {
@@ -1020,13 +1007,13 @@ public class TestTlogReplica extends SolrCloudTestCase {
 
   private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
     TimeOut t = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
+    while (cluster.getSolrClient().getClusterState().hasCollection(collection)) {
       try {
         Thread.sleep(100);
         if (t.hasTimedOut()) {
           fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
+        cluster.getZkStateReader().forceUpdateCollection(collection);
       } catch (SolrException e) {
         return;
       }
@@ -1041,7 +1028,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
       boolean activeOnly)
       throws KeeperException, InterruptedException {
     if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
+      cluster.getZkStateReader().forceUpdateCollection(collectionName);
     }
     DocCollection docCollection = getCollectionState(collectionName);
     assertNotNull(docCollection);
@@ -1115,12 +1102,11 @@ public class TestTlogReplica extends SolrCloudTestCase {
     };
   }
 
-  private List<SolrCore> getSolrCore(boolean isLeader) {
+  private List<SolrCore> getSolrCore(boolean isLeader) throws IOException {
     List<SolrCore> rs = new ArrayList<>();
 
     CloudSolrClient cloudClient = cluster.getSolrClient();
-    DocCollection docCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection docCollection = cloudClient.getClusterState().getCollection(collectionName);
 
     for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
       if (solrRunner.getCoreContainer() == null) continue;
@@ -1155,11 +1141,10 @@ public class TestTlogReplica extends SolrCloudTestCase {
     }
   }
 
-  private List<JettySolrRunner> getSolrRunner(boolean isLeader) {
+  private List<JettySolrRunner> getSolrRunner(boolean isLeader) throws IOException {
     List<JettySolrRunner> rs = new ArrayList<>();
     CloudSolrClient cloudClient = cluster.getSolrClient();
-    DocCollection docCollection =
-        cloudClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection docCollection = cloudClient.getClusterState().getCollection(collectionName);
     for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
       if (solrRunner.getCoreContainer() == null) continue;
       for (SolrCore solrCore : solrRunner.getCoreContainer().getCores()) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java b/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
index 7806985..a6d3bef 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
@@ -117,7 +117,7 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
 
     cluster.waitForActiveCollection(COLLECTION_NAME, NUM_SHARDS, REPLICATION_FACTOR * NUM_SHARDS);
 
-    ZkStateReader zkStateReader = CLOUD_CLIENT.getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     // really hackish way to get a URL for specific nodes based on shard/replica hosting
     // inspired by TestMiniSolrCloudCluster
     HashMap<String, String> urlMap = new HashMap<>();
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestWaitForStateWithJettyShutdowns.java b/solr/core/src/test/org/apache/solr/cloud/TestWaitForStateWithJettyShutdowns.java
index 091555c..645e262 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestWaitForStateWithJettyShutdowns.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestWaitForStateWithJettyShutdowns.java
@@ -49,7 +49,7 @@ public class TestWaitForStateWithJettyShutdowns extends SolrTestCaseJ4 {
           .process(cluster.getSolrClient());
 
       log.info("Sanity check that our collection has come online");
-      cluster.getSolrClient().waitForState(col_name, 30, TimeUnit.SECONDS, clusterShape(1, 1));
+      cluster.getZkStateReader().waitForState(col_name, 30, TimeUnit.SECONDS, clusterShape(1, 1));
 
       log.info("Shutdown 1 node");
       final JettySolrRunner nodeToStop = cluster.getJettySolrRunner(0);
@@ -62,7 +62,7 @@ public class TestWaitForStateWithJettyShutdowns extends SolrTestCaseJ4 {
       // down)...
       log.info("Now check if waitForState will recognize we already have the exepcted state");
       cluster
-          .getSolrClient()
+          .getZkStateReader()
           .waitForState(col_name, 500, TimeUnit.MILLISECONDS, clusterShape(1, 0));
 
     } finally {
@@ -84,7 +84,7 @@ public class TestWaitForStateWithJettyShutdowns extends SolrTestCaseJ4 {
 
       log.info("Sanity check that our collection has come online");
       cluster
-          .getSolrClient()
+          .getZkStateReader()
           .waitForState(col_name, 30, TimeUnit.SECONDS, SolrCloudTestCase.clusterShape(1, 1));
 
       // HACK implementation detail...
@@ -102,7 +102,7 @@ public class TestWaitForStateWithJettyShutdowns extends SolrTestCaseJ4 {
               () -> {
                 try {
                   cluster
-                      .getSolrClient()
+                      .getZkStateReader()
                       .waitForState(
                           col_name,
                           180,
@@ -115,7 +115,6 @@ public class TestWaitForStateWithJettyShutdowns extends SolrTestCaseJ4 {
                 return;
               },
               null);
-
       log.info("Awaiting latch...");
       if (!latch.await(120, TimeUnit.SECONDS)) {
         fail(
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkFailoverTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkFailoverTest.java
index b914665..0f04dd3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkFailoverTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkFailoverTest.java
@@ -96,7 +96,7 @@ public class ZkFailoverTest extends SolrCloudTestCase {
   }
 
   private void waitForLiveNodes(int numNodes) throws InterruptedException, KeeperException {
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     for (int i = 0; i < 100; i++) {
       zkStateReader.updateLiveNodes();
       if (zkStateReader.getClusterState().getLiveNodes().size() == numNodes) return;
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
index 6f70e73..cb2ee8e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
@@ -49,10 +49,7 @@ public class CollectionReloadTest extends SolrCloudTestCase {
         .process(cluster.getSolrClient());
 
     Replica leader =
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getLeaderRetry(testCollectionName, "shard1", DEFAULT_TIMEOUT);
+        cluster.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", DEFAULT_TIMEOUT);
 
     long coreStartTime = getCoreStatus(leader).getCoreStartTime().getTime();
     CollectionAdminRequest.reloadCollection(testCollectionName).process(cluster.getSolrClient());
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
index 79c9d40..4707f2a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
@@ -143,7 +143,7 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
             .processAndWait(client, MAX_TIMEOUT_SECONDS);
     assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
 
-    client.getZkStateReader().forceUpdateCollection(collection);
+    cluster.getZkStateReader().forceUpdateCollection(collection);
 
     // Add a doc to shard2 to make sure shard2 was created properly
     SolrInputDocument doc = new SolrInputDocument();
@@ -166,7 +166,7 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
     assertSame("AddReplica did not complete", RequestStatusState.COMPLETED, state);
 
     // cloudClient watch might take a couple of seconds to reflect it
-    client
+    cluster
         .getZkStateReader()
         .waitForState(
             collection,
@@ -207,9 +207,9 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
     }
 
     Slice shard1 =
-        client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
+        cluster.getSolrClient().getClusterState().getCollection(collection).getSlice("shard1");
     Replica replica = shard1.getReplicas().iterator().next();
-    for (String liveNode : client.getZkStateReader().getClusterState().getLiveNodes()) {
+    for (String liveNode : cluster.getSolrClient().getClusterState().getLiveNodes()) {
       if (!replica.getNodeName().equals(liveNode)) {
         state =
             new CollectionAdminRequest.MoveReplica(collection, replica.getName(), liveNode)
@@ -218,10 +218,9 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
         break;
       }
     }
-    client.getZkStateReader().forceUpdateCollection(collection);
+    cluster.getZkStateReader().forceUpdateCollection(collection);
 
-    shard1 =
-        client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
+    shard1 = cluster.getSolrClient().getClusterState().getCollection(collection).getSlice("shard1");
     String replicaName = shard1.getReplicas().iterator().next().getName();
     state =
         CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
index 60a87a0..0ae66ab 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
@@ -26,10 +26,7 @@ import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.*;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.zookeeper.KeeperException;
@@ -59,7 +56,7 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas
     ClusterState clusterState = null;
     Replica replica = null;
     for (int idx = 0; idx < 300; ++idx) {
-      clusterState = client.getZkStateReader().getClusterState();
+      clusterState = client.getClusterState();
       final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
       replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
       if (replica == null) {
@@ -97,7 +94,7 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas
 
     // Keep trying while Overseer writes the ZK state for up to 30 seconds.
     for (int idx = 0; idx < 300; ++idx) {
-      clusterState = client.getZkStateReader().getClusterState();
+      clusterState = client.getClusterState();
       final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
       replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
       if (replica == null) {
@@ -142,7 +139,7 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas
 
     DocCollection col = null;
     for (int idx = 0; idx < 300; ++idx) {
-      ClusterState clusterState = client.getZkStateReader().getClusterState();
+      ClusterState clusterState = client.getClusterState();
 
       col = clusterState.getCollection(collectionName);
       if (col == null) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
index 0068753..4b5623b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -46,20 +46,9 @@ import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.cloud.AbstractDistribZkTestBase;
-import org.apache.solr.cloud.BasicDistributedZkTest;
-import org.apache.solr.cloud.SolrCloudTestCase;
-import org.apache.solr.cloud.StoppableIndexingThread;
+import org.apache.solr.cloud.*;
 import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.HashBasedRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.cloud.*;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -130,10 +119,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     waitForThingsToLevelOut(15, TimeUnit.SECONDS);
 
     DocCollection defCol =
-        cloudClient
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        cloudClient.getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
     Replica replica = defCol.getReplicas().get(0);
     String nodeName = replica.getNodeName();
 
@@ -144,8 +130,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     create.setCreateNodeSet(nodeName);
     create.process(cloudClient);
 
-    cloudClient.waitForState(
-        collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 1));
+    ZkStateReader.from(cloudClient)
+        .waitForState(
+            collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 1));
 
     try (CloudSolrClient client =
         getCloudSolrClient(
@@ -172,8 +159,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
           waitForRecoveriesToFinish(collectionName, true);
           // let's wait to see parent shard become inactive
           CountDownLatch latch = new CountDownLatch(1);
-          client
-              .getZkStateReader()
+          ZkStateReader.from(client)
               .registerCollectionStateWatcher(
                   collectionName,
                   (liveNodes, collectionState) -> {
@@ -196,7 +182,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
             fail("Sub-shards did not become active even after waiting for 1 minute");
           }
 
-          int liveNodeCount = client.getZkStateReader().getClusterState().getLiveNodes().size();
+          int liveNodeCount = client.getClusterState().getLiveNodes().size();
 
           // restart the sub-shard leader node
           String stoppedNodeName = null;
@@ -216,8 +202,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
             fail("We could not find a jetty to kill for replica: " + replica.getCoreUrl());
           }
 
-          cloudClient
-              .getZkStateReader()
+          ZkStateReader.from(client)
               .waitForLiveNodes(
                   30, TimeUnit.SECONDS, SolrCloudTestCase.containsLiveNode(stoppedNodeName));
 
@@ -228,7 +213,6 @@ public class ShardSplitTest extends BasicDistributedZkTest {
           // this is to avoid flakiness of test because of NoHttpResponseExceptions
           String control_collection =
               client
-                  .getZkStateReader()
                   .getClusterState()
                   .getCollection("control_collection")
                   .getReplicas()
@@ -241,13 +225,16 @@ public class ShardSplitTest extends BasicDistributedZkTest {
             state = addReplica.processAndWait(control, 30);
           }
 
-          cloudClient.waitForState(
-              collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(2, 4));
+          ZkStateReader.from(cloudClient)
+              .waitForState(
+                  collectionName,
+                  (long) 30,
+                  TimeUnit.SECONDS,
+                  SolrCloudTestCase.activeClusterShape(2, 4));
 
           if (state == RequestStatusState.COMPLETED) {
             CountDownLatch newReplicaLatch = new CountDownLatch(1);
-            client
-                .getZkStateReader()
+            ZkStateReader.from(client)
                 .registerCollectionStateWatcher(
                     collectionName,
                     (liveNodes, collectionState) -> {
@@ -268,7 +255,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
             newReplicaLatch.await(30, TimeUnit.SECONDS);
             // check consistency of sub-shard replica explicitly because checkShardConsistency
             // methods doesn't handle new shards/replica so well.
-            ClusterState clusterState = client.getZkStateReader().getClusterState();
+            ClusterState clusterState = client.getClusterState();
             DocCollection collection = clusterState.getCollection(collectionName);
             int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
             assertEquals(
@@ -351,7 +338,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     }
 
     // assert that sub-shards cores exist and sub-shard is in construction state
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     zkStateReader.forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
     ClusterState state = zkStateReader.getClusterState();
     DocCollection collection = state.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
@@ -410,9 +397,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
             collectionName, "conf1", 1, 2, 0, 2); // TODO tlog replicas disabled right now.
     create.process(cloudClient);
 
-    cloudClient.waitForState(
-        collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 4));
-
+    ZkStateReader.from(cloudClient)
+        .waitForState(
+            collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 4));
     waitForRecoveriesToFinish(collectionName, false);
 
     for (int i = 0; i < 100; i++) {
@@ -427,11 +414,12 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     CollectionAdminResponse rsp = splitShard.process(cloudClient);
     waitForThingsToLevelOut(30, TimeUnit.SECONDS);
 
-    cloudClient.waitForState(
-        collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(2, 12));
+    ZkStateReader.from(cloudClient)
+        .waitForState(
+            collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(2, 12));
 
-    cloudClient.getZkStateReader().forceUpdateCollection(collectionName);
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ZkStateReader.from(cloudClient).forceUpdateCollection(collectionName);
+    ClusterState clusterState = cloudClient.getClusterState();
     DocCollection coll = clusterState.getCollection(collectionName);
     log.info("coll: {}", coll);
 
@@ -505,7 +493,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     AtomicBoolean killed = new AtomicBoolean(false);
     Runnable monkey =
         () -> {
-          ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+          ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
           zkStateReader.registerCollectionStateWatcher(
               AbstractDistribZkTestBase.DEFAULT_COLLECTION,
               (liveNodes, collectionState) -> {
@@ -580,8 +568,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
         log.info("Starting shard1 leader jetty at port {}", cjetty.jetty.getLocalPort());
       }
       cjetty.jetty.start();
-      cloudClient
-          .getZkStateReader()
+      ZkStateReader.from(cloudClient)
           .forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
       if (log.isInfoEnabled()) {
         log.info(
@@ -596,8 +583,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
         waitForRecoveriesToFinish(AbstractDistribZkTestBase.DEFAULT_COLLECTION, true);
         // let's wait for the overseer to switch shard states
         CountDownLatch latch = new CountDownLatch(1);
-        cloudClient
-            .getZkStateReader()
+        ZkStateReader.from(cloudClient)
             .registerCollectionStateWatcher(
                 AbstractDistribZkTestBase.DEFAULT_COLLECTION,
                 (liveNodes, collectionState) -> {
@@ -648,7 +634,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
       // check consistency of sub-shard replica explicitly because checkShardConsistency methods
       // doesn't handle new shards/replica so well.
       if (areSubShardsActive.get()) {
-        ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+        ClusterState clusterState = cloudClient.getClusterState();
         DocCollection collection =
             clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
         int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
@@ -676,9 +662,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
         CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2);
     create.process(cloudClient);
 
-    cloudClient.waitForState(
-        collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 2));
-
+    ZkStateReader.from(cloudClient)
+        .waitForState(
+            collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 2));
     waitForRecoveriesToFinish(collectionName, false);
 
     TestInjection.splitLatch = new CountDownLatch(1); // simulate a long split operation
@@ -700,7 +686,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
       TimeOut timeOut = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
       while (!timeOut.hasTimedOut()) {
         timeOut.sleep(500);
-        if (cloudClient.getZkStateReader().getZkClient().exists(path, true)) {
+        if (ZkStateReader.from(cloudClient).getZkClient().exists(path, true)) {
           log.info("=== found lock node");
           break;
         }
@@ -718,13 +704,13 @@ public class ShardSplitTest extends BasicDistributedZkTest {
       // make sure the lock still exists
       assertTrue(
           "lock znode expected but missing",
-          cloudClient.getZkStateReader().getZkClient().exists(path, true));
+          ZkStateReader.from(cloudClient).getZkClient().exists(path, true));
       // let the first split proceed
       TestInjection.splitLatch.countDown();
       timeOut = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
       while (!timeOut.hasTimedOut()) {
         timeOut.sleep(500);
-        if (!cloudClient.getZkStateReader().getZkClient().exists(path, true)) {
+        if (!ZkStateReader.from(cloudClient).getZkClient().exists(path, true)) {
           break;
         }
       }
@@ -757,8 +743,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     assertEquals(0, response.getStatus());
 
     try {
-      cloudClient.waitForState(
-          collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 2));
+      ZkStateReader.from(cloudClient)
+          .waitForState(
+              collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 2));
     } catch (TimeoutException e) {
       new RuntimeException("Timeout waiting for 1shards and 2 replicas.", e);
     }
@@ -772,7 +759,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
   }
 
   private void incompleteOrOverlappingCustomRangeTest() throws Exception {
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ClusterState clusterState = cloudClient.getClusterState();
     final DocRouter router =
         clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
     Slice shard1 =
@@ -819,7 +806,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
   }
 
   private void splitByUniqueKeyTest() throws Exception {
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ClusterState clusterState = cloudClient.getClusterState();
     final DocRouter router =
         clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
     Slice shard1 =
@@ -840,10 +827,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     final int[] docCounts = new int[ranges.size()];
     int numReplicas = shard1.getReplicas().size();
 
-    cloudClient
-        .getZkStateReader()
+    ZkStateReader.from(cloudClient)
         .forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-    clusterState = cloudClient.getZkStateReader().getClusterState();
+    clusterState = cloudClient.getClusterState();
     if (log.isDebugEnabled()) {
       log.debug(
           "-- COLLECTION: {}",
@@ -947,13 +933,12 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
     waitForRecoveriesToFinish(false);
 
-    String url =
-        getUrlFromZk(
-            getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+    getCommonCloudSolrClient();
+    String url = getUrlFromZk(cloudClient.getClusterState(), collectionName);
 
     try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
 
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+      ClusterState clusterState = cloudClient.getClusterState();
       final DocRouter router = clusterState.getCollection(collectionName).getRouter();
       Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
       DocRouter.Range shard1Range =
@@ -1019,15 +1004,14 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
     waitForRecoveriesToFinish(false);
 
-    String url =
-        getUrlFromZk(
-            getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+    getCommonCloudSolrClient();
+    String url = getUrlFromZk(cloudClient.getClusterState(), collectionName);
 
     try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
 
       String splitKey = "b!";
 
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+      ClusterState clusterState = cloudClient.getClusterState();
       final DocRouter router = clusterState.getCollection(collectionName).getRouter();
       Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
       DocRouter.Range shard1Range =
@@ -1131,11 +1115,11 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
   protected void checkDocCountsAndShardStates(
       int[] docCounts, int numReplicas, Set<String> documentIds) throws Exception {
-    ClusterState clusterState = null;
+    ClusterState clusterState;
     Slice slice1_0 = null, slice1_1 = null;
     int i = 0;
     for (i = 0; i < 10; i++) {
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       clusterState = zkStateReader.getClusterState();
       slice1_0 =
           clusterState
@@ -1201,7 +1185,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
     query.set("distrib", false);
 
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ClusterState clusterState = cloudClient.getClusterState();
     Slice slice =
         clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(shard);
     long[] numFound = new long[slice.getReplicasMap().size()];
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
index 4a1da18..948e901 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
@@ -43,7 +43,7 @@ public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestB
   public void testCreateAndDeleteThenCreateAgain() throws Exception {
     String overseerNode =
         OverseerCollectionConfigSetProcessor.getLeaderNode(
-            cloudClient.getZkStateReader().getZkClient());
+            ZkStateReader.from(cloudClient).getZkClient());
     String notOverseerNode = null;
     for (CloudJettyRunner cloudJetty : cloudJettys) {
       if (!overseerNode.equals(cloudJetty.nodeName)) {
@@ -117,8 +117,7 @@ public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestB
     if (request.get("success") != null) {
       // collection exists now
       assertTrue(
-          cloudClient
-              .getZkStateReader()
+          ZkStateReader.from(cloudClient)
               .getZkClient()
               .exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
 
@@ -231,6 +230,6 @@ public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestB
   }
 
   public SolrZkClient getZkClient() {
-    return cloudClient.getZkStateReader().getZkClient();
+    return ZkStateReader.from(cloudClient).getZkClient();
   }
 }
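
SimpleCollectionCreateDeleteTest above keeps its low-level ZooKeeper checks but
reaches the SolrZkClient through ZkStateReader.from(...) as well. A sketch of
that lookup, with the znode existence check copied from the hunk:

    private boolean collectionZnodeExistsSketch(CloudSolrClient cloudClient, String collectionName)
        throws Exception {
      // The SolrZkClient is reached via the reader rather than a client getter.
      SolrZkClient zkClient = ZkStateReader.from(cloudClient).getZkClient();
      return zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false);
    }
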
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/SplitByPrefixTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/SplitByPrefixTest.java
index 8a999a8..20a5982 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/SplitByPrefixTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/SplitByPrefixTest.java
@@ -28,10 +28,7 @@ import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.*;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -202,8 +199,7 @@ public class SplitByPrefixTest extends SolrCloudTestCase {
     // We can use the router to find the shards for the middle prefixes and they should be
     // different.
 
-    DocCollection collection =
-        client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+    DocCollection collection = client.getClusterState().getCollection(COLLECTION_NAME);
     Collection<Slice> slices1 =
         collection
             .getRouter()
@@ -242,7 +238,7 @@ public class SplitByPrefixTest extends SolrCloudTestCase {
         COLLECTION_NAME,
         activeClusterShape(4, 7));
 
-    collection = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+    collection = client.getClusterState().getCollection(COLLECTION_NAME);
     slices1 =
         collection.getRouter().getSearchSlicesSingle(uniquePrefixes.get(0).key, null, collection);
     slices2 =
@@ -270,7 +266,7 @@ public class SplitByPrefixTest extends SolrCloudTestCase {
         COLLECTION_NAME,
         activeClusterShape(5, 9));
 
-    collection = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+    collection = client.getClusterState().getCollection(COLLECTION_NAME);
     slices1 =
         collection.getRouter().getSearchSlicesSingle(uniquePrefixes.get(0).key, null, collection);
     slice1 = slices1.iterator().next();
@@ -293,7 +289,7 @@ public class SplitByPrefixTest extends SolrCloudTestCase {
         COLLECTION_NAME,
         activeClusterShape(6, 11));
 
-    collection = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
+    collection = client.getClusterState().getCollection(COLLECTION_NAME);
     slices1 =
         collection.getRouter().getSearchSlicesSingle(uniquePrefixes.get(0).key, null, collection);
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
index b5347e1..f7e1667 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
@@ -82,8 +82,8 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       createCollection(null, COLLECTION_NAME1, 1, 1, client, null, "conf1");
     }
 
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME1, 1);
+    waitForCollection(ZkStateReader.from(cloudClient), COLLECTION_NAME, 2);
+    waitForCollection(ZkStateReader.from(cloudClient), COLLECTION_NAME1, 1);
     waitForRecoveriesToFinish(COLLECTION_NAME, false);
     waitForRecoveriesToFinish(COLLECTION_NAME1, false);
 
@@ -200,7 +200,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
           CollectionAdminRequest.createCollection("test_repFactorColl", "conf1", 1, 3, 0, 0);
       client.request(req);
 
-      waitForCollection(cloudClient.getZkStateReader(), "test_repFactorColl", 1);
+      waitForCollection(ZkStateReader.from(client), "test_repFactorColl", 1);
       waitForRecoveriesToFinish("test_repFactorColl", false);
 
       // Assert that replicationFactor has also been set to 3
@@ -225,7 +225,10 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
     final String collection = "deleted_collection";
     try (CloudSolrClient client = createCloudClient(null)) {
       copyConfigUp(
-          TEST_PATH().resolve("configsets"), "cloud-minimal", configSet, client.getZkHost());
+          TEST_PATH().resolve("configsets"),
+          "cloud-minimal",
+          configSet,
+          client.getClusterStateProvider().getQuorumHosts());
 
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.CREATE.toString());
@@ -238,13 +241,13 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
 
       client.request(request);
 
-      waitForCollection(cloudClient.getZkStateReader(), collection, 1);
+      waitForCollection(ZkStateReader.from(client), collection, 1);
       waitForRecoveriesToFinish(collection, false);
 
       // Now try deleting the configset and doing a clusterstatus.
       String parent = ZkConfigSetService.CONFIGS_ZKNODE + "/" + configSet;
-      deleteThemAll(client.getZkStateReader().getZkClient(), parent);
-      client.getZkStateReader().forciblyRefreshAllClusterStateSlow();
+      deleteThemAll(ZkStateReader.from(client).getZkClient(), parent);
+      ZkStateReader.from(client).forciblyRefreshAllClusterStateSlow();
 
       final CollectionAdminRequest.ClusterStatus req = CollectionAdminRequest.getClusterStatus();
       NamedList<?> rsp = client.request(req);
@@ -367,7 +370,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       JettySolrRunner jetty = chaosMonkey.getShard("shard1", 0);
       String nodeName = jetty.getNodeName();
       jetty.stop();
-      ZkStateReader zkStateReader = client.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(client);
       zkStateReader.waitForState(
           COLLECTION_NAME,
           30,
@@ -396,11 +399,9 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       // to the restarted server.
       // If this is the case, it will throw an HTTP Exception, and we don't retry Admin requests.
       try (CloudSolrClient newClient = createCloudClient(null)) {
-        newClient
-            .getZkStateReader()
+        ZkStateReader.from(newClient)
             .waitForLiveNodes(30, TimeUnit.SECONDS, (o, n) -> n != null && n.contains(nodeName));
-        newClient
-            .getZkStateReader()
+        ZkStateReader.from(newClient)
             .waitForState(
                 COLLECTION_NAME,
                 30,
@@ -656,7 +657,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
   private void clusterStatusRolesTest() throws Exception {
     try (CloudSolrClient client = createCloudClient(null)) {
       client.connect();
-      Replica replica = client.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
+      Replica replica = ZkStateReader.from(client).getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
 
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set("action", CollectionParams.CollectionAction.ADDROLE.toString());
@@ -707,7 +708,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
     try (CloudSolrClient client = createCloudClient(null)) {
       client.connect();
       Map<String, Slice> slices =
-          client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
+          client.getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
       List<String> sliceList = new ArrayList<>(slices.keySet());
       String c1_s1 = sliceList.get(0);
       List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
@@ -718,12 +719,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       replicasList = new ArrayList<>(slices.get(c1_s2).getReplicasMap().keySet());
       String c1_s2_r1 = replicasList.get(0);
 
-      slices =
-          client
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(COLLECTION_NAME1)
-              .getSlicesMap();
+      slices = client.getClusterState().getCollection(COLLECTION_NAME1).getSlicesMap();
       sliceList = new ArrayList<>(slices.keySet());
       String c2_s1 = sliceList.get(0);
       replicasList = new ArrayList<>(slices.get(c2_s1).getReplicasMap().keySet());
@@ -1213,8 +1209,8 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       CloudSolrClient client, String collectionName, String replicaName, String... props)
       throws KeeperException, InterruptedException {
 
-    client.getZkStateReader().forceUpdateCollection(collectionName);
-    ClusterState clusterState = client.getZkStateReader().getClusterState();
+    ZkStateReader.from(client).forceUpdateCollection(collectionName);
+    ClusterState clusterState = client.getClusterState();
     final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
     if (docCollection == null || docCollection.getReplica(replicaName) == null) {
       fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
index f9bae8d..69ae6a0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
@@ -127,7 +127,7 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
     assertEquals(1, rsp.getResults().getNumFound());
 
     // remove a server not hosting any replicas
-    ZkStateReader zkStateReader = client.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(client);
     zkStateReader.forceUpdateCollection(collectionName);
     ClusterState clusterState = zkStateReader.getClusterState();
     Map<String, JettySolrRunner> jettyMap = new HashMap<>();
@@ -165,7 +165,7 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
 
     CollectionAdminRequest.deleteCollection(collectionName).process(client);
     AbstractDistribZkTestBase.waitForCollectionToDisappear(
-        collectionName, client.getZkStateReader(), true, 330);
+        collectionName, ZkStateReader.from(client), true, 330);
 
     // create it again
     createCollection(collectionName, null);
@@ -195,8 +195,7 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
 
     // check the collection's corelessness
     int coreCount = 0;
-    DocCollection docCollection =
-        client.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection docCollection = client.getClusterState().getCollection(collectionName);
     for (Map.Entry<String, Slice> entry : docCollection.getSlicesMap().entrySet()) {
       coreCount += entry.getValue().getReplicasMap().entrySet().size();
     }
@@ -205,7 +204,7 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
     // delete the collection
     CollectionAdminRequest.deleteCollection(collectionName).process(client);
     AbstractDistribZkTestBase.waitForCollectionToDisappear(
-        collectionName, client.getZkStateReader(), true, 330);
+        collectionName, ZkStateReader.from(client), true, 330);
   }
 
   @Test
@@ -227,7 +226,7 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
     // create collection
     createCollection(collectionName, null);
 
-    ZkStateReader zkStateReader = client.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(client);
 
     // modify collection
     final int numDocs = 1 + random().nextInt(10);
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
index 936268c..71aa664 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
@@ -28,6 +28,7 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -58,7 +59,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {
       createCollection(null, COLLECTION_NAME, shards, rFactor, client, null, "conf1");
     }
 
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
+    waitForCollection(ZkStateReader.from(cloudClient), COLLECTION_NAME, 2);
     waitForRecoveriesToFinish(COLLECTION_NAME, false);
 
     listCollection();
@@ -149,7 +150,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {
 
       // Should be able to set non-unique-per-slice values in several places.
       Map<String, Slice> slices =
-          client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
+          client.getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
       List<String> sliceList = new ArrayList<>(slices.keySet());
       String c1_s1 = sliceList.get(0);
       List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
@@ -244,7 +245,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {
     // Keep trying while Overseer writes the ZK state for up to 30 seconds.
     for (int idx = 0; idx < 300; ++idx) {
       lastFailMsg = "";
-      ClusterState clusterState = client.getZkStateReader().getClusterState();
+      ClusterState clusterState = client.getClusterState();
       for (Slice slice : clusterState.getCollection(collectionName).getSlices()) {
         boolean foundLeader = false;
         boolean foundPreferred = false;
diff --git a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkCollectionPropsCachingTest.java b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkCollectionPropsCachingTest.java
index b8fffd0..2e69f58 100644
--- a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkCollectionPropsCachingTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkCollectionPropsCachingTest.java
@@ -64,7 +64,7 @@ public class ZkCollectionPropsCachingTest extends SolrCloudTestCase {
 
   @Test
   public void testReadWriteCached() throws InterruptedException, IOException {
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
 
     CollectionProperties collectionProps = new CollectionProperties(zkClient());
 
@@ -87,13 +87,9 @@ public class ZkCollectionPropsCachingTest extends SolrCloudTestCase {
     }
   }
 
-  private void checkValue(String propertyName, String expectedValue) throws InterruptedException {
+  private void checkValue(String propertyName, String expectedValue) {
     final Object value =
-        cluster
-            .getSolrClient()
-            .getZkStateReader()
-            .getCollectionProperties(collectionName)
-            .get(propertyName);
+        cluster.getZkStateReader().getCollectionProperties(collectionName).get(propertyName);
     assertEquals("Unexpected value for collection property: " + propertyName, expectedValue, value);
   }
 }
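The ZkCollectionPropsCachingTest change leans on the MiniSolrCloudCluster shortcut instead of going through the client. A rough sketch, assuming the usual SolrCloudTestCase setup; the collection and property names are placeholders, not taken from the commit:

    import org.apache.solr.cloud.SolrCloudTestCase;
    import org.apache.solr.common.cloud.ZkStateReader;

    public class CollectionPropsSketch extends SolrCloudTestCase {
      // Placeholder names; illustrative only.
      void checkProp() throws Exception {
        // Was: cluster.getSolrClient().getZkStateReader()
        ZkStateReader reader = cluster.getZkStateReader();
        Object value = reader.getCollectionProperties("props_coll").get("my.prop");
        assertEquals("expected", value);
      }
    }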
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/impl/PlacementPluginIntegrationTest.java b/solr/core/src/test/org/apache/solr/cluster/placement/impl/PlacementPluginIntegrationTest.java
index 00299a69b..516390f 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/impl/PlacementPluginIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/impl/PlacementPluginIntegrationTest.java
@@ -112,7 +112,7 @@ public class PlacementPluginIntegrationTest extends SolrCloudTestCase {
     assertTrue(rsp.isSuccess());
     cluster.waitForActiveCollection(COLLECTION, 2, 4);
     // use Solr-specific API to verify the expected placements
-    ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
+    ClusterState clusterState = cloudManager.getClusterState();
     DocCollection collection = clusterState.getCollectionOrNull(COLLECTION);
     assertNotNull(collection);
     Map<String, AtomicInteger> coresByNode = new HashMap<>();
@@ -275,11 +275,7 @@ public class PlacementPluginIntegrationTest extends SolrCloudTestCase {
             .process(cluster.getSolrClient());
     assertTrue(rsp.isSuccess());
     cluster.waitForActiveCollection(SECONDARY_COLLECTION, 1, 3);
-    DocCollection secondary =
-        cloudManager
-            .getClusterStateProvider()
-            .getClusterState()
-            .getCollection(SECONDARY_COLLECTION);
+    DocCollection secondary = cloudManager.getClusterState().getCollection(SECONDARY_COLLECTION);
     Set<String> secondaryNodes = new HashSet<>();
     secondary.forEachReplica((shard, replica) -> secondaryNodes.add(replica.getNodeName()));
 
@@ -290,8 +286,7 @@ public class PlacementPluginIntegrationTest extends SolrCloudTestCase {
     assertTrue(rsp.isSuccess());
     cluster.waitForActiveCollection(COLLECTION, 2, 4);
     // make sure the primary replicas were placed on the nodeset
-    DocCollection primary =
-        cloudManager.getClusterStateProvider().getClusterState().getCollection(COLLECTION);
+    DocCollection primary = cloudManager.getClusterState().getCollection(COLLECTION);
     primary.forEachReplica(
         (shard, replica) ->
             assertTrue(
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsTest.java b/solr/core/src/test/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsTest.java
index 5d2e522..ea5856e 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsTest.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsTest.java
@@ -52,7 +52,7 @@ public class SimpleClusterAbstractionsTest extends SolrCloudTestCase {
 
   @Test
   public void testBasic() throws Exception {
-    ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
+    ClusterState clusterState = cloudManager.getClusterState();
     Cluster cluster = new SimpleClusterAbstractionsImpl.ClusterImpl(cloudManager);
     assertNotNull(cluster);
     Set<Node> nodes = cluster.getLiveNodes();
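Both placement tests above use the matching shortcut on SolrCloudManager. In outline (placeholder names; the cloudManager instance is assumed to be obtained the same way these tests obtain it):

    import org.apache.solr.client.solrj.cloud.SolrCloudManager;
    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.DocCollection;

    class PlacementStateSketch {
      static DocCollection collectionState(SolrCloudManager cloudManager, String coll) throws Exception {
        // Previously cloudManager.getClusterStateProvider().getClusterState().
        ClusterState state = cloudManager.getClusterState();
        return state.getCollection(coll);
      }
    }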
diff --git a/solr/core/src/test/org/apache/solr/core/BlobRepositoryCloudTest.java b/solr/core/src/test/org/apache/solr/core/BlobRepositoryCloudTest.java
index 0746fc3..c3c7d50 100644
--- a/solr/core/src/test/org/apache/solr/core/BlobRepositoryCloudTest.java
+++ b/solr/core/src/test/org/apache/solr/core/BlobRepositoryCloudTest.java
@@ -97,7 +97,7 @@ public class BlobRepositoryCloudTest extends SolrCloudTestCase {
 
   // TODO: move this up to parent class?
   private static String findLiveNodeURI() {
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     return zkStateReader.getBaseUrlForNodeName(
         zkStateReader
             .getClusterState()
diff --git a/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCloudSnapshots.java b/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCloudSnapshots.java
index e60a464..81a16fb 100644
--- a/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCloudSnapshots.java
+++ b/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCloudSnapshots.java
@@ -40,6 +40,7 @@ import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Replica.State;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
 import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.SnapshotMetaData;
@@ -122,8 +123,7 @@ public class TestSolrCloudSnapshots extends SolrCloudTestCase {
       Thread.sleep(5000);
 
       // Figure out if at-least one replica is "down".
-      DocCollection collState =
-          solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+      DocCollection collState = solrClient.getClusterState().getCollection(collectionName);
       for (Slice s : collState.getSlices()) {
         for (Replica replica : s.getReplicas()) {
           if (replica.getState() == State.DOWN) {
@@ -151,8 +151,7 @@ public class TestSolrCloudSnapshots extends SolrCloudTestCase {
         meta.getReplicaSnapshots().stream()
             .collect(Collectors.toMap(CoreSnapshotMetaData::getCoreName, Function.identity()));
 
-    DocCollection collectionState =
-        solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection collectionState = solrClient.getClusterState().getCollection(collectionName);
     assertEquals(2, collectionState.getActiveSlices().size());
     for (Slice shard : collectionState.getActiveSlices()) {
       assertEquals(2, shard.getReplicas().size());
@@ -219,17 +218,13 @@ public class TestSolrCloudSnapshots extends SolrCloudTestCase {
         assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(solrClient, 30)); // async
       }
       AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-          restoreCollectionName,
-          cluster.getSolrClient().getZkStateReader(),
-          log.isDebugEnabled(),
-          true,
-          30);
+          restoreCollectionName, ZkStateReader.from(solrClient), log.isDebugEnabled(), true, 30);
       BackupRestoreUtils.verifyDocs(nDocs, solrClient, restoreCollectionName);
     }
 
     // Check collection property
     Map<String, String> collectionProperties =
-        solrClient.getZkStateReader().getCollectionProperties(restoreCollectionName);
+        ZkStateReader.from(solrClient).getCollectionProperties(restoreCollectionName);
     if (collectionPropertySet) {
       assertEquals(
           "Snapshot restore hasn't restored collection properties",
@@ -254,8 +249,7 @@ public class TestSolrCloudSnapshots extends SolrCloudTestCase {
       }
 
       if (replicaToDelete != null) {
-        collectionState =
-            solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+        collectionState = solrClient.getClusterState().getCollection(collectionName);
         for (Slice s : collectionState.getSlices()) {
           for (Replica r : s.getReplicas()) {
             if (r.getCoreName().equals(replicaToDelete.getCoreName())) {
@@ -280,7 +274,7 @@ public class TestSolrCloudSnapshots extends SolrCloudTestCase {
 
     // Wait for a while so that the cluster state updates are propagated to the client side.
     Thread.sleep(2000);
-    collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    collectionState = solrClient.getClusterState().getCollection(collectionName);
 
     for (Slice shard : collectionState.getActiveSlices()) {
       for (Replica replica : shard.getReplicas()) {
@@ -325,7 +319,7 @@ public class TestSolrCloudSnapshots extends SolrCloudTestCase {
       assertEquals(0, deleteCol.process(solrClient).getStatus());
       assertTrue(
           SolrSnapshotManager.listSnapshots(
-                  solrClient.getZkStateReader().getZkClient(), collectionName)
+                  ZkStateReader.from(solrClient).getZkClient(), collectionName)
               .isEmpty());
     }
   }
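Where a test passes the reader into the shared wait helpers, the substitution is the same single call. Sketch only; the collection name and boolean flags are example values, while the timeout matches the hunk above:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.cloud.AbstractDistribZkTestBase;
    import org.apache.solr.common.cloud.ZkStateReader;

    class RecoveryWaitSketch {
      static void await(CloudSolrClient solrClient) throws Exception {
        // Previously: solrClient.getZkStateReader() as the second argument.
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(
            "restored_coll", ZkStateReader.from(solrClient), false, true, 30);
      }
    }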
diff --git a/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCoreSnapshots.java b/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCoreSnapshots.java
index 78f4abe..8168469 100644
--- a/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCoreSnapshots.java
+++ b/solr/core/src/test/org/apache/solr/core/snapshots/TestSolrCoreSnapshots.java
@@ -100,8 +100,7 @@ public class TestSolrCoreSnapshots extends SolrCloudTestCase {
     String location = createTempDir().toFile().getAbsolutePath();
     int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
 
-    DocCollection collectionState =
-        solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection collectionState = solrClient.getClusterState().getCollection(collectionName);
     assertEquals(1, collectionState.getActiveSlices().size());
     Slice shard = collectionState.getActiveSlices().iterator().next();
     assertEquals(1, shard.getReplicas().size());
@@ -195,8 +194,7 @@ public class TestSolrCoreSnapshots extends SolrCloudTestCase {
 
     int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
 
-    DocCollection collectionState =
-        solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection collectionState = solrClient.getClusterState().getCollection(collectionName);
     assertEquals(1, collectionState.getActiveSlices().size());
     Slice shard = collectionState.getActiveSlices().iterator().next();
     assertEquals(1, shard.getReplicas().size());
diff --git a/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java b/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
index 524a809..e92210b 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestBlobHandler.java
@@ -64,8 +64,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
       response1 = createCollectionRequest.process(client);
       assertEquals(0, response1.getStatus());
       assertTrue(response1.isSuccess());
-      DocCollection sysColl =
-          cloudClient.getZkStateReader().getClusterState().getCollection(".system");
+      DocCollection sysColl = cloudClient.getClusterState().getCollection(".system");
       Replica replica = sysColl.getActiveSlicesMap().values().iterator().next().getLeader();
 
       String baseUrl = replica.getBaseUrl();
diff --git a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java b/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
index e3e01d2..5e9e077 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestConfigReload.java
@@ -62,11 +62,9 @@ public class TestConfigReload extends AbstractFullDistribZkTestBase {
   }
 
   private void reloadTest() throws Exception {
-    SolrZkClient client = cloudClient.getZkStateReader().getZkClient();
+    SolrZkClient client = ZkStateReader.from(cloudClient).getZkClient();
     if (log.isInfoEnabled()) {
-      log.info(
-          "live_nodes_count :  {}",
-          cloudClient.getZkStateReader().getClusterState().getLiveNodes());
+      log.info("live_nodes_count :  {}", cloudClient.getClusterState().getLiveNodes());
     }
     String confPath = ZkConfigSetService.CONFIGS_ZKNODE + "/conf1/";
     //    checkConfReload(client, confPath + ConfigOverlay.RESOURCE_NAME, "overlay");
@@ -93,8 +91,7 @@ public class TestConfigReload extends AbstractFullDistribZkTestBase {
     }
     Integer newVersion = newStat.getVersion();
     long maxTimeoutSeconds = 60;
-    DocCollection coll =
-        cloudClient.getZkStateReader().getClusterState().getCollection("collection1");
+    DocCollection coll = cloudClient.getClusterState().getCollection("collection1");
     List<String> urls = new ArrayList<>();
     for (Slice slice : coll.getSlices()) {
       for (Replica replica : slice.getReplicas())
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReqParamsAPI.java b/solr/core/src/test/org/apache/solr/handler/TestReqParamsAPI.java
index 48559cb..e41df89 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReqParamsAPI.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReqParamsAPI.java
@@ -82,7 +82,7 @@ public class TestReqParamsAPI extends SolrCloudTestCase {
 
   private void testReqParams() throws Exception {
     CloudSolrClient cloudClient = cluster.getSolrClient();
-    DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection(COLL_NAME);
+    DocCollection coll = cloudClient.getClusterState().getCollection(COLL_NAME);
     List<String> urls = new ArrayList<>();
     for (Slice slice : coll.getSlices()) {
       for (Replica replica : slice.getReplicas())
@@ -106,7 +106,7 @@ public class TestReqParamsAPI extends SolrCloudTestCase {
     TestSolrConfigHandler.runConfigCommand(writeHarness, "/config", payload);
 
     AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(
-        COLL_NAME, cloudClient.getZkStateReader(), false, true, 90);
+        COLL_NAME, ZkStateReader.from(cloudClient), false, true, 90);
 
     payload =
         " {\n"
diff --git a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerCloud.java b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerCloud.java
index 69d2ba9..f804cb8 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerCloud.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerCloud.java
@@ -18,6 +18,7 @@ package org.apache.solr.handler;
 
 import static java.util.Arrays.asList;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -80,8 +81,9 @@ public class TestSolrConfigHandlerCloud extends AbstractFullDistribZkTestBase {
     TestSolrConfigHandler.reqhandlertests(writeHarness, testServerBaseUrl, cloudClient);
   }
 
-  public static String getRandomServer(CloudSolrClient cloudClient, String collName) {
-    DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection(collName);
+  public static String getRandomServer(CloudSolrClient cloudClient, String collName)
+      throws IOException {
+    DocCollection coll = cloudClient.getClusterState().getCollection(collName);
     List<String> urls = new ArrayList<>();
     for (Slice slice : coll.getSlices()) {
       for (Replica replica : slice.getReplicas())
@@ -91,8 +93,7 @@ public class TestSolrConfigHandlerCloud extends AbstractFullDistribZkTestBase {
   }
 
   private void testReqParams() throws Exception {
-    DocCollection coll =
-        cloudClient.getZkStateReader().getClusterState().getCollection("collection1");
+    DocCollection coll = cloudClient.getClusterState().getCollection("collection1");
     List<String> urls = new ArrayList<>();
     for (Slice slice : coll.getSlices()) {
       for (Replica replica : slice.getReplicas())
diff --git a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
index 5f9f3a4..ff51bf2 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestSolrConfigHandlerConcurrent.java
@@ -138,8 +138,7 @@ public class TestSolrConfigHandlerConcurrent extends AbstractFullDistribZkTestBa
         return;
       }
 
-      DocCollection coll =
-          cloudClient.getZkStateReader().getClusterState().getCollection("collection1");
+      DocCollection coll = cloudClient.getClusterState().getCollection("collection1");
       List<String> urls = new ArrayList<>();
       for (Slice slice : coll.getSlices()) {
         for (Replica replica : slice.getReplicas())
diff --git a/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java b/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
index 39c2730..509495c 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestStressIncrementalBackup.java
@@ -75,7 +75,6 @@ public class TestStressIncrementalBackup extends SolrCloudTestCase {
     Replica r =
         cluster
             .getSolrClient()
-            .getZkStateReader()
             .getClusterState()
             .getCollection(DEFAULT_TEST_COLLECTION_NAME)
             .getActiveSlices()
diff --git a/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java b/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
index 5909084..ec7a116 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java
@@ -363,7 +363,6 @@ public class TestStressThreadBackup extends SolrCloudTestCase {
     Replica r =
         cluster
             .getSolrClient()
-            .getZkStateReader()
             .getClusterState()
             .getCollection(DEFAULT_TEST_COLLECTION_NAME)
             .getActiveSlices()
diff --git a/solr/core/src/test/org/apache/solr/handler/TestSystemCollAutoCreate.java b/solr/core/src/test/org/apache/solr/handler/TestSystemCollAutoCreate.java
index 44c8c97..7220870 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestSystemCollAutoCreate.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestSystemCollAutoCreate.java
@@ -18,13 +18,10 @@
 package org.apache.solr.handler;
 
 import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
-import org.apache.solr.common.cloud.DocCollection;
 
 public class TestSystemCollAutoCreate extends AbstractFullDistribZkTestBase {
   public void testAutoCreate() throws Exception {
     TestBlobHandler.checkBlobPost(
         cloudJettys.get(0).jetty.getBaseUrl().toExternalForm(), cloudClient);
-    DocCollection sysColl =
-        cloudClient.getZkStateReader().getClusterState().getCollection(".system");
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
index 4f489f9..8b7ebf5 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
@@ -118,7 +118,7 @@ public class CustomHighlightComponentTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, "conf", numShards, numReplicas)
         .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, DEFAULT_TIMEOUT);
+        COLLECTION, cluster.getZkStateReader(), false, true, DEFAULT_TIMEOUT);
   }
 
   @Test
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentOptimizationTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentOptimizationTest.java
index bf1d45a..416866e 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentOptimizationTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedQueryComponentOptimizationTest.java
@@ -63,7 +63,7 @@ public class DistributedQueryComponentOptimizationTest extends SolrCloudTestCase
     CollectionAdminRequest.createCollection(COLLECTION, "conf", 3, 1)
         .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             COLLECTION,
             DEFAULT_TIMEOUT,
@@ -726,7 +726,7 @@ public class DistributedQueryComponentOptimizationTest extends SolrCloudTestCase
       String... values) {
     TrackingShardHandlerFactory.ShardRequestAndParams getByIdRequest =
         trackingQueue.getShardRequestByPurpose(
-            cluster.getSolrClient().getZkStateReader(), collection, shard, purpose);
+            cluster.getZkStateReader(), collection, shard, purpose);
     assertParamsEquals(getByIdRequest, paramName, values);
   }
 
diff --git a/solr/core/src/test/org/apache/solr/handler/component/SearchHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/component/SearchHandlerTest.java
index a3f66eb..e043392 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/SearchHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/SearchHandlerTest.java
@@ -151,11 +151,7 @@ public class SearchHandlerTest extends SolrTestCaseJ4 {
       assertTrue(rsp.getResponseHeader().getBooleanArg("zkConnected"));
 
       Collection<Slice> slices =
-          cloudSolrClient
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(collectionName)
-              .getSlices();
+          cloudSolrClient.getClusterState().getCollection(collectionName).getSlices();
       Slice slice = getRandomEntry(slices);
       Replica replica = getRandomEntry(slice.getReplicas());
       JettySolrRunner jetty = miniCluster.getReplicaJetty(replica);
@@ -202,11 +198,7 @@ public class SearchHandlerTest extends SolrTestCaseJ4 {
       assertTrue(rsp.getResponseHeader().getBooleanArg("zkConnected"));
 
       Collection<Slice> slices =
-          cloudSolrClient
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(collectionName)
-              .getSlices();
+          cloudSolrClient.getClusterState().getCollection(collectionName).getSlices();
       Slice disconnectedSlice = getRandomEntry(slices);
       Replica disconnectedReplica = getRandomEntry(disconnectedSlice.getReplicas());
       JettySolrRunner disconnectedJetty = miniCluster.getReplicaJetty(disconnectedReplica);
@@ -258,11 +250,7 @@ public class SearchHandlerTest extends SolrTestCaseJ4 {
       assertTrue(rsp.getResponseHeader().getBooleanArg("zkConnected"));
 
       Collection<Slice> slices =
-          cloudSolrClient
-              .getZkStateReader()
-              .getClusterState()
-              .getCollection(collectionName)
-              .getSlices();
+          cloudSolrClient.getClusterState().getCollection(collectionName).getSlices();
       Slice disconnectedSlice = getRandomEntry(slices);
       Replica disconnectedReplica = getRandomEntry(disconnectedSlice.getReplicas());
 
diff --git a/solr/core/src/test/org/apache/solr/handler/component/ShardsAllowListTest.java b/solr/core/src/test/org/apache/solr/handler/component/ShardsAllowListTest.java
index 99ba72f..7a53bab 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/ShardsAllowListTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/ShardsAllowListTest.java
@@ -284,7 +284,6 @@ public class ShardsAllowListTest extends MultiSolrCloudTestCase {
   private String getShardUrl(String shardName, MiniSolrCloudCluster cluster) {
     return cluster
         .getSolrClient()
-        .getZkStateReader()
         .getClusterState()
         .getCollection(COLLECTION_NAME)
         .getSlice(shardName)
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TestTrackingShardHandlerFactory.java b/solr/core/src/test/org/apache/solr/handler/component/TestTrackingShardHandlerFactory.java
index f531414..c9995fd 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TestTrackingShardHandlerFactory.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TestTrackingShardHandlerFactory.java
@@ -26,6 +26,7 @@ import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.CoreContainer;
 import org.junit.Test;
 
@@ -97,20 +98,20 @@ public class TestTrackingShardHandlerFactory extends AbstractFullDistribZkTestBa
 
     TrackingShardHandlerFactory.ShardRequestAndParams getTopIdsRequest =
         trackingQueue.getShardRequestByPurpose(
-            client.getZkStateReader(), collectionName, "shard1", ShardRequest.PURPOSE_GET_TOP_IDS);
+            ZkStateReader.from(client), collectionName, "shard1", ShardRequest.PURPOSE_GET_TOP_IDS);
     assertNotNull(getTopIdsRequest);
     getTopIdsRequest =
         trackingQueue.getShardRequestByPurpose(
-            client.getZkStateReader(), collectionName, "shard2", ShardRequest.PURPOSE_GET_TOP_IDS);
+            ZkStateReader.from(client), collectionName, "shard2", ShardRequest.PURPOSE_GET_TOP_IDS);
     assertNotNull(getTopIdsRequest);
 
     TrackingShardHandlerFactory.ShardRequestAndParams getFieldsRequest =
         trackingQueue.getShardRequestByPurpose(
-            client.getZkStateReader(), collectionName, "shard1", ShardRequest.PURPOSE_GET_FIELDS);
+            ZkStateReader.from(client), collectionName, "shard1", ShardRequest.PURPOSE_GET_FIELDS);
     assertNotNull(getFieldsRequest);
     getFieldsRequest =
         trackingQueue.getShardRequestByPurpose(
-            client.getZkStateReader(), collectionName, "shard2", ShardRequest.PURPOSE_GET_FIELDS);
+            ZkStateReader.from(client), collectionName, "shard2", ShardRequest.PURPOSE_GET_FIELDS);
     assertNotNull(getFieldsRequest);
 
     int numRequests = 0;
diff --git a/solr/core/src/test/org/apache/solr/handler/component/UpdateLogCloudTest.java b/solr/core/src/test/org/apache/solr/handler/component/UpdateLogCloudTest.java
index 5a8893f..85d9bbe 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/UpdateLogCloudTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/UpdateLogCloudTest.java
@@ -68,7 +68,7 @@ public class UpdateLogCloudTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, "conf", NUM_SHARDS, NUM_REPLICAS)
         .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, DEFAULT_TIMEOUT);
+        COLLECTION, cluster.getZkStateReader(), false, true, DEFAULT_TIMEOUT);
   }
 
   @After
@@ -102,7 +102,7 @@ public class UpdateLogCloudTest extends SolrCloudTestCase {
 
     cluster.getJettySolrRunner(specialIdx).stop();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, DEFAULT_TIMEOUT);
+        COLLECTION, cluster.getZkStateReader(), false, true, DEFAULT_TIMEOUT);
 
     new UpdateRequest()
         .add(sdoc("id", "1", "a_t", "one"))
@@ -112,7 +112,7 @@ public class UpdateLogCloudTest extends SolrCloudTestCase {
 
     cluster.getJettySolrRunner(specialIdx).start();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, DEFAULT_TIMEOUT);
+        COLLECTION, cluster.getZkStateReader(), false, true, DEFAULT_TIMEOUT);
 
     int idx = 0;
     for (SolrClient solrClient : solrClients) {
diff --git a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
index 1e604d4..a9751e1 100644
--- a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
+++ b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
@@ -90,7 +90,7 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
     CloudSolrClient client = cluster.getSolrClient();
     client.setDefaultCollection(people);
 
-    ZkStateReader zkStateReader = client.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(client);
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(people, zkStateReader, true, true, 30);
 
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(depts, zkStateReader, false, true, 30);
diff --git a/solr/core/src/test/org/apache/solr/schema/ManagedSchemaRoundRobinCloudTest.java b/solr/core/src/test/org/apache/solr/schema/ManagedSchemaRoundRobinCloudTest.java
index 9cd45bc..0562fec 100644
--- a/solr/core/src/test/org/apache/solr/schema/ManagedSchemaRoundRobinCloudTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/ManagedSchemaRoundRobinCloudTest.java
@@ -47,7 +47,7 @@ public class ManagedSchemaRoundRobinCloudTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, CONFIG, NUM_SHARDS, 1)
         .process(cluster.getSolrClient());
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             COLLECTION,
             DEFAULT_TIMEOUT,
diff --git a/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldManagedSchemaCloudTest.java b/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldManagedSchemaCloudTest.java
index c44884b..39cdfb8 100644
--- a/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldManagedSchemaCloudTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldManagedSchemaCloudTest.java
@@ -41,7 +41,7 @@ public class PreAnalyzedFieldManagedSchemaCloudTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, CONFIG, 2, 1)
         .process(cluster.getSolrClient());
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             COLLECTION,
             DEFAULT_TIMEOUT,
diff --git a/solr/core/src/test/org/apache/solr/schema/SchemaApiFailureTest.java b/solr/core/src/test/org/apache/solr/schema/SchemaApiFailureTest.java
index 690df76..9e7717f 100644
--- a/solr/core/src/test/org/apache/solr/schema/SchemaApiFailureTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/SchemaApiFailureTest.java
@@ -40,7 +40,7 @@ public class SchemaApiFailureTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, 2, 1) // _default configset
         .process(cluster.getSolrClient());
     cluster
-        .getSolrClient()
+        .getZkStateReader()
         .waitForState(
             COLLECTION,
             DEFAULT_TIMEOUT,
diff --git a/solr/core/src/test/org/apache/solr/schema/TestCloudSchemaless.java b/solr/core/src/test/org/apache/solr/schema/TestCloudSchemaless.java
index f2598d5..01fbb11 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestCloudSchemaless.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestCloudSchemaless.java
@@ -88,13 +88,9 @@ public class TestCloudSchemaless extends AbstractFullDistribZkTestBase {
     // First, add a bunch of documents in a single update with the same new field.
     // This tests that the replicas properly handle schema additions.
 
+    getCommonCloudSolrClient();
     int slices =
-        getCommonCloudSolrClient()
-            .getZkStateReader()
-            .getClusterState()
-            .getCollection("collection1")
-            .getActiveSlices()
-            .size();
+        cloudClient.getClusterState().getCollection("collection1").getActiveSlices().size();
     int trials = 50;
     // generate enough docs so that we can expect at least a doc per slice
     int numDocsPerTrial = (int) (slices * (Math.log(slices) + 1));
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetJoinDomain.java b/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetJoinDomain.java
index 71b5cdd..d0e2442 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetJoinDomain.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetJoinDomain.java
@@ -41,6 +41,7 @@ import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.cloud.TestCloudPivotFacet;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -1062,6 +1063,6 @@ public class TestCloudJSONFacetJoinDomain extends SolrCloudTestCase {
   public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
     assert null != client.getDefaultCollection();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        client.getDefaultCollection(), client.getZkStateReader(), true, true, 330);
+        client.getDefaultCollection(), ZkStateReader.from(client), true, true, 330);
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetSKG.java b/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetSKG.java
index 9ae7a0e..92c9eed 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetSKG.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetSKG.java
@@ -44,6 +44,7 @@ import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.junit.AfterClass;
@@ -904,7 +905,7 @@ public class TestCloudJSONFacetSKG extends SolrCloudTestCase {
   public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
     assert null != client.getDefaultCollection();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        client.getDefaultCollection(), client.getZkStateReader(), true, true, 330);
+        client.getDefaultCollection(), ZkStateReader.from(client), true, true, 330);
   }
 
   /** helper macro: fails on null keys, skips pairs with null values */
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetSKGEquiv.java b/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetSKGEquiv.java
index cc73d97..f4d98e5 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetSKGEquiv.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestCloudJSONFacetSKGEquiv.java
@@ -45,6 +45,7 @@ import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -1310,7 +1311,7 @@ public class TestCloudJSONFacetSKGEquiv extends SolrCloudTestCase {
   public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
     assert null != client.getDefaultCollection();
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        client.getDefaultCollection(), client.getZkStateReader(), true, true, 330);
+        client.getDefaultCollection(), ZkStateReader.from(client), true, true, 330);
   }
 
   /** helper macro: fails on null keys, skips pairs with null values */
diff --git a/solr/core/src/test/org/apache/solr/search/join/CrossCollectionJoinQueryTest.java b/solr/core/src/test/org/apache/solr/search/join/CrossCollectionJoinQueryTest.java
index cc408e4..07464a6 100644
--- a/solr/core/src/test/org/apache/solr/search/join/CrossCollectionJoinQueryTest.java
+++ b/solr/core/src/test/org/apache/solr/search/join/CrossCollectionJoinQueryTest.java
@@ -197,7 +197,7 @@ public class CrossCollectionJoinQueryTest extends SolrCloudTestCase {
           String.format(
               Locale.ROOT,
               "{!join method=crossCollection zkHost=\"%s\" fromIndex=products from=product_id_s to=product_id_s}size_s:M",
-              cluster.getSolrClient().getZkHost()),
+              client.getClusterStateProvider().getQuorumHosts()),
           true);
 
       // Test the ability to set other parameters on crossCollection join and have them passed
diff --git a/solr/core/src/test/org/apache/solr/search/join/TestCloudNestedDocsSort.java b/solr/core/src/test/org/apache/solr/search/join/TestCloudNestedDocsSort.java
index d26bd76..a5f373b 100644
--- a/solr/core/src/test/org/apache/solr/search/join/TestCloudNestedDocsSort.java
+++ b/solr/core/src/test/org/apache/solr/search/join/TestCloudNestedDocsSort.java
@@ -69,10 +69,9 @@ public class TestCloudNestedDocsSort extends SolrCloudTestCase {
     client = cluster.getSolrClient();
     client.setDefaultCollection("collection1");
 
-    ZkStateReader zkStateReader = client.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(client);
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
         "collection1", zkStateReader, true, true, 30);
-
     {
       int id = 42;
       final List<SolrInputDocument> docs = new ArrayList<>();
diff --git a/solr/core/src/test/org/apache/solr/servlet/HttpSolrCallCloudTest.java b/solr/core/src/test/org/apache/solr/servlet/HttpSolrCallCloudTest.java
index f34fc68..518e8ab 100644
--- a/solr/core/src/test/org/apache/solr/servlet/HttpSolrCallCloudTest.java
+++ b/solr/core/src/test/org/apache/solr/servlet/HttpSolrCallCloudTest.java
@@ -53,7 +53,7 @@ public class HttpSolrCallCloudTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, "config", NUM_SHARD, REPLICA_FACTOR)
         .process(cluster.getSolrClient());
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, 30);
+        COLLECTION, cluster.getZkStateReader(), false, true, 30);
   }
 
   @Test
diff --git a/solr/core/src/test/org/apache/solr/update/DeleteByIdWithRouterFieldTest.java b/solr/core/src/test/org/apache/solr/update/DeleteByIdWithRouterFieldTest.java
index be7d4ec..5f5a827 100644
--- a/solr/core/src/test/org/apache/solr/update/DeleteByIdWithRouterFieldTest.java
+++ b/solr/core/src/test/org/apache/solr/update/DeleteByIdWithRouterFieldTest.java
@@ -33,10 +33,7 @@ import org.apache.solr.cloud.CloudInspectUtil;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.*;
 import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.params.SolrParams;
 import org.junit.After;
@@ -77,7 +74,7 @@ public class DeleteByIdWithRouterFieldTest extends SolrCloudTestCase {
 
     cluster.getSolrClient().setDefaultCollection(COLL);
 
-    ClusterState clusterState = cluster.getSolrClient().getClusterStateProvider().getClusterState();
+    ClusterState clusterState = cluster.getSolrClient().getClusterState();
     for (Replica replica : clusterState.getCollection(COLL).getReplicas()) {
       clients.add(getHttpSolrClient(replica.getCoreUrl()));
     }
@@ -104,8 +101,7 @@ public class DeleteByIdWithRouterFieldTest extends SolrCloudTestCase {
 
   private void checkShardsConsistentNumFound() throws Exception {
     final SolrParams params = params("q", "*:*", "distrib", "false");
-    final DocCollection collection =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLL);
+    final DocCollection collection = cluster.getSolrClient().getClusterState().getCollection(COLL);
     for (Map.Entry<String, Slice> entry : collection.getActiveSlicesMap().entrySet()) {
       final String shardName = entry.getKey();
       final Slice slice = entry.getValue();
@@ -291,8 +287,7 @@ public class DeleteByIdWithRouterFieldTest extends SolrCloudTestCase {
    */
   public void testGlassBoxUpdateRequestRoutesToShards() throws Exception {
 
-    final DocCollection docCol =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLL);
+    final DocCollection docCol = cluster.getSolrClient().getClusterState().getCollection(COLL);
     // we don't need "real" urls for all replicas, just something we can use as lookup keys for
     // verification so we'll use the shard names as "leader urls"
     final Map<String, List<String>> urlMap =
diff --git a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
index ab89d43..4dd45e6 100644
--- a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
+++ b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
@@ -212,10 +212,10 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
   }
 
   private void mapReplicasToClients() throws KeeperException, InterruptedException {
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    Replica leader = null;
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
+    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
+    ClusterState clusterState = zkStateReader.getClusterState();
+    Replica leader;
     Slice shard1 = clusterState.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1);
     leader = shard1.getLeader();
 
@@ -1264,8 +1264,8 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
     for (int i = 0; i < 100; i++) {
       Thread.sleep(10);
-      cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
-      ClusterState state = cloudClient.getZkStateReader().getClusterState();
+      ZkStateReader.from(cloudClient).forceUpdateCollection(DEFAULT_COLLECTION);
+      ClusterState state = cloudClient.getClusterState();
 
       int numActiveReplicas = 0;
       for (Replica rep : state.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1).getReplicas())
@@ -1364,11 +1364,11 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
 
       try (ZkShardTerms zkShardTerms =
           new ZkShardTerms(
-              DEFAULT_COLLECTION, SHARD1, cloudClient.getZkStateReader().getZkClient())) {
+              DEFAULT_COLLECTION, SHARD1, ZkStateReader.from(cloudClient).getZkClient())) {
         for (int i = 0; i < 100; i++) {
           Thread.sleep(10);
-          cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
-          ClusterState state = cloudClient.getZkStateReader().getClusterState();
+          ZkStateReader.from(cloudClient).forceUpdateCollection(DEFAULT_COLLECTION);
+          ClusterState state = cloudClient.getClusterState();
 
           int numActiveReplicas = 0;
           for (Replica rep :
@@ -1487,9 +1487,8 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     return ur;
   }
 
-  private String getBaseUrl(String id) {
-    DocCollection collection =
-        cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
+  private String getBaseUrl(String id) throws IOException {
+    DocCollection collection = cloudClient.getClusterState().getCollection(DEFAULT_COLLECTION);
     Slice slice = collection.getRouter().getTargetSlice(id, null, null, null, collection);
     String baseUrl = slice.getLeader().getCoreUrl();
     return baseUrl;
@@ -1660,8 +1659,8 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
     for (int i = 0; i < 100; i++) {
       Thread.sleep(10);
-      cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
-      ClusterState state = cloudClient.getZkStateReader().getClusterState();
+      ZkStateReader.from(cloudClient).forceUpdateCollection(DEFAULT_COLLECTION);
+      ClusterState state = cloudClient.getClusterState();
 
       int numActiveReplicas = 0;
       for (Replica rep : state.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1).getReplicas())
diff --git a/solr/core/src/test/org/apache/solr/update/processor/RoutedAliasUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/RoutedAliasUpdateProcessorTest.java
index 14ef7e9..4821682 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/RoutedAliasUpdateProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/RoutedAliasUpdateProcessorTest.java
@@ -81,25 +81,17 @@ public abstract class RoutedAliasUpdateProcessorTest extends SolrCloudTestCase {
         Thread.sleep(500);
       }
     }
-    try {
-      DocCollection confirmCollection =
-          cluster
-              .getSolrClient()
-              .getClusterStateProvider()
-              .getClusterState()
-              .getCollectionOrNull(collection);
-      assertNotNull(
-          "Unable to find collection we were waiting for after done waiting", confirmCollection);
-    } catch (IOException e) {
-      fail("exception getting collection we were waiting for and have supposedly created already");
-    }
+    var confirmCollection =
+        cluster.getSolrClient().getClusterState().getCollectionOrNull(collection);
+    assertNotNull(
+        "Unable to find collection we were waiting for after done waiting", confirmCollection);
   }
 
   private boolean haveCollection(String alias, String collection) {
     // separated into separate lines to make it easier to track down an NPE that occurred once
     // 3000 runs if it shows up again...
     CloudSolrClient solrClient = cluster.getSolrClient();
-    ZkStateReader zkStateReader = solrClient.getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     Aliases aliases = zkStateReader.getAliases();
     Map<String, List<String>> collectionAliasListMap = aliases.getCollectionAliasListMap();
     List<String> strings = collectionAliasListMap.get(alias);
diff --git a/solr/core/src/test/org/apache/solr/update/processor/TemplateUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/TemplateUpdateProcessorTest.java
index 878238b..49891ca 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/TemplateUpdateProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/TemplateUpdateProcessorTest.java
@@ -91,8 +91,7 @@ public class TemplateUpdateProcessorTest extends SolrCloudTestCase {
                 CollectionAdminRequest.createCollection("c", "conf1", 1, 1)
                     .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE));
     Utils.toJSONString(result.asMap(4));
-    AbstractFullDistribZkTestBase.waitForCollection(
-        cluster.getSolrClient().getZkStateReader(), "c", 1);
+    AbstractFullDistribZkTestBase.waitForCollection(cluster.getZkStateReader(), "c", 1);
     cluster.getSolrClient().request(add, "c");
     QueryResponse rsp =
         cluster.getSolrClient().query("c", new ModifiableSolrParams().add("q", "id:1"));
diff --git a/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
index 3abc1b8..572667a 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessorTest.java
@@ -1276,7 +1276,7 @@ public class TimeRoutedAliasUpdateProcessorTest extends RoutedAliasUpdateProcess
     assertEquals(1, resp.getResults().getNumFound());
 
     // now knock out the collection backing our alias
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(solrClient);
     Aliases aliases = zkStateReader.getAliases();
     List<String> collections = aliases.getCollectionAliasListMap().get(alias);
     for (String collection : collections) {
diff --git a/solr/modules/analytics/src/java/org/apache/solr/analytics/stream/AnalyticsShardRequestManager.java b/solr/modules/analytics/src/java/org/apache/solr/analytics/stream/AnalyticsShardRequestManager.java
index bb7fdb8..d07e5a4 100644
--- a/solr/modules/analytics/src/java/org/apache/solr/analytics/stream/AnalyticsShardRequestManager.java
+++ b/solr/modules/analytics/src/java/org/apache/solr/analytics/stream/AnalyticsShardRequestManager.java
@@ -102,7 +102,7 @@ public class AnalyticsShardRequestManager {
   protected void pickShards(String collection) throws IOException {
     try {
 
-      ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
       ClusterState clusterState = zkStateReader.getClusterState();
       Set<String> liveNodes = clusterState.getLiveNodes();
 
diff --git a/solr/modules/analytics/src/test/org/apache/solr/analytics/SolrAnalyticsTestCase.java b/solr/modules/analytics/src/test/org/apache/solr/analytics/SolrAnalyticsTestCase.java
index e115f0d..ed5984a 100644
--- a/solr/modules/analytics/src/test/org/apache/solr/analytics/SolrAnalyticsTestCase.java
+++ b/solr/modules/analytics/src/test/org/apache/solr/analytics/SolrAnalyticsTestCase.java
@@ -58,7 +58,7 @@ public class SolrAnalyticsTestCase extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTIONORALIAS, "conf", 2, 1)
         .process(cluster.getSolrClient());
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTIONORALIAS, cluster.getSolrClient().getZkStateReader(), false, true, TIMEOUT);
+        COLLECTIONORALIAS, cluster.getZkStateReader(), false, true, TIMEOUT);
 
     new UpdateRequest().deleteByQuery("*:*").commit(cluster.getSolrClient(), COLLECTIONORALIAS);
 
diff --git a/solr/modules/hadoop-auth/build.gradle b/solr/modules/hadoop-auth/build.gradle
index 8aab68c..5dd2233 100644
--- a/solr/modules/hadoop-auth/build.gradle
+++ b/solr/modules/hadoop-auth/build.gradle
@@ -87,6 +87,8 @@ dependencies {
 
   testImplementation 'org.apache.lucene:lucene-core'
 
+  testImplementation project(':solr:solrj')
+
   // classes like solr.ICUCollationField, used by TestSolrCloudWithSecureImpersonation for example.
   testRuntimeOnly project(':solr:modules:analysis-extras')
 
diff --git a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestRuleBasedAuthorizationWithKerberos.java b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestRuleBasedAuthorizationWithKerberos.java
index 72c64e3..2d3407e 100644
--- a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestRuleBasedAuthorizationWithKerberos.java
+++ b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestRuleBasedAuthorizationWithKerberos.java
@@ -28,6 +28,7 @@ import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.util.LogLevel;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -120,6 +121,6 @@ public class TestRuleBasedAuthorizationWithKerberos extends SolrCloudTestCase {
         CollectionAdminRequest.deleteCollection(collectionName);
     deleteReq.process(solrClient);
     AbstractDistribZkTestBase.waitForCollectionToDisappear(
-        collectionName, solrClient.getZkStateReader(), true, 330);
+        collectionName, ZkStateReader.from(solrClient), true, 330);
   }
 }
diff --git a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithHadoopAuthPlugin.java b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithHadoopAuthPlugin.java
index 009f3dd..8c6d5a6 100644
--- a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithHadoopAuthPlugin.java
+++ b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithHadoopAuthPlugin.java
@@ -23,6 +23,7 @@ import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.SolrCloudAuthTestCase;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -87,7 +88,7 @@ public class TestSolrCloudWithHadoopAuthPlugin extends SolrCloudAuthTestCase {
         CollectionAdminRequest.deleteCollection(collectionName);
     deleteReq.process(solrClient);
     AbstractDistribZkTestBase.waitForCollectionToDisappear(
-        collectionName, solrClient.getZkStateReader(), true, 330);
+        collectionName, ZkStateReader.from(solrClient), true, 330);
     // cookie was used to avoid re-authentication
     assertAuthMetricsMinimums(6, 4, 0, 2, 0, 0);
   }
diff --git a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithKerberosAlt.java b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithKerberosAlt.java
index 0edf81f..f266537 100644
--- a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithKerberosAlt.java
+++ b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestSolrCloudWithKerberosAlt.java
@@ -29,6 +29,7 @@ import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.util.BadZookeeperThreadsFilter;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -91,7 +92,7 @@ public class TestSolrCloudWithKerberosAlt extends SolrCloudTestCase {
     CollectionAdminRequest.deleteCollection(collectionName).process(client);
 
     AbstractDistribZkTestBase.waitForCollectionToDisappear(
-        collectionName, client.getZkStateReader(), true, 330);
+        collectionName, ZkStateReader.from(client), true, 330);
   }
 
   @Override
diff --git a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestZkAclsWithHadoopAuth.java b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestZkAclsWithHadoopAuth.java
index 95a7c69..ae1c1c8 100644
--- a/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestZkAclsWithHadoopAuth.java
+++ b/solr/modules/hadoop-auth/src/test/org/apache/solr/security/hadoop/TestZkAclsWithHadoopAuth.java
@@ -103,7 +103,7 @@ public class TestZkAclsWithHadoopAuth extends SolrCloudTestCase {
       checkSecurityACLs(keeper, "/security");
 
       // Now test all ZK tree.
-      String zkHost = cluster.getSolrClient().getZkHost();
+      String zkHost = cluster.getSolrClient().getClusterStateProvider().getQuorumHosts();
       String zkChroot = zkHost.contains("/") ? zkHost.substring(zkHost.indexOf("/")) : null;
       walkZkTree(keeper, zkChroot, "/");
     }
diff --git a/solr/modules/hdfs/src/java/org/apache/solr/hdfs/snapshots/SolrSnapshotsTool.java b/solr/modules/hdfs/src/java/org/apache/solr/hdfs/snapshots/SolrSnapshotsTool.java
index c7c8226..44906ff 100644
--- a/solr/modules/hdfs/src/java/org/apache/solr/hdfs/snapshots/SolrSnapshotsTool.java
+++ b/solr/modules/hdfs/src/java/org/apache/solr/hdfs/snapshots/SolrSnapshotsTool.java
@@ -254,8 +254,7 @@ public class SolrSnapshotsTool implements Closeable, CLIO {
           "The snapshot named " + snapshotName + " is not found for collection " + collectionName);
     }
 
-    DocCollection collectionState =
-        solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection collectionState = solrClient.getClusterState().getCollection(collectionName);
     for (Slice s : collectionState.getSlices()) {
       List<CoreSnapshotMetaData> replicaSnaps = meta.getReplicaSnapshotsForShard(s.getName());
       // Prepare a list of *existing* replicas (since one or more replicas could have been deleted
diff --git a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/MoveReplicaHdfsFailoverTest.java b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/MoveReplicaHdfsFailoverTest.java
index a83708e..58d9dd2 100644
--- a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/MoveReplicaHdfsFailoverTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/MoveReplicaHdfsFailoverTest.java
@@ -92,7 +92,7 @@ public class MoveReplicaHdfsFailoverTest extends SolrCloudTestCase {
             .process(cluster.getSolrClient());
 
     ulogDir += "/tlog";
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     assertTrue(ClusterStateUtil.waitForAllActiveAndLiveReplicas(zkStateReader, 120000));
 
     DocCollection docCollection = zkStateReader.getClusterState().getCollection(coll);
@@ -171,34 +171,24 @@ public class MoveReplicaHdfsFailoverTest extends SolrCloudTestCase {
     Replica replica = getCollectionState(coll).getReplicas().iterator().next();
 
     cluster.getJettySolrRunners().get(0).stop();
-    assertTrue(
-        ClusterStateUtil.waitForAllReplicasNotLive(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllReplicasNotLive(cluster.getZkStateReader(), 20000));
 
     // move replica from node0 -> node1
     new CollectionAdminRequest.MoveReplica(
             coll, replica.getName(), cluster.getJettySolrRunner(1).getNodeName())
         .process(cluster.getSolrClient());
-    assertTrue(
-        ClusterStateUtil.waitForAllActiveAndLiveReplicas(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllActiveAndLiveReplicas(cluster.getZkStateReader(), 20000));
 
     cluster.getJettySolrRunners().get(1).stop();
-    assertTrue(
-        ClusterStateUtil.waitForAllReplicasNotLive(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllReplicasNotLive(cluster.getZkStateReader(), 20000));
 
     // node0 will delete its replica because of CloudUtil.checkSharedFSFailoverReplaced()
     cluster.getJettySolrRunners().get(0).start();
     Thread.sleep(5000);
-    assertTrue(
-        ClusterStateUtil.waitForAllReplicasNotLive(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllReplicasNotLive(cluster.getZkStateReader(), 20000));
 
     cluster.getJettySolrRunners().get(1).start();
-    assertTrue(
-        ClusterStateUtil.waitForAllActiveAndLiveReplicas(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllActiveAndLiveReplicas(cluster.getZkStateReader(), 20000));
 
     assertEquals(1, getCollectionState(coll).getReplicas().size());
     assertEquals(
@@ -216,30 +206,22 @@ public class MoveReplicaHdfsFailoverTest extends SolrCloudTestCase {
     Replica replica = getCollectionState(coll).getReplicas().iterator().next();
 
     cluster.getJettySolrRunners().get(0).stop();
-    assertTrue(
-        ClusterStateUtil.waitForAllReplicasNotLive(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllReplicasNotLive(cluster.getZkStateReader(), 20000));
 
     // move replica from node0 -> node1
     new CollectionAdminRequest.MoveReplica(
             coll, replica.getName(), cluster.getJettySolrRunner(1).getNodeName())
         .process(cluster.getSolrClient());
-    assertTrue(
-        ClusterStateUtil.waitForAllActiveAndLiveReplicas(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllActiveAndLiveReplicas(cluster.getZkStateReader(), 20000));
 
     cluster.getJettySolrRunners().get(1).stop();
-    assertTrue(
-        ClusterStateUtil.waitForAllReplicasNotLive(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllReplicasNotLive(cluster.getZkStateReader(), 20000));
 
     cluster.getJettySolrRunners().get(1).start();
     // node0 will delete its replica because of CloudUtil.checkSharedFSFailoverReplaced()
     cluster.getJettySolrRunners().get(0).start();
     Thread.sleep(5000);
-    assertTrue(
-        ClusterStateUtil.waitForAllActiveAndLiveReplicas(
-            cluster.getSolrClient().getZkStateReader(), 20000));
+    assertTrue(ClusterStateUtil.waitForAllActiveAndLiveReplicas(cluster.getZkStateReader(), 20000));
 
     assertEquals(1, getCollectionState(coll).getReplicas().size());
     assertEquals(
diff --git a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/SharedFileSystemAutoReplicaFailoverTest.java b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/SharedFileSystemAutoReplicaFailoverTest.java
index bf9f303..6a1596b 100644
--- a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/SharedFileSystemAutoReplicaFailoverTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/SharedFileSystemAutoReplicaFailoverTest.java
@@ -231,19 +231,20 @@ public class SharedFileSystemAutoReplicaFailoverTest extends AbstractFullDistrib
 
     assertEquals(
         4,
-        ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
+        ClusterStateUtil.getLiveAndActiveReplicaCount(
+            ZkStateReader.from(cloudClient), collection1));
     assertTrue(
-        ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection2)
+        ClusterStateUtil.getLiveAndActiveReplicaCount(ZkStateReader.from(cloudClient), collection2)
             < 4);
 
     // collection3 has maxShardsPerNode=1, there are 4 standard jetties and one control jetty and 2
     // nodes stopped
     ClusterStateUtil.waitForLiveAndActiveReplicaCount(
-        cloudClient.getZkStateReader(), collection3, 3, 30000);
+        ZkStateReader.from(cloudClient), collection3, 3, 30000);
 
     // collection4 has maxShardsPerNode=5 and setMaxShardsPerNode=5
     ClusterStateUtil.waitForLiveAndActiveReplicaCount(
-        cloudClient.getZkStateReader(), collection4, 5, 30000);
+        ZkStateReader.from(cloudClient), collection4, 5, 30000);
 
     // all docs should be queried after failover
     cloudClient.commit(); // to query all docs
@@ -253,10 +254,11 @@ public class SharedFileSystemAutoReplicaFailoverTest extends AbstractFullDistrib
     // collection1 should still be at 4
     assertEquals(
         4,
-        ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
+        ClusterStateUtil.getLiveAndActiveReplicaCount(
+            ZkStateReader.from(cloudClient), collection1));
     // and collection2 less than 4
     assertTrue(
-        ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection2)
+        ClusterStateUtil.getLiveAndActiveReplicaCount(ZkStateReader.from(cloudClient), collection2)
             < 4);
 
     assertUlogDir(collections);
@@ -273,7 +275,7 @@ public class SharedFileSystemAutoReplicaFailoverTest extends AbstractFullDistrib
 
     assertTrue(
         "Timeout waiting for all not live",
-        waitingForReplicasNotLive(cloudClient.getZkStateReader(), 45000, stoppedJetties));
+        waitingForReplicasNotLive(ZkStateReader.from(cloudClient), 45000, stoppedJetties));
 
     ChaosMonkey.start(stoppedJetties);
     controlJetty.start();
@@ -341,10 +343,10 @@ public class SharedFileSystemAutoReplicaFailoverTest extends AbstractFullDistrib
   }
 
   /** After failover, ulogDir should not be changed. */
-  private void assertUlogDir(String... collections) {
+  private void assertUlogDir(String... collections) throws IOException {
     for (String collection : collections) {
       Collection<Slice> slices =
-          cloudClient.getZkStateReader().getClusterState().getCollection(collection).getSlices();
+          cloudClient.getClusterState().getCollection(collection).getSlices();
       for (Slice slice : slices) {
         for (Replica replica : slice.getReplicas()) {
           Map<String, Object> properties = replica.getProperties();
@@ -424,10 +426,10 @@ public class SharedFileSystemAutoReplicaFailoverTest extends AbstractFullDistrib
 
   private void assertSliceAndReplicaCount(
       String collection, int numSlices, int numReplicas, int timeOutInMs)
-      throws InterruptedException {
+      throws InterruptedException, IOException {
     TimeOut timeOut = new TimeOut(timeOutInMs, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME);
     while (!timeOut.hasTimedOut()) {
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+      ClusterState clusterState = cloudClient.getClusterState();
       Collection<Slice> slices = clusterState.getCollection(collection).getActiveSlices();
       if (slices.size() == numSlices) {
         boolean isMatch = true;
@@ -453,8 +455,8 @@ public class SharedFileSystemAutoReplicaFailoverTest extends AbstractFullDistrib
             + " numReplicas="
             + numReplicas
             + " but found "
-            + cloudClient.getZkStateReader().getClusterState().getCollection(collection)
+            + cloudClient.getClusterState().getCollection(collection)
             + " with /live_nodes: "
-            + cloudClient.getZkStateReader().getClusterState().getLiveNodes());
+            + cloudClient.getClusterState().getLiveNodes());
   }
 }
diff --git a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/StressHdfsTest.java b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/StressHdfsTest.java
index 60b02f0..13be05d 100644
--- a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/StressHdfsTest.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/StressHdfsTest.java
@@ -40,10 +40,7 @@ import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.AbstractBasicDistributedZkTestBase;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.*;
 import org.apache.solr.common.params.CollectionParams.CollectionAction;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -163,7 +160,7 @@ public class StressHdfsTest extends AbstractBasicDistributedZkTestBase {
     waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
 
     // data dirs should be in zk, SOLR-8913
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ClusterState clusterState = cloudClient.getClusterState();
     final DocCollection docCollection =
         clusterState.getCollectionOrNull(DELETE_DATA_DIR_COLLECTION);
     assertNotNull("Could not find :" + DELETE_DATA_DIR_COLLECTION, docCollection);
@@ -176,10 +173,10 @@ public class StressHdfsTest extends AbstractBasicDistributedZkTestBase {
     }
 
     cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
-    cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
-
+    ZkStateReader.from(cloudClient).forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
     for (int i = 1; i < nShards + 1; i++) {
-      cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
+      ZkStateReader.from(cloudClient)
+          .getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
     }
 
     // collect the data dirs
@@ -229,10 +226,7 @@ public class StressHdfsTest extends AbstractBasicDistributedZkTestBase {
     cloudClient.request(request);
 
     final TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (cloudClient
-        .getZkStateReader()
-        .getClusterState()
-        .hasCollection(DELETE_DATA_DIR_COLLECTION)) {
+    while (cloudClient.getClusterState().hasCollection(DELETE_DATA_DIR_COLLECTION)) {
       if (timeout.hasTimedOut()) {
         throw new AssertionError("Timeout waiting to see removed collection leave clusterstate");
       }
diff --git a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/api/collections/TestHdfsCloudBackupRestore.java b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/api/collections/TestHdfsCloudBackupRestore.java
index e982af3..b07d232 100644
--- a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/api/collections/TestHdfsCloudBackupRestore.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/cloud/api/collections/TestHdfsCloudBackupRestore.java
@@ -217,7 +217,7 @@ public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCa
 
     BackupManager mgr =
         BackupManager.forRestore(
-            repo, solrClient.getZkStateReader(), repo.resolve(baseLoc, backupName));
+            repo, cluster.getZkStateReader(), repo.resolve(baseLoc, backupName));
     BackupProperties props = mgr.readBackupProperties();
     assertNotNull(props);
     assertEquals(collectionName, props.getCollection());
diff --git a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/handler/TestHdfsBackupRestoreCore.java b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/handler/TestHdfsBackupRestoreCore.java
index 447e52c..b0ddc37 100644
--- a/solr/modules/hdfs/src/test/org/apache/solr/hdfs/handler/TestHdfsBackupRestoreCore.java
+++ b/solr/modules/hdfs/src/test/org/apache/solr/hdfs/handler/TestHdfsBackupRestoreCore.java
@@ -186,8 +186,7 @@ public class TestHdfsBackupRestoreCore extends SolrCloudTestCase {
 
     int nDocs = BackupRestoreUtils.indexDocs(solrClient, collectionName, docsSeed);
 
-    DocCollection collectionState =
-        solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection collectionState = solrClient.getClusterState().getCollection(collectionName);
     assertEquals(1, collectionState.getActiveSlices().size());
     Slice shard = collectionState.getActiveSlices().iterator().next();
     assertEquals(1, shard.getReplicas().size());
diff --git a/solr/modules/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java b/solr/modules/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java
index e38e489..11b0b43 100644
--- a/solr/modules/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java
+++ b/solr/modules/ltr/src/test/org/apache/solr/ltr/TestLTROnSolrCloud.java
@@ -31,7 +31,6 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.cloud.MiniSolrCloudCluster;
 import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.ltr.feature.FieldValueFeature;
 import org.apache.solr.ltr.feature.OriginalScoreFeature;
 import org.apache.solr.ltr.feature.SolrFeature;
@@ -353,9 +352,8 @@ public class TestLTROnSolrCloud extends TestRerankBase {
     response = create.process(solrCluster.getSolrClient());
 
     if (response.getStatus() != 0 || response.getErrorMessages() != null) {
-      fail("Could not create collection. Response" + response.toString());
+      fail("Could not create collection. Response" + response);
     }
-    ZkStateReader zkStateReader = solrCluster.getSolrClient().getZkStateReader();
     solrCluster.waitForActiveCollection(name, numShards, numShards * numReplicas);
   }
 
diff --git a/solr/modules/sql/src/java/org/apache/solr/handler/sql/SolrSchema.java b/solr/modules/sql/src/java/org/apache/solr/handler/sql/SolrSchema.java
index f354f8d..9bdac75 100644
--- a/solr/modules/sql/src/java/org/apache/solr/handler/sql/SolrSchema.java
+++ b/solr/modules/sql/src/java/org/apache/solr/handler/sql/SolrSchema.java
@@ -87,7 +87,7 @@ class SolrSchema extends AbstractSchema implements Closeable {
   protected Map<String, Table> getTableMap() {
     String zk = this.properties.getProperty("zk");
     CloudSolrClient cloudSolrClient = solrClientCache.getCloudSolrClient(zk);
-    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
     ClusterState clusterState = zkStateReader.getClusterState();
 
     final ImmutableMap.Builder<String, Table> builder = ImmutableMap.builder();
diff --git a/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrCloudScraper.java b/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrCloudScraper.java
index 188914f..2a6c6c3 100644
--- a/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrCloudScraper.java
+++ b/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrCloudScraper.java
@@ -53,8 +53,7 @@ public class SolrCloudScraper extends SolrScraper {
   public Map<String, MetricSamples> pingAllCores(MetricsQuery query) throws IOException {
     Map<String, HttpSolrClient> httpSolrClients = createHttpSolrClients();
 
-    Map<String, DocCollection> collectionState =
-        solrClient.getClusterStateProvider().getClusterState().getCollectionsMap();
+    Map<String, DocCollection> collectionState = solrClient.getClusterState().getCollectionsMap();
 
     List<Replica> replicas =
         collectionState.values().stream()
@@ -128,12 +127,7 @@ public class SolrCloudScraper extends SolrScraper {
   }
 
   private Set<String> getBaseUrls() throws IOException {
-    return solrClient
-        .getClusterStateProvider()
-        .getClusterState()
-        .getCollectionsMap()
-        .values()
-        .stream()
+    return solrClient.getClusterState().getCollectionsMap().values().stream()
         .map(DocCollection::getReplicas)
         .flatMap(List::stream)
         .map(Replica::getBaseUrl)
@@ -141,7 +135,7 @@ public class SolrCloudScraper extends SolrScraper {
   }
 
   private Set<String> getCollections() throws IOException {
-    return solrClient.getClusterStateProvider().getClusterState().getCollectionStates().keySet();
+    return solrClient.getClusterState().getCollectionStates().keySet();
   }
 
   @Override
diff --git a/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrScraper.java b/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrScraper.java
index 2b86efb..2ebc561 100644
--- a/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrScraper.java
+++ b/solr/prometheus-exporter/src/java/org/apache/solr/prometheus/scraper/SolrScraper.java
@@ -160,7 +160,7 @@ public abstract class SolrScraper implements Closeable {
 
           if (client instanceof CloudSolrClient) {
             labelNames.add("zk_host");
-            labelValues.add(((CloudSolrClient) client).getZkHost());
+            labelValues.add(((CloudSolrClient) client).getClusterStateProvider().getQuorumHosts());
           }
 
           // Deduce core if not there
diff --git a/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/PrometheusExporterTestBase.java b/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/PrometheusExporterTestBase.java
index 5bfbca1..0b0e826 100644
--- a/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/PrometheusExporterTestBase.java
+++ b/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/PrometheusExporterTestBase.java
@@ -66,7 +66,7 @@ public class PrometheusExporterTestBase extends SolrCloudTestCase {
         .process(cluster.getSolrClient());
 
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTION, cluster.getSolrClient().getZkStateReader(), true, true, TIMEOUT);
+        COLLECTION, cluster.getZkStateReader(), true, true, TIMEOUT);
 
     Helpers.indexAllDocs(cluster.getSolrClient());
   }
diff --git a/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/scraper/SolrCloudScraperTest.java b/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/scraper/SolrCloudScraperTest.java
index abeaf99..947d533 100644
--- a/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/scraper/SolrCloudScraperTest.java
+++ b/solr/prometheus-exporter/src/test/org/apache/solr/prometheus/scraper/SolrCloudScraperTest.java
@@ -30,10 +30,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.stream.Collectors;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.NoOpResponseParser;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.*;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.IOUtils;
 import org.apache.solr.common.util.SolrNamedThreadFactory;
@@ -73,7 +70,7 @@ public class SolrCloudScraperTest extends PrometheusExporterTestBase {
   }
 
   private ClusterState getClusterState() {
-    return cluster.getSolrClient().getZkStateReader().getClusterState();
+    return cluster.getSolrClient().getClusterState();
   }
 
   private DocCollection getCollectionState() {
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/DelegatingClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/DelegatingClusterStateProvider.java
index 35ff43d..b8e7322 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/DelegatingClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/DelegatingClusterStateProvider.java
@@ -79,7 +79,7 @@ public class DelegatingClusterStateProvider implements ClusterStateProvider {
   }
 
   @Override
-  public ClusterState getClusterState() throws IOException {
+  public ClusterState getClusterState() {
     if (delegate != null) {
       return delegate.getClusterState();
     } else {
@@ -124,4 +124,12 @@ public class DelegatingClusterStateProvider implements ClusterStateProvider {
       delegate.close();
     }
   }
+
+  @Override
+  public String getQuorumHosts() {
+    if (delegate != null) {
+      return delegate.getQuorumHosts();
+    }
+    return null;
+  }
 }
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/SolrCloudManager.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/SolrCloudManager.java
index e682c16..3c5e5d8 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/SolrCloudManager.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/SolrCloudManager.java
@@ -23,6 +23,7 @@ import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.common.SolrCloseable;
+import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.util.ObjectCache;
 import org.apache.solr.common.util.TimeSource;
 
@@ -37,6 +38,10 @@ public interface SolrCloudManager extends SolrCloseable {
 
   ClusterStateProvider getClusterStateProvider();
 
+  default ClusterState getClusterState() throws IOException {
+    return getClusterStateProvider().getClusterState();
+  }
+
   NodeStateProvider getNodeStateProvider();
 
   DistribStateManager getDistribStateManager();
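
SolrCloudManager gains a default getClusterState() that delegates to its ClusterStateProvider, so callers can skip the extra hop. A minimal sketch, assuming a SolrCloudManager instance is already in hand; the helper name is made up:

    import java.io.IOException;
    import org.apache.solr.client.solrj.cloud.SolrCloudManager;
    import org.apache.solr.common.cloud.ClusterState;

    // Count live nodes via the new default method.
    static int liveNodeCount(SolrCloudManager cloudManager) throws IOException {
      // was: cloudManager.getClusterStateProvider().getClusterState()
      ClusterState state = cloudManager.getClusterState();
      return state.getLiveNodes().size();
    }
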
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
index 16c7420..c2390d0 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
@@ -241,6 +241,10 @@ public abstract class BaseCloudSolrClient extends SolrClient {
 
   public abstract ClusterStateProvider getClusterStateProvider();
 
+  public ClusterState getClusterState() {
+    return getClusterStateProvider().getClusterState();
+  }
+
   protected abstract boolean wasCommError(Throwable t);
 
   @Override
@@ -273,23 +277,6 @@ public abstract class BaseCloudSolrClient extends SolrClient {
     getLbClient().setRequestWriter(requestWriter);
   }
 
-  /**
-   * @return the zkHost value used to connect to zookeeper.
-   */
-  public String getZkHost() {
-    return assertZKStateProvider().zkHost;
-  }
-
-  public ZkStateReader getZkStateReader() {
-    if (getClusterStateProvider() instanceof ZkClientClusterStateProvider) {
-      ZkClientClusterStateProvider provider =
-          (ZkClientClusterStateProvider) getClusterStateProvider();
-      getClusterStateProvider().connect();
-      return provider.zkStateReader;
-    }
-    throw new IllegalStateException("This has no Zk stateReader");
-  }
-
   /** Sets the default collection for request */
   public void setDefaultCollection(String collection) {
     this.defaultCollection = collection;
@@ -300,16 +287,6 @@ public abstract class BaseCloudSolrClient extends SolrClient {
     return defaultCollection;
   }
 
-  /** Set the connect timeout to the zookeeper ensemble in ms */
-  public void setZkConnectTimeout(int zkConnectTimeout) {
-    assertZKStateProvider().zkConnectTimeout = zkConnectTimeout;
-  }
-
-  /** Set the timeout to the zookeeper ensemble in ms */
-  public void setZkClientTimeout(int zkClientTimeout) {
-    assertZKStateProvider().zkClientTimeout = zkClientTimeout;
-  }
-
   /** Gets whether direct updates are sent in parallel */
   public boolean isParallelUpdates() {
     return parallelUpdates;
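
The removals above drive most of the call-site churn in this commit. A hedged migration sketch of the replacements, assuming a made-up ZooKeeper address and collection name:

    import java.util.List;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.ZkStateReader;

    static void migratedUsage() throws Exception {
      try (CloudSolrClient client =
          new CloudSolrClient.Builder(List.of("zk1:2181"), Optional.empty()).build()) {
        // was: client.setZkConnectTimeout(...) / client.setZkClientTimeout(...)
        ZkClientClusterStateProvider zkProvider = ZkClientClusterStateProvider.from(client);
        zkProvider.setZkConnectTimeout(15000);
        zkProvider.setZkClientTimeout(45000);

        // was: client.getZkStateReader()
        ZkStateReader reader = ZkStateReader.from(client);
        reader.forceUpdateCollection("myCollection");

        // was: client.getZkStateReader().getClusterState()
        ClusterState state = client.getClusterState();

        // was: client.getZkHost()
        String hosts = client.getClusterStateProvider().getQuorumHosts();
      }
    }
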
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseHttpClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseHttpClusterStateProvider.java
index 5722822..93eccdc 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseHttpClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseHttpClusterStateProvider.java
@@ -278,7 +278,7 @@ public abstract class BaseHttpClusterStateProvider implements ClusterStateProvid
   }
 
   @Override
-  public ClusterState getClusterState() throws IOException {
+  public ClusterState getClusterState() {
     for (String nodeName : liveNodes) {
       String baseUrl = Utils.getBaseUrlForNodeName(nodeName, urlScheme);
       try (SolrClient client = getSolrClient(baseUrl)) {
@@ -358,4 +358,12 @@ public abstract class BaseHttpClusterStateProvider implements ClusterStateProvid
 
   // This exception is not meant to escape this class; it should be caught and wrapped.
   private class NotACollectionException extends Exception {}
+
+  @Override
+  public String getQuorumHosts() {
+    if (this.liveNodes == null) {
+      return null;
+    }
+    return String.join(",", this.liveNodes);
+  }
 }
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java
index 6f6fb04..7c7aa2f 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ClusterStateProvider.java
@@ -70,7 +70,7 @@ public interface ClusterStateProvider extends SolrCloseable {
   }
 
   /** Obtain the current cluster state. */
-  ClusterState getClusterState() throws IOException;
+  ClusterState getClusterState();
 
   default DocCollection getCollection(String name) throws IOException {
     return getClusterState().getCollectionOrNull(name);
@@ -100,4 +100,6 @@ public interface ClusterStateProvider extends SolrCloseable {
   String getPolicyNameByCollection(String coll);
 
   void connect();
+
+  String getQuorumHosts();
 }
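
With getClusterState() no longer declaring IOException and the new getQuorumHosts(), code that only reads cluster metadata can stay agnostic of whether the provider is ZooKeeper- or HTTP-backed. A small illustrative helper, not part of the commit:

    import org.apache.solr.client.solrj.impl.ClusterStateProvider;
    import org.apache.solr.common.cloud.ClusterState;

    // Works with either ZkClientClusterStateProvider or an HTTP ClusterStateProvider.
    static String describeCluster(ClusterStateProvider provider) {
      ClusterState state = provider.getClusterState(); // no checked exception anymore
      String hosts = provider.getQuorumHosts();        // ZK address, or joined live nodes for HTTP
      return hosts + " -> " + state.getLiveNodes().size() + " live node(s)";
    }
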
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientCloudManager.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientCloudManager.java
index 3e61cff..02649a7 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientCloudManager.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientCloudManager.java
@@ -68,7 +68,7 @@ public class SolrClientCloudManager implements SolrCloudManager {
       DistributedQueueFactory queueFactory, CloudSolrClient solrClient, ObjectCache objectCache) {
     this.queueFactory = queueFactory;
     this.solrClient = solrClient;
-    this.zkStateReader = solrClient.getZkStateReader();
+    this.zkStateReader = ZkStateReader.from(solrClient);
     this.zkClient = zkStateReader.getZkClient();
     this.stateManager = new ZkDistribStateManager(zkClient);
     this.isClosed = false;
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
index 98afa04..140fd90 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ZkClientClusterStateProvider.java
@@ -39,15 +39,24 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {
 
   volatile ZkStateReader zkStateReader;
   private boolean closeZkStateReader = true;
-  String zkHost;
-  int zkConnectTimeout = 15000;
-  int zkClientTimeout = 45000;
+  private final String zkHost;
+  private int zkConnectTimeout = 15000;
+  private int zkClientTimeout = 45000;
 
   private volatile boolean isClosed = false;
 
+  /** Extracts this provider from the client, or throws if the client is not ZooKeeper based. */
+  public static ZkClientClusterStateProvider from(BaseCloudSolrClient client) {
+    if (client.getClusterStateProvider() instanceof ZkClientClusterStateProvider) {
+      return (ZkClientClusterStateProvider) client.getClusterStateProvider();
+    }
+    throw new IllegalArgumentException("This client does not use ZK");
+  }
+
   public ZkClientClusterStateProvider(ZkStateReader zkStateReader) {
     this.zkStateReader = zkStateReader;
     this.closeZkStateReader = false;
+    this.zkHost = null;
   }
 
   public ZkClientClusterStateProvider(Collection<String> zkHosts, String chroot) {
@@ -110,7 +119,7 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {
   }
 
   @Override
-  public ClusterState getClusterState() throws IOException {
+  public ClusterState getClusterState() {
     return getZkStateReader().getClusterState();
   }
 
@@ -216,6 +225,11 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {
   }
 
   @Override
+  public String getQuorumHosts() {
+    return getZkStateReader().getZkClient().getZkServerAddress();
+  }
+
+  @Override
   public String toString() {
     return zkHost;
   }
@@ -224,4 +238,29 @@ public class ZkClientClusterStateProvider implements ClusterStateProvider {
   public boolean isClosed() {
     return isClosed;
   }
+
+  /**
+   * @return the zkHost value used to connect to zookeeper.
+   */
+  public String getZkHost() {
+    return zkHost;
+  }
+
+  public int getZkConnectTimeout() {
+    return zkConnectTimeout;
+  }
+
+  /** Set the connect timeout to the zookeeper ensemble in ms */
+  public void setZkConnectTimeout(int zkConnectTimeout) {
+    this.zkConnectTimeout = zkConnectTimeout;
+  }
+
+  public int getZkClientTimeout() {
+    return zkClientTimeout;
+  }
+
+  /** Set the timeout to the zookeeper ensemble in ms */
+  public void setZkClientTimeout(int zkClientTimeout) {
+    this.zkClientTimeout = zkClientTimeout;
+  }
 }
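
The zkHost field becomes final and the ZooKeeper timeouts move here with proper getters and setters, reached through the static from(...) helper. A brief sketch; the helper name and timeout values are only illustrative:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;

    static String tuneZkTimeouts(CloudSolrClient client) {
      // Throws IllegalArgumentException if the client is not ZooKeeper based.
      ZkClientClusterStateProvider zkProvider = ZkClientClusterStateProvider.from(client);
      zkProvider.setZkConnectTimeout(15000); // ms
      zkProvider.setZkClientTimeout(45000);  // ms
      // May be null when the provider was built from an existing ZkStateReader.
      return zkProvider.getZkHost();
    }
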
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/ConnectionImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/ConnectionImpl.java
index 101b0b1..548bf19 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/ConnectionImpl.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/ConnectionImpl.java
@@ -164,7 +164,7 @@ class ConnectionImpl implements Connection {
 
   @Override
   public String getCatalog() throws SQLException {
-    return this.client.getZkHost();
+    return client.getClusterStateProvider().getQuorumHosts();
   }
 
   @Override
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java
index c54eaec..5c16232 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/DatabaseMetaDataImpl.java
@@ -30,6 +30,7 @@ import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
 import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.SimpleOrderedMap;
 
 class DatabaseMetaDataImpl implements DatabaseMetaData {
@@ -113,11 +114,11 @@ class DatabaseMetaDataImpl implements DatabaseMetaData {
     sysQuery.setRequestHandler("/admin/info/system");
 
     CloudSolrClient cloudSolrClient = this.connection.getClient();
-    Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cloudSolrClient.getClusterState().getLiveNodes();
     SolrClient solrClient = null;
     for (String node : liveNodes) {
       try {
-        String nodeURL = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(node);
+        String nodeURL = ZkStateReader.from(cloudSolrClient).getBaseUrlForNodeName(node);
         solrClient = new Builder(nodeURL).build();
 
         QueryResponse rsp = solrClient.query(sysQuery);
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java
index 70a974c..7d78830 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/sql/StatementImpl.java
@@ -76,7 +76,7 @@ class StatementImpl implements Statement {
 
   protected SolrStream constructStream(String sql) throws IOException {
     try {
-      ZkStateReader zkStateReader = this.connection.getClient().getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(this.connection.getClient());
       Slice[] slices =
           CloudSolrStream.getSlices(this.connection.getCollection(), zkStateReader, true);
 
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FeaturesSelectionStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FeaturesSelectionStream.java
index ae6870c..52df95f 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FeaturesSelectionStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FeaturesSelectionStream.java
@@ -280,7 +280,7 @@ public class FeaturesSelectionStream extends TupleStream implements Expressible
 
   private List<String> getShardUrls() throws IOException {
     try {
-      ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
 
       Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
 
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java
index 3b9674c..b3c98da 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java
@@ -383,7 +383,7 @@ public class TextLogitStream extends TupleStream implements Expressible {
 
   protected List<String> getShardUrls() throws IOException {
     try {
-      ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
 
       Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
 
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
index 45e4efe..b1450a5 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
@@ -420,7 +420,7 @@ public class TopicStream extends CloudSolrStream implements Expressible {
 
   private void getCheckpoints() throws IOException {
     this.checkpoints = new HashMap<>();
-    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
 
     Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
 
@@ -501,7 +501,7 @@ public class TopicStream extends CloudSolrStream implements Expressible {
   }
 
   private void getPersistedCheckpoints() throws IOException {
-    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
     Slice[] slices = CloudSolrStream.getSlices(checkpointCollection, zkStateReader, false);
 
     ClusterState clusterState = zkStateReader.getClusterState();
@@ -536,7 +536,7 @@ public class TopicStream extends CloudSolrStream implements Expressible {
 
   protected void constructStreams() throws IOException {
     try {
-      ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
       Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
 
       ModifiableSolrParams mParams = new ModifiableSolrParams(params);
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TupleStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TupleStream.java
index 2f6c9ae..b3fe795 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TupleStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TupleStream.java
@@ -149,7 +149,7 @@ public abstract class TupleStream implements Closeable, Serializable, MapWriter
     }
 
     CloudSolrClient cloudSolrClient = solrClientCache.getCloudSolrClient(zkHost);
-    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudSolrClient);
     ClusterState clusterState = zkStateReader.getClusterState();
     Slice[] slices = CloudSolrStream.getSlices(collection, zkStateReader, true);
     Set<String> liveNodes = clusterState.getLiveNodes();
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index 5395d64..b25e5f5 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -47,6 +47,8 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Predicate;
 import java.util.function.UnaryOperator;
 import java.util.stream.Collectors;
+import org.apache.solr.client.solrj.impl.BaseCloudSolrClient;
+import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
 import org.apache.solr.common.AlreadyClosedException;
 import org.apache.solr.common.Callable;
 import org.apache.solr.common.SolrCloseable;
@@ -221,6 +223,20 @@ public class ZkStateReader implements SolrCloseable {
   // only kept to identify if the cleaner has already been started.
   private Future<?> collectionPropsCacheCleaner;
 
+  /**
+   * Gets the ZkStateReader inside a ZK based SolrClient.
+   *
+   * @throws IllegalArgumentException if solrClient isn't ZK based.
+   */
+  public static ZkStateReader from(BaseCloudSolrClient solrClient) {
+    try {
+      var provider = (ZkClientClusterStateProvider) solrClient.getClusterStateProvider();
+      return provider.getZkStateReader();
+    } catch (ClassCastException e) {
+      throw new IllegalArgumentException("client has no Zk stateReader", e);
+    }
+  }
+
   private static class CollectionWatch<T> {
 
     int coreRefCount = 0;
@@ -1651,6 +1667,8 @@ public class ZkStateReader implements SolrCloseable {
    * are encouraged to use the more specific register methods as it may reduce the number of
    * ZooKeeper watchers needed, and reduce the amount of network/cpu used.
    *
+   * @param collection the collection to watch
+   * @param stateWatcher a watcher that will be called when the state changes
    * @see #registerDocCollectionWatcher
    * @see #registerLiveNodesListener
    */
@@ -1669,7 +1687,7 @@ public class ZkStateReader implements SolrCloseable {
   }
 
   /**
-   * Register a DocCollectionWatcher to be called when the state of a collection changes
+   * Register a DocCollectionWatcher to be called when the cluster state for a collection changes.
    *
    * <p>The Watcher will automatically be removed when it's <code>onStateChanged</code> returns
    * <code>true</code>
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientBuilderTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientBuilderTest.java
index e0bcdd3..dd5b8a9 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientBuilderTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientBuilderTest.java
@@ -43,7 +43,7 @@ public class CloudHttp2SolrClientBuilderTest extends SolrTestCase {
         new CloudHttp2SolrClient.Builder(
                 Collections.singletonList(ANY_ZK_HOST), Optional.of(ANY_CHROOT))
             .build()) {
-      final String clientZkHost = createdClient.getZkHost();
+      final String clientZkHost = ZkClientClusterStateProvider.from(createdClient).getZkHost();
 
       assertTrue(clientZkHost.contains(ANY_ZK_HOST));
     }
@@ -56,7 +56,7 @@ public class CloudHttp2SolrClientBuilderTest extends SolrTestCase {
     zkHostList.add(ANY_OTHER_ZK_HOST);
     try (CloudHttp2SolrClient createdClient =
         new CloudHttp2SolrClient.Builder(zkHostList, Optional.of(ANY_CHROOT)).build()) {
-      final String clientZkHost = createdClient.getZkHost();
+      final String clientZkHost = ZkClientClusterStateProvider.from(createdClient).getZkHost();
 
       assertTrue(clientZkHost.contains(ANY_ZK_HOST));
       assertTrue(clientZkHost.contains(ANY_OTHER_ZK_HOST));
@@ -70,7 +70,7 @@ public class CloudHttp2SolrClientBuilderTest extends SolrTestCase {
     zkHosts.add(ANY_OTHER_ZK_HOST);
     try (CloudHttp2SolrClient createdClient =
         new CloudHttp2SolrClient.Builder(zkHosts, Optional.of(ANY_CHROOT)).build()) {
-      final String clientZkHost = createdClient.getZkHost();
+      final String clientZkHost = ZkClientClusterStateProvider.from(createdClient).getZkHost();
 
       assertTrue(clientZkHost.contains(ANY_ZK_HOST));
       assertTrue(clientZkHost.contains(ANY_OTHER_ZK_HOST));
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientMultiConstructorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientMultiConstructorTest.java
index f5ff770..5ee6acc 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientMultiConstructorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientMultiConstructorTest.java
@@ -71,7 +71,7 @@ public class CloudHttp2SolrClientMultiConstructorTest extends SolrTestCase {
     try (CloudHttp2SolrClient client =
         new CloudHttp2SolrClient.Builder(new ArrayList<>(hosts), Optional.ofNullable(clientChroot))
             .build()) {
-      assertEquals(sb.toString(), client.getZkHost());
+      assertEquals(sb.toString(), ZkClientClusterStateProvider.from(client).getZkHost());
     }
   }
 
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
index effe17e..c727c93 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudHttp2SolrClientTest.java
@@ -344,7 +344,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
     // Test that queries with _route_ params are routed by the client
 
     // Track request counts on each node before query calls
-    ClusterState clusterState = cluster.getSolrClient().getZkStateReader().getClusterState();
+    ClusterState clusterState = cluster.getSolrClient().getClusterState();
     DocCollection col = clusterState.getCollection("routing_collection");
     Map<String, Long> requestCountsMap = Maps.newHashMap();
     for (Slice slice : col.getSlices()) {
@@ -752,8 +752,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
         .process(cluster.getSolrClient());
     cluster.waitForActiveCollection(COLLECTION, 2, 2);
 
-    DocCollection coll =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
+    DocCollection coll = cluster.getSolrClient().getClusterState().getCollection(COLLECTION);
     Replica r = coll.getSlices().iterator().next().getReplicas().iterator().next();
 
     SolrQuery q = new SolrQuery().setQuery("*:*");
@@ -795,10 +794,9 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
       }
     }
     String theNode = null;
-    Set<String> liveNodes =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cluster.getSolrClient().getClusterState().getLiveNodes();
     for (String s : liveNodes) {
-      String n = cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(s);
+      String n = cluster.getZkStateReader().getBaseUrlForNodeName(s);
       if (!allNodesOfColl.contains(n)) {
         theNode = n;
         break;
@@ -826,7 +824,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
   @Test
   public void testShutdown() throws IOException {
     try (CloudSolrClient client = getCloudSolrClient(DEAD_HOST_1)) {
-      client.setZkConnectTimeout(100);
+      ZkClientClusterStateProvider.from(client).setZkConnectTimeout(100);
       client.connect();
       fail("Expected exception");
     } catch (SolrException e) {
@@ -844,7 +842,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
 
     try (CloudSolrClient client =
         getCloudSolrClient(cluster.getZkServer().getZkAddress() + "/xyz/foo")) {
-      client.setZkClientTimeout(1000 * 60);
+      ZkClientClusterStateProvider.from(client).setZkClientTimeout(1000 * 60);
       client.connect();
       fail("Expected exception");
     }
@@ -961,7 +959,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
 
     // determine the coreNodeName of only current replica
     Collection<Slice> slices =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COL).getSlices();
+        cluster.getSolrClient().getClusterState().getCollection(COL).getSlices();
     assertEquals(1, slices.size()); // sanity check
     Slice slice = slices.iterator().next();
     assertEquals(1, slice.getReplicas().size()); // sanity check
@@ -992,8 +990,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
               .process(cluster.getSolrClient())
               .getStatus());
       AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-          COL, cluster.getSolrClient().getZkStateReader(), true, true, 330);
-
+          COL, cluster.getZkStateReader(), true, true, 330);
       // ...and delete our original leader.
       assertEquals(
           "Couldn't create collection",
@@ -1004,7 +1001,7 @@ public class CloudHttp2SolrClientTest extends SolrCloudTestCase {
               .process(cluster.getSolrClient())
               .getStatus());
       AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-          COL, cluster.getSolrClient().getZkStateReader(), true, true, 330);
+          COL, cluster.getZkStateReader(), true, true, 330);
 
       // stale_client's collection state cache should now only point at a leader that no longer
       // exists.
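
The CloudHttp2SolrClientTest hunks above all apply the same two substitutions: cluster state is read from the client itself, and ZooKeeper timeouts are set through the provider helper. A minimal sketch of the resulting pattern, assuming a CloudSolrClient built elsewhere (the class, method, and variable names here are illustrative, not part of the commit):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;
import org.apache.solr.common.cloud.ClusterState;

class ZkHiddenAccessSketch {
  // Tune ZK timeouts through the provider helper, then read cluster state from the client.
  static ClusterState connectAndRead(CloudSolrClient client) {
    ZkClientClusterStateProvider.from(client).setZkConnectTimeout(100);      // was client.setZkConnectTimeout(100)
    ZkClientClusterStateProvider.from(client).setZkClientTimeout(1000 * 60); // was client.setZkClientTimeout(1000 * 60)
    client.connect();
    return client.getClusterState(); // was client.getZkStateReader().getClusterState()
  }
}

The same substitutions are applied verbatim to CloudSolrClientTest further below.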
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
index 76c6820..d45dcf1 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientBuilderTest.java
@@ -35,8 +35,7 @@ public class CloudSolrClientBuilderTest extends SolrTestCase {
   public void testSingleZkHostSpecified() throws IOException {
     try (CloudSolrClient createdClient =
         new Builder(Collections.singletonList(ANY_ZK_HOST), Optional.of(ANY_CHROOT)).build()) {
-      final String clientZkHost = createdClient.getZkHost();
-
+      final String clientZkHost = ZkClientClusterStateProvider.from(createdClient).getZkHost();
       assertTrue(clientZkHost.contains(ANY_ZK_HOST));
     }
   }
@@ -47,8 +46,7 @@ public class CloudSolrClientBuilderTest extends SolrTestCase {
     zkHostList.add(ANY_ZK_HOST);
     zkHostList.add(ANY_OTHER_ZK_HOST);
     try (CloudSolrClient createdClient = new Builder(zkHostList, Optional.of(ANY_CHROOT)).build()) {
-      final String clientZkHost = createdClient.getZkHost();
-
+      final String clientZkHost = ZkClientClusterStateProvider.from(createdClient).getZkHost();
       assertTrue(clientZkHost.contains(ANY_ZK_HOST));
       assertTrue(clientZkHost.contains(ANY_OTHER_ZK_HOST));
     }
@@ -60,8 +58,7 @@ public class CloudSolrClientBuilderTest extends SolrTestCase {
     zkHosts.add(ANY_ZK_HOST);
     zkHosts.add(ANY_OTHER_ZK_HOST);
     try (CloudSolrClient createdClient = new Builder(zkHosts, Optional.of(ANY_CHROOT)).build()) {
-      final String clientZkHost = createdClient.getZkHost();
-
+      final String clientZkHost = ZkClientClusterStateProvider.from(createdClient).getZkHost();
       assertTrue(clientZkHost.contains(ANY_ZK_HOST));
       assertTrue(clientZkHost.contains(ANY_OTHER_ZK_HOST));
     }
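
The builder tests now read the ZK host string through the cluster state provider rather than the client. A minimal sketch of that access path, assuming an arbitrary host and chroot (the helper name is illustrative):

import java.io.IOException;
import java.util.Collections;
import java.util.Optional;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.ZkClientClusterStateProvider;

class ZkHostSketch {
  // The ZK host string is now exposed by the cluster state provider, not the client.
  static String zkHostOf(String zkHost, String chroot) throws IOException {
    try (CloudSolrClient client =
        new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.of(chroot)).build()) {
      return ZkClientClusterStateProvider.from(client).getZkHost(); // was client.getZkHost()
    }
  }
}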
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
index b76cb2f..919563b 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
@@ -71,7 +71,7 @@ public class CloudSolrClientMultiConstructorTest extends SolrTestCase {
     try (CloudSolrClient client =
         (new CloudSolrClient.Builder(new ArrayList<>(hosts), Optional.ofNullable(clientChroot))
             .build())) {
-      assertEquals(sb.toString(), client.getZkHost());
+      assertEquals(sb.toString(), ZkClientClusterStateProvider.from(client).getZkHost());
     }
   }
 
@@ -99,7 +99,7 @@ public class CloudSolrClientMultiConstructorTest extends SolrTestCase {
     final Optional<String> chrootOption =
         withChroot == false ? Optional.empty() : Optional.of(chroot);
     try (CloudSolrClient client = new CloudSolrClient.Builder(hosts, chrootOption).build()) {
-      assertEquals(sb.toString(), client.getZkHost());
+      assertEquals(sb.toString(), ZkClientClusterStateProvider.from(client).getZkHost());
     }
   }
 
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index ee3f3d9..a67df6d 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -44,7 +44,11 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.request.*;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.request.V2Request;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.RequestStatusState;
 import org.apache.solr.client.solrj.response.SolrPingResponse;
@@ -55,7 +59,13 @@ import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.*;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.PerReplicaStates;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.ShardParams;
@@ -333,7 +343,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
     // Test that queries with _route_ params are routed by the client
 
     // Track request counts on each node before query calls
-    ClusterState clusterState = cluster.getSolrClient().getZkStateReader().getClusterState();
+    ClusterState clusterState = cluster.getSolrClient().getClusterState();
     DocCollection col = clusterState.getCollection("routing_collection");
     Map<String, Long> requestCountsMap = Maps.newHashMap();
     for (Slice slice : col.getSlices()) {
@@ -744,8 +754,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
         .process(cluster.getSolrClient());
     cluster.waitForActiveCollection(COLLECTION, 2, 2);
 
-    DocCollection coll =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
+    DocCollection coll = cluster.getSolrClient().getClusterState().getCollection(COLLECTION);
     Replica r = coll.getSlices().iterator().next().getReplicas().iterator().next();
 
     SolrQuery q = new SolrQuery().setQuery("*:*");
@@ -787,10 +796,9 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
       }
     }
     String theNode = null;
-    Set<String> liveNodes =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cluster.getSolrClient().getClusterState().getLiveNodes();
     for (String s : liveNodes) {
-      String n = cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(s);
+      String n = cluster.getZkStateReader().getBaseUrlForNodeName(s);
       if (!allNodesOfColl.contains(n)) {
         theNode = n;
         break;
@@ -818,7 +826,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
   @Test
   public void testShutdown() throws IOException {
     try (CloudSolrClient client = getCloudSolrClient(DEAD_HOST_1)) {
-      client.setZkConnectTimeout(100);
+      ZkClientClusterStateProvider.from(client).setZkConnectTimeout(100);
       SolrException ex = expectThrows(SolrException.class, client::connect);
       assertTrue(ex.getCause() instanceof TimeoutException);
     }
@@ -830,7 +838,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
   public void testWrongZkChrootTest() throws IOException {
     try (CloudSolrClient client =
         getCloudSolrClient(cluster.getZkServer().getZkAddress() + "/xyz/foo")) {
-      client.setZkClientTimeout(1000 * 60);
+      ZkClientClusterStateProvider.from(client).setZkClientTimeout(1000 * 60);
       SolrException ex = expectThrows(SolrException.class, client::connect);
       MatcherAssert.assertThat(
           "Wrong error message for empty chRoot",
@@ -949,7 +957,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
 
     // determine the coreNodeName of only current replica
     Collection<Slice> slices =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COL).getSlices();
+        cluster.getSolrClient().getClusterState().getCollection(COL).getSlices();
     assertEquals(1, slices.size()); // sanity check
     Slice slice = slices.iterator().next();
     assertEquals(1, slice.getReplicas().size()); // sanity check
@@ -980,8 +988,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
               .process(cluster.getSolrClient())
               .getStatus());
       AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-          COL, cluster.getSolrClient().getZkStateReader(), true, true, 330);
-
+          COL, cluster.getZkStateReader(), true, true, 330);
       // ...and delete our original leader.
       assertEquals(
           "Couldn't create collection",
@@ -992,7 +999,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
               .process(cluster.getSolrClient())
               .getStatus());
       AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-          COL, cluster.getSolrClient().getZkStateReader(), true, true, 330);
+          COL, cluster.getZkStateReader(), true, true, 330);
 
       // stale_client's collection state cache should now only point at a leader that no longer
       // exists.
@@ -1147,7 +1154,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
     final SolrClient clientUnderTest = getRandomClient();
     final SolrPingResponse response = clientUnderTest.ping(testCollection);
     assertEquals("This should be OK", 0, response.getStatus());
-    DocCollection c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
+    DocCollection c = cluster.getZkStateReader().getCollection(testCollection);
     c.forEachReplica((s, replica) -> assertNotNull(replica.getReplicaState()));
     PerReplicaStates prs =
         PerReplicaStates.fetch(
@@ -1170,7 +1177,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
         .build()
         .process(cluster.getSolrClient());
     cluster.waitForActiveCollection(testCollection, 2, 4);
-    c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
+    c = cluster.getZkStateReader().getCollection(testCollection);
     c.forEachReplica((s, replica) -> assertNotNull(replica.getReplicaState()));
     prs =
         PerReplicaStates.fetch(
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
index fa88df8..a9114cc 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
@@ -36,7 +36,14 @@ import org.apache.solr.client.solrj.io.SolrClientCache;
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.comp.ComparatorOrder;
 import org.apache.solr.client.solrj.io.comp.FieldComparator;
-import org.apache.solr.client.solrj.io.stream.*;
+import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
+import org.apache.solr.client.solrj.io.stream.FacetStream;
+import org.apache.solr.client.solrj.io.stream.HashJoinStream;
+import org.apache.solr.client.solrj.io.stream.RandomStream;
+import org.apache.solr.client.solrj.io.stream.ScoreNodesStream;
+import org.apache.solr.client.solrj.io.stream.SortStream;
+import org.apache.solr.client.solrj.io.stream.StreamContext;
+import org.apache.solr.client.solrj.io.stream.TupleStream;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 import org.apache.solr.client.solrj.io.stream.metrics.CountMetric;
 import org.apache.solr.client.solrj.io.stream.metrics.MaxMetric;
@@ -85,7 +92,7 @@ public class GraphExpressionTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
         .process(cluster.getSolrClient());
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, TIMEOUT);
+        COLLECTION, cluster.getZkStateReader(), false, true, TIMEOUT);
   }
 
   @Before
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java
index 768bd8b..2db9447 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/sql/JdbcTest.java
@@ -86,7 +86,7 @@ public class JdbcTest extends SolrCloudTestCase {
     cluster.waitForActiveCollection(collection, 2, 2);
 
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        collection, cluster.getSolrClient().getZkStateReader(), false, true, DEFAULT_TIMEOUT);
+        collection, cluster.getZkStateReader(), false, true, DEFAULT_TIMEOUT);
     if (useAlias) {
       CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection)
           .process(cluster.getSolrClient());
@@ -652,7 +652,7 @@ public class JdbcTest extends SolrCloudTestCase {
 
       CloudSolrClient solrClient = cluster.getSolrClient();
       solrClient.connect();
-      ZkStateReader zkStateReader = solrClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(solrClient);
 
       Set<String> collectionsSet = zkStateReader.getClusterState().getCollectionsMap().keySet();
       SortedSet<String> tables = new TreeSet<>(collectionsSet);
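
JdbcTest obtains its ZkStateReader through the new static factory rather than client.getZkStateReader(). A small sketch of that access path, assuming a connected CloudSolrClient (the helper name is illustrative):

import java.util.Set;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ZkStateReader;

class CollectionListingSketch {
  // ZkStateReader now comes from the static factory rather than client.getZkStateReader().
  static Set<String> collectionNames(CloudSolrClient solrClient) {
    solrClient.connect();
    ZkStateReader zkStateReader = ZkStateReader.from(solrClient);
    return zkStateReader.getClusterState().getCollectionsMap().keySet();
  }
}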
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/CloudAuthStreamTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/CloudAuthStreamTest.java
index 5f3422b..7e0f090 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/CloudAuthStreamTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/CloudAuthStreamTest.java
@@ -16,6 +16,8 @@
  */
 package org.apache.solr.client.solrj.io.stream;
 
+import static org.apache.solr.security.Sha256AuthenticationProvider.getSaltedHashedValue;
+
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
@@ -26,7 +28,6 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.stream.Collectors;
-
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -47,8 +48,6 @@ import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.security.Sha256AuthenticationProvider.getSaltedHashedValue;
-
 /**
  * tests various streaming expressions (via the SolrJ {@link SolrStream} API) against a SolrCloud cluster
  * using both Authentication and Role-based Authorization
@@ -130,8 +129,13 @@ public class CloudAuthStreamTest extends SolrCloudTestCase {
     }
     
     for (String collection : Arrays.asList(COLLECTION_X, COLLECTION_Y)) {
-      cluster.getSolrClient().waitForState(collection, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
-                                           (n, c) -> DocCollection.isFullyActive(n, c, 2, 2));
+      cluster
+          .getZkStateReader()
+          .waitForState(
+              collection,
+              DEFAULT_TIMEOUT,
+              TimeUnit.SECONDS,
+              (n, c) -> DocCollection.isFullyActive(n, c, 2, 2));
     }
 
     solrUrl = cluster.getRandomJetty(random()).getProxyBaseUrl().toString();
@@ -823,8 +827,8 @@ public class CloudAuthStreamTest extends SolrCloudTestCase {
    * Sigh.  DaemonStream requires polling the same core where the stream was exectured.
    */
   protected static String getRandomCoreUrl(final String collection) throws Exception {
-    final List<String> replicaUrls = 
-      cluster.getSolrClient().getZkStateReader().getClusterState()
+    final List<String> replicaUrls =
+      cluster.getSolrClient().getClusterState()
       .getCollectionOrNull(collection).getReplicas().stream()
       .map(Replica::getCoreUrl).collect(Collectors.toList());
     Collections.shuffle(replicaUrls, random());
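
CloudAuthStreamTest now waits for collection state through the cluster's ZkStateReader rather than the client. A sketch of that wait, assuming a MiniSolrCloudCluster and a 2x2 collection as in the test (the helper name and timeout parameter are illustrative):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.solr.cloud.MiniSolrCloudCluster;
import org.apache.solr.common.cloud.DocCollection;

class WaitForStateSketch {
  // Waiting on a collection's state goes through the cluster's ZkStateReader,
  // not CloudSolrClient.waitForState().
  static void waitUntilFullyActive(MiniSolrCloudCluster cluster, String collection, long timeoutSeconds)
      throws InterruptedException, TimeoutException {
    cluster
        .getZkStateReader()
        .waitForState(
            collection,
            timeoutSeconds,
            TimeUnit.SECONDS,
            (liveNodes, state) -> DocCollection.isFullyActive(liveNodes, state, 2, 2));
  }
}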
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/JDBCStreamTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/JDBCStreamTest.java
index 0adeba1..b895cdd 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/JDBCStreamTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/JDBCStreamTest.java
@@ -27,7 +27,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
-
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.client.solrj.io.SolrClientCache;
@@ -79,8 +78,8 @@ public class JDBCStreamTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(collection, "conf", 2, 1)
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
         .process(cluster.getSolrClient());
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collection, cluster.getSolrClient().getZkStateReader(),
-        false, true, TIMEOUT);
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(
+        collection, cluster.getZkStateReader(), false, true, TIMEOUT);
     if (useAlias) {
       CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection).process(cluster.getSolrClient());
     }
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
index 43d63f7..6535de7 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/MathExpressionTest.java
@@ -17,13 +17,14 @@
 
 package org.apache.solr.client.solrj.io.stream;
 
+import static org.apache.solr.client.solrj.io.stream.StreamAssert.assertList;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
@@ -40,8 +41,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import static org.apache.solr.client.solrj.io.stream.StreamAssert.assertList;
-
 @Slow
 @LuceneTestCase.SuppressCodecs({"Lucene3x", "Lucene40","Lucene41","Lucene42","Lucene45"})
 public class MathExpressionTest extends SolrCloudTestCase {
@@ -71,8 +70,8 @@ public class MathExpressionTest extends SolrCloudTestCase {
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
         .process(cluster.getSolrClient());
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collection, cluster.getSolrClient().getZkStateReader(),
-        false, true, TIMEOUT);
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(
+        collection, cluster.getZkStateReader(), false, true, TIMEOUT);
     if (useAlias) {
       CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection).process(cluster.getSolrClient());
     }
@@ -4043,7 +4042,7 @@ public void testCache() throws Exception {
     paramsLoc.set("expr", cexpr);
     paramsLoc.set("qt", "/stream");
     // find a node with a replica
-    ClusterState clusterState = cluster.getSolrClient().getClusterStateProvider().getClusterState();
+    ClusterState clusterState = cluster.getSolrClient().getClusterState();
     String collection = useAlias ? COLLECTIONORALIAS + "_collection" : COLLECTIONORALIAS;
     DocCollection coll = clusterState.getCollection(collection);
     String node = coll.getReplicas().iterator().next().getNodeName();
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/SelectWithEvaluatorsTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/SelectWithEvaluatorsTest.java
index cfea403..ec2c197 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/SelectWithEvaluatorsTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/SelectWithEvaluatorsTest.java
@@ -21,7 +21,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.io.SolrClientCache;
@@ -71,8 +70,8 @@ public class SelectWithEvaluatorsTest extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection(collection, "conf", 2, 1)
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
         .process(cluster.getSolrClient());
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collection, cluster.getSolrClient().getZkStateReader(),
-        false, true, TIMEOUT);
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(
+        collection, cluster.getZkStateReader(), false, true, TIMEOUT);
     if (useAlias) {
       CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection).process(cluster.getSolrClient());
     }
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
index 9fc42d8..f38ec2f 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
@@ -16,6 +16,9 @@
  */
 package org.apache.solr.client.solrj.io.stream;
 
+import static org.apache.solr.client.solrj.io.stream.StreamAssert.assertList;
+import static org.apache.solr.client.solrj.io.stream.StreamAssert.assertMaps;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -23,7 +26,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.SolrTestCaseJ4;
@@ -70,9 +72,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import static org.apache.solr.client.solrj.io.stream.StreamAssert.assertList;
-import static org.apache.solr.client.solrj.io.stream.StreamAssert.assertMaps;
-
 @Slow
 @SolrTestCaseJ4.SuppressSSL
 @LuceneTestCase.SuppressCodecs({"Lucene3x", "Lucene40","Lucene41","Lucene42","Lucene45"})
@@ -103,9 +102,9 @@ public class StreamDecoratorTest extends SolrCloudTestCase {
         .process(cluster.getSolrClient());
     
     cluster.waitForActiveCollection(collection, 2, 2);
-    
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collection, cluster.getSolrClient().getZkStateReader(),
-        false, true, TIMEOUT);
+
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(
+        collection, cluster.getZkStateReader(), false, true, TIMEOUT);
     if (useAlias) {
       CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection).process(cluster.getSolrClient());
     }
@@ -3671,7 +3670,7 @@ public class StreamDecoratorTest extends SolrCloudTestCase {
     updateRequest.commit(cluster.getSolrClient(), "uknownCollection");
 
     // find a node with a replica
-    ClusterState clusterState = cluster.getSolrClient().getClusterStateProvider().getClusterState();
+    ClusterState clusterState = cluster.getSolrClient().getClusterState();
     DocCollection coll = clusterState.getCollection(COLLECTIONORALIAS);
     String node = coll.getReplicas().iterator().next().getNodeName();
     String url = null;
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
index f647c26..50b60c5 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
@@ -16,6 +16,8 @@
  */
 package org.apache.solr.client.solrj.io.stream;
 
+import static org.apache.solr.client.solrj.io.stream.StreamAssert.assertMaps;
+
 import java.io.IOException;
 import java.time.Instant;
 import java.util.ArrayList;
@@ -27,7 +29,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrServerException;
@@ -65,8 +66,6 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
-import static org.apache.solr.client.solrj.io.stream.StreamAssert.assertMaps;
-
 /**
 *  All base tests will be done with CloudSolrStream. Under the covers CloudSolrStream uses SolrStream so
 *  SolrStream will get fully exercised through these tests.
@@ -2340,7 +2339,7 @@ public void testParallelRankStream() throws Exception {
 
     StreamContext streamContext = new StreamContext();
     streamContext.setLocal(true);
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     List<String> strings = zkStateReader.aliasesManager.getAliases().resolveAliases(COLLECTIONORALIAS);
     String collName = strings.size() > 0 ? strings.get(0) : COLLECTIONORALIAS;
       zkStateReader.forceUpdateCollection(collName);
@@ -2587,7 +2586,7 @@ public void testParallelRankStream() throws Exception {
     streamContext.setRequestParams(params(ShardParams.SHARDS_PREFERENCE, ShardParams.SHARDS_PREFERENCE_REPLICA_TYPE + ":nrt"));
 
     try {
-      ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+      ZkStateReader zkStateReader = cluster.getZkStateReader();
       List<String> strings = zkStateReader.aliasesManager.getAliases().resolveAliases(MULTI_REPLICA_COLLECTIONORALIAS);
       String collName = strings.size() > 0 ? strings.get(0) : MULTI_REPLICA_COLLECTIONORALIAS;
       Map<String, String> replicaTypeMap = mapReplicasToReplicaType(zkStateReader.getClusterState().getCollectionOrNull(collName));
@@ -2709,7 +2708,7 @@ public void testParallelRankStream() throws Exception {
       }
 
       List<String> baseUrls = new LinkedList<>();
-      ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+      ZkStateReader zkStateReader = cluster.getZkStateReader();
       List<String> resolved = zkStateReader.aliasesManager.getAliases().resolveAliases(MULTI_REPLICA_COLLECTIONORALIAS);
       Set<String> liveNodes = zkStateReader.getClusterState().getLiveNodes();
       int expectedNumStreams = 0;
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV2Request.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV2Request.java
index c3ea723..197ce8e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV2Request.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestV2Request.java
@@ -178,7 +178,7 @@ public class TestV2Request extends SolrCloudTestCase {
                     + "}")
             .build());
 
-    ClusterState cs = cluster.getSolrClient().getClusterStateProvider().getClusterState();
+    ClusterState cs = cluster.getSolrClient().getClusterState();
     System.out.println("livenodes: " + cs.getLiveNodes());
 
     String[] node = new String[1];
@@ -192,8 +192,7 @@ public class TestV2Request extends SolrCloudTestCase {
               if (!s.equals(node[0])) testNode[0] = s;
             });
 
-    String testServer =
-        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(testNode[0]);
+    String testServer = cluster.getZkStateReader().getBaseUrlForNodeName(testNode[0]);
     V2Request v2r =
         new V2Request.Builder("/c/v2forward/_introspect")
             .withMethod(SolrRequest.METHOD.GET)
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/PerReplicaStatesIntegrationTest.java b/solr/solrj/src/test/org/apache/solr/common/cloud/PerReplicaStatesIntegrationTest.java
index 9372e2d..d8a16b0 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/PerReplicaStatesIntegrationTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/PerReplicaStatesIntegrationTest.java
@@ -69,7 +69,7 @@ public class PerReplicaStatesIntegrationTest extends SolrCloudTestCase {
       final SolrClient clientUnderTest = cluster.getSolrClient();
       final SolrPingResponse response = clientUnderTest.ping(testCollection);
       assertEquals("This should be OK", 0, response.getStatus());
-      DocCollection c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
+      DocCollection c = cluster.getZkStateReader().getCollection(testCollection);
       c.forEachReplica((s, replica) -> assertNotNull(replica.getReplicaState()));
       PerReplicaStates prs =
           PerReplicaStates.fetch(
@@ -92,7 +92,7 @@ public class PerReplicaStatesIntegrationTest extends SolrCloudTestCase {
           .build()
           .process(cluster.getSolrClient());
       cluster.waitForActiveCollection(testCollection, 2, 4);
-      c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
+      c = cluster.getZkStateReader().getCollection(testCollection);
       c.forEachReplica((s, replica) -> assertNotNull(replica.getReplicaState()));
       prs =
           PerReplicaStates.fetch(
@@ -123,7 +123,7 @@ public class PerReplicaStatesIntegrationTest extends SolrCloudTestCase {
           .process(cluster.getSolrClient());
       cluster.waitForActiveCollection(testCollection, 1, 1);
 
-      DocCollection c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
+      DocCollection c = cluster.getZkStateReader().getCollection(testCollection);
       c.forEachReplica((s, replica) -> assertNotNull(replica.getReplicaState()));
       String collectionPath = ZkStateReader.getCollectionPath(testCollection);
       PerReplicaStates prs =
@@ -139,7 +139,7 @@ public class PerReplicaStatesIntegrationTest extends SolrCloudTestCase {
       cluster.waitForActiveCollection(testCollection, 1, 2);
       prs = PerReplicaStates.fetch(collectionPath, SolrCloudTestCase.cluster.getZkClient(), null);
       assertEquals(2, prs.states.size());
-      c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
+      c = cluster.getZkStateReader().getCollection(testCollection);
       prs.states.forEachEntry((s, state) -> assertEquals(Replica.State.ACTIVE, state.state));
 
       String replicaName = null;
@@ -158,7 +158,7 @@ public class PerReplicaStatesIntegrationTest extends SolrCloudTestCase {
               replicaName);
         }
         jsr.stop();
-        c = cluster.getSolrClient().getZkStateReader().getCollection(testCollection);
+        c = cluster.getZkStateReader().getCollection(testCollection);
         if (log.isInfoEnabled()) {
           log.info("after down node, state.json v: {}", c.getZNodeVersion());
         }
@@ -207,7 +207,6 @@ public class PerReplicaStatesIntegrationTest extends SolrCloudTestCase {
               COLL, Collections.singletonMap(PER_REPLICA_STATE, "false"))
           .process(cluster.getSolrClient());
       cluster
-          .getSolrClient()
           .getZkStateReader()
           .waitForState(
               COLL,
@@ -219,7 +218,6 @@ public class PerReplicaStatesIntegrationTest extends SolrCloudTestCase {
               COLL, Collections.singletonMap(PER_REPLICA_STATE, "true"))
           .process(cluster.getSolrClient());
       cluster
-          .getSolrClient()
           .getZkStateReader()
           .waitForState(
               COLL,
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/SolrZkClientTest.java b/solr/solrj/src/test/org/apache/solr/common/cloud/SolrZkClientTest.java
index 299042f..086c915 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/SolrZkClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/SolrZkClientTest.java
@@ -198,13 +198,11 @@ public class SolrZkClientTest extends SolrCloudTestCase {
 
     // Thread.sleep(600000);
 
-    solrClient
-        .getZkStateReader()
+    ZkStateReader.from(solrClient)
         .getZkClient()
         .getData(
             "/collections/" + getSaferTestName() + "/collectionprops.json", wrapped1A, null, true);
-    solrClient
-        .getZkStateReader()
+    ZkStateReader.from(solrClient)
         .getZkClient()
         .getData(
             "/collections/" + getSaferTestName() + "/collectionprops.json", wrapped2A, null, true);
@@ -219,13 +217,11 @@ public class SolrZkClientTest extends SolrCloudTestCase {
     }
     assertEquals(1, calls.get()); // same wrapped watch set twice, only invoked once
 
-    solrClient
-        .getZkStateReader()
+    ZkStateReader.from(solrClient)
         .getZkClient()
         .getData(
             "/collections/" + getSaferTestName() + "/collectionprops.json", wrapped1A, null, true);
-    solrClient
-        .getZkStateReader()
+    ZkStateReader.from(solrClient)
         .getZkClient()
         .getData(
             "/collections/" + getSaferTestName() + "/collectionprops.json", wrappedB, null, true);
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/TestCloudCollectionsListeners.java b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCloudCollectionsListeners.java
index 9d82dac..eb63261 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/TestCloudCollectionsListeners.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCloudCollectionsListeners.java
@@ -82,14 +82,14 @@ public class TestCloudCollectionsListeners extends SolrCloudTestCase {
           oldResults.put(1, oldCollections);
           newResults.put(1, newCollections);
         };
-    client.getZkStateReader().registerCloudCollectionsListener(watcher1);
+    ZkStateReader.from(client).registerCloudCollectionsListener(watcher1);
     CloudCollectionsListener watcher2 =
         (oldCollections, newCollections) -> {
           log.info("New set of collections: {}, {}", oldCollections, newCollections);
           oldResults.put(2, oldCollections);
           newResults.put(2, newCollections);
         };
-    client.getZkStateReader().registerCloudCollectionsListener(watcher2);
+    ZkStateReader.from(client).registerCloudCollectionsListener(watcher2);
 
     assertFalse(
         "CloudCollectionsListener not triggered after registration",
@@ -108,11 +108,12 @@ public class TestCloudCollectionsListeners extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection("testcollection1", "config", 4, 1)
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
         .processAndWait(client, MAX_WAIT_TIMEOUT);
-    client.waitForState(
-        "testcollection1",
-        MAX_WAIT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, 4, 1));
+    ZkStateReader.from(client)
+        .waitForState(
+            "testcollection1",
+            MAX_WAIT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n, c) -> DocCollection.isFullyActive(n, c, 4, 1));
 
     assertFalse(
         "CloudCollectionsListener has new collection in old set of collections",
@@ -128,7 +129,7 @@ public class TestCloudCollectionsListeners extends SolrCloudTestCase {
         "CloudCollectionsListener doesn't have new collection in new set of collections",
         newResults.get(2).contains("testcollection1"));
 
-    client.getZkStateReader().removeCloudCollectionsListener(watcher1);
+    ZkStateReader.from(client).removeCloudCollectionsListener(watcher1);
 
     CollectionAdminRequest.createCollection("testcollection2", "config", 4, 1)
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
@@ -161,7 +162,7 @@ public class TestCloudCollectionsListeners extends SolrCloudTestCase {
     CollectionAdminRequest.deleteCollection("testcollection2")
         .processAndWait(client, MAX_WAIT_TIMEOUT);
 
-    client.getZkStateReader().removeCloudCollectionsListener(watcher2);
+    ZkStateReader.from(client).removeCloudCollectionsListener(watcher2);
   }
 
   @Test
@@ -188,14 +189,14 @@ public class TestCloudCollectionsListeners extends SolrCloudTestCase {
           oldResults.put(1, oldCollections);
           newResults.put(1, newCollections);
         };
-    client.getZkStateReader().registerCloudCollectionsListener(watcher1);
+    ZkStateReader.from(client).registerCloudCollectionsListener(watcher1);
     CloudCollectionsListener watcher2 =
         (oldCollections, newCollections) -> {
           log.info("New set of collections: {}, {}", oldCollections, newCollections);
           oldResults.put(2, oldCollections);
           newResults.put(2, newCollections);
         };
-    client.getZkStateReader().registerCloudCollectionsListener(watcher2);
+    ZkStateReader.from(client).registerCloudCollectionsListener(watcher2);
 
     assertEquals(
         "CloudCollectionsListener has old collection with size > 0 after registration",
@@ -244,7 +245,7 @@ public class TestCloudCollectionsListeners extends SolrCloudTestCase {
         "CloudCollectionsListener doesn't notify of collection that exists",
         newResults.get(2).contains("testcollection2"));
 
-    client.getZkStateReader().removeCloudCollectionsListener(watcher2);
+    ZkStateReader.from(client).removeCloudCollectionsListener(watcher2);
 
     CollectionAdminRequest.deleteCollection("testcollection2")
         .processAndWait(client, MAX_WAIT_TIMEOUT);
@@ -271,6 +272,6 @@ public class TestCloudCollectionsListeners extends SolrCloudTestCase {
         "CloudCollectionsListener called after removal",
         newResults.get(2).contains("testcollection2"));
 
-    client.getZkStateReader().removeCloudCollectionsListener(watcher1);
+    ZkStateReader.from(client).removeCloudCollectionsListener(watcher1);
   }
 }
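
TestCloudCollectionsListeners registers and removes its listeners via ZkStateReader.from(client). A compact sketch of that lifecycle (the class, method names, and log output are illustrative):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.CloudCollectionsListener;
import org.apache.solr.common.cloud.ZkStateReader;

class CollectionsListenerSketch {
  // Listener registration moves from CloudSolrClient to the ZkStateReader behind it.
  static CloudCollectionsListener watchCollections(CloudSolrClient client) {
    CloudCollectionsListener listener =
        (oldCollections, newCollections) ->
            System.out.println("collections changed: " + oldCollections + " -> " + newCollections);
    ZkStateReader.from(client).registerCloudCollectionsListener(listener);
    return listener; // keep the reference so it can be removed later
  }

  static void stopWatching(CloudSolrClient client, CloudCollectionsListener listener) {
    ZkStateReader.from(client).removeCloudCollectionsListener(listener);
  }
}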
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
index fb7cd4c..3508a95 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/TestCollectionStateWatchers.java
@@ -72,7 +72,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
     return executor.submit(
         () -> {
           try {
-            cluster.getSolrClient().waitForState(collection, timeout, unit, predicate);
+            cluster.getZkStateReader().waitForState(collection, timeout, unit, predicate);
           } catch (InterruptedException | TimeoutException e) {
             return Boolean.FALSE;
           }
@@ -126,11 +126,13 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
         .processAndWait(client, MAX_WAIT_TIMEOUT);
 
-    client.waitForState(
-        "testcollection",
-        MAX_WAIT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, CLUSTER_SIZE, 1));
+    ZkStateReader.from(client)
+        .waitForState(
+            "testcollection",
+            (long) MAX_WAIT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (CollectionStatePredicate)
+                (n, c) -> DocCollection.isFullyActive(n, c, CLUSTER_SIZE, 1));
 
     final JettySolrRunner extraJetty = cluster.startJettySolrRunner();
     final JettySolrRunner jettyToShutdown =
@@ -141,23 +143,24 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
 
     // shutdown a node and check that we get notified about the change
     final CountDownLatch latch = new CountDownLatch(1);
-    client.registerCollectionStateWatcher(
-        "testcollection",
-        (liveNodes, collectionState) -> {
-          int nodesWithActiveReplicas = 0;
-          log.info("State changed: {}", collectionState);
-          for (Slice slice : collectionState) {
-            for (Replica replica : slice) {
-              if (replica.isActive(liveNodes)) nodesWithActiveReplicas++;
-            }
-          }
-          if (liveNodes.size() == CLUSTER_SIZE
-              && expectedNodesWithActiveReplicas == nodesWithActiveReplicas) {
-            latch.countDown();
-            return true;
-          }
-          return false;
-        });
+    ZkStateReader.from(client)
+        .registerCollectionStateWatcher(
+            "testcollection",
+            (liveNodes, collectionState) -> {
+              int nodesWithActiveReplicas = 0;
+              log.info("State changed: {}", collectionState);
+              for (Slice slice : collectionState) {
+                for (Replica replica : slice) {
+                  if (replica.isActive(liveNodes)) nodesWithActiveReplicas++;
+                }
+              }
+              if (liveNodes.size() == CLUSTER_SIZE
+                  && expectedNodesWithActiveReplicas == nodesWithActiveReplicas) {
+                latch.countDown();
+                return true;
+              }
+              return false;
+            });
 
     cluster.stopJettySolrRunner(jettyToShutdown);
     cluster.waitForJettyToStop(jettyToShutdown);
@@ -170,7 +173,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
         "CollectionStateWatcher wasn't cleared after completion",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("testcollection").isEmpty());
+        () -> ZkStateReader.from(client).getStateWatchers("testcollection").isEmpty());
   }
 
   @Test
@@ -182,12 +185,13 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
         .processAndWait(client, MAX_WAIT_TIMEOUT);
 
     final CountDownLatch latch = new CountDownLatch(1);
-    client.registerCollectionStateWatcher(
-        "currentstate",
-        (n, c) -> {
-          latch.countDown();
-          return false;
-        });
+    ZkStateReader.from(client)
+        .registerCollectionStateWatcher(
+            "currentstate",
+            (n, c) -> {
+              latch.countDown();
+              return false;
+            });
 
     assertTrue(
         "CollectionStateWatcher isn't called on new registration",
@@ -195,15 +199,16 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
     assertEquals(
         "CollectionStateWatcher should be retained",
         1,
-        client.getZkStateReader().getStateWatchers("currentstate").size());
+        ZkStateReader.from(client).getStateWatchers("currentstate").size());
 
     final CountDownLatch latch2 = new CountDownLatch(1);
-    client.registerCollectionStateWatcher(
-        "currentstate",
-        (n, c) -> {
-          latch2.countDown();
-          return true;
-        });
+    ZkStateReader.from(client)
+        .registerCollectionStateWatcher(
+            "currentstate",
+            (n, c) -> {
+              latch2.countDown();
+              return true;
+            });
 
     assertTrue(
         "CollectionStateWatcher isn't called when registering for already-watched collection",
@@ -212,7 +217,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
         "CollectionStateWatcher should be removed",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("currentstate").size() == 1);
+        () -> ZkStateReader.from(client).getStateWatchers("currentstate").size() == 1);
   }
 
   @Test
@@ -223,17 +228,22 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
         .processAndWait(client, MAX_WAIT_TIMEOUT);
 
-    client.waitForState(
-        "waitforstate",
-        MAX_WAIT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
+    ZkStateReader.from(client)
+        .waitForState(
+            "waitforstate",
+            (long) MAX_WAIT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (CollectionStatePredicate) (n1, c1) -> DocCollection.isFullyActive(n1, c1, 1, 1));
 
     // several goes, to check that we're not getting delayed state changes
     for (int i = 0; i < 10; i++) {
       try {
-        client.waitForState(
-            "waitforstate", 1, TimeUnit.SECONDS, (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
+        ZkStateReader.from(client)
+            .waitForState(
+                "waitforstate",
+                (long) 1,
+                TimeUnit.SECONDS,
+                (CollectionStatePredicate) (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
       } catch (TimeoutException e) {
         fail("waitForState should return immediately if the predicate is already satisfied");
       }
@@ -263,14 +273,18 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
     expectThrows(
         TimeoutException.class,
         () -> {
-          client.waitForState(
-              "nosuchcollection", 1, TimeUnit.SECONDS, ((liveNodes, collectionState) -> false));
+          ZkStateReader.from(client)
+              .waitForState(
+                  "nosuchcollection",
+                  (long) 1,
+                  TimeUnit.SECONDS,
+                  ((liveNodes, collectionState) -> false));
         });
     waitFor(
         "Watchers for collection should be removed after timeout",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("nosuchcollection").isEmpty());
+        () -> ZkStateReader.from(client).getStateWatchers("nosuchcollection").isEmpty());
   }
 
   @Test
@@ -281,11 +295,12 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
         .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
         .processAndWait(client, MAX_WAIT_TIMEOUT);
 
-    client.waitForState(
-        "falsepredicate",
-        MAX_WAIT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, 4, 1));
+    ZkStateReader.from(client)
+        .waitForState(
+            "falsepredicate",
+            (long) MAX_WAIT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (CollectionStatePredicate) (n, c) -> DocCollection.isFullyActive(n, c, 4, 1));
 
     final CountDownLatch firstCall = new CountDownLatch(1);
 
@@ -324,23 +339,24 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
     CloudSolrClient client = cluster.getSolrClient();
     assertTrue(
         "There should be no watchers for a non-existent collection!",
-        client.getZkStateReader().getStateWatchers("no-such-collection").isEmpty());
+        ZkStateReader.from(client).getStateWatchers("no-such-collection").isEmpty());
 
     expectThrows(
         TimeoutException.class,
         () -> {
-          client.waitForState(
-              "no-such-collection",
-              10,
-              TimeUnit.MILLISECONDS,
-              (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
+          ZkStateReader.from(client)
+              .waitForState(
+                  "no-such-collection",
+                  (long) 10,
+                  TimeUnit.MILLISECONDS,
+                  (CollectionStatePredicate) (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
         });
 
     waitFor(
         "Watchers for collection should be removed after timeout",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("no-such-collection").isEmpty());
+        () -> ZkStateReader.from(client).getStateWatchers("no-such-collection").isEmpty());
   }
 
   @Test
@@ -379,7 +395,7 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
         "CollectionStateWatcher should be removed",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("test_collection").size() == 0);
+        () -> ZkStateReader.from(client).getStateWatchers("test_collection").size() == 0);
 
     future =
         waitInBackground(
@@ -396,6 +412,6 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
         "CollectionStateWatcher should be removed",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("test_collection").size() == 0);
+        () -> ZkStateReader.from(client).getStateWatchers("test_collection").size() == 0);
   }
 }
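
TestCollectionStateWatchers registers its watchers through ZkStateReader.from(client); a watcher is dropped once it returns true. A sketch of a one-shot watcher under that contract (the leader-check predicate is illustrative, not taken from the test):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ZkStateReader;

class StateWatcherSketch {
  // One-shot watcher: returning true tells the ZkStateReader to drop it.
  static void watchForLeaders(CloudSolrClient client, String collection) {
    ZkStateReader.from(client)
        .registerCollectionStateWatcher(
            collection,
            (liveNodes, collectionState) -> {
              if (collectionState == null) {
                return false; // keep watching until the collection shows up
              }
              return collectionState.getSlices().stream().allMatch(s -> s.getLeader() != null);
            });
  }
}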
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/TestDocCollectionWatcher.java b/solr/solrj/src/test/org/apache/solr/common/cloud/TestDocCollectionWatcher.java
index f043585..b15cc36 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/TestDocCollectionWatcher.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/TestDocCollectionWatcher.java
@@ -18,6 +18,7 @@
 package org.apache.solr.common.cloud;
 
 import java.lang.invoke.MethodHandles;
+import java.util.Objects;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -72,7 +73,7 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
     return executor.submit(
         () -> {
           try {
-            cluster.getSolrClient().waitForState(collection, timeout, unit, predicate);
+            cluster.getZkStateReader().waitForState(collection, timeout, unit, predicate);
           } catch (InterruptedException | TimeoutException e) {
             return Boolean.FALSE;
           }
@@ -96,7 +97,7 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
               }
             });
     try {
-      if (future.get(timeout, unit) == true) {
+      if (future.get(timeout, unit)) {
         return;
       }
     } catch (TimeoutException e) {
@@ -114,12 +115,13 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
         .processAndWait(client, MAX_WAIT_TIMEOUT);
 
     final CountDownLatch latch = new CountDownLatch(1);
-    client.registerDocCollectionWatcher(
-        "currentstate",
-        (c) -> {
-          latch.countDown();
-          return false;
-        });
+    ZkStateReader.from(client)
+        .registerDocCollectionWatcher(
+            "currentstate",
+            (c) -> {
+              latch.countDown();
+              return false;
+            });
 
     assertTrue(
         "DocCollectionWatcher isn't called on new registration",
@@ -127,15 +129,16 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
     assertEquals(
         "DocCollectionWatcher should be retained",
         1,
-        client.getZkStateReader().getStateWatchers("currentstate").size());
+        ZkStateReader.from(client).getStateWatchers("currentstate").size());
 
     final CountDownLatch latch2 = new CountDownLatch(1);
-    client.registerDocCollectionWatcher(
-        "currentstate",
-        (c) -> {
-          latch2.countDown();
-          return true;
-        });
+    ZkStateReader.from(client)
+        .registerDocCollectionWatcher(
+            "currentstate",
+            (c) -> {
+              latch2.countDown();
+              return true;
+            });
 
     assertTrue(
         "DocCollectionWatcher isn't called when registering for already-watched collection",
@@ -144,7 +147,7 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
         "DocCollectionWatcher should be removed",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("currentstate").size() == 1);
+        () -> ZkStateReader.from(client).getStateWatchers("currentstate").size() == 1);
   }
 
   @Test
@@ -154,17 +157,22 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
     CollectionAdminRequest.createCollection("waitforstate", "config", 1, 1)
         .processAndWait(client, MAX_WAIT_TIMEOUT);
 
-    client.waitForState(
-        "waitforstate",
-        MAX_WAIT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
+    ZkStateReader.from(client)
+        .waitForState(
+            "waitforstate",
+            MAX_WAIT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n1, c1) -> DocCollection.isFullyActive(n1, c1, 1, 1));
 
     // several goes, to check that we're not getting delayed state changes
     for (int i = 0; i < 10; i++) {
       try {
-        client.waitForState(
-            "waitforstate", 1, TimeUnit.SECONDS, (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
+        ZkStateReader.from(client)
+            .waitForState(
+                "waitforstate",
+                1,
+                TimeUnit.SECONDS,
+                (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
       } catch (TimeoutException e) {
         fail("waitForState should return immediately if the predicate is already satisfied");
       }
@@ -175,7 +183,7 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
   public void testCanWaitForNonexistantCollection() throws Exception {
 
     Future<Boolean> future =
-        waitInBackground("delayed", MAX_WAIT_TIMEOUT, TimeUnit.SECONDS, (c) -> (null != c));
+        waitInBackground("delayed", MAX_WAIT_TIMEOUT, TimeUnit.SECONDS, Objects::nonNull);
 
     CollectionAdminRequest.createCollection("delayed", "config", 1, 1)
         .processAndWait(cluster.getSolrClient(), MAX_WAIT_TIMEOUT);
@@ -188,15 +196,18 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
     CloudSolrClient client = cluster.getSolrClient();
     expectThrows(
         TimeoutException.class,
-        () -> {
-          client.waitForState(
-              "nosuchcollection", 1, TimeUnit.SECONDS, ((liveNodes, collectionState) -> false));
-        });
+        () ->
+            ZkStateReader.from(client)
+                .waitForState(
+                    "nosuchcollection",
+                    1,
+                    TimeUnit.SECONDS,
+                    ((liveNodes, collectionState) -> false)));
     waitFor(
         "Watchers for collection should be removed after timeout",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("nosuchcollection").isEmpty());
+        () -> ZkStateReader.from(client).getStateWatchers("nosuchcollection").isEmpty());
   }
 
   @Test
@@ -207,11 +218,12 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
         .processAndWait(client, MAX_WAIT_TIMEOUT);
 
     // create collection with 1 shard 1 replica...
-    client.waitForState(
-        "falsepredicate",
-        MAX_WAIT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
+    ZkStateReader.from(client)
+        .waitForState(
+            "falsepredicate",
+            MAX_WAIT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n1, c1) -> DocCollection.isFullyActive(n1, c1, 1, 1));
 
     // set watcher waiting for at least 3 replicas (will fail initially)
     final AtomicInteger runCount = new AtomicInteger(0);
@@ -234,17 +246,17 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
     // add a 2nd replica...
     CollectionAdminRequest.addReplicaToShard("falsepredicate", "shard1")
         .processAndWait(client, MAX_WAIT_TIMEOUT);
-    client.waitForState(
-        "falsepredicate",
-        MAX_WAIT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+    ZkStateReader.from(client)
+        .waitForState(
+            "falsepredicate",
+            MAX_WAIT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
 
     // confirm watcher has run at least once and has been retained...
     final int runCountSnapshot = runCount.get();
     assertTrue(0 < runCountSnapshot);
-    assertEquals(1, client.getZkStateReader().getStateWatchers("falsepredicate").size());
-
+    assertEquals(1, ZkStateReader.from(client).getStateWatchers("falsepredicate").size());
     // now add a 3rd replica...
     CollectionAdminRequest.addReplicaToShard("falsepredicate", "shard1")
         .processAndWait(client, MAX_WAIT_TIMEOUT);
@@ -256,7 +268,7 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
         "DocCollectionWatcher should be removed",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("falsepredicate").size() == 0);
+        () -> ZkStateReader.from(client).getStateWatchers("falsepredicate").size() == 0);
   }
 
   @Test
@@ -264,7 +276,7 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
     CloudSolrClient client = cluster.getSolrClient();
     assertTrue(
         "There should be no watchers for a non-existent collection!",
-        client.getZkStateReader().getStateWatchers("no-such-collection").isEmpty());
+        ZkStateReader.from(client).getStateWatchers("no-such-collection").isEmpty());
 
     expectThrows(
         TimeoutException.class,
@@ -276,7 +288,7 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
         "Watchers for collection should be removed after timeout",
         MAX_WAIT_TIMEOUT,
         TimeUnit.SECONDS,
-        () -> client.getZkStateReader().getStateWatchers("no-such-collection").isEmpty());
+        () -> ZkStateReader.from(client).getStateWatchers("no-such-collection").isEmpty());
   }
 
   @Test
@@ -284,14 +296,15 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase {
     final CloudSolrClient client = cluster.getSolrClient();
     CollectionAdminRequest.createCollection("tobedeleted", "config", 1, 1).process(client);
 
-    client.waitForState(
-        "tobedeleted",
-        MAX_WAIT_TIMEOUT,
-        TimeUnit.SECONDS,
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
+    ZkStateReader.from(client)
+        .waitForState(
+            "tobedeleted",
+            MAX_WAIT_TIMEOUT,
+            TimeUnit.SECONDS,
+            (n, c1) -> DocCollection.isFullyActive(n, c1, 1, 1));
 
     Future<Boolean> future =
-        waitInBackground("tobedeleted", MAX_WAIT_TIMEOUT, TimeUnit.SECONDS, (c) -> c == null);
+        waitInBackground("tobedeleted", MAX_WAIT_TIMEOUT, TimeUnit.SECONDS, Objects::isNull);
 
     CollectionAdminRequest.deleteCollection("tobedeleted").process(client);
 
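
The hunks above illustrate the migration this commit applies throughout the tests: code that used to call waitForState() or getZkStateReader() directly on the CloudSolrClient now obtains a ZkStateReader via ZkStateReader.from(client), and plain cluster-state reads go through the client's getClusterState() convenience. A minimal sketch of the resulting pattern, assuming a running SolrCloud cluster and an already-connected client; the method name, collection name, and shard/replica counts here are illustrative only, not part of the patch:

    import java.util.concurrent.TimeUnit;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.ZkStateReader;

    // Hypothetical helper showing the post-SOLR-16061 style used in these tests.
    DocCollection waitUntilFullyActive(CloudSolrClient client, String collection) throws Exception {
      // Formerly: client.waitForState(collection, 30, TimeUnit.SECONDS, ...);
      // the tests now route the wait through the ZkStateReader obtained from the client.
      ZkStateReader.from(client)
          .waitForState(
              collection,
              30,
              TimeUnit.SECONDS,
              (liveNodes, coll) -> DocCollection.isFullyActive(liveNodes, coll, 1, 1));

      // Cluster-state reads use the new convenience accessor on the client itself.
      ClusterState clusterState = client.getClusterState();
      return clusterState.getCollection(collection);
    }
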
diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/TestPerReplicaStates.java b/solr/solrj/src/test/org/apache/solr/common/cloud/TestPerReplicaStates.java
index 3c8aa46..942002d 100644
--- a/solr/solrj/src/test/org/apache/solr/common/cloud/TestPerReplicaStates.java
+++ b/solr/solrj/src/test/org/apache/solr/common/cloud/TestPerReplicaStates.java
@@ -100,7 +100,7 @@ public class TestPerReplicaStates extends SolrCloudTestCase {
       cluster.getZkClient().create(root + "/" + state, null, CreateMode.PERSISTENT, true);
     }
 
-    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = cluster.getZkStateReader();
     PerReplicaStates rs = PerReplicaStates.fetch(root, zkStateReader.getZkClient(), null);
     assertEquals(3, rs.states.size());
     assertTrue(rs.cversion >= 5);
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java
index 8668907..371281d 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZk2TestBase.java
@@ -128,7 +128,7 @@ public abstract class AbstractBasicDistributedZk2TestBase extends AbstractFullDi
 
       // TODO: bring this to its own method?
       // try indexing to a leader that has no replicas up
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       ZkNodeProps leaderProps = zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, SHARD2);
 
       String nodeName = leaderProps.getStr(ZkStateReader.NODE_NAME_PROP);
@@ -170,10 +170,10 @@ public abstract class AbstractBasicDistributedZk2TestBase extends AbstractFullDi
             .process(cloudClient)
             .isSuccess());
 
-    waitForCollection(cloudClient.getZkStateReader(), ONE_NODE_COLLECTION, 1);
-    waitForRecoveriesToFinish(ONE_NODE_COLLECTION, cloudClient.getZkStateReader(), false);
+    waitForCollection(ZkStateReader.from(cloudClient), ONE_NODE_COLLECTION, 1);
+    waitForRecoveriesToFinish(ONE_NODE_COLLECTION, ZkStateReader.from(cloudClient), false);
 
-    cloudClient.getZkStateReader().getLeaderRetry(ONE_NODE_COLLECTION, SHARD1, 30000);
+    ZkStateReader.from(cloudClient).getLeaderRetry(ONE_NODE_COLLECTION, SHARD1, 30000);
 
     int docs = 2;
     for (SolrClient client : clients) {
@@ -266,8 +266,7 @@ public abstract class AbstractBasicDistributedZk2TestBase extends AbstractFullDi
     query("q", "*:*", "sort", "n_tl1 desc");
 
     int oldLiveNodes =
-        cloudClient
-            .getZkStateReader()
+        ZkStateReader.from(cloudClient)
             .getZkClient()
             .getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true)
             .size();
@@ -295,7 +294,7 @@ public abstract class AbstractBasicDistributedZk2TestBase extends AbstractFullDi
 
     long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
 
-    cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 60000);
+    ZkStateReader.from(cloudClient).getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 60000);
 
     try {
       index_specific(
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZkTestBase.java
index 0586f3b..3f517ba 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractBasicDistributedZkTestBase.java
@@ -188,7 +188,7 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
   protected void test() throws Exception {
     // setLoggingLevel(null);
 
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     // make sure we have leaders for each shard
     for (int j = 1; j < sliceCount; j++) {
       zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 10000);
@@ -714,7 +714,7 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
 
     newSearcherHook.waitForSearcher(DEFAULT_COLLECTION, 2, 20000, false);
 
-    ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
+    ClusterState clusterState = getCommonCloudSolrClient().getClusterState();
     DocCollection dColl = clusterState.getCollection(DEFAULT_COLLECTION);
 
     assertSliceCounts("should have found 2 docs, 300 and 301", before + 2, dColl);
@@ -876,8 +876,7 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
       throws Exception {
     AtomicLong total = new AtomicLong(-1);
     try {
-      getCommonCloudSolrClient()
-          .getZkStateReader()
+      ZkStateReader.from(getCommonCloudSolrClient())
           .waitForState(
               DEFAULT_COLLECTION,
               waitMillis,
@@ -1073,9 +1072,9 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
     printLayout();
 
     cloudJettys.get(0).jetty.start();
-    cloudClient.getZkStateReader().forceUpdateCollection("multiunload2");
+    ZkStateReader.from(cloudClient).forceUpdateCollection("multiunload2");
     try {
-      cloudClient.getZkStateReader().getLeaderRetry("multiunload2", "shard1", 30000);
+      ZkStateReader.from(cloudClient).getLeaderRetry("multiunload2", "shard1", 30000);
     } catch (SolrException e) {
       printLayout();
       throw e;
@@ -1184,7 +1183,7 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
   }
 
   protected ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) {
-    ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
+    ClusterState clusterState = getCommonCloudSolrClient().getClusterState();
     ZkNodeProps leader = clusterState.getCollection(collection).getLeader(slice);
     if (leader == null) {
       throw new RuntimeException("Could not find leader:" + collection + " " + slice);
@@ -1351,9 +1350,9 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
 
     // no one should be recovering
     waitForRecoveriesToFinish(
-        oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader(), false, true);
+        oneInstanceCollection2, ZkStateReader.from(getCommonCloudSolrClient()), false, true);
 
-    assertAllActive(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader());
+    assertAllActive(oneInstanceCollection2, ZkStateReader.from(getCommonCloudSolrClient()));
 
     // printLayout();
 
@@ -1388,7 +1387,7 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
     assertEquals(3, allDocs);
 
     // we added a role of none on these creates - check for it
-    ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(getCommonCloudSolrClient());
     zkStateReader.forceUpdateCollection(oneInstanceCollection2);
     Map<String, Slice> slices =
         zkStateReader.getClusterState().getCollection(oneInstanceCollection2).getSlicesMap();
@@ -1397,7 +1396,6 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
     ZkCoreNodeProps props =
         new ZkCoreNodeProps(
             getCommonCloudSolrClient()
-                .getZkStateReader()
                 .getClusterState()
                 .getCollection(oneInstanceCollection2)
                 .getLeader("shard1"));
@@ -1426,8 +1424,7 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
           });
 
       try {
-        getCommonCloudSolrClient()
-            .getZkStateReader()
+        ZkStateReader.from(getCommonCloudSolrClient())
             .waitForState(
                 oneInstanceCollection2,
                 20000,
@@ -1506,8 +1503,8 @@ public abstract class AbstractBasicDistributedZkTestBase extends AbstractFullDis
     SolrClient client4 = collectionClients.get(3);
 
     waitForRecoveriesToFinish(
-        oneInstanceCollection, getCommonCloudSolrClient().getZkStateReader(), false);
-    assertAllActive(oneInstanceCollection, getCommonCloudSolrClient().getZkStateReader());
+        oneInstanceCollection, ZkStateReader.from(getCommonCloudSolrClient()), false);
+    assertAllActive(oneInstanceCollection, ZkStateReader.from(getCommonCloudSolrClient()));
 
     client2.add(getDoc(id, "1"));
     client3.add(getDoc(id, "2"));
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java
index 5f13262..b0e4b4f 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java
@@ -144,7 +144,7 @@ public abstract class AbstractChaosMonkeyNothingIsSafeTestBase
     try (CloudSolrClient ourCloudClient = createCloudClient(DEFAULT_COLLECTION)) {
       handle.clear();
       handle.put("timestamp", SKIPVAL);
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       // make sure we have leaders for each shard
       for (int j = 1; j < sliceCount; j++) {
         zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 10000);
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
index 21bbdaf..8a2679a 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
@@ -245,7 +245,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
       CloudSolrClient cloudClient, String shardName, Replica oldLeader, TimeOut timeOut)
       throws Exception {
     log.info("Will wait for a node to become leader for {} secs", timeOut.timeLeft(SECONDS));
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
 
     for (; ; ) {
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index cc3ca38..b138b90 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -323,7 +323,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     cloudClient = createCloudClient(DEFAULT_COLLECTION);
     cloudClient.connect();
 
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
 
     chaosMonkey =
         new ChaosMonkey(
@@ -433,12 +433,13 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
             .process(cloudClient)
             .getStatus());
 
-    cloudClient.waitForState(
-        DEFAULT_COLLECTION,
-        30,
-        TimeUnit.SECONDS,
-        // expect sliceCount active shards, but no active replicas
-        SolrCloudTestCase.activeClusterShape(sliceCount, 0));
+    // expect sliceCount active shards, but no active replicas
+    ZkStateReader.from(cloudClient)
+        .waitForState(
+            DEFAULT_COLLECTION,
+            30,
+            TimeUnit.SECONDS,
+            SolrCloudTestCase.activeClusterShape(sliceCount, 0));
 
     ExecutorService customThreadPool =
         ExecutorUtil.newMDCAwareCachedThreadPool(new SolrNamedThreadFactory("closeThreadPool"));
@@ -640,7 +641,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     this.jettys.addAll(jettys);
     this.clients.addAll(clients);
 
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     // make sure we have a leader for each shard
     for (int i = 1; i <= sliceCount; i++) {
       zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + i, 10000);
@@ -667,8 +668,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     if (log.isInfoEnabled()) {
       log.info("waitForLiveNode: {}", j.getNodeName());
     }
-    cloudClient
-        .getZkStateReader()
+    ZkStateReader.from(cloudClient)
         .waitForLiveNodes(
             30, TimeUnit.SECONDS, SolrCloudTestCase.containsLiveNode(j.getNodeName()));
   }
@@ -680,8 +680,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
         "Waiting to see {} active replicas in collection: {}", expectedNumReplicas, collection);
     AtomicInteger nReplicas = new AtomicInteger();
     try {
-      client
-          .getZkStateReader()
+      ZkStateReader.from(client)
           .waitForState(
               collection,
               30,
@@ -929,7 +928,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   protected void updateMappingsFromZk(
       List<JettySolrRunner> jettys, List<SolrClient> clients, boolean allowOverSharding)
       throws Exception {
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
     cloudJettys.clear();
     shardToJetty.clear();
@@ -1144,8 +1143,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     controlClient.add(doc);
   }
 
-  protected ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) {
-    ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
+  protected ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) throws IOException {
+    getCommonCloudSolrClient();
+    ClusterState clusterState = cloudClient.getClusterState();
     final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
     if (docCollection != null && docCollection.getLeader(slice) != null) {
       return new ZkCoreNodeProps(docCollection.getLeader(slice));
@@ -1166,17 +1166,17 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   } // serial commit...
 
   protected void waitForRecoveriesToFinish(boolean verbose) throws Exception {
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     super.waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, verbose);
   }
 
   protected void waitForRecoveriesToFinish(String collection, boolean verbose) throws Exception {
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     super.waitForRecoveriesToFinish(collection, zkStateReader, verbose);
   }
 
   protected void waitForRecoveriesToFinish(boolean verbose, long timeoutSeconds) throws Exception {
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, verbose, true, timeoutSeconds);
   }
 
@@ -1475,7 +1475,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     ArrayList<SolrClient> shardClients = new ArrayList<>(7);
 
     updateMappingsFromZk(jettys, clients);
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     List<CloudJettyRunner> solrJetties = shardToJetty.get(shard);
     assertNotNull("no jetties found for shard: " + shard, solrJetties);
 
@@ -1536,7 +1536,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     String failMessage = null;
     if (verbose) System.err.println("check const of " + shard);
     int cnt = 0;
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
     assertEquals(
         "The client count does not match up with the shard count for slice:" + shard,
         zkStateReader
@@ -1649,7 +1649,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
         }
         boolean live = false;
         String nodeName = props.getStr(ZkStateReader.NODE_NAME_PROP);
-        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+        ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
         if (zkStateReader.getClusterState().liveNodesContain(nodeName)) {
           live = true;
         }
@@ -1837,7 +1837,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
           }
         }
       }
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       long count = 0;
       final Replica.State currentState =
           Replica.State.getState(cjetty.info.getStr(ZkStateReader.STATE_PROP));
@@ -2103,12 +2103,13 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     }
 
     try {
-      cloudClient.waitForState(
-          collectionName,
-          30,
-          TimeUnit.SECONDS,
-          SolrCloudTestCase.activeClusterShape(
-              numShards, numShards * (numNrtReplicas + numTlogReplicas + numPullReplicas)));
+      ZkStateReader.from(cloudClient)
+          .waitForState(
+              collectionName,
+              30,
+              TimeUnit.SECONDS,
+              SolrCloudTestCase.activeClusterShape(
+                  numShards, numShards * (numNrtReplicas + numTlogReplicas + numPullReplicas)));
     } catch (TimeoutException e) {
       throw new RuntimeException(
           "Timeout waiting for "
@@ -2233,9 +2234,10 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   private String checkCollectionExpectations(
       String collectionName,
       List<Integer> numShardsNumReplicaList,
-      List<String> nodesAllowedToRunShards) {
-    ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
-
+      List<String> nodesAllowedToRunShards)
+      throws IOException {
+    getCommonCloudSolrClient();
+    ClusterState clusterState = cloudClient.getClusterState();
     int expectedSlices = numShardsNumReplicaList.get(0);
     // The Math.min thing is here, because we expect replication-factor to be reduced to if there
     // are not enough live nodes to spread all shards of a collection over different nodes
@@ -2422,7 +2424,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     while (System.nanoTime() < timeout) {
       Replica tmp = null;
       try {
-        tmp = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, shardId);
+        tmp = ZkStateReader.from(cloudClient).getLeaderRetry(testCollectionName, shardId);
       } catch (Exception exc) {
       }
       if (tmp != null && "active".equals(tmp.getStr(ZkStateReader.STATE_PROP))) {
@@ -2452,7 +2454,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
 
     Map<String, Replica> notLeaders = new HashMap<>();
 
-    ZkStateReader zkr = cloudClient.getZkStateReader();
+    ZkStateReader zkr = ZkStateReader.from(cloudClient);
     zkr.forceUpdateCollection(testCollectionName); // force the state to be fresh
 
     ClusterState cs = zkr.getClusterState();
@@ -2461,11 +2463,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     boolean allReplicasUp = false;
     long waitMs = 0L;
     long maxWaitMs = maxWaitSecs * 1000L;
-    Replica leader = null;
+    Replica leader;
     ZkShardTerms zkShardTerms =
-        new ZkShardTerms(testCollectionName, shardId, cloudClient.getZkStateReader().getZkClient());
+        new ZkShardTerms(
+            testCollectionName, shardId, ZkStateReader.from(cloudClient).getZkClient());
     while (waitMs < maxWaitMs && !allReplicasUp) {
-      cs = cloudClient.getZkStateReader().getClusterState();
+      cs = cloudClient.getClusterState();
       assertNotNull(cs);
       final DocCollection docCollection = cs.getCollectionOrNull(testCollectionName);
       assertNotNull("No collection found for " + testCollectionName, docCollection);
@@ -2540,9 +2543,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   }
 
   protected String printClusterStateInfo(String collection) throws Exception {
-    cloudClient.getZkStateReader().forceUpdateCollection(collection);
-    String cs = null;
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    ZkStateReader.from(cloudClient).forceUpdateCollection(collection);
+    String cs;
+    ClusterState clusterState = cloudClient.getClusterState();
     if (collection != null) {
       cs = clusterState.getCollection(collection).toString();
     } else {
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractMoveReplicaTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractMoveReplicaTestBase.java
index 97ebe74..973f458 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractMoveReplicaTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractMoveReplicaTestBase.java
@@ -39,6 +39,7 @@ import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.util.IdUtils;
 import org.junit.After;
@@ -120,7 +121,7 @@ public abstract class AbstractMoveReplicaTestBase extends SolrCloudTestCase {
     addDocs(coll, 100);
 
     Replica replica = getRandomReplica(coll, cloudClient);
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cloudClient.getClusterState().getLiveNodes();
     ArrayList<String> l = new ArrayList<>(liveNodes);
     Collections.shuffle(l, random());
     String targetNode = null;
@@ -132,8 +133,7 @@ public abstract class AbstractMoveReplicaTestBase extends SolrCloudTestCase {
     }
     assertNotNull(targetNode);
     String shardId = null;
-    for (Slice slice :
-        cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices()) {
+    for (Slice slice : cloudClient.getClusterState().getCollection(coll).getSlices()) {
       if (slice.getReplicas().contains(replica)) {
         shardId = slice.getName();
       }
@@ -282,7 +282,7 @@ public abstract class AbstractMoveReplicaTestBase extends SolrCloudTestCase {
       replica = getRandomReplica(coll, cloudClient);
     } while (!replica.getNodeName().equals(overseerLeader) && count-- > 0);
     assertNotNull("could not find non-overseer replica???", replica);
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cloudClient.getClusterState().getLiveNodes();
     ArrayList<String> l = new ArrayList<>(liveNodes);
     Collections.shuffle(l, random());
     String targetNode = null;
@@ -326,8 +326,7 @@ public abstract class AbstractMoveReplicaTestBase extends SolrCloudTestCase {
 
     if (log.isInfoEnabled()) {
       log.info(
-          "--- current collection state: {}",
-          cloudClient.getZkStateReader().getClusterState().getCollection(coll));
+          "--- current collection state: {}", cloudClient.getClusterState().getCollection(coll));
     }
     assertEquals(
         100, cluster.getSolrClient().query(coll, new SolrQuery("*:*")).getResults().getNumFound());
@@ -343,9 +342,8 @@ public abstract class AbstractMoveReplicaTestBase extends SolrCloudTestCase {
     return new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
   }
 
-  private Replica getRandomReplica(String coll, CloudSolrClient cloudClient) {
-    List<Replica> replicas =
-        cloudClient.getZkStateReader().getClusterState().getCollection(coll).getReplicas();
+  private Replica getRandomReplica(String coll, CloudSolrClient cloudClient) throws IOException {
+    List<Replica> replicas = cloudClient.getClusterState().getCollection(coll).getReplicas();
     Collections.shuffle(replicas, random());
     return replicas.get(0);
   }
@@ -368,7 +366,7 @@ public abstract class AbstractMoveReplicaTestBase extends SolrCloudTestCase {
       CloudSolrClient cloudClient, String nodeName, String collectionName, String replicaType)
       throws IOException, SolrServerException {
     try (HttpSolrClient coreclient =
-        getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
+        getHttpSolrClient(ZkStateReader.from(cloudClient).getBaseUrlForNodeName(nodeName))) {
       CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
       if (status.getCoreStatus().size() == 0) {
         return 0;
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
index e4dc3d5..1a45e99 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRecoveryZkTestBase.java
@@ -122,7 +122,7 @@ public abstract class AbstractRecoveryZkTestBase extends SolrCloudTestCase {
 
     new UpdateRequest().commit(cluster.getSolrClient(), collection);
 
-    cluster.getSolrClient().waitForState(collection, 120, TimeUnit.SECONDS, clusterShape(1, 2));
+    cluster.getZkStateReader().waitForState(collection, 120, TimeUnit.SECONDS, clusterShape(1, 2));
 
     // test that leader and replica have same doc count
     state = getCollectionState(collection);
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRestartWhileUpdatingTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRestartWhileUpdatingTestBase.java
index 9cb7683..132f70e 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRestartWhileUpdatingTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractRestartWhileUpdatingTestBase.java
@@ -22,6 +22,7 @@ import java.util.List;
 import java.util.concurrent.TimeUnit;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -144,7 +145,7 @@ public abstract class AbstractRestartWhileUpdatingTestBase extends AbstractFullD
 
     Thread.sleep(5000);
 
-    waitForRecoveriesToFinish(DEFAULT_COLLECTION, cloudClient.getZkStateReader(), false, true);
+    waitForRecoveriesToFinish(DEFAULT_COLLECTION, ZkStateReader.from(cloudClient), false, true);
 
     for (StoppableIndexingThread thread : threads) {
       thread.join();
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractSyncSliceTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractSyncSliceTestBase.java
index 9b3a14e..c106581 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractSyncSliceTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractSyncSliceTestBase.java
@@ -208,7 +208,7 @@ public abstract class AbstractSyncSliceTestBase extends AbstractFullDistribZkTes
   private void waitTillAllNodesActive() throws Exception {
     for (int i = 0; i < 60; i++) {
       Thread.sleep(3000);
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
       ClusterState clusterState = zkStateReader.getClusterState();
       DocCollection collection1 = clusterState.getCollection("collection1");
       Slice slice = collection1.getSlice("shard1");
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractTlogReplayBufferedWhileIndexingTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractTlogReplayBufferedWhileIndexingTestBase.java
index 55447f6..d08e3ae 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractTlogReplayBufferedWhileIndexingTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractTlogReplayBufferedWhileIndexingTestBase.java
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.util.TestInjection;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -102,7 +103,7 @@ public abstract class AbstractTlogReplayBufferedWhileIndexingTestBase
 
     Thread.sleep(2000);
 
-    waitForRecoveriesToFinish(DEFAULT_COLLECTION, cloudClient.getZkStateReader(), false, true);
+    waitForRecoveriesToFinish(DEFAULT_COLLECTION, ZkStateReader.from(cloudClient), false, true);
 
     for (StoppableIndexingThread thread : threads) {
       thread.safeStop();
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java
index 3a509dd..85d40fa 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractUnloadDistributedZkTestBase.java
@@ -81,11 +81,9 @@ public abstract class AbstractUnloadDistributedZkTestBase
     final TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
     Boolean isPresent = null; // null meaning "don't know"
     while (null == isPresent || shouldBePresent != isPresent) {
+      getCommonCloudSolrClient();
       final DocCollection docCollection =
-          getCommonCloudSolrClient()
-              .getZkStateReader()
-              .getClusterState()
-              .getCollectionOrNull(collectionName);
+          cloudClient.getClusterState().getCollectionOrNull(collectionName);
       final Collection<Slice> slices =
           (docCollection != null) ? docCollection.getSlices() : Collections.emptyList();
       if (timeout.hasTimedOut()) {
@@ -181,9 +179,10 @@ public abstract class AbstractUnloadDistributedZkTestBase
     // printLayout();
     // the collection should still be present (as of SOLR-5209 replica removal does not cascade to
     // remove the slice and collection)
+    getCommonCloudSolrClient();
     assertTrue(
         "No longer found collection " + collection,
-        getCommonCloudSolrClient().getZkStateReader().getClusterState().hasCollection(collection));
+        cloudClient.getClusterState().hasCollection(collection));
   }
 
   protected SolrCore getFirstCore(String collection, JettySolrRunner jetty) {
@@ -208,7 +207,8 @@ public abstract class AbstractUnloadDistributedZkTestBase
             .setCreateNodeSet(jetty1.getNodeName())
             .process(cloudClient)
             .getStatus());
-    ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
+    getCommonCloudSolrClient();
+    ZkStateReader zkStateReader = ZkStateReader.from(cloudClient);
 
     zkStateReader.forceUpdateCollection("unloadcollection");
 
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 8ea1692..f1c5101 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -27,7 +27,19 @@ import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Random;
+import java.util.Set;
+import java.util.SortedMap;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
@@ -50,7 +62,15 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.*;
+import org.apache.solr.common.cloud.Aliases;
+import org.apache.solr.common.cloud.ClusterProperties;
+import org.apache.solr.common.cloud.CollectionStatePredicate;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkMaintenanceUtils;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.IOUtils;
@@ -399,10 +419,9 @@ public class MiniSolrCloudCluster {
     }
     log.info("waitForNode: {}", nodeName);
 
-    ZkStateReader reader = getSolrClient().getZkStateReader();
-
-    reader.waitForLiveNodes(
-        timeoutSeconds, TimeUnit.SECONDS, (o, n) -> n != null && n.contains(nodeName));
+    getZkStateReader()
+        .waitForLiveNodes(
+            timeoutSeconds, TimeUnit.SECONDS, (o, n) -> n != null && n.contains(nodeName));
   }
 
   /**
@@ -438,6 +457,11 @@ public class MiniSolrCloudCluster {
     return zkServer;
   }
 
+  /** The {@link ZkStateReader} inside {@link #getSolrClient()}. */
+  public ZkStateReader getZkStateReader() {
+    return ZkStateReader.from(getSolrClient());
+  }
+
   /**
    * @return Unmodifiable list of all the currently started Solr Jettys.
    */
@@ -601,16 +625,12 @@ public class MiniSolrCloudCluster {
 
   /** Delete all collections (and aliases) */
   public void deleteAllCollections() throws Exception {
-    try (ZkStateReader reader = new ZkStateReader(solrClient.getZkStateReader().getZkClient())) {
+    try (ZkStateReader reader = new ZkStateReader(getZkClient())) {
       final CountDownLatch latch = new CountDownLatch(1);
       reader.registerCloudCollectionsListener(
-          new CloudCollectionsListener() {
-
-            @Override
-            public void onChange(Set<String> oldCollections, Set<String> newCollections) {
-              if (newCollections != null && newCollections.size() == 0) {
-                latch.countDown();
-              }
+          (oldCollections, newCollections) -> {
+            if (newCollections != null && newCollections.size() == 0) {
+              latch.countDown();
             }
           });
 
@@ -627,11 +647,7 @@ public class MiniSolrCloudCluster {
       }
 
       for (String collection : reader.getClusterState().getCollectionStates().keySet()) {
-        reader.waitForState(
-            collection,
-            15,
-            TimeUnit.SECONDS,
-            (collectionState) -> collectionState == null ? true : false);
+        reader.waitForState(collection, 15, TimeUnit.SECONDS, Objects::isNull);
       }
     }
 
@@ -714,7 +730,7 @@ public class MiniSolrCloudCluster {
   }
 
   public SolrZkClient getZkClient() {
-    return solrClient.getZkStateReader().getZkClient();
+    return getZkStateReader().getZkClient();
   }
 
   /**
@@ -843,7 +859,7 @@ public class MiniSolrCloudCluster {
     AtomicReference<DocCollection> state = new AtomicReference<>();
     AtomicReference<Set<String>> liveNodesLastSeen = new AtomicReference<>();
     try {
-      getSolrClient()
+      getZkStateReader()
           .waitForState(
               collection,
               wait,
@@ -877,7 +893,7 @@ public class MiniSolrCloudCluster {
     AtomicReference<DocCollection> state = new AtomicReference<>();
     AtomicReference<Set<String>> liveNodesLastSeen = new AtomicReference<>();
     try {
-      getSolrClient()
+      getZkStateReader()
           .waitForState(
               collection,
               wait,
@@ -949,9 +965,8 @@ public class MiniSolrCloudCluster {
 
     log.info("waitForJettyToStop: {}", nodeName);
 
-    ZkStateReader reader = getSolrClient().getZkStateReader();
     try {
-      reader.waitForLiveNodes(15, TimeUnit.SECONDS, (o, n) -> !n.contains(nodeName));
+      getZkStateReader().waitForLiveNodes(15, TimeUnit.SECONDS, (o, n) -> !n.contains(nodeName));
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       throw new SolrException(ErrorCode.SERVER_ERROR, "interrupted", e);
@@ -1226,14 +1241,12 @@ public class MiniSolrCloudCluster {
               securityJson,
               trackJettyMetrics,
               formatZkServer);
-      CloudSolrClient client = cluster.getSolrClient();
       for (Config config : configs) {
         cluster.uploadConfigSet(config.path, config.name);
       }
 
       if (clusterProperties.size() > 0) {
-        ClusterProperties props =
-            new ClusterProperties(cluster.getSolrClient().getZkStateReader().getZkClient());
+        ClusterProperties props = new ClusterProperties(cluster.getZkClient());
         for (Map.Entry<String, Object> entry : clusterProperties.entrySet()) {
           props.setClusterProperty(entry.getKey(), entry.getValue());
         }
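
The MiniSolrCloudCluster changes above add a getZkStateReader() helper (delegating to ZkStateReader.from(getSolrClient())) and route getZkClient() through it, so test code no longer reaches through the solr client for the reader. A short sketch of how a test might use these helpers, assuming an already-started cluster; the predicate and variable names are illustrative, not taken from the patch:

    import java.util.concurrent.TimeUnit;
    import org.apache.solr.cloud.MiniSolrCloudCluster;
    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.solr.common.cloud.ZkStateReader;

    void inspectCluster(MiniSolrCloudCluster cluster) throws Exception {
      // New convenience accessor: the ZkStateReader backing the cluster's CloudSolrClient.
      ZkStateReader reader = cluster.getZkStateReader();

      // Same kind of live-nodes wait the cluster itself performs in waitForNode()/waitForJettyToStop().
      reader.waitForLiveNodes(
          15, TimeUnit.SECONDS, (oldNodes, newNodes) -> newNodes != null && !newNodes.isEmpty());

      // getZkClient() now resolves through the same reader rather than the solr client directly,
      // e.g. for handing to ClusterProperties as the builder code above does.
      SolrZkClient zkClient = cluster.getZkClient();
    }
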
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MultiSolrCloudTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/MultiSolrCloudTestCase.java
index 1b62e0b..fbe1fba 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MultiSolrCloudTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MultiSolrCloudTestCase.java
@@ -75,11 +75,7 @@ public abstract class MultiSolrCloudTestCase extends SolrTestCaseJ4 {
             .processAndWait(cluster.getSolrClient(), SolrCloudTestCase.DEFAULT_TIMEOUT);
 
         AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-            collection,
-            cluster.getSolrClient().getZkStateReader(),
-            false,
-            true,
-            SolrCloudTestCase.DEFAULT_TIMEOUT);
+            collection, cluster.getZkStateReader(), false, true, SolrCloudTestCase.DEFAULT_TIMEOUT);
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
index a78fb72..9caff80 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
@@ -82,9 +82,9 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
   protected static volatile MiniSolrCloudCluster cluster;
 
   protected static SolrZkClient zkClient() {
-    ZkStateReader reader = cluster.getSolrClient().getZkStateReader();
+    ZkStateReader reader = cluster.getZkStateReader();
     if (reader == null) cluster.getSolrClient().connect();
-    return cluster.getSolrClient().getZkStateReader().getZkClient();
+    return cluster.getZkStateReader().getZkClient();
   }
 
   /**
@@ -126,11 +126,7 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
 
   /** Get the collection state for a particular collection */
   protected static DocCollection getCollectionState(String collectionName) {
-    return cluster
-        .getSolrClient()
-        .getZkStateReader()
-        .getClusterState()
-        .getCollection(collectionName);
+    return cluster.getSolrClient().getClusterState().getCollection(collectionName);
   }
 
   protected static void waitForState(
@@ -158,7 +154,7 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
     AtomicReference<Set<String>> liveNodesLastSeen = new AtomicReference<>();
     try {
       cluster
-          .getSolrClient()
+          .getZkStateReader()
           .waitForState(
               collection,
               timeout,
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
index 615eceb..dfcec10 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
@@ -44,6 +44,7 @@ import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.ImplicitDocRouter;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.junit.AfterClass;
@@ -193,9 +194,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
     String backupLocation = getBackupLocation();
     String backupName = BACKUPNAME_PREFIX + testSuffix;
 
-    DocCollection backupCollection =
-        solrClient.getZkStateReader().getClusterState().getCollection(getCollectionName());
-
     log.info("Triggering Backup command");
 
     {
@@ -223,7 +221,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
           "Failed collection is still in the clusterstate: "
               + cluster
                   .getSolrClient()
-                  .getClusterStateProvider()
                   .getClusterState()
                   .getCollectionOrNull(restoreCollectionName),
           CollectionAdminRequest.listCollections(solrClient),
@@ -294,7 +291,6 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
   private int getActiveSliceCount(String collectionName) {
     return cluster
         .getSolrClient()
-        .getZkStateReader()
         .getClusterState()
         .getCollection(collectionName)
         .getActiveSlices()
@@ -333,8 +329,7 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
     String backupName = BACKUPNAME_PREFIX + testSuffix;
 
     CloudSolrClient client = cluster.getSolrClient();
-    DocCollection backupCollection =
-        client.getZkStateReader().getClusterState().getCollection(collectionName);
+    DocCollection backupCollection = client.getClusterState().getCollection(collectionName);
 
     Map<String, Integer> origShardToDocCount = getShardToDocCountMap(client, backupCollection);
     assert origShardToDocCount.isEmpty() == false;
@@ -404,15 +399,10 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
       assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(client, 60)); // async
     }
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        restoreCollectionName,
-        cluster.getSolrClient().getZkStateReader(),
-        log.isDebugEnabled(),
-        true,
-        30);
+        restoreCollectionName, ZkStateReader.from(client), log.isDebugEnabled(), true, 30);
 
     // Check the number of results are the same
-    DocCollection restoreCollection =
-        client.getZkStateReader().getClusterState().getCollection(restoreCollectionName);
+    DocCollection restoreCollection = client.getClusterState().getCollection(restoreCollectionName);
     assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
     // Re-index same docs (should be identical docs given same random seed) and test we have the
     // same result.  Helps
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCollectionsAPIDistributedZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCollectionsAPIDistributedZkTestBase.java
index 63d303c..d56895f 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCollectionsAPIDistributedZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractCollectionsAPIDistributedZkTestBase.java
@@ -320,7 +320,7 @@ public abstract class AbstractCollectionsAPIDistributedZkTestBase extends SolrCl
 
     TimeUnit.MILLISECONDS.sleep(1000);
     // in both cases, the collection should have default to the core name
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
+    cluster.getZkStateReader().forceUpdateCollection("noconfig");
     assertFalse(
         CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
   }
@@ -330,8 +330,7 @@ public abstract class AbstractCollectionsAPIDistributedZkTestBase extends SolrCl
     CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
         .process(cluster.getSolrClient());
 
-    Set<String> liveNodes =
-        cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+    Set<String> liveNodes = cluster.getSolrClient().getClusterState().getLiveNodes();
 
     List<String> createNodeList = new ArrayList<>(liveNodes);
 
@@ -453,7 +452,7 @@ public abstract class AbstractCollectionsAPIDistributedZkTestBase extends SolrCl
     // TODO: we should not need this...beast test well when trying to fix
     Thread.sleep(1000);
 
-    cluster.getSolrClient().getZkStateReader().forciblyRefreshAllClusterStateSlow();
+    cluster.getZkStateReader().forciblyRefreshAllClusterStateSlow();
 
     new UpdateRequest()
         .add("id", "6")
@@ -502,7 +501,7 @@ public abstract class AbstractCollectionsAPIDistributedZkTestBase extends SolrCl
                 n, c, req.getNumShards(), req.getReplicationFactor());
           });
 
-      ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
+      ZkStateReader zkStateReader = cluster.getZkStateReader();
       // make sure we have leaders for each shard
       for (int z = 1; z < createRequests[j].getNumShards(); z++) {
         zkStateReader.getLeaderRetry(collectionName, "shard" + z, 10000);
@@ -650,8 +649,7 @@ public abstract class AbstractCollectionsAPIDistributedZkTestBase extends SolrCl
     cluster.waitForActiveCollection(collectionName, 2, 4);
 
     ArrayList<String> nodeList =
-        new ArrayList<>(
-            cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+        new ArrayList<>(cluster.getSolrClient().getClusterState().getLiveNodes());
     Collections.shuffle(nodeList, random());
 
     CollectionAdminResponse response =
@@ -662,7 +660,7 @@ public abstract class AbstractCollectionsAPIDistributedZkTestBase extends SolrCl
 
     assertEquals(
         "Replica should be created on the right node",
-        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
+        cluster.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
         newReplica.getBaseUrl());
 
     Path instancePath = createTempDir();
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java
index 15e1582..d876341 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java
@@ -59,6 +59,7 @@ import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.DirectoryFactory;
 import org.apache.solr.core.SolrCore;
@@ -175,11 +176,7 @@ public abstract class AbstractIncrementalBackupTest extends SolrCloudTestCase {
       log.info("Restored from backup, took {}ms", timeTaken);
       t = System.nanoTime();
       AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-          restoreCollectionName,
-          cluster.getSolrClient().getZkStateReader(),
-          log.isDebugEnabled(),
-          false,
-          3);
+          restoreCollectionName, ZkStateReader.from(solrClient), log.isDebugEnabled(), false, 3);
       timeTaken = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t);
       log.info("Restored collection healthy, took {}ms", timeTaken);
       numFound =
@@ -414,11 +411,7 @@ public abstract class AbstractIncrementalBackupTest extends SolrCloudTestCase {
         .process(solrClient);
 
     AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-        restoreCollectionName,
-        cluster.getSolrClient().getZkStateReader(),
-        log.isDebugEnabled(),
-        true,
-        30);
+        restoreCollectionName, ZkStateReader.from(solrClient), log.isDebugEnabled(), true, 30);
 
     // check num docs are the same
     assertEquals(