You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ma...@apache.org on 2020/07/14 03:04:38 UTC

[lucene-solr] branch reference_impl updated (54d7039 -> b018247)

This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a change to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git.


 discard 54d7039  #133 - Remove clusterstate force update.
     new b018247  #133 - Remove clusterstate force update.

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (54d7039)
            \
             N -- N -- N   refs/heads/reference_impl (b018247)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


[lucene-solr] 01/01: #133 - Remove clusterstate force update.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit b018247404aebe8c8e7d71c7f6e38c8498f014ab
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Mon Jul 13 22:04:21 2020 -0500

    #133 - Remove clusterstate force update.
---
 .../src/java/org/apache/solr/api/V2HttpCall.java   |  8 ++-
 .../solr/cloud/api/collections/SplitShardCmd.java  |  8 +--
 .../apache/solr/handler/CdcrRequestHandler.java    |  5 --
 .../solr/handler/admin/RebalanceLeaders.java       |  3 -
 .../java/org/apache/solr/servlet/HttpSolrCall.java |  1 -
 .../apache/solr/cloud/BasicDistributedZkTest.java  |  2 -
 .../org/apache/solr/cloud/ForceLeaderTest.java     |  8 +--
 .../solr/cloud/HttpPartitionOnCommitTest.java      |  2 -
 .../org/apache/solr/cloud/HttpPartitionTest.java   |  1 -
 .../solr/cloud/LeaderElectionContextKeyTest.java   |  2 +-
 .../org/apache/solr/cloud/MigrateRouteKeyTest.java |  1 -
 .../test/org/apache/solr/cloud/OverseerTest.java   |  2 -
 .../org/apache/solr/cloud/TestPullReplica.java     |  6 +-
 .../solr/cloud/TestPullReplicaErrorHandling.java   |  5 +-
 .../solr/cloud/TestRandomRequestDistribution.java  |  1 -
 .../solr/cloud/TestStressInPlaceUpdates.java       |  2 +-
 .../org/apache/solr/cloud/TestTlogReplica.java     |  5 +-
 .../cloud/TestTolerantUpdateProcessorCloud.java    |  1 -
 .../org/apache/solr/cloud/ZkControllerTest.java    |  2 -
 .../CollectionsAPIAsyncDistributedZkTest.java      |  5 +-
 .../cloud/api/collections/TestCollectionAPI.java   |  3 -
 .../TestCollectionsAPIViaSolrCloudCluster.java     |  4 +-
 .../org/apache/solr/cloud/hdfs/StressHdfsTest.java |  1 -
 .../solr/cloud/overseer/ZkStateReaderTest.java     | 18 +++--
 .../solr/cloud/overseer/ZkStateWriterTest.java     |  7 +-
 .../solr/update/TestInPlaceUpdatesDistrib.java     |  7 +-
 .../apache/solr/common/cloud/ZkStateReader.java    | 81 ----------------------
 .../solr/client/solrj/io/stream/StreamingTest.java |  2 +-
 .../solr/cloud/AbstractDistribZkTestBase.java      |  2 -
 .../solr/cloud/AbstractFullDistribZkTestBase.java  |  4 +-
 .../java/org/apache/solr/cloud/ChaosMonkey.java    |  8 +--
 31 files changed, 33 insertions(+), 174 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
index d9dca56..c58fc47 100644
--- a/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
+++ b/solr/core/src/java/org/apache/solr/api/V2HttpCall.java
@@ -31,6 +31,7 @@ import java.util.function.Supplier;
 
 import com.google.common.collect.ImmutableSet;
 import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.common.ParWork;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.annotation.SolrThreadSafe;
 import org.apache.solr.common.cloud.DocCollection;
@@ -50,6 +51,7 @@ import org.apache.solr.security.AuthorizationContext;
 import org.apache.solr.servlet.HttpSolrCall;
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.apache.solr.servlet.SolrRequestParsers;
+import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -212,9 +214,11 @@ public class V2HttpCall extends HttpSolrCall {
     // ensure our view is up to date before trying again
     try {
       zkStateReader.aliasesManager.update();
-      zkStateReader.forceUpdateCollection(collectionsList.get(0));
     } catch (Exception e) {
-      log.error("Error trying to update state while resolving collection.", e);
+      ParWork.propegateInterrupt("Error trying to update state while resolving collection.", e);
+      if (e instanceof KeeperException.SessionExpiredException) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+      }
       //don't propagate exception on purpose
     }
     return logic.get();
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index 8276bab..8b61804 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -127,7 +127,6 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
 
     log.debug("Split shard invoked: {}", message);
     ZkStateReader zkStateReader = ocmh.zkStateReader;
-    zkStateReader.forceUpdateCollection(collectionName);
     AtomicReference<String> slice = new AtomicReference<>();
     slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
     Set<String> offlineSlices = new HashSet<>();
@@ -680,12 +679,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
                                    List<String> subSlices, Set<String> offlineSlices) {
     log.info("Cleaning up after a failed split of {}/{}", collectionName, parentShard);
     // get the latest state
-    try {
-      zkStateReader.forceUpdateCollection(collectionName);
-    } catch (KeeperException | InterruptedException e) {
-      log.warn("Cleanup failed after failed split of {}/{} : (force update collection)", collectionName, parentShard, e);
-      return;
-    }
+
     ClusterState clusterState = zkStateReader.getClusterState();
     DocCollection coll = clusterState.getCollectionOrNull(collectionName);
 
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
index 6c4daa5..655f388 100644
--- a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
@@ -400,11 +400,6 @@ public class CdcrRequestHandler extends RequestHandlerBase implements SolrCoreAw
   private void handleCollectionCheckpointAction(SolrQueryRequest req, SolrQueryResponse rsp)
       throws IOException, SolrServerException {
     ZkController zkController = core.getCoreContainer().getZkController();
-    try {
-      zkController.getZkStateReader().forceUpdateCollection(collection);
-    } catch (Exception e) {
-      log.warn("Error when updating cluster state", e);
-    }
     ClusterState cstate = zkController.getClusterState();
     DocCollection docCollection = cstate.getCollectionOrNull(collection);
     Collection<Slice> shards = docCollection == null? null : docCollection.getActiveSlices();
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
index 81900c3..547c550 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
@@ -176,7 +176,6 @@ class RebalanceLeaders {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
           String.format(Locale.ROOT, "The " + COLLECTION_PROP + " is required for the Rebalance Leaders command."));
     }
-    coreContainer.getZkController().getZkStateReader().forceUpdateCollection(collectionName);
     ClusterState clusterState = coreContainer.getZkController().getClusterState();
 
     DocCollection dc = clusterState.getCollection(collectionName);
@@ -208,7 +207,6 @@ class RebalanceLeaders {
         }
       }
       TimeUnit.MILLISECONDS.sleep(100);
-      coreContainer.getZkController().getZkStateReader().forciblyRefreshAllClusterStateSlow();
     }
     addAnyFailures();
   }
@@ -393,7 +391,6 @@ class RebalanceLeaders {
         }
       }
       TimeUnit.MILLISECONDS.sleep(100);
-      zkStateReader.forciblyRefreshAllClusterStateSlow();
     }
     return -1;
   }
diff --git a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
index db38cf7..01c9589 100644
--- a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
+++ b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
@@ -482,7 +482,6 @@ public class HttpSolrCall {
       if (!retry) {
         // we couldn't find a core to work with, try reloading aliases & this collection
         cores.getZkController().getZkStateReader().aliasesManager.update();
-        cores.getZkController().zkStateReader.forceUpdateCollection(collectionName); // TODO: remove
         action = RETRY;
       }
     }
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 7f6522e..d3e7cdf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -739,7 +739,6 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
     printLayout();
 
     cloudJettys.get(0).jetty.start();
-    cloudClient.getZkStateReader().forceUpdateCollection("multiunload2");
     try {
       cloudClient.getZkStateReader().getLeaderRetry("multiunload2", "shard1", 30000);
     } catch (SolrException e) {
@@ -1030,7 +1029,6 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
     
     // we added a role of none on these creates - check for it
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.forceUpdateCollection(oneInstanceCollection2);
     Map<String,Slice> slices = zkStateReader.getClusterState().getCollection(oneInstanceCollection2).getSlicesMap();
     assertNotNull(slices);
     
diff --git a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
index 7d917b4..2b99ff6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ForceLeaderTest.java
@@ -108,10 +108,9 @@ public class ForceLeaderTest extends HttpPartitionTest {
       putNonLeadersIntoLowerTerm(testCollectionName, SHARD1, zkController, leader, notLeaders, cloudClient);
 
       for (Replica replica : notLeaders) {
-        waitForState(testCollectionName, replica.getName(), State.DOWN, 60000);
+        waitForState(testCollectionName, replica.getName(), State.DOWN, 10000);
       }
-      waitForState(testCollectionName, leader.getName(), State.DOWN, 60000);
-      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
+      waitForState(testCollectionName, leader.getName(), State.DOWN, 10000);
       ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
       int numActiveReplicas = getNumberOfActiveReplicas(clusterState, testCollectionName, SHARD1);
       assertEquals("Expected only 0 active replica but found " + numActiveReplicas +
@@ -139,7 +138,6 @@ public class ForceLeaderTest extends HttpPartitionTest {
       // By now we have an active leader. Wait for recoveries to begin
       waitForRecoveriesToFinish(testCollectionName, cloudClient.getZkStateReader(), true);
 
-      cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName);
       clusterState = cloudClient.getZkStateReader().getClusterState();
       if (log.isInfoEnabled()) {
         log.info("After forcing leader: {}", clusterState.getCollection(testCollectionName).getSlice(SHARD1));
@@ -262,7 +260,7 @@ public class ForceLeaderTest extends HttpPartitionTest {
     getProxyForReplica(leader).reopen();
     leaderJetty.start();
     waitForRecoveriesToFinish(collection, cloudClient.getZkStateReader(), true);
-    cloudClient.getZkStateReader().forceUpdateCollection(collection);
+
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     if (log.isInfoEnabled()) {
       log.info("After bringing back leader: {}", clusterState.getCollection(collection).getSlice(SHARD1));
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
index c673bf7..2028f82 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionOnCommitTest.java
@@ -112,7 +112,6 @@ public class HttpPartitionOnCommitTest extends BasicDistributedZkTest {
 
     Thread.sleep(sleepMsBeforeHealPartition);
 
-    cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName); // get the latest state
     leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
     assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState());
 
@@ -165,7 +164,6 @@ public class HttpPartitionOnCommitTest extends BasicDistributedZkTest {
     sendCommitWithRetry(replica);
     Thread.sleep(sleepMsBeforeHealPartition);
 
-    cloudClient.getZkStateReader().forceUpdateCollection(testCollectionName); // get the latest state
     leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
     assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState());
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
index 7a269fa..64315a4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/HttpPartitionTest.java
@@ -647,7 +647,6 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
     final RTimer timer = new RTimer();
 
     ZkStateReader zkr = cloudClient.getZkStateReader();
-    zkr.forceUpdateCollection(testCollectionName);
     ClusterState cs = zkr.getClusterState();
     boolean allReplicasUp = false;
     long waitMs = 0L;
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
index 0dc4c16..4df2393 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionContextKeyTest.java
@@ -69,7 +69,7 @@ public class LeaderElectionContextKeyTest extends SolrCloudTestCase {
   @Test
   public void test() throws KeeperException, InterruptedException, IOException, SolrServerException {
     ZkStateReader stateReader = cluster.getSolrClient().getZkStateReader();
-    stateReader.forceUpdateCollection(TEST_COLLECTION_1);
+
     ClusterState clusterState = stateReader.getClusterState();
     // The test assume that TEST_COLLECTION_1 and TEST_COLLECTION_2 will have identical layout
     // ( same replica's name on every shard )
diff --git a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
index 695ce19..eae66e8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
@@ -67,7 +67,6 @@ public class MigrateRouteKeyTest extends SolrCloudTestCase {
     boolean ruleRemoved = false;
     long expiryTime = finishTime + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);
     while (System.nanoTime() < expiryTime) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
       state = getCollectionState(collection);
       slice = state.getSlice(shard);
       Map<String,RoutingRule> routingRules = slice.getRoutingRules();
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index 8241c6f..263a7a9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -701,7 +701,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
       reader.waitForState(COLLECTION, 5000,
             TimeUnit.MILLISECONDS, (liveNodes, collectionState) -> collectionState != null && collectionState.getReplica(core_node) == null);
 
-      reader.forceUpdateCollection(COLLECTION);
       // as of SOLR-5209 core removal does not cascade to remove the slice and collection
       assertTrue(COLLECTION +" should remain after removal of the last core", 
           reader.getClusterState().hasCollection(COLLECTION));
@@ -1059,7 +1058,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
       mockController.publishState(COLLECTION, "core1", "core_node1","shard1", Replica.State.RECOVERING, 1, true, overseers.get(0));
 
-      reader.forceUpdateCollection(COLLECTION);
       ClusterState state = reader.getClusterState();
 
       int numFound = 0;
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
index f0af144..6d0602a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplica.java
@@ -481,7 +481,6 @@ public class TestPullReplica extends SolrCloudTestCase {
     unIgnoreException("No registered leader was found"); // Should have a leader from now on
 
     // Validate that the new nrt replica is the leader now
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
     docCollection = getCollectionState(collectionName);
     leader = docCollection.getSlice("shard1").getLeader();
     assertTrue(leader != null && leader.isActive(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()));
@@ -575,7 +574,6 @@ public class TestPullReplica extends SolrCloudTestCase {
         if (t.hasTimedOut()) {
           fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
       } catch(SolrException e) {
         return;
       }
@@ -584,9 +582,7 @@ public class TestPullReplica extends SolrCloudTestCase {
   }
 
   private DocCollection assertNumberOfReplicas(int numNrtReplicas, int numTlogReplicas, int numPullReplicas, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
-    if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
-    }
+
     DocCollection docCollection = getCollectionState(collectionName);
     assertNotNull(docCollection);
     assertEquals("Unexpected number of writer replicas: " + docCollection, numNrtReplicas,
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
index 63ac70d..e9c6218 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestPullReplicaErrorHandling.java
@@ -269,9 +269,7 @@ public void testCantConnectToPullReplica() throws Exception {
   }
   
   private DocCollection assertNumberOfReplicas(int numWriter, int numActive, int numPassive, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
-    if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
-    }
+
     DocCollection docCollection = getCollectionState(collectionName);
     assertNotNull(docCollection);
     assertEquals("Unexpected number of writer replicas: " + docCollection, numWriter, 
@@ -316,7 +314,6 @@ public void testCantConnectToPullReplica() throws Exception {
         if (t.hasTimedOut()) {
           fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
       } catch(SolrException e) {
         return;
       }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
index 37a134e..1e8dc97 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestRandomRequestDistribution.java
@@ -148,7 +148,6 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
 
     waitForRecoveriesToFinish("football", true);
 
-    cloudClient.getZkStateReader().forceUpdateCollection("football");
 
     Replica leader = null;
     Replica notLeader = null;
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java b/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
index d6506c2..4dbd1d1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestStressInPlaceUpdates.java
@@ -583,7 +583,7 @@ public class TestStressInPlaceUpdates extends SolrCloudBridgeTestCase {
    */
   public SolrClient getClientForLeader() throws KeeperException, InterruptedException {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     Replica leader = null;
     Slice shard1 = clusterState.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
index 1a7e5f5..3d683e4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTlogReplica.java
@@ -821,7 +821,6 @@ public class TestTlogReplica extends SolrCloudTestCase {
         if (t.hasTimedOut()) {
           fail("Timed out waiting for collection " + collection + " to be deleted.");
         }
-        cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collection);
       } catch(SolrException e) {
         return;
       }
@@ -830,9 +829,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
   }
 
   private DocCollection assertNumberOfReplicas(int numNrtReplicas, int numTlogReplicas, int numPullReplicas, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
-    if (updateCollection) {
-      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
-    }
+
     DocCollection docCollection = getCollectionState(collectionName);
     assertNotNull(docCollection);
     assertEquals("Unexpected number of nrt replicas: " + docCollection, numNrtReplicas,
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java b/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
index a43e2f9..84a1e8d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestTolerantUpdateProcessorCloud.java
@@ -132,7 +132,6 @@ public class TestTolerantUpdateProcessorCloud extends SolrCloudTestCase {
       String nodeKey = jetty.getHost() + ":" + jetty.getLocalPort() + jetty.getBaseUrl().replace("/","_");
       urlMap.put(nodeKey, jettyURL.toString());
     }
-    zkStateReader.forceUpdateCollection(COLLECTION_NAME);
     ClusterState clusterState = zkStateReader.getClusterState();
     for (Slice slice : clusterState.getCollection(COLLECTION_NAME).getSlices()) {
       String shardName = slice.getName();
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
index 6cf4af4..53124e0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
@@ -305,8 +305,6 @@ public class ZkControllerTest extends SolrTestCaseJ4 {
         propMap.put(ZkStateReader.STATE_PROP, "down");
         zkController.getOverseerJobQueue().offer(Utils.toJSON(propMap));
 
-        zkController.getZkStateReader().forciblyRefreshAllClusterStateSlow();
-
         long now = System.nanoTime();
         long timeout = now + TimeUnit.NANOSECONDS.convert(5, TimeUnit.SECONDS);
         zkController.publishAndWaitForDownStates(5);
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
index 8d544d9..8c4f8ba 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
@@ -144,8 +144,6 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
         .processAndWait(client, MAX_TIMEOUT_SECONDS);
     assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
 
-    client.getZkStateReader().forceUpdateCollection(collection);
-    
     //Add a doc to shard2 to make sure shard2 was created properly
     SolrInputDocument doc = new SolrInputDocument();
     doc.addField("id", numDocs + 1);
@@ -208,8 +206,7 @@ public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
         break;
       }
     }
-    client.getZkStateReader().forceUpdateCollection(collection);
-    
+
     shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
     String replicaName = shard1.getReplicas().iterator().next().getName();
     state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
index 736f44f..d505309 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
@@ -268,7 +268,6 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
       // Now try deleting the configset and doing a clusterstatus.
       String parent = ZkConfigManager.CONFIGS_ZKNODE + "/" + configSet;
       deleteThemAll(client.getZkStateReader().getZkClient(), parent);
-      client.getZkStateReader().forciblyRefreshAllClusterStateSlow();
 
       final CollectionAdminRequest.ClusterStatus req = CollectionAdminRequest.getClusterStatus();
       NamedList<Object> rsp = client.request(req);
@@ -923,7 +922,6 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
 
       CollectionAdminRequest.migrateCollectionFormat("testClusterStateMigration").process(client);
 
-      client.getZkStateReader().forceUpdateCollection("testClusterStateMigration");
 
       assertEquals(2, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
 
@@ -1033,7 +1031,6 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
   private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
       throws KeeperException, InterruptedException {
 
-    client.getZkStateReader().forceUpdateCollection(collectionName);
     ClusterState clusterState = client.getZkStateReader().getClusterState();
     final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
     if (docCollection == null || docCollection.getReplica(replicaName) == null) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
index 548499c..f3bad87 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
@@ -137,7 +137,7 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
 
     // remove a server not hosting any replicas
     ZkStateReader zkStateReader = client.getZkStateReader();
-    zkStateReader.forceUpdateCollection(collectionName);
+
     ClusterState clusterState = zkStateReader.getClusterState();
     Map<String,JettySolrRunner> jettyMap = new HashMap<>();
     for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
@@ -244,7 +244,7 @@ public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
     assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
 
     // the test itself
-    zkStateReader.forceUpdateCollection(collectionName);
+
     final ClusterState clusterState = zkStateReader.getClusterState();
 
     final Set<Integer> leaderIndices = new HashSet<>();
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
index 66af1c6..c32cd24 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
@@ -172,7 +172,6 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     }
     
     cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
-    cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
     
     for (int i = 1; i < nShards + 1; i++) {
       cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
diff --git a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
index 279b164..1a75ac9 100644
--- a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateReaderTest.java
@@ -99,11 +99,11 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
         boolean exists = zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true);
         assertFalse(exists);
 
-        if (explicitRefresh) {
-          reader.forceUpdateCollection("c1");
-        } else {
+       // if (explicitRefresh) {
+          //reader.forceUpdateCollection("c1");
+       // } else {
           reader.waitForState("c1", TIMEOUT, TimeUnit.SECONDS, (n, c) -> c != null);
-        }
+       // }
 
         DocCollection collection = reader.getClusterState().getCollection("c1");
         assertEquals(1, collection.getStateFormat());
@@ -122,12 +122,12 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
         boolean exists = zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true);
         assertTrue(exists);
 
-        if (explicitRefresh) {
-          reader.forceUpdateCollection("c1");
-        } else {
+     //   if (explicitRefresh) {
+    //      reader.forceUpdateCollection("c1");
+      //  } else {
           reader.waitForState("c1", TIMEOUT, TimeUnit.SECONDS,
               (n, c) -> c != null && c.getStateFormat() == 2);
-        }
+    //    }
 
         DocCollection collection = reader.getClusterState().getCollection("c1");
         assertEquals(2, collection.getStateFormat());
@@ -163,7 +163,6 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
           new DocCollection("c1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json"));
       writer.enqueueUpdate(reader.getClusterState(), Collections.singletonList(c1), null);
       writer.writePendingUpdates(reader.getClusterState());
-      reader.forceUpdateCollection("c1");
 
       assertTrue(reader.getClusterState().getCollectionRef("c1").isLazilyLoaded());
       reader.registerCore("c1");
@@ -247,7 +246,6 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
       assertNull(reader.getClusterState().getCollectionRef("c1"));
 
       zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
-      reader.forceUpdateCollection("c1");
 
       // Still no c1 collection, despite a collection path.
       assertNull(reader.getClusterState().getCollectionRef("c1"));
diff --git a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
index 639e5d1..8b30ee2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/overseer/ZkStateWriterTest.java
@@ -221,8 +221,7 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
         writer.enqueueUpdate(reader.getClusterState(), Collections.singletonList(c1), null);
         writer.writePendingUpdates(reader.getClusterState());
 
-        reader.forceUpdateCollection("c1");
-        reader.forceUpdateCollection("c2");
+
         ClusterState clusterState = reader.getClusterState(); // keep a reference to the current cluster state object
         assertTrue(clusterState.hasCollection("c1"));
         assertFalse(clusterState.hasCollection("c2"));
@@ -304,8 +303,6 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
         byte[] data = zkClient.getData(ZkStateReader.getCollectionPath("c2"), null, null, true);
         zkClient.setData(ZkStateReader.getCollectionPath("c2"), data, true);
 
-        // get the most up-to-date state
-        reader.forceUpdateCollection("c2");
         state = reader.getClusterState();
         log.info("Cluster state: {}", state);
         assertTrue(state.hasCollection("c2"));
@@ -315,8 +312,6 @@ public class ZkStateWriterTest extends SolrTestCaseJ4 {
         writer.enqueueUpdate(state, Collections.singletonList(c2), null);
         assertTrue(writer.hasPendingUpdates());
 
-        // get the most up-to-date state
-        reader.forceUpdateCollection("c2");
         state = reader.getClusterState();
 
         // Will trigger flush
diff --git a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
index e06911d..6d19a98 100644
--- a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
+++ b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
@@ -195,7 +195,6 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
   
   private void mapReplicasToClients() throws KeeperException, InterruptedException {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     Replica leader = null;
     Slice shard1 = clusterState.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1);
@@ -1006,7 +1005,7 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
     for (int i=0; i<100; i++) {
       Thread.sleep(10);
-      cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+
       ClusterState state = cloudClient.getZkStateReader().getClusterState();
 
       int numActiveReplicas = 0;
@@ -1079,7 +1078,7 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
       try (ZkShardTerms zkShardTerms = new ZkShardTerms(DEFAULT_COLLECTION, SHARD1, cloudClient.getZkStateReader().getZkClient())) {
         for (int i=0; i<100; i++) {
           Thread.sleep(10);
-          cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+
           ClusterState state = cloudClient.getZkStateReader().getClusterState();
 
           int numActiveReplicas = 0;
@@ -1333,7 +1332,7 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     // Check every 10ms, 100 times, for a replica to go down (& assert that it doesn't)
     for (int i=0; i<100; i++) {
       Thread.sleep(10);
-      cloudClient.getZkStateReader().forceUpdateCollection(DEFAULT_COLLECTION);
+
       ClusterState state = cloudClient.getZkStateReader().getClusterState();
 
       int numActiveReplicas = 0;
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index 0ea782e..811d87f 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -366,87 +366,6 @@ public class ZkStateReader implements SolrCloseable {
   }
 
   /**
-   * Forcibly refresh cluster state from ZK. Do this only to avoid race conditions because it's expensive.
-   * <p>
-   * It is cheaper to call {@link #forceUpdateCollection(String)} on a single collection if you must.
-   *
-   * @lucene.internal
-   */
-  public void forciblyRefreshAllClusterStateSlow() throws KeeperException, InterruptedException {
-    synchronized (getUpdateLock()) {
-      if (clusterState == null) {
-        // Never initialized, just run normal initialization.
-        createClusterStateWatchersAndUpdate();
-        return;
-      }
-      // No need to set watchers because we should already have watchers registered for everything.
-      refreshCollectionList(null);
-      refreshLiveNodes(null);
-      refreshLegacyClusterState(null);
-      // Need a copy so we don't delete from what we're iterating over.
-      Collection<String> safeCopy = new ArrayList<>(watchedCollectionStates.keySet());
-      Set<String> updatedCollections = new HashSet<>();
-      for (String coll : safeCopy) {
-        DocCollection newState = fetchCollectionState(coll, null);
-        if (updateWatchedCollection(coll, newState)) {
-          updatedCollections.add(coll);
-        }
-      }
-      constructState(updatedCollections);
-    }
-  }
-
-  /**
-   * Forcibly refresh a collection's internal state from ZK. Try to avoid having to resort to this when
-   * a better design is possible.
-   */
-  //TODO shouldn't we call ZooKeeper.sync() at the right places to prevent reading a stale value?  We do so for aliases.
-  public void forceUpdateCollection(String collection) throws KeeperException, InterruptedException {
-
-    synchronized (getUpdateLock()) {
-      if (clusterState == null) {
-        log.warn("ClusterState watchers have not been initialized");
-        return;
-      }
-
-      ClusterState.CollectionRef ref = clusterState.getCollectionRef(collection);
-      if (ref == null || legacyCollectionStates.containsKey(collection)) {
-        // We either don't know anything about this collection (maybe it's new?) or it's legacy.
-        // First update the legacy cluster state.
-        log.debug("Checking legacy cluster state for collection {}", collection);
-        refreshLegacyClusterState(null);
-        if (!legacyCollectionStates.containsKey(collection)) {
-          // No dice, see if a new collection just got created.
-          LazyCollectionRef tryLazyCollection = new LazyCollectionRef(collection);
-          if (tryLazyCollection.get() != null) {
-            // What do you know, it exists!
-            log.debug("Adding lazily-loaded reference for collection {}", collection);
-            lazyCollectionStates.putIfAbsent(collection, tryLazyCollection);
-            constructState(Collections.singleton(collection));
-          }
-        }
-      } else if (ref.isLazilyLoaded()) {
-        log.debug("Refreshing lazily-loaded state for collection {}", collection);
-        if (ref.get() != null) {
-          return;
-        }
-        // Edge case: if there's no external collection, try refreshing legacy cluster state in case it's there.
-        refreshLegacyClusterState(null);
-      } else if (watchedCollectionStates.containsKey(collection)) {
-        // Exists as a watched collection, force a refresh.
-        log.debug("Forcing refresh of watched collection state for {}", collection);
-        DocCollection newState = fetchCollectionState(collection, null);
-        if (updateWatchedCollection(collection, newState)) {
-          constructState(Collections.singleton(collection));
-        }
-      } else {
-        log.error("Collection {} is not lazy or watched!", collection);
-      }
-    }
-
-  }
-
-  /**
    * Refresh the set of live nodes.
    */
   public void updateLiveNodes() throws KeeperException, InterruptedException {
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
index 5769a9f..6e27605 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamingTest.java
@@ -2338,7 +2338,7 @@ public void testParallelRankStream() throws Exception {
     ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
     List<String> strings = zkStateReader.aliasesManager.getAliases().resolveAliases(COLLECTIONORALIAS);
     String collName = strings.size() > 0 ? strings.get(0) : COLLECTIONORALIAS;
-      zkStateReader.forceUpdateCollection(collName);
+
     DocCollection collection = zkStateReader.getClusterState().getCollectionOrNull(collName);
     List<Replica> replicas = collection.getReplicas();
     streamContext.getEntries().put("core",replicas.get(random().nextInt(replicas.size())).getCoreName());
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
index 228c18a..8e28cd0 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
@@ -215,7 +215,6 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
       throws Exception {
     log.info("Will wait for a node to become leader for {} secs", timeOut.timeLeft(SECONDS));
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
 
     for (; ; ) {
       ClusterState clusterState = zkStateReader.getClusterState();
@@ -266,7 +265,6 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
   protected static void assertAllActive(String collection, ZkStateReader zkStateReader)
       throws KeeperException, InterruptedException {
 
-      zkStateReader.forceUpdateCollection(collection);
       ClusterState clusterState = zkStateReader.getClusterState();
       final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
       if (docCollection == null || docCollection.getSlices() == null) {
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index b438df3..5162030 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -885,7 +885,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   
   protected void updateMappingsFromZk(List<JettySolrRunner> jettys, List<SolrClient> clients, boolean allowOverSharding) throws Exception {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
+
     cloudJettys.clear();
     shardToJetty.clear();
 
@@ -2141,7 +2141,6 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     log.info("## Collecting extra Replica.Type information of the cluster");
     zkStateReader.updateLiveNodes();
     StringBuilder builder = new StringBuilder();
-    zkStateReader.forceUpdateCollection(collectionName);
     DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
     for(Slice s:collection.getSlices()) {
       Replica leader = s.getLeader();
@@ -2161,7 +2160,6 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
 
   protected void waitForReplicationFromReplicas(String collectionName, ZkStateReader zkStateReader, TimeOut timeout) throws KeeperException, InterruptedException, IOException {
     log.info("waitForReplicationFromReplicas: {}", collectionName);
-    zkStateReader.forceUpdateCollection(collectionName);
     DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
     Map<String, CoreContainer> containers = new HashMap<>();
     for (JettySolrRunner runner:jettys) {
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
index 7dfdc36..9da53e9 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
@@ -417,9 +417,6 @@ public class ChaosMonkey {
     int numIndexersFoundInShard = 0;
     for (CloudJettyRunner cloudJetty : shardToJetty.get(sliceName)) {
       
-      // get latest cloud state
-      zkStateReader.forceUpdateCollection(collection);
-      
       DocCollection docCollection = zkStateReader.getClusterState().getCollection(collection);
       
       Slice slice = docCollection.getSlice(sliceName);
@@ -445,10 +442,7 @@ public class ChaosMonkey {
 
   private int checkIfKillIsLegal(String sliceName, int numActive) throws KeeperException, InterruptedException {
     for (CloudJettyRunner cloudJetty : shardToJetty.get(sliceName)) {
-      
-      // get latest cloud state
-      zkStateReader.forceUpdateCollection(collection);
-      
+
       DocCollection docCollection = zkStateReader.getClusterState().getCollection(collection);
       
       Slice slice = docCollection.getSlice(sliceName);