Posted to commits@lucene.apache.org by ab...@apache.org on 2021/01/11 09:46:58 UTC

[lucene-solr] branch jira/solr-15055 updated: SOLR-15055: Additional missing pieces + test fixes.

This is an automated email from the ASF dual-hosted git repository.

ab pushed a commit to branch jira/solr-15055
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git


The following commit(s) were added to refs/heads/jira/solr-15055 by this push:
     new 5cbe3da  SOLR-15055: Additional missing pieces + test fixes.
5cbe3da is described below

commit 5cbe3da816af4cfe01fbae6fbc71a637c0775a1c
Author: Andrzej Bialecki <ab...@apache.org>
AuthorDate: Mon Jan 11 10:46:17 2021 +0100

    SOLR-15055: Additional missing pieces + test fixes.
---
 .../solr/cloud/api/collections/MoveReplicaCmd.java |  21 +
 .../OverseerCollectionMessageHandler.java          |   6 +-
 .../solr/handler/admin/CollectionsHandler.java     |   2 +
 .../org/apache/solr/cloud/TestWithCollection.java  | 510 +++++++++++++++++++++
 4 files changed, 538 insertions(+), 1 deletion(-)

diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
index 451bfad..f567b2e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
@@ -33,6 +33,7 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
@@ -125,6 +126,26 @@ public class MoveReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
       replica = sliceReplicas.iterator().next();
     }
 
+    if (coll.getStr(CollectionAdminParams.COLOCATED_WITH) != null) {
+      // we must ensure that moving this replica does not cause the co-location to break
+      String sourceNode = replica.getNodeName();
+      String colocatedCollectionName = coll.getStr(CollectionAdminParams.COLOCATED_WITH);
+      DocCollection colocatedCollection = clusterState.getCollectionOrNull(colocatedCollectionName);
+      if (colocatedCollection != null) {
+        if (colocatedCollection.getReplica((s, r) -> sourceNode.equals(r.getNodeName())) != null) {
+          // check that we have at least two replicas of the collection on the source node;
+          // only then is it okay to move one of them to another node
+          List<Replica> replicasOnSourceNode = coll.getReplicas(replica.getNodeName());
+          if (replicasOnSourceNode == null || replicasOnSourceNode.size() < 2) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                "Collection: " + collection + " is co-located with collection: " + colocatedCollectionName
+                    + " and has a single replica: " + replica.getName() + " on node: " + replica.getNodeName()
+                    + " so it is not possible to move it to another node");
+          }
+        }
+      }
+    }
+
     log.info("Replica will be moved to node {}: {}", targetNode, replica);
     Slice slice = null;
     for (Slice s : coll.getSlices()) {
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
index 4150842..de781bd 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -103,6 +103,8 @@ import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
 import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
+import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
+import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
 import static org.apache.solr.common.params.CommonParams.NAME;
@@ -142,7 +144,9 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
       ZkStateReader.REPLICATION_FACTOR, "1",
       ZkStateReader.NRT_REPLICAS, "1",
       ZkStateReader.TLOG_REPLICAS, "0",
-      ZkStateReader.PULL_REPLICAS, "0"));
+      ZkStateReader.PULL_REPLICAS, "0",
+      WITH_COLLECTION, null,
+      COLOCATED_WITH, null));
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   public static final String FAILURE_FIELD = "failure";
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 399b87b..710846e 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -136,6 +136,7 @@ import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES
 import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_NAME;
 import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_VALUE;
 import static org.apache.solr.common.params.CollectionAdminParams.SKIP_NODE_ASSIGNMENT;
+import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
 import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE;
@@ -462,6 +463,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
           TLOG_REPLICAS,
           NRT_REPLICAS,
           WAIT_FOR_FINAL_STATE,
+          WITH_COLLECTION,
           ALIAS);
 
       if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java b/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
new file mode 100644
index 0000000..e4abfb0
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/TestWithCollection.java
@@ -0,0 +1,510 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.client.solrj.request.beans.PluginMeta;
+import org.apache.solr.cluster.placement.PlacementPluginFactory;
+import org.apache.solr.cluster.placement.plugins.AffinityPlacementFactory;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.util.TimeOut;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static java.util.Collections.singletonMap;
+import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
+
+/**
+ * Tests for co-locating a collection with another collection, ensuring that no Collection API
+ * operation ever breaks the co-location.
+ *
+ * See SOLR-11990 for more details.
+ */
+public class TestWithCollection extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static final int NUM_JETTIES = 2;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    // placement plugins need metrics
+    System.setProperty("metricsEnabled", "true");
+    configureCluster(NUM_JETTIES)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+    PluginMeta plugin = new PluginMeta();
+    plugin.name = PlacementPluginFactory.PLUGIN_NAME;
+    plugin.klass = AffinityPlacementFactory.class.getName();
+    V2Request req = new V2Request.Builder("/cluster/plugin")
+        .forceV2(true)
+        .POST()
+        .withPayload(singletonMap("add", plugin))
+        .build();
+    req.process(cluster.getSolrClient());
+
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    ClusterState clusterState = cluster.getSolrClient().getZkStateReader().getClusterState();
+    for (Map.Entry<String, ClusterState.CollectionRef> entry : clusterState.getCollectionStates().entrySet()) {
+      if (entry.getKey().contains("_xyz"))  {
+        try {
+          CollectionAdminRequest.deleteCollection(entry.getKey()).process(cluster.getSolrClient());
+        } catch (Exception e) {
+          log.error("Exception while deleting collection: " + entry.getKey());
+        }
+      }
+    }
+    cluster.deleteAllCollections();
+    cluster.getSolrClient().setDefaultCollection(null);
+
+    LATCH = new CountDownLatch(1);
+
+    int jettys = cluster.getJettySolrRunners().size();
+    if (jettys < NUM_JETTIES) {
+      for (int i = jettys; i < NUM_JETTIES; i++) {
+        cluster.startJettySolrRunner();
+      }
+    } else  {
+      for (int i = jettys; i > NUM_JETTIES; i--) {
+        cluster.stopJettySolrRunner(i - 1);
+      }
+    }
+  }
+
+  @Test
+  public void testCreateCollectionNoWithCollection() throws IOException, SolrServerException {
+    String prefix = "testCreateCollectionNoWithCollection";
+    String xyz = prefix + "_xyz";
+    String abc = prefix + "_abc";
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    try {
+
+      CollectionAdminRequest.createCollection(xyz, 1, 1)
+          .setWithCollection(abc).process(solrClient);
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      assertTrue(e.getMessage().contains("The 'withCollection' does not exist"));
+    }
+
+    CollectionAdminRequest.createCollection(abc, 2, 1)
+        .process(solrClient);
+    try {
+      CollectionAdminRequest.createCollection(xyz, 1, 1)
+          .setWithCollection(abc).process(solrClient);
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      assertTrue(e.getMessage().contains("The `withCollection` must have only one shard, found: 2"));
+    }
+  }
+
+  public void testCreateCollection() throws Exception {
+    String prefix = "testCreateCollection";
+    String primaryCollectionName = prefix + "_primary";
+    String secondaryCollectionName = prefix + "_secondary";
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    String chosenNode = cluster.getRandomJetty(random()).getNodeName();
+    CollectionAdminRequest.createCollection(secondaryCollectionName, 1, 1)
+        .setCreateNodeSet(chosenNode) // randomize to avoid choosing the first node always
+        .process(solrClient);
+
+    CollectionAdminRequest.createCollection(primaryCollectionName, 1, 1)
+        .setWithCollection(secondaryCollectionName)
+        .process(solrClient);
+
+    DocCollection primaryCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(primaryCollectionName);
+    assertNotNull(primaryCollection);
+    assertEquals(secondaryCollectionName, primaryCollection.getStr(WITH_COLLECTION));
+    Replica replica = primaryCollection.getReplicas().get(0);
+    String nodeName = replica.getNodeName();
+    DocCollection secondaryCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(secondaryCollectionName);
+    Set<String> secondaryNodes = secondaryCollection.getReplicas().stream()
+        .map(r -> r.getNodeName()).collect(Collectors.toSet());
+
+
+    assertTrue(secondaryNodes.contains(nodeName));
+    assertTrue(secondaryNodes.contains(chosenNode));
+  }
+
+  @Test
+  public void testDeleteWithCollection() throws IOException, SolrServerException, InterruptedException {
+    String prefix = "testDeleteWithCollection";
+    String primaryCollectionName = prefix + "_primary";
+    String secondaryCollectionName = prefix + "_secondary";
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    CollectionAdminRequest.createCollection(secondaryCollectionName, 1, 1)
+        .process(solrClient);
+    CollectionAdminRequest.createCollection(primaryCollectionName, 1, 1)
+        .setWithCollection(secondaryCollectionName)
+        .process(solrClient);
+    try {
+      CollectionAdminRequest.deleteCollection(secondaryCollectionName).process(solrClient);
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      assertTrue(e.getMessage().contains("is co-located with collection"));
+    }
+
+    // delete the co-located collection first
+    CollectionAdminRequest.deleteCollection(primaryCollectionName).process(solrClient);
+    // deleting the with collection should succeed now
+    CollectionAdminRequest.deleteCollection(secondaryCollectionName).process(solrClient);
+
+    primaryCollectionName = primaryCollectionName + "_2";
+    secondaryCollectionName = secondaryCollectionName + "_2";
+    CollectionAdminRequest.createCollection(secondaryCollectionName, 1, 1)
+        .process(solrClient);
+    CollectionAdminRequest.createCollection(primaryCollectionName, 1, 1)
+        .setWithCollection(secondaryCollectionName)
+        .process(solrClient);
+    // sanity check
+    try {
+      CollectionAdminRequest.deleteCollection(secondaryCollectionName).process(solrClient);
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      assertTrue(e.getMessage().contains("is co-located with collection"));
+    }
+
+    CollectionAdminRequest.modifyCollection(primaryCollectionName, null)
+        .unsetAttribute("withCollection")
+        .process(solrClient);
+    TimeOut timeOut = new TimeOut(5, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+    while (!timeOut.hasTimedOut()) {
+      DocCollection c1 = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(primaryCollectionName);
+      if (c1.getStr("withCollection") == null) break;
+      Thread.sleep(200);
+    }
+    DocCollection c1 = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(primaryCollectionName);
+    assertNull(c1.getStr("withCollection"));
+    CollectionAdminRequest.deleteCollection(secondaryCollectionName).process(solrClient);
+  }
+
+  @Test
+  public void testAddReplicaSimple() throws Exception {
+    String prefix = "testAddReplica";
+    String primaryCollectionName = prefix + "_primary";
+    String secondaryCollectionName = prefix + "_secondary";
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    String chosenNode = cluster.getRandomJetty(random()).getNodeName();
+    log.info("Chosen node {} for collection {}", chosenNode, secondaryCollectionName);
+    CollectionAdminRequest.createCollection(secondaryCollectionName, 1, 1)
+        .setCreateNodeSet(chosenNode) // randomize to avoid choosing the first node always
+        .process(solrClient);
+    CollectionAdminRequest.createCollection(primaryCollectionName, 1, 1)
+        .setWithCollection(secondaryCollectionName)
+        .setCreateNodeSet(chosenNode)
+        .process(solrClient);
+
+    String otherNode = null;
+    for (JettySolrRunner jettySolrRunner : cluster.getJettySolrRunners()) {
+      if (!chosenNode.equals(jettySolrRunner.getNodeName())) {
+        otherNode = jettySolrRunner.getNodeName();
+      }
+    }
+    CollectionAdminRequest.addReplicaToShard(primaryCollectionName, "shard1")
+        .setNode(otherNode)
+        .process(solrClient);
+    DocCollection collection = solrClient.getZkStateReader().getClusterState().getCollection(primaryCollectionName);
+    DocCollection withCollection = solrClient.getZkStateReader().getClusterState().getCollection(secondaryCollectionName);
+
+    assertTrue(collection.getReplicas().stream().noneMatch(replica -> withCollection.getReplicas(replica.getNodeName()).isEmpty()));
+  }
+
+  @Test
+  public void testMoveReplicaMainCollection() throws Exception {
+    String prefix = "testMoveReplicaMainCollection";
+    String primaryCollectionName = prefix + "_primary";
+    String secondaryCollectionName = prefix + "_secondary";
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    String chosenNode = cluster.getRandomJetty(random()).getNodeName();
+    log.info("Chosen node {} for collection {}", chosenNode, secondaryCollectionName);
+    CollectionAdminRequest.createCollection(secondaryCollectionName, 1, 1)
+        .setCreateNodeSet(chosenNode) // randomize to avoid choosing the first node always
+        .process(solrClient);
+    CollectionAdminRequest.createCollection(primaryCollectionName, 1, 1)
+        .setWithCollection(secondaryCollectionName)
+        .setCreateNodeSet(chosenNode)
+        .process(solrClient);
+
+    String otherNode = null;
+    for (JettySolrRunner jettySolrRunner : cluster.getJettySolrRunners()) {
+      if (!chosenNode.equals(jettySolrRunner.getNodeName())) {
+        otherNode = jettySolrRunner.getNodeName();
+      }
+    }
+
+    DocCollection collection = solrClient.getZkStateReader().getClusterState().getCollection(primaryCollectionName);
+    DocCollection withCollection = solrClient.getZkStateReader().getClusterState().getCollection(secondaryCollectionName);
+    assertNull(collection.getReplicas(otherNode)); // sanity check
+    assertNull(withCollection.getReplicas(otherNode)); // sanity check
+
+    new CollectionAdminRequest.MoveReplica(primaryCollectionName, collection.getReplicas().iterator().next().getName(), otherNode)
+        .process(solrClient);
+//    zkClient().printLayoutToStdOut();
+    collection = solrClient.getZkStateReader().getClusterState().getCollection(primaryCollectionName); // refresh
+    DocCollection withCollectionRefreshed = solrClient.getZkStateReader().getClusterState().getCollection(secondaryCollectionName); // refresh
+    assertTrue(collection.getReplicas().stream().noneMatch(
+        replica -> withCollectionRefreshed.getReplicas(replica.getNodeName()) == null
+            || withCollectionRefreshed.getReplicas(replica.getNodeName()).isEmpty()));
+  }
+
+  @Test
+  public void testMoveReplicaWithCollection() throws Exception {
+    String prefix = "testMoveReplicaWithCollection";
+    String xyz = prefix + "_xyz";
+    String abc = prefix + "_abc";
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    String chosenNode = cluster.getRandomJetty(random()).getNodeName();
+    log.info("Chosen node {} for collection {}", chosenNode, abc);
+    CollectionAdminRequest.createCollection(abc, 1, 1)
+        .setCreateNodeSet(chosenNode) // randomize to avoid choosing the first node always
+        .process(solrClient);
+    CollectionAdminRequest.createCollection(xyz, 1, 1)
+        .setWithCollection(abc)
+        .process(solrClient);
+
+    DocCollection collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    assertEquals(chosenNode, collection.getReplicas().iterator().next().getNodeName());
+
+    String otherNode = null;
+    for (JettySolrRunner jettySolrRunner : cluster.getJettySolrRunners()) {
+      if (!chosenNode.equals(jettySolrRunner.getNodeName())) {
+        otherNode = jettySolrRunner.getNodeName();
+      }
+    }
+
+    collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    DocCollection withCollection = solrClient.getZkStateReader().getClusterState().getCollection(abc);
+    assertNull(collection.getReplicas(otherNode)); // sanity check
+    assertNull(withCollection.getReplicas(otherNode)); // sanity check
+
+    try {
+      new CollectionAdminRequest.MoveReplica(abc, collection.getReplicas().iterator().next().getName(), otherNode)
+          .process(solrClient);
+      fail("Expected moving a replica of 'withCollection': " + abc + " to fail");
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      assertTrue(e.getMessage().contains("Collection: testMoveReplicaWithCollection_abc is co-located with collection: testMoveReplicaWithCollection_xyz"));
+    }
+//    zkClient().printLayoutToStdOut();
+    collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz); // refresh
+    DocCollection withCollectionRefreshed = solrClient.getZkStateReader().getClusterState().getCollection(abc); // refresh
+
+    // sanity check that the failed move operation didn't actually change our co-location guarantees
+    assertTrue(collection.getReplicas().stream().noneMatch(
+        replica -> withCollectionRefreshed.getReplicas(replica.getNodeName()) == null
+            || withCollectionRefreshed.getReplicas(replica.getNodeName()).isEmpty()));
+  }
+
+  /**
+   * Tests that when a new node is added to the cluster and the autoscaling framework
+   * moves replicas to the new node, all co-location guarantees are maintained.
+   */
+  public void testNodeAdded() throws Exception  {
+    String prefix = "testNodeAdded";
+    String xyz = prefix + "_xyz";
+    String abc = prefix + "_abc";
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    String chosenNode = cluster.getRandomJetty(random()).getNodeName();
+    log.info("Chosen node {} for collection {}", chosenNode, abc);
+    CollectionAdminRequest.createCollection(abc, 1, 1)
+        .setCreateNodeSet(chosenNode) // randomize to avoid choosing the first node always
+        .process(solrClient);
+    CollectionAdminRequest.createCollection(xyz, 1, 1)
+        .setWithCollection(abc)
+        .process(solrClient);
+
+    DocCollection collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    assertEquals(chosenNode, collection.getReplicas().iterator().next().getNodeName());
+
+
+    Optional<JettySolrRunner> other = cluster.getJettySolrRunners()
+        .stream().filter(j -> !chosenNode.equals(j.getNodeName())).findAny();
+    String otherNode = other.orElseThrow(AssertionError::new).getNodeName();
+
+    // add an extra replica of abc collection on a different node
+    CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(abc, "shard1")
+        .setNode(otherNode);
+    addReplica.setWaitForFinalState(true);
+    addReplica.process(solrClient);
+
+    // refresh
+    collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    DocCollection withCollection = solrClient.getZkStateReader().getClusterState().getCollection(abc);
+
+    // sanity check
+    assertColocated(collection, otherNode, withCollection);
+
+    assertEquals(1, collection.getReplicas().size());
+    Replica xyzReplica = collection.getReplicas().get(0);
+
+    // start a new node
+    JettySolrRunner newNode = cluster.startJettySolrRunner();
+    assertTrue("Action was not fired till 30 seconds", LATCH.await(30, TimeUnit.SECONDS));
+    // refresh
+    collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    withCollection = solrClient.getZkStateReader().getClusterState().getCollection(abc);
+
+    // sanity check
+    assertColocated(collection, otherNode, withCollection);
+
+    // assert that the replica of xyz collection was not moved
+    assertNotNull(collection.getReplica(xyzReplica.getName()));
+    assertEquals(chosenNode, collection.getReplicas().get(0).getNodeName());
+
+    // add an extra replica of xyz collection -- this should be placed on the 'otherNode'
+    addReplica = CollectionAdminRequest.addReplicaToShard(xyz, "shard1");
+    addReplica.setWaitForFinalState(true);
+    addReplica.process(solrClient);
+
+    // refresh
+    collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    withCollection = solrClient.getZkStateReader().getClusterState().getCollection(abc);
+
+    List<Replica> replicas = collection.getReplicas(otherNode);
+    assertNotNull(replicas);
+    assertEquals(1, replicas.size());
+    replicas = withCollection.getReplicas(otherNode);
+    assertNotNull(replicas);
+    assertEquals(1, replicas.size());
+
+    // add an extra replica of xyz collection -- this should be placed on the 'newNode'
+    addReplica = CollectionAdminRequest.addReplicaToShard(xyz, "shard1");
+    addReplica.setWaitForFinalState(true);
+    addReplica.process(solrClient);
+
+    // refresh
+    collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    withCollection = solrClient.getZkStateReader().getClusterState().getCollection(abc);
+
+    assertNotNull(collection.getReplicas(newNode.getNodeName()));
+    replicas = collection.getReplicas(newNode.getNodeName());
+    assertNotNull(replicas);
+    assertEquals(1, replicas.size());
+    replicas = withCollection.getReplicas(newNode.getNodeName());
+    assertNotNull(replicas);
+    assertEquals(1, replicas.size());
+  }
+
+  public void testMultipleWithCollections() throws Exception {
+    String prefix = "testMultipleWithCollections";
+    String xyz = prefix + "_xyz";
+    String xyz2 = prefix + "_xyz2";
+    String abc = prefix + "_abc";
+    String abc2 = prefix + "_abc2";
+
+    // start 2 more nodes so we have 4 in total
+    cluster.startJettySolrRunner();
+    cluster.startJettySolrRunner();
+    cluster.waitForAllNodes(30);
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    String chosenNode = cluster.getJettySolrRunner(0).getNodeName();
+    log.info("Chosen node {} for collection {}", chosenNode, abc);
+
+    CollectionAdminRequest.createCollection(abc, 1, 1)
+        .setCreateNodeSet(chosenNode)
+        .process(solrClient);
+    CollectionAdminRequest.createCollection(xyz, 1, 1)
+        .setWithCollection(abc)
+        .process(solrClient);
+
+    String chosenNode2 = cluster.getJettySolrRunner(1).getNodeName();
+    log.info("Chosen node {} for collection {}", chosenNode2, abc2);
+    CollectionAdminRequest.createCollection(abc2, 1, 1)
+        .setCreateNodeSet(chosenNode2)
+        .process(solrClient);
+    CollectionAdminRequest.createCollection(xyz2, 1, 1)
+        .setWithCollection(abc2)
+        .process(solrClient);
+
+    // refresh
+    DocCollection collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    DocCollection collection2 = solrClient.getZkStateReader().getClusterState().getCollection(xyz2);
+    DocCollection withCollection = solrClient.getZkStateReader().getClusterState().getCollection(abc);
+    DocCollection withCollection2 = solrClient.getZkStateReader().getClusterState().getCollection(abc2);
+
+    // sanity check
+    assertColocated(collection, chosenNode2, withCollection); // no replica should be on chosenNode2
+    assertColocated(collection2, chosenNode, withCollection2); // no replica should be on chosenNode
+
+    String chosenNode3 = cluster.getJettySolrRunner(2).getNodeName();
+    CollectionAdminRequest.addReplicaToShard(xyz, "shard1")
+        .setNode(chosenNode3)
+        .process(solrClient);
+    String chosenNode4 = cluster.getJettySolrRunner(2).getNodeName();
+    CollectionAdminRequest.addReplicaToShard(xyz2, "shard1")
+        .setNode(chosenNode4)
+        .process(solrClient);
+
+    collection = solrClient.getZkStateReader().getClusterState().getCollection(xyz);
+    collection2 = solrClient.getZkStateReader().getClusterState().getCollection(xyz2);
+    withCollection = solrClient.getZkStateReader().getClusterState().getCollection(abc);
+    withCollection2 = solrClient.getZkStateReader().getClusterState().getCollection(abc2);
+
+    // sanity check
+    assertColocated(collection, null, withCollection);
+    assertColocated(collection2, null, withCollection2);
+  }
+
+  /**
+   * Asserts that all replicas of the collection are co-located with at least one
+   * replica of the withCollection, and that none of them are on the given 'noneOnNode'.
+   */
+  private void assertColocated(DocCollection collection, String noneOnNode, DocCollection withCollection) {
+    // sanity check
+    assertTrue(collection.getReplicas().stream().noneMatch(
+        replica -> withCollection.getReplicas(replica.getNodeName()) == null
+            || withCollection.getReplicas(replica.getNodeName()).isEmpty()));
+
+    if (noneOnNode != null) {
+      assertTrue(collection.getReplicas().stream().noneMatch(
+          replica -> noneOnNode.equals(replica.getNodeName())));
+    }
+  }
+
+  private static CountDownLatch LATCH = new CountDownLatch(1);
+}
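
For reference, below is a minimal SolrJ sketch of the co-location workflow that the new TestWithCollection test exercises and that the MoveReplicaCmd and CollectionsHandler changes above guard. It is only an illustration, not part of the commit: the ZooKeeper address "localhost:9983" and the collection names "orders" and "products" are hypothetical, and it assumes a branch with the withCollection support shown in this patch; the setWithCollection, modifyCollection/unsetAttribute and deleteCollection calls are used exactly as in the test code above.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class WithCollectionExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical ZooKeeper address; adjust to your cluster.
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:9983"), Optional.empty()).build()) {

      // The secondary ("with") collection must already exist and have exactly one shard.
      CollectionAdminRequest.createCollection("products", 1, 1).process(client);

      // Co-locate "orders" with "products": every node that hosts an "orders" replica
      // must also host at least one "products" replica.
      CollectionAdminRequest.createCollection("orders", 1, 1)
          .setWithCollection("products")
          .process(client);

      // While the link exists, deleting "products" is rejected
      // ("... is co-located with collection ..."), and moving its only co-located
      // replica off a shared node is rejected by the MoveReplicaCmd check added above.
      // Remove the link first; then the secondary collection can be deleted.
      CollectionAdminRequest.modifyCollection("orders", null)
          .unsetAttribute("withCollection")
          .process(client);
      CollectionAdminRequest.deleteCollection("products").process(client);
    }
  }
}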