Posted to commits@lucene.apache.org by sh...@apache.org on 2017/06/25 02:06:27 UTC

[05/47] lucene-solr:feature/autoscaling: SOLR-8256: Set legacyCloud=false as default

SOLR-8256: Set legacyCloud=false as default


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8e9d685a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8e9d685a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8e9d685a

Branch: refs/heads/feature/autoscaling
Commit: 8e9d685a402c03d6bf0691d79ae5030f38f09053
Parents: eff583e
Author: Cao Manh Dat <da...@apache.org>
Authored: Wed Jun 21 22:25:39 2017 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Wed Jun 21 22:25:39 2017 +0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  10 +
 .../org/apache/solr/cloud/CreateShardCmd.java   |  74 ++--
 .../java/org/apache/solr/cloud/Overseer.java    |   4 +-
 .../solr/cloud/BaseCdcrDistributedZkTest.java   |  24 +-
 .../solr/cloud/BasicDistributedZk2Test.java     |  26 +-
 .../solr/cloud/BasicDistributedZkTest.java      | 119 ++++---
 .../solr/cloud/ClusterStateUpdateTest.java      | 174 ++--------
 .../cloud/CollectionsAPIDistributedZkTest.java  | 125 ++-----
 .../solr/cloud/CollectionsAPISolrJTest.java     |  14 +-
 .../cloud/LeaderElectionIntegrationTest.java    | 297 +++++-----------
 .../cloud/LeaderFailureAfterFreshStartTest.java |   4 +-
 ...verseerCollectionConfigSetProcessorTest.java |  41 ++-
 .../org/apache/solr/cloud/OverseerTest.java     | 339 ++++++-------------
 .../org/apache/solr/cloud/ReplaceNodeTest.java  |   2 +-
 .../solr/cloud/ShardRoutingCustomTest.java      |  12 +
 .../solr/cloud/UnloadDistributedZkTest.java     | 169 ++++-----
 .../apache/solr/handler/TestConfigReload.java   |   2 +-
 .../solr/schema/TestCloudManagedSchema.java     |   2 +-
 .../solrj/request/CollectionAdminRequest.java   |   9 +
 .../solr/BaseDistributedSearchTestCase.java     |  15 +-
 .../solr/cloud/AbstractDistribZkTestBase.java   |  18 +-
 .../cloud/AbstractFullDistribZkTestBase.java    | 136 ++++----
 .../apache/solr/cloud/AbstractZkTestCase.java   |   6 +-
 23 files changed, 587 insertions(+), 1035 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index f15fa45..d754590 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -21,6 +21,14 @@ See the Quick Start guide at http://lucene.apache.org/solr/quickstart.html
 Upgrading from Solr 6.x
 ----------------------
 
+* The cluster property 'legacyCloud' now defaults to false in 7.0, which means ZooKeeper is the one
+  source of truth by default. If a replica has no entry in state.json, that replica cannot get
+  registered. This may affect users who relied on bringing up cores and having them automatically
+  registered as replicas of a shard. It is still possible to fall back to the old behavior by setting
+  legacyCloud=true in the cluster properties with the following command:
+
+  ./server/scripts/cloud-scripts/zkcli.sh -zkhost 127.0.0.1:2181 -cmd clusterprop -name legacyCloud -val true
+
 * HttpClientInterceptorPlugin is now HttpClientBuilderPlugin and must work with a 
   SolrHttpClientBuilder rather than an HttpClientConfigurer.
   
@@ -290,6 +298,8 @@ Other Changes
 
 * SOLR-4646: eDismax lowercaseOperators now defaults to "false" for luceneMatchVersion >= 7.0.0 (janhoy, David Smiley)
 
+* SOLR-8256: Set legacyCloud=false as default (Cao Manh Dat)
+
 ==================  6.7.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
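
Besides the zkcli command shown in the upgrade note above, the same fallback can be expressed through
the Collections API from SolrJ, which is what the CollectionsAPISolrJTest changes further down in this
commit exercise. A minimal sketch, assuming a CloudSolrClient pointed at the same ZooKeeper (the
address is a placeholder):

  import org.apache.solr.client.solrj.impl.CloudSolrClient;
  import org.apache.solr.client.solrj.request.CollectionAdminRequest;
  import org.apache.solr.client.solrj.response.CollectionAdminResponse;
  import org.apache.solr.common.cloud.ZkStateReader;

  public class LegacyCloudFallback {
    public static void main(String[] args) throws Exception {
      // Placeholder ZooKeeper address; replace with the real ensemble.
      try (CloudSolrClient client = new CloudSolrClient.Builder()
          .withZkHost("127.0.0.1:2181").build()) {
        // CLUSTERPROP legacyCloud=true restores the pre-7.0 behavior.
        CollectionAdminResponse rsp = CollectionAdminRequest
            .setClusterProperty(ZkStateReader.LEGACY_CLOUD, "true")
            .process(client);
        System.out.println("status: " + rsp.getStatus());
      }
    }
  }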

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
index d3eb828..f96dd0c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
@@ -18,10 +18,10 @@ package org.apache.solr.cloud;
 
 
 import java.lang.invoke.MethodHandles;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
@@ -32,16 +32,13 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.util.TimeOut;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.solr.cloud.Assign.getNodesForNewReplicas;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
 import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
@@ -49,7 +46,6 @@ import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
 
 public class CreateShardCmd implements Cmd {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -67,11 +63,8 @@ public class CreateShardCmd implements Cmd {
     log.info("Create shard invoked: {}", message);
     if (collectionName == null || sliceName == null)
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
-    int numSlices = 1;
 
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
     DocCollection collection = clusterState.getCollection(collectionName);
-//    int repFactor = message.getInt(REPLICATION_FACTOR, collection.getInt(REPLICATION_FACTOR, 1));
     int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
     int numPullReplicas = message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0));
     int numTlogReplicas = message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0));
@@ -88,26 +81,12 @@ public class CreateShardCmd implements Cmd {
     ZkStateReader zkStateReader = ocmh.zkStateReader;
     Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
     // wait for a while until we see the shard
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
-    boolean created = false;
-    while (!timeout.hasTimedOut()) {
-      Thread.sleep(100);
-      created = zkStateReader.getClusterState().getCollection(collectionName).getSlice(sliceName) != null;
-      if (created) break;
-    }
-    if (!created)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create shard: " + message.getStr(NAME));
-
-    String configName = message.getStr(COLL_CONF);
+    ocmh.waitForNewShard(collectionName, sliceName);
 
     String async = message.getStr(ASYNC);
-    Map<String, String> requestMap = null;
-    if (async != null) {
-      requestMap = new HashMap<>(totalReplicas, 1.0f);
-    }
     
     int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
-
+    CountDownLatch countDownLatch = new CountDownLatch(totalReplicas);
     for (int j = 1; j <= totalReplicas; j++) {
       int coreNameNumber;
       Replica.Type typeToCreate;
@@ -131,20 +110,41 @@ public class CreateShardCmd implements Cmd {
           + " on " + nodeName);
 
       // Need to create new params for each request
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-      params.set(CoreAdminParams.NAME, coreName);
-      params.set(CoreAdminParams.REPLICA_TYPE, typeToCreate.name());
-      params.set(COLL_CONF, configName);
-      params.set(CoreAdminParams.COLLECTION, collectionName);
-      params.set(CoreAdminParams.SHARD, sliceName);
-      params.set(ZkStateReader.NUM_SHARDS_PROP, numSlices);
-      ocmh.addPropertyParams(message, params);
-
-      ocmh.sendShardRequest(nodeName, params, shardHandler, async, requestMap);
+      ZkNodeProps addReplicasProps = new ZkNodeProps(
+          COLLECTION_PROP, collectionName,
+          SHARD_ID_PROP, sliceName,
+          CoreAdminParams.REPLICA_TYPE, typeToCreate.name(),
+          CoreAdminParams.NODE, nodeName,
+          CoreAdminParams.NAME, coreName);
+      Map<String, Object> propertyParams = new HashMap<>();
+      ocmh.addPropertyParams(message, propertyParams);
+      addReplicasProps = addReplicasProps.plus(propertyParams);
+      if(async!=null) addReplicasProps.getProperties().put(ASYNC, async);
+      final NamedList addResult = new NamedList();
+      ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, ()-> {
+        countDownLatch.countDown();
+        Object addResultFailure = addResult.get("failure");
+        if (addResultFailure != null) {
+          SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
+          if (failure == null) {
+            failure = new SimpleOrderedMap();
+            results.add("failure", failure);
+          }
+          failure.addAll((NamedList) addResultFailure);
+        } else {
+          SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
+          if (success == null) {
+            success = new SimpleOrderedMap();
+            results.add("success", success);
+          }
+          success.addAll((NamedList) addResult.get("success"));
+        }
+      });
     }
 
-    ocmh.processResponses(results, shardHandler, true, "Failed to create shard", async, requestMap, Collections.emptySet());
+    log.debug("Waiting for create shard action to complete");
+    countDownLatch.await(5, TimeUnit.MINUTES);
+    log.debug("Finished waiting for create shard action to complete");
 
     log.info("Finished create command on all shards for collection: " + collectionName);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index 9919e06..7e1f8c4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -761,8 +761,8 @@ public class Overseer implements Closeable {
     }
   }
   public static boolean isLegacy(ZkStateReader stateReader) {
-    String legacyProperty = stateReader.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true");
-    return !"false".equals(legacyProperty);
+    String legacyProperty = stateReader.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false");
+    return "true".equals(legacyProperty);
   }
 
   public ZkStateReader getZkStateReader() {
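
With the change above, Overseer.isLegacy only returns true on an explicit opt-in, so a core can no
longer register itself into a collection that has no entry for it in state.json. The test changes
below therefore switch to creating the collection first with an empty createNodeSet and then adding
each replica explicitly. A minimal SolrJ sketch of that pattern; the ZooKeeper address, collection
name, and node name are placeholders:

  import org.apache.solr.client.solrj.SolrClient;
  import org.apache.solr.client.solrj.impl.CloudSolrClient;
  import org.apache.solr.client.solrj.request.CollectionAdminRequest;

  public class ExplicitReplicaRegistration {
    public static void main(String[] args) throws Exception {
      try (SolrClient client = new CloudSolrClient.Builder()
          .withZkHost("127.0.0.1:2181").build()) {
        // 1. Register the collection and its shards in ZooKeeper without creating any cores.
        CollectionAdminRequest.createCollection("tmp_collection", 2, 1)
            .setCreateNodeSet("")
            .process(client);
        // 2. Attach a replica to a named shard on a chosen node; with legacyCloud=false
        //    a core cannot self-register against state.json anymore.
        CollectionAdminRequest.addReplicaToShard("tmp_collection", "shard1")
            .setNode("127.0.0.1:8983_solr")
            .process(client);
      }
    }
  }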

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java
index 8a88959..ef2e224 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java
@@ -37,6 +37,7 @@ import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.common.SolrInputDocument;
@@ -46,6 +47,7 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.CommonParams;
@@ -59,6 +61,7 @@ import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.CdcrParams;
 import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -69,6 +72,7 @@ import org.slf4j.LoggerFactory;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
 import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_PROPS;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 import static org.apache.solr.handler.admin.CoreAdminHandler.COMPLETED;
@@ -142,6 +146,11 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
         zkStateReader.getZkClient().create(ZkStateReader.CLUSTER_PROPS,
             Utils.toJSON(Collections.singletonMap("urlScheme", "https")),
             CreateMode.PERSISTENT, true);
+      } catch (KeeperException.NodeExistsException e) {
+        ZkNodeProps props = ZkNodeProps.load(zkStateReader.getZkClient().getData(ZkStateReader.CLUSTER_PROPS,
+            null, null, true));
+        props = props.plus("urlScheme", "https");
+        zkStateReader.getZkClient().setData(CLUSTER_PROPS, Utils.toJSON(props), true);
       } finally {
         zkStateReader.close();
       }
@@ -567,7 +576,7 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
    */
   protected List<String> startServers(int nServer) throws Exception {
     String temporaryCollection = "tmp_collection";
-    System.setProperty("collection", temporaryCollection);
+
     for (int i = 1; i <= nServer; i++) {
       // give everyone their own solrhome
       File jettyDir = createTempDir("jetty").toFile();
@@ -577,6 +586,19 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
       jettys.add(jetty);
     }
 
+    try (SolrClient client = createCloudClient(temporaryCollection)) {
+      assertEquals(0, CollectionAdminRequest
+          .createCollection(temporaryCollection, shardCount, 1)
+          .setCreateNodeSet("")
+          .process(client).getStatus());
+      for (int i = 0; i < jettys.size(); i++) {
+        assertTrue(CollectionAdminRequest
+            .addReplicaToShard(temporaryCollection, "shard"+((i % shardCount) + 1))
+            .setNode(jettys.get(i).getNodeName())
+            .process(client).isSuccess());
+      }
+    }
+
     ZkStateReader zkStateReader = jettys.get(0).getCoreContainer().getZkController().getZkStateReader();
 
     // now wait till we see the leader for each shard

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
index 02692a0..b95d558 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
@@ -31,7 +31,7 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
@@ -153,21 +153,15 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
   }
   
   private void testNodeWithoutCollectionForwarding() throws Exception {
-    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-    try (HttpSolrClient client = getHttpSolrClient(baseUrl)) {
-      client.setConnectionTimeout(30000);
-      Create createCmd = new Create();
-      createCmd.setRoles("none");
-      createCmd.setCoreName(ONE_NODE_COLLECTION + "core");
-      createCmd.setCollection(ONE_NODE_COLLECTION);
-      createCmd.setNumShards(1);
-      createCmd.setDataDir(getDataDir(createTempDir(ONE_NODE_COLLECTION).toFile().getAbsolutePath()));
-      client.request(createCmd);
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail(e.getMessage());
-    }
-    
+    assertEquals(0, CollectionAdminRequest
+        .createCollection(ONE_NODE_COLLECTION, 1, 1)
+        .setCreateNodeSet("")
+        .process(cloudClient).getStatus());
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard(ONE_NODE_COLLECTION, "shard1")
+        .setCoreName(ONE_NODE_COLLECTION + "core")
+        .process(cloudClient).isSuccess());
+
     waitForCollection(cloudClient.getZkStateReader(), ONE_NODE_COLLECTION, 1);
     waitForRecoveriesToFinish(ONE_NODE_COLLECTION, cloudClient.getZkStateReader(), false);
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 3ed6f74..ac96b62 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -25,8 +25,10 @@ import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
@@ -570,19 +572,27 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 
   protected void createCores(final HttpSolrClient client,
       ThreadPoolExecutor executor, final String collection, final int numShards, int cnt) {
+    try {
+      assertEquals(0, CollectionAdminRequest.createCollection(collection, numShards, 1)
+          .setCreateNodeSet("")
+          .process(client).getStatus());
+    } catch (SolrServerException | IOException e) {
+      throw new RuntimeException(e);
+    }
+    String nodeName = null;
+    for (JettySolrRunner jetty : jettys) {
+      if (client.getBaseURL().startsWith(jetty.getBaseUrl().toString())) {
+        nodeName = jetty.getNodeName();
+      }
+    }
     for (int i = 0; i < cnt; i++) {
       final int freezeI = i;
+      final String freezeNodename = nodeName;
       executor.execute(() -> {
-        Create createCmd = new Create();
-        createCmd.setCoreName(collection + freezeI);
-        createCmd.setCollection(collection);
-
-        createCmd.setNumShards(numShards);
         try {
-          String core3dataDir = createTempDir(collection).toFile().getAbsolutePath();
-          createCmd.setDataDir(getDataDir(core3dataDir));
-
-          client.request(createCmd);
+          assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard"+((freezeI%numShards)+1))
+              .setCoreName(collection + freezeI)
+              .setNode(freezeNodename).process(client).isSuccess());
         } catch (SolrServerException | IOException e) {
           throw new RuntimeException(e);
         }
@@ -780,22 +790,20 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 
   private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
     log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
-    System.clearProperty("numShards");
+    assertEquals(0, CollectionAdminRequest.createCollection(oneInstanceCollection2, 2, 2)
+        .setCreateNodeSet("")
+        .setMaxShardsPerNode(4)
+        .process(cloudClient).getStatus());
+
     List<SolrClient> collectionClients = new ArrayList<>();
-    SolrClient client = clients.get(0);
-    final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
-        0,
-        ((HttpSolrClient) client).getBaseURL().length()
-            - DEFAULT_COLLECTION.length() - 1);
-    createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 1, "slice1");
-    createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 2, "slice2");
-    createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 3, "slice2");
-    createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 4, "slice1");
-    
-   while (pending != null && pending.size() > 0) {
-      
-      Future<Object> future = completionService.take();
-      pending.remove(future);
+    for (int i = 0; i < 4; i++) {
+      CollectionAdminResponse resp = CollectionAdminRequest
+          .addReplicaToShard(oneInstanceCollection2, "shard" + ((i%2)+1))
+          .setNode(jettys.get(0).getNodeName())
+          .process(cloudClient);
+      for (String coreName : resp.getCollectionCoresStatus().keySet()) {
+        collectionClients.add(createNewSolrClient(coreName, jettys.get(0).getBaseUrl().toString()));
+      }
     }
     
     SolrClient client1 = collectionClients.get(0);
@@ -846,14 +854,11 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
     zkStateReader.forceUpdateCollection(oneInstanceCollection2);
     Map<String,Slice> slices = zkStateReader.getClusterState().getSlicesMap(oneInstanceCollection2);
     assertNotNull(slices);
-    String roles = slices.get("slice1").getReplicasMap().values().iterator().next().getStr(ZkStateReader.ROLES_PROP);
-    assertEquals("none", roles);
     
-    
-    ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "slice1"));
+    ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "shard1"));
     
     // now test that unloading a core gets us a new leader
-    try (HttpSolrClient unloadClient = getHttpSolrClient(baseUrl)) {
+    try (HttpSolrClient unloadClient = getHttpSolrClient(jettys.get(0).getBaseUrl().toString())) {
       unloadClient.setConnectionTimeout(15000);
       unloadClient.setSoTimeout(60000);
       Unload unloadCmd = new Unload(true);
@@ -864,7 +869,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
       unloadClient.request(unloadCmd);
 
       int tries = 50;
-      while (leader.equals(zkStateReader.getLeaderUrl(oneInstanceCollection2, "slice1", 10000))) {
+      while (leader.equals(zkStateReader.getLeaderUrl(oneInstanceCollection2, "shard1", 10000))) {
         Thread.sleep(100);
         if (tries-- == 0) {
           fail("Leader never changed");
@@ -911,22 +916,14 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 
   private void testANewCollectionInOneInstance() throws Exception {
     log.info("### STARTING testANewCollectionInOneInstance");
+    CollectionAdminResponse response = CollectionAdminRequest.createCollection(oneInstanceCollection, 2, 2)
+        .setCreateNodeSet(jettys.get(0).getNodeName())
+        .setMaxShardsPerNode(4)
+        .process(cloudClient);
+    assertEquals(0, response.getStatus());
     List<SolrClient> collectionClients = new ArrayList<>();
-    SolrClient client = clients.get(0);
-    final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
-        0,
-        ((HttpSolrClient) client).getBaseURL().length()
-            - DEFAULT_COLLECTION.length() - 1);
-    createCollection(oneInstanceCollection, collectionClients, baseUrl, 1);
-    createCollection(oneInstanceCollection, collectionClients, baseUrl, 2);
-    createCollection(oneInstanceCollection, collectionClients, baseUrl, 3);
-    createCollection(oneInstanceCollection, collectionClients, baseUrl, 4);
-    
-   while (pending != null && pending.size() > 0) {
-      
-      Future<Object> future = completionService.take();
-      if (future == null) return;
-      pending.remove(future);
+    for (String coreName : response.getCollectionCoresStatus().keySet()) {
+      collectionClients.add(createNewSolrClient(coreName, jettys.get(0).getBaseUrl().toString()));
     }
    
     SolrClient client1 = collectionClients.get(0);
@@ -1083,26 +1080,28 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
   }
   
   private void createNewCollection(final String collection) throws InterruptedException {
+    try {
+      assertEquals(0, CollectionAdminRequest
+          .createCollection(collection, 2, 1)
+          .setCreateNodeSet("")
+          .process(cloudClient).getStatus());
+    } catch (Exception e) {
+      e.printStackTrace();
+      //fails
+    }
     final List<SolrClient> collectionClients = new ArrayList<>();
     otherCollectionClients.put(collection, collectionClients);
-    int unique = 0;
-    for (final SolrClient client : clients) {
+    int unique = 0 ;
+    for (final JettySolrRunner runner : jettys) {
       unique++;
-      final String baseUrl = ((HttpSolrClient) client).getBaseURL()
-          .substring(
-              0,
-              ((HttpSolrClient) client).getBaseURL().length()
-                  - DEFAULT_COLLECTION.length() -1);
       final int frozeUnique = unique;
       Callable call = () -> {
 
-        try (HttpSolrClient client1 = getHttpSolrClient(baseUrl)) {
-          client1.setConnectionTimeout(15000);
-          client1.setSoTimeout(60000);
-          Create createCmd = new Create();
-          createCmd.setCoreName(collection);
-          createCmd.setDataDir(getDataDir(createTempDir(collection).toFile().getAbsolutePath()));
-          client1.request(createCmd);
+        try {
+          assertTrue(CollectionAdminRequest
+              .addReplicaToShard(collection, "shard"+ ((frozeUnique%2)+1))
+              .setNode(runner.getNodeName())
+              .process(cloudClient).isSuccess());
         } catch (Exception e) {
           e.printStackTrace();
           //fails
@@ -1110,7 +1109,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
         return null;
       };
      
-      collectionClients.add(createNewSolrClient(collection, baseUrl));
+      collectionClients.add(createNewSolrClient(collection, runner.getBaseUrl().toString()));
       pending.add(completionService.submit(call));
       while (pending != null && pending.size() > 0) {
         

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
index 0e9df87..da3ad28 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
@@ -16,25 +16,17 @@
  */
 package org.apache.solr.cloud;
 
-import java.io.File;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.zookeeper.CreateMode;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -42,37 +34,21 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @Slow
-public class ClusterStateUpdateTest extends SolrTestCaseJ4  {
+public class ClusterStateUpdateTest extends SolrCloudTestCase  {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  protected ZkTestServer zkServer;
-
-  protected String zkDir;
-
-  private CoreContainer container1;
-
-  private CoreContainer container2;
-
-  private CoreContainer container3;
-
-  private File dataDir1;
-
-  private File dataDir2;
-
-  private File dataDir3;
-  
-  private File dataDir4;
-
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    configureCluster(3)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
 
-  private static volatile File solrHomeDirectory;
+  }
 
   @BeforeClass
-  public static void beforeClass() throws IOException {
-    solrHomeDirectory = createTempDir().toFile();
+  public static void beforeClass() {
     System.setProperty("solrcloud.skip.autorecovery", "true");
-    System.setProperty("genericCoreNodeNames", "false");
-    copyMinFullSetup(solrHomeDirectory);
-
   }
 
   @AfterClass
@@ -80,82 +56,16 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4  {
     System.clearProperty("solrcloud.skip.autorecovery");
     System.clearProperty("genericCoreNodeNames");
   }
-
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    System.setProperty("zkClientTimeout", "3000");
-    File tmpDir = createTempDir("zkData").toFile();
-    zkDir = tmpDir.getAbsolutePath();
-    zkServer = new ZkTestServer(zkDir);
-    zkServer.run();
-    System.setProperty("zkHost", zkServer.getZkAddress());
-    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer
-        .getZkAddress(), "solrconfig.xml", "schema.xml");
-    
-    log.info("####SETUP_START " + getTestName());
-    dataDir1 = new File(tmpDir + File.separator + "data1");
-    dataDir1.mkdirs();
-    
-    dataDir2 = new File(tmpDir + File.separator + "data2");
-    dataDir2.mkdirs();
-    
-    dataDir3 = new File(tmpDir + File.separator + "data3");
-    dataDir3.mkdirs();
-    
-    dataDir4 = new File(tmpDir + File.separator + "data4");
-    dataDir4.mkdirs();
-    
-    // set some system properties for use by tests
-    System.setProperty("solr.test.sys.prop1", "propone");
-    System.setProperty("solr.test.sys.prop2", "proptwo");
-    
-    System.setProperty("solr.solr.home", TEST_HOME());
-    System.setProperty("hostPort", "1661");
-    System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir1.getAbsolutePath());
-    container1 = new CoreContainer(solrHomeDirectory.getAbsolutePath());
-    container1.load();
-    System.clearProperty("hostPort");
-    
-    System.setProperty("hostPort", "1662");
-    System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir2.getAbsolutePath());
-    container2 = new CoreContainer(solrHomeDirectory.getAbsolutePath());
-    container2.load();
-    System.clearProperty("hostPort");
-    
-    System.setProperty("hostPort", "1663");
-    System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir3.getAbsolutePath());
-    container3 = new CoreContainer(solrHomeDirectory.getAbsolutePath());
-    container3.load();
-    System.clearProperty("hostPort");
-    System.clearProperty("solr.solr.home");
-    
-    log.info("####SETUP_END " + getTestName());
-    
-  }
-
   
   @Test
   public void testCoreRegistration() throws Exception {
     System.setProperty("solrcloud.update.delay", "1");
-    
-   
-    Map<String,Object> props2 = new HashMap<>();
-    props2.put("configName", "conf1");
-    ZkNodeProps zkProps2 = new ZkNodeProps(props2);
-    
-    SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(),
-        AbstractZkTestCase.TIMEOUT);
-    zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/testcore",
-        Utils.toJSON(zkProps2), CreateMode.PERSISTENT, true);
-    zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/testcore/shards",
-        CreateMode.PERSISTENT, true);
-    zkClient.close();
 
-    container1.create("testcore", ImmutableMap.of("dataDir", dataDir4.getAbsolutePath()));
-    
-    ZkController zkController2 = container2.getZkController();
+    assertEquals(0, CollectionAdminRequest.createCollection("testcore", 1,1)
+        .setCreateNodeSet(cluster.getJettySolrRunner(0).getNodeName())
+        .process(cluster.getSolrClient()).getStatus());
+
+    ZkController zkController2 = cluster.getJettySolrRunner(1).getCoreContainer().getZkController();
 
     String host = zkController2.getHostName();
     
@@ -184,19 +94,22 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4  {
 
     assertEquals(1, shards.size());
 
-    Replica zkProps = shards.get(host + ":1661_solr_testcore");
+    // assert this is core of container1
+    Replica zkProps = shards.get("core_node1");
 
     assertNotNull(zkProps);
 
-    assertEquals(host + ":1661_solr", zkProps.getStr(ZkStateReader.NODE_NAME_PROP));
+    assertEquals(host + ":" +cluster.getJettySolrRunner(0).getLocalPort()+"_solr", zkProps.getStr(ZkStateReader.NODE_NAME_PROP));
 
-    assertEquals("http://" + host + ":1661/solr", zkProps.getStr(ZkStateReader.BASE_URL_PROP));
+    assertEquals("http://" + host + ":"+cluster.getJettySolrRunner(0).getLocalPort()+"/solr", zkProps.getStr(ZkStateReader.BASE_URL_PROP));
 
+    // assert there are 3 live nodes
     Set<String> liveNodes = clusterState2.getLiveNodes();
     assertNotNull(liveNodes);
     assertEquals(3, liveNodes.size());
 
-    container3.shutdown();
+    // shut down node 2
+    cluster.stopJettySolrRunner(2);
 
     // slight pause (15s timeout) for watch to trigger
     for(int i = 0; i < (5 * 15); i++) {
@@ -208,52 +121,21 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4  {
 
     assertEquals(2, zkController2.getClusterState().getLiveNodes().size());
 
-    // quickly kill / start client
-
-    container2.getZkController().getZkClient().getSolrZooKeeper().getConnection()
-        .disconnect();
-    container2.shutdown();
-
-    System.setProperty("hostPort", "1662");
-    System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir2.getAbsolutePath());
-    container2 = new CoreContainer(solrHomeDirectory.getAbsolutePath());
-    container2.load();
-    System.clearProperty("hostPort");
+    cluster.getJettySolrRunner(1).stop();
+    cluster.getJettySolrRunner(1).start();
     
     // pause for watch to trigger
     for(int i = 0; i < 200; i++) {
-      if (container1.getZkController().getClusterState().liveNodesContain(
-          container2.getZkController().getNodeName())) {
+      if (cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getClusterState().liveNodesContain(
+          cluster.getJettySolrRunner(1).getCoreContainer().getZkController().getNodeName())) {
         break;
       }
       Thread.sleep(100);
     }
 
-    assertTrue(container1.getZkController().getClusterState().liveNodesContain(
-        container2.getZkController().getNodeName()));
+    assertTrue(cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getClusterState().liveNodesContain(
+        cluster.getJettySolrRunner(1).getCoreContainer().getZkController().getNodeName()));
 
     // core.close();  // don't close - this core is managed by container1 now
   }
-
-  @Override
-  public void tearDown() throws Exception {
-    container1.shutdown();
-    container2.shutdown();
-    container3.shutdown();
-
-    zkServer.shutdown();
-    super.tearDown();
-    System.clearProperty("zkClientTimeout");
-    System.clearProperty("zkHost");
-    System.clearProperty("hostPort");
-    System.clearProperty("solrcloud.update.delay");
-    System.clearProperty("solr.data.dir");
-  }
-  
-  static void printLayout(String zkHost) throws Exception {
-    SolrZkClient zkClient = new SolrZkClient(
-        zkHost, AbstractZkTestCase.TIMEOUT);
-    zkClient.printLayoutToStdOut();
-    zkClient.close();
-  }
 }
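
ClusterStateUpdateTest above is rewritten from hand-rolled CoreContainer/ZkTestServer plumbing to the
SolrCloudTestCase mini-cluster. A minimal sketch of that test pattern, assuming the cloud-minimal
configset and JUnit 4 as in the diff (class and collection names are made up):

  import org.apache.solr.client.solrj.request.CollectionAdminRequest;
  import org.apache.solr.cloud.SolrCloudTestCase;
  import org.junit.BeforeClass;
  import org.junit.Test;

  public class MiniClusterExampleTest extends SolrCloudTestCase {

    @BeforeClass
    public static void setupCluster() throws Exception {
      // start two Jetty nodes against an embedded ZooKeeper and upload a configset
      configureCluster(2)
          .addConfig("conf", configset("cloud-minimal"))
          .configure();
    }

    @Test
    public void testCreateCollection() throws Exception {
      // all collection work goes through the Collections API and the shared cloud client
      assertEquals(0, CollectionAdminRequest.createCollection("example", "conf", 1, 1)
          .process(cluster.getSolrClient()).getStatus());
    }
  }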

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index ea8598b..bf0d567 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -41,16 +41,13 @@ import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
 import org.apache.solr.client.solrj.request.CoreStatus;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.UpdateRequest;
@@ -157,16 +154,15 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
 
     final String collectionName = "halfdeletedcollection";
 
-    // create a core that simulates something left over from a partially-deleted collection
-    Create createCmd = new Create();
-    createCmd.setCoreName("halfdeletedcollection_shard1_replica1");
-    createCmd.setCollection(collectionName);
-    createCmd.setCollectionConfigName("conf");
+    assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
     String dataDir = createTempDir().toFile().getAbsolutePath();
-    createCmd.setDataDir(dataDir);
-    createCmd.setNumShards(2);
-
-    createCmd.process(cluster.getSolrClient());
+    // create a core that simulates something left over from a partially-deleted collection
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard(collectionName, "shard1")
+        .setDataDir(dataDir)
+        .process(cluster.getSolrClient()).isSuccess());
 
     CollectionAdminRequest.deleteCollection(collectionName)
         .process(cluster.getSolrClient());
@@ -282,32 +278,21 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
 
   @Test
   public void testCreateShouldFailOnExistingCore() throws Exception {
-    
-    // first we make a core with the core name the collections api
-    // will try and use - this will cause our mock fail
-    Create createCmd = new Create();
-    createCmd.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1));
-    createCmd.setCollection("halfcollectionblocker");
-    String dataDir = createTempDir().toFile().getAbsolutePath();
-    createCmd.setDataDir(dataDir);
-    createCmd.setNumShards(1);
-    createCmd.setCollectionConfigName("conf");
-
-    try (SolrClient client = cluster.getJettySolrRunner(0).newClient()) {
-      client.request(createCmd);
-    }
-
-    createCmd = new Create();
-    createCmd.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1));
-    createCmd.setCollection("halfcollectionblocker2");
-    dataDir = createTempDir().toFile().getAbsolutePath();
-    createCmd.setDataDir(dataDir);
-    createCmd.setNumShards(1);
-    createCmd.setCollectionConfigName("conf");
-
-    try (SolrClient client = cluster.getJettySolrRunner(1).newClient()) {
-      client.request(createCmd);
-    }
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
+        .setNode(cluster.getJettySolrRunner(0).getNodeName())
+        .setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1))
+        .process(cluster.getSolrClient()).isSuccess());
+
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
+        .setNode(cluster.getJettySolrRunner(1).getNodeName())
+        .setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1))
+        .process(cluster.getSolrClient()).isSuccess());
 
     String nn1 = cluster.getJettySolrRunner(0).getNodeName();
     String nn2 = cluster.getJettySolrRunner(1).getNodeName();
@@ -328,73 +313,17 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
   }
 
   @Test
-  public void testNoCollectionSpecified() throws Exception {
-
-    // TODO - should we remove this behaviour?
-
-    assertFalse(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
-    assertFalse(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
-    
-    // try and create a SolrCore with no collection name
-    Create createCmd = new Create();
-    createCmd.setCoreName("corewithnocollection");
-    createCmd.setCollection("");
-    String dataDir = createTempDir().toFile().getAbsolutePath();
-    createCmd.setDataDir(dataDir);
-    createCmd.setNumShards(1);
-    createCmd.setCollectionConfigName("conf");
-
-    cluster.getSolrClient().request(createCmd);
-    
-    // try and create a SolrCore with no collection name
-    createCmd.setCollection(null);
-    createCmd.setCoreName("corewithnocollection2");
-
-    cluster.getSolrClient().request(createCmd);
-    
-    // in both cases, the collection should have default to the core name
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("corewithnocollection");
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("corewithnocollection2");
-    assertTrue(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
-    assertTrue(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
-  }
-
-  @Test
   public void testNoConfigSetExist() throws Exception {
 
-    final CloudSolrClient cloudClient = cluster.getSolrClient();
-
-    assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection3"));
-
-    // try and create a SolrCore with no collection name
-    Create createCmd = new Create();
-    createCmd.setCoreName("corewithnocollection3");
-    createCmd.setCollection("");
-    String dataDir = createTempDir().toFile().getAbsolutePath();
-    createCmd.setDataDir(dataDir);
-    createCmd.setNumShards(1);
-    createCmd.setCollectionConfigName("conf123");
-
     expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(createCmd);
+      CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
+          .process(cluster.getSolrClient());
     });
 
     TimeUnit.MILLISECONDS.sleep(1000);
     // in both cases, the collection should have default to the core name
-    cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection3");
-
-    Collection<Slice> slices = cloudClient.getZkStateReader().getClusterState().getActiveSlices("corewithnocollection3");
-    int replicaCount = 0;
-    if (slices != null) {
-      for (Slice slice : slices) {
-        replicaCount += slice.getReplicas().size();
-      }
-    }
-    assertEquals("replicaCount", 0, replicaCount);
-
-    // TODO - WTF? shouldn't this *not* contain the collection?
-    assertTrue(CollectionAdminRequest.listCollections(cloudClient).contains("corewithnocollection3"));
-
+    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
index 861bdcc..df3f8bc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
@@ -269,20 +269,22 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
 
     // sanity check our expected default
     final ClusterProperties props = new ClusterProperties(zkClient());
-    assertEquals("Expecting prop to default to unset, test needs upated",
-                 props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), null);
+    assertEquals("Expecting legacyCloud to false as default",
+                 props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "false");
     
-    CollectionAdminResponse response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+    CollectionAdminResponse response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "true")
       .process(cluster.getSolrClient());
-
     assertEquals(0, response.getStatus());
-
-    assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "false");
+    assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "true");
 
     // Unset ClusterProp that we set.
     CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, null).process(cluster.getSolrClient());
     assertEquals("Cluster property was not unset", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), null);
 
+    response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+        .process(cluster.getSolrClient());
+    assertEquals(0, response.getStatus());
+    assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "false");
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
index b92f1f7..82f5bce 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
@@ -16,54 +16,21 @@
  */
 package org.apache.solr.cloud;
 
-import javax.xml.parsers.ParserConfigurationException;
 import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
-import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrResourceLoader;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.xml.sax.SAXException;
 
 @Slow
-public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private final static int NUM_SHARD_REPLICAS = 5;
-
-  private static final Pattern HOST = Pattern
-      .compile(".*?\\:(\\d\\d\\d\\d)_.*");
-  
-  protected ZkTestServer zkServer;
-  
-  protected String zkDir;
-  
-  private Map<Integer,CoreContainer> containerMap = new HashMap<>();
-  
-  private Map<String,Set<Integer>> shardPorts = new HashMap<>();
-  
-  private SolrZkClient zkClient;
-
-  private ZkStateReader reader;
+public class LeaderElectionIntegrationTest extends SolrCloudTestCase {
+  private final static int NUM_REPLICAS_OF_SHARD1 = 5;
   
   @BeforeClass
   public static void beforeClass() {
@@ -73,171 +40,112 @@ public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
   @Override
   public void setUp() throws Exception {
     super.setUp();
+    configureCluster(6)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
 
-    ignoreException("No UpdateLog found - cannot sync");
-    ignoreException("No UpdateLog found - cannot recover");
-    
-    System.setProperty("zkClientTimeout", "8000");
-    
-    zkDir = createTempDir("zkData").toFile().getAbsolutePath();
-    zkServer = new ZkTestServer(zkDir);
-    zkServer.run();
-    System.setProperty("zkHost", zkServer.getZkAddress());
-    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(),
-        zkServer.getZkAddress(), "solrconfig.xml", "schema.xml");
-    
-    log.info("####SETUP_START " + getTestName());
-    
-    // set some system properties for use by tests
-    System.setProperty("solr.test.sys.prop1", "propone");
-    System.setProperty("solr.test.sys.prop2", "proptwo");
-    
-    for (int i = 7000; i < 7000 + NUM_SHARD_REPLICAS; i++) {
-      try {
-        setupContainer(i, "shard1");
-      } catch (Throwable t) {
-        log.error("!!!Could not start container:" + i + " The exception thrown was: " + t.getClass() + " " + t.getMessage());
-        fail("Could not start container:" + i + ". Reason:" + t.getClass() + " " + t.getMessage());
-      }
-    }
-    try {
-      setupContainer(3333, "shard2");
-    } catch (Throwable t) {
-      log.error("!!!Could not start container 3333. The exception thrown was: " + t.getClass() + " " + t.getMessage());
-      fail("Could not start container: 3333");
-    }
-    
-    zkClient = new SolrZkClient(zkServer.getZkAddress(),
-        AbstractZkTestCase.TIMEOUT);
-        
-    reader = new ZkStateReader(zkClient); 
-    reader.createClusterStateWatchersAndUpdate();
-    boolean initSuccessful = false;
-    for (int i = 0; i < 30; i++) {
-      List<String> liveNodes = zkClient.getChildren("/live_nodes", null, true);
-      if (liveNodes.size() == NUM_SHARD_REPLICAS + 1) {
-        // all nodes up
-        initSuccessful = true;
-        break;
-      }
-      Thread.sleep(1000);
-      log.info("Waiting for more nodes to come up, now: " + liveNodes.size()
-          + "/" + (NUM_SHARD_REPLICAS + 1));
-    }
-    if (!initSuccessful) {
-      fail("Init was not successful!");
+
+  private void createCollection(String collection) throws IOException, SolrServerException {
+    assertEquals(0, CollectionAdminRequest.createCollection(collection,
+        "conf", 2, 1)
+        .setMaxShardsPerNode(1).process(cluster.getSolrClient()).getStatus());
+    for (int i = 1; i < NUM_REPLICAS_OF_SHARD1; i++) {
+      assertTrue(
+          CollectionAdminRequest.addReplicaToShard(collection, "shard1").process(cluster.getSolrClient()).isSuccess()
+      );
     }
-    log.info("####SETUP_END " + getTestName());
   }
-     
-  private void setupContainer(int port, String shard) throws IOException,
-      ParserConfigurationException, SAXException {
-    Path data = createTempDir();
-    
-    System.setProperty("hostPort", Integer.toString(port));
-    System.setProperty("shard", shard);
-    System.setProperty("solr.data.dir", data.toString());
-    System.setProperty("solr.solr.home", TEST_HOME());
-    Set<Integer> ports = shardPorts.get(shard);
-    if (ports == null) {
-      ports = new HashSet<>();
-      shardPorts.put(shard, ports);
-    }
-    ports.add(port);
 
-    SolrResourceLoader loader = new SolrResourceLoader(createTempDir());
-    Files.copy(TEST_PATH().resolve("solr.xml"), loader.getInstancePath().resolve("solr.xml"));
-    CoreContainer container = new CoreContainer(loader);
-    container.load();
-    container.create("collection1_" + shard, ImmutableMap.of("collection", "collection1"));
-    containerMap.put(port, container);
-    System.clearProperty("solr.solr.home");
-    System.clearProperty("hostPort");
-  }
-  
   @Test
   public void testSimpleSliceLeaderElection() throws Exception {
+    String collection = "collection1";
+    createCollection(collection);
 
-    //printLayout(zkServer.getZkAddress());
     for (int i = 0; i < 4; i++) {
       // who is the leader?
-      String leader = getLeader();
-      
-      Set<Integer> shard1Ports = shardPorts.get("shard1");
-      
-      int leaderPort = getLeaderPort(leader);
-      assertTrue(shard1Ports.toString(), shard1Ports.contains(leaderPort));
-      
-      shard1Ports.remove(leaderPort);
-      
-      // kill the leader
-      containerMap.get(leaderPort).shutdown();
-      
-      //printLayout(zkServer.getZkAddress());
-      
+      String leader = getLeader(collection);
+      JettySolrRunner jetty = getRunner(leader);
+      assertNotNull(jetty);
+      assertTrue("shard1".equals(jetty.getCoreContainer().getCores().iterator().next()
+          .getCoreDescriptor().getCloudDescriptor().getShardId()));
+      jetty.stop();
+
       // poll until leader change is visible
       for (int j = 0; j < 90; j++) {
-        String currentLeader = getLeader();
+        String currentLeader = getLeader(collection);
         if(!leader.equals(currentLeader)) {
           break;
         }
         Thread.sleep(500);
       }
-      
-      leader = getLeader();
-      int newLeaderPort = getLeaderPort(leader);
+
+      leader = getLeader(collection);
       int retry = 0;
-      while (leaderPort == newLeaderPort) {
+      while (jetty == getRunner(leader)) {
         if (retry++ == 60) {
           break;
         }
         Thread.sleep(1000);
       }
-      
-      if (leaderPort == newLeaderPort) {
-        zkClient.printLayoutToStdOut();
-        fail("We didn't find a new leader! " + leaderPort + " was close, but it's still showing as the leader");
+
+      if (jetty == getRunner(leader)) {
+        cluster.getZkClient().printLayoutToStdOut();
+        fail("We didn't find a new leader! " + jetty + " was close, but it's still showing as the leader");
       }
-      
-      assertTrue("Could not find leader " + newLeaderPort + " in " + shard1Ports, shard1Ports.contains(newLeaderPort));
+
+      assertTrue("shard1".equals(getRunner(leader).getCoreContainer().getCores().iterator().next()
+          .getCoreDescriptor().getCloudDescriptor().getShardId()));
     }
-    
 
-  }
-  
-  @Test
-  public void testLeaderElectionAfterClientTimeout() throws Exception {
+    cluster.getJettySolrRunners().parallelStream().forEach(jetty -> {
+      if (jetty.isStopped())
+        try {
+          jetty.start();
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+    });
+    waitForState("Expected to see nodes come back " + collection, collection,
+        (n, c) -> {
+          return n.size() == 6;
+        });
+    CollectionAdminRequest.deleteCollection(collection).process(cluster.getSolrClient());
+
+    // testLeaderElectionAfterClientTimeout
+    collection = "collection2";
+    createCollection(collection);
+
     // TODO: work out the best timing here...
     System.setProperty("zkClientTimeout", Integer.toString(ZkTestServer.TICK_TIME * 2 + 100));
     // timeout the leader
-    String leader = getLeader();
-    int leaderPort = getLeaderPort(leader);
-    ZkController zkController = containerMap.get(leaderPort).getZkController();
+    String leader = getLeader(collection);
+    JettySolrRunner jetty = getRunner(leader);
+    ZkController zkController = jetty.getCoreContainer().getZkController();
 
     zkController.getZkClient().getSolrZooKeeper().closeCnxn();
-    long sessionId = zkClient.getSolrZooKeeper().getSessionId();
-    zkServer.expire(sessionId);
-    
+    cluster.getZkServer().expire(zkController.getZkClient().getSolrZooKeeper().getSessionId());
+
     for (int i = 0; i < 60; i++) { // wait till leader is changed
-      if (leaderPort != getLeaderPort(getLeader())) {
+      if (jetty != getRunner(getLeader(collection))) {
         break;
       }
       Thread.sleep(100);
     }
-    
+
     // make sure we have waited long enough for the first leader to have come back
     Thread.sleep(ZkTestServer.TICK_TIME * 2 + 100);
-    
+
     // kill everyone but the first leader that should have reconnected by now
-    for (Map.Entry<Integer,CoreContainer> entry : containerMap.entrySet()) {
-      if (entry.getKey() != leaderPort) {
-        entry.getValue().shutdown();
+    for (JettySolrRunner jetty2 : cluster.getJettySolrRunners()) {
+      if (jetty != jetty2) {
+        jetty2.stop();
       }
     }
 
     for (int i = 0; i < 320; i++) { // wait till leader is changed
       try {
-        if (leaderPort == getLeaderPort(getLeader())) {
+        if (jetty == getRunner(getLeader(collection))) {
           break;
         }
         Thread.sleep(100);
@@ -246,73 +154,26 @@ public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
       }
     }
 
-    // the original leader should be leader again now - everyone else is down
-    // TODO: I saw this fail once...expected:<7000> but was:<7004>
-    assertEquals(leaderPort, getLeaderPort(getLeader()));
-    //printLayout(zkServer.getZkAddress());
-    //Thread.sleep(100000);
-  }
-  
-  private String getLeader() throws InterruptedException {
-    
-    ZkNodeProps props = reader.getLeaderRetry("collection1", "shard1", 30000);
-    String leader = props.getStr(ZkStateReader.NODE_NAME_PROP);
-    
-    return leader;
+    assertEquals(jetty, getRunner(getLeader(collection)));
   }
-  
-  private int getLeaderPort(String leader) {
-    Matcher m = HOST.matcher(leader);
-    int leaderPort = 0;
-    if (m.matches()) {
-      leaderPort = Integer.parseInt(m.group(1));
-    } else {
-      throw new IllegalStateException();
+
+  private JettySolrRunner getRunner(String nodeName) {
+    for (JettySolrRunner jettySolrRunner : cluster.getJettySolrRunners()){
+      if (!jettySolrRunner.isStopped() && nodeName.equals(jettySolrRunner.getNodeName())) return jettySolrRunner;
     }
-    return leaderPort;
+    return null;
   }
-  
-  @Override
-  public void tearDown() throws Exception {
 
-    if (zkClient != null) {
-      zkClient.close();
-    }
+  private String getLeader(String collection) throws InterruptedException {
     
-    if (reader != null) {
-      reader.close();
-    }
+    ZkNodeProps props = cluster.getSolrClient().getZkStateReader().getLeaderRetry(collection, "shard1", 30000);
+    String leader = props.getStr(ZkStateReader.NODE_NAME_PROP);
     
-    for (CoreContainer cc : containerMap.values()) {
-      if (!cc.isShutDown()) {
-        cc.shutdown();
-      }
-    }
-    zkServer.shutdown();
-    super.tearDown();
-    System.clearProperty("zkClientTimeout");
-    System.clearProperty("zkHost");
-    System.clearProperty("hostPort");
-    System.clearProperty("shard");
-    System.clearProperty("solrcloud.update.delay");
-  }
-  
-  private void printLayout(String zkHost) throws Exception {
-    SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
-    zkClient.printLayoutToStdOut();
-    zkClient.close();
+    return leader;
   }
-  
+
   @AfterClass
   public static void afterClass() throws InterruptedException {
     System.clearProperty("solrcloud.skip.autorecovery");
-    System.clearProperty("zkClientTimeout");
-    System.clearProperty("zkHost");
-    System.clearProperty("shard");
-    System.clearProperty("solr.data.dir");
-    System.clearProperty("solr.solr.home");
-    resetExceptionIgnores();
-    // wait just a bit for any zk client threads to outlast timeout
-    Thread.sleep(2000);
   }
 }
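
Editor's note (illustrative sketch, not part of the commit): the reworked test above identifies the shard1 leader by node name and maps it back to a running Jetty instance, instead of tracking ports. A minimal standalone version of that lookup, assuming a SolrCloudTestCase-style `cluster` field and the same imports the test already uses (JettySolrRunner, ZkNodeProps, ZkStateReader), might look like this:

    // Sketch only: resolve the shard1 leader's node name from ZK state and
    // map it back to the JettySolrRunner hosting it. Mirrors the getLeader()
    // and getRunner() helpers added in LeaderElectionIntegrationTest.
    private JettySolrRunner findShard1Leader(String collection) throws InterruptedException {
      ZkNodeProps leaderProps = cluster.getSolrClient().getZkStateReader()
          .getLeaderRetry(collection, "shard1", 30000);
      String leaderNodeName = leaderProps.getStr(ZkStateReader.NODE_NAME_PROP);
      for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
        // skip stopped runners; a stopped node may still appear in stale state
        if (!runner.isStopped() && leaderNodeName.equals(runner.getNodeName())) {
          return runner;
        }
      }
      return null; // no live runner currently holds the leader registration
    }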

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java
index 77dd6b6..8136d3e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderFailureAfterFreshStartTest.java
@@ -146,8 +146,8 @@ public class LeaderFailureAfterFreshStartTest extends AbstractFullDistribZkTestB
 
       // start the freshNode 
       restartNodes(singletonList(freshNode));
-      
-      String replicationProperties = (String) freshNode.jetty.getSolrHome() + "/cores/" +  DEFAULT_TEST_COLLECTION_NAME + "/data/replication.properties";
+      String coreName = freshNode.jetty.getCoreContainer().getCores().iterator().next().getName();
+      String replicationProperties = freshNode.jetty.getSolrHome() + "/cores/" +  coreName + "/data/replication.properties";
       String md5 = DigestUtils.md5Hex(Files.readAllBytes(Paths.get(replicationProperties)));
         
       // shutdown the original leader
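
Editor's note (hedged sketch, not part of the commit): the change above derives the replication.properties path from the actual core name rather than the collection name. Assuming the node hosts a single core, as in this test, and the usual java.nio and commons-codec imports, the resulting construction is roughly:

    // Sketch: build the data dir path from the first loaded core's name,
    // then hash replication.properties as the test does.
    String coreName = freshNode.jetty.getCoreContainer().getCores().iterator().next().getName();
    Path replicationProperties =
        Paths.get(freshNode.jetty.getSolrHome(), "cores", coreName, "data", "replication.properties");
    String md5 = DigestUtils.md5Hex(Files.readAllBytes(replicationProperties));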

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8e9d685a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 91da2c1..7f831df 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -27,7 +27,10 @@ import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.cloud.Overseer.LeaderStatus;
 import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -70,7 +73,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
   private static ClusterState clusterStateMock;
   private static SolrZkClient solrZkClientMock;
   private final Map zkMap = new HashMap();
-  private final Set collectionsSet = new HashSet();
+  private final Map<String, ClusterState.CollectionRef> collectionsSet = new HashMap<>();
+  private final List<ZkNodeProps> replicas = new ArrayList<>();
   private SolrResponse lastProcessMessageResult;
 
 
@@ -141,6 +145,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
 
     zkMap.clear();
     collectionsSet.clear();
+    replicas.clear();
   }
   
   @After
@@ -193,7 +198,27 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
     when(zkStateReaderMock.getZkClient()).thenReturn(solrZkClientMock);
     when(zkStateReaderMock.getClusterState()).thenReturn(clusterStateMock);
 
-    when(clusterStateMock.getCollections()).thenReturn(collectionsSet);
+    when(clusterStateMock.getCollection(anyString())).thenAnswer(invocation -> {
+      String key = invocation.getArgument(0);
+      if (!collectionsSet.containsKey(key)) return null;
+      DocCollection docCollection = collectionsSet.get(key).get();
+      Map<String, Map<String, Replica>> slices = new HashMap<>();
+      for (ZkNodeProps replica : replicas) {
+        if (!key.equals(replica.getStr(ZkStateReader.COLLECTION_PROP))) continue;
+
+        String slice = replica.getStr(ZkStateReader.SHARD_ID_PROP);
+        if (!slices.containsKey(slice)) slices.put(slice, new HashMap<>());
+        String replicaName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+        slices.get(slice).put(replicaName, new Replica(replicaName, replica.getProperties()));
+      }
+
+      Map<String, Slice> slicesMap = new HashMap<>();
+      for (Map.Entry<String, Map<String, Replica>> entry : slices.entrySet()) {
+        slicesMap.put(entry.getKey(), new Slice(entry.getKey(), entry.getValue(), null));
+      }
+
+      return docCollection.copyWithSlices(slicesMap);
+    });
     final Set<String> liveNodes = new HashSet<>();
     for (int i = 0; i < liveNodesCount; i++) {
       final String address = "localhost:" + (8963 + i) + "_solr";
@@ -202,13 +227,13 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
       when(zkStateReaderMock.getBaseUrlForNodeName(address)).thenAnswer(invocation -> address.replaceAll("_", "/"));
     }
 
-    when(zkStateReaderMock.getClusterProperty("legacyCloud", "true")).thenReturn("true");
+    when(zkStateReaderMock.getClusterProperty("legacyCloud", "false")).thenReturn("false");
 
     when(solrZkClientMock.getZkClientTimeout()).thenReturn(30000);
     
     when(clusterStateMock.hasCollection(anyString())).thenAnswer(invocation -> {
       String key = invocation.getArgument(0);
-      return collectionsSet.contains(key);
+      return collectionsSet.containsKey(key);
     });
 
     when(clusterStateMock.getLiveNodes()).thenReturn(liveNodes);
@@ -234,7 +259,11 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
       ZkNodeProps props = ZkNodeProps.load(bytes);
       if(CollectionParams.CollectionAction.CREATE.isEqual(props.getStr("operation"))){
         String collName = props.getStr("name") ;
-        if(collName != null) collectionsSet.add(collName);
+        if(collName != null) collectionsSet.put(collName, new ClusterState.CollectionRef(
+            new DocCollection(collName, new HashMap<>(), props.getProperties(), DocRouter.DEFAULT)));
+      }
+      if (CollectionParams.CollectionAction.ADDREPLICA.isEqual(props.getStr("operation"))) {
+        replicas.add(props);
       }
     } catch (Exception e) { }
   }
@@ -297,7 +326,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
     ArgumentCaptor<ModifiableSolrParams> paramsCaptor = ArgumentCaptor.forClass(ModifiableSolrParams.class);
     verify(shardHandlerMock, times(numberOfReplica * numberOfSlices))
         .submit(shardRequestCaptor.capture(), nodeUrlsWithoutProtocolPartCaptor.capture(), paramsCaptor.capture());
-    log.info("Datcmzz " + shardRequestCaptor.getAllValues().size());
+
     for (int i = 0; i < shardRequestCaptor.getAllValues().size(); i++) {
       ShardRequest shardRequest = shardRequestCaptor.getAllValues().get(i);
       String nodeUrlsWithoutProtocolPartCapture = nodeUrlsWithoutProtocolPartCaptor.getAllValues().get(i);