Posted to commits@lucene.apache.org by sa...@apache.org on 2016/11/02 23:59:07 UTC

[10/50] [abbrv] lucene-solr:apiv2: SOLR-9132: Cut over some collections API and recovery tests
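
For orientation, the cutover moves these tests from AbstractFullDistribZkTestBase onto SolrCloudTestCase with the fluent CollectionAdminRequest API. A minimal sketch of that pattern, distilled from the diff below (the class name ExampleCollectionsApiTest and the collection name are illustrative only, not part of this commit):

    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.cloud.SolrCloudTestCase;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class ExampleCollectionsApiTest extends SolrCloudTestCase {

      @BeforeClass
      public static void setupCluster() throws Exception {
        // start a 4-node MiniSolrCloudCluster and upload a configset under the name "conf"
        configureCluster(4)
            .addConfig("conf", configset("cloud-minimal"))
            .configure();
      }

      @Test
      public void testCreateAndDelete() throws Exception {
        // create, verify, and delete a collection through the Collections API
        CollectionAdminRequest.createCollection("example_collection", "conf", 1, 1)
            .process(cluster.getSolrClient());
        assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
            .contains("example_collection"));
        CollectionAdminRequest.deleteCollection("example_collection")
            .process(cluster.getSolrClient());
      }
    }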

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index 826a8e5..c1ad2bd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -19,9 +19,7 @@ package org.apache.solr.cloud;
 import javax.management.MBeanServer;
 import javax.management.MBeanServerFactory;
 import javax.management.ObjectName;
-import java.io.File;
 import java.io.IOException;
-import java.lang.invoke.MethodHandles;
 import java.lang.management.ManagementFactory;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -31,16 +29,14 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Objects;
-import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.client.solrj.SolrClient;
@@ -54,453 +50,224 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
 import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams.CollectionAction;
 import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.SolrInfoMBean.Category;
 import org.apache.solr.util.TestInjection;
 import org.apache.solr.util.TimeOut;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
 import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.util.Utils.makeMap;
 
 /**
  * Tests the Cloud Collections API.
  */
 @Slow
-public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase {
+public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
 
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final String DEFAULT_COLLECTION = "collection1";
-
-  // we randomly use a second config set rather than just one
-  private boolean secondConfigSet = random().nextBoolean();
-  
   @BeforeClass
   public static void beforeCollectionsAPIDistributedZkTest() {
     TestInjection.randomDelayInCoreCreation = "true:20";
     System.setProperty("validateAfterInactivity", "200");
   }
-  
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    
-    if (secondConfigSet ) {
-      String zkHost = zkServer.getZkHost();
-      String zkAddress = zkServer.getZkAddress();
-      SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
-      zkClient.makePath("/solr", false, true);
-      zkClient.close();
-
-      zkClient = new SolrZkClient(zkAddress, AbstractZkTestCase.TIMEOUT);
-
-      File solrhome = new File(TEST_HOME());
-      
-      // for now, always upload the config and schema to the canonical names
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, getCloudSolrConfig(), "solrconfig.xml");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "schema.xml", "schema.xml");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "enumsConfig.xml", "enumsConfig.xml");
-      
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "solrconfig.snippet.randomindexconfig.xml");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "stopwords.txt");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "protwords.txt");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "currency.xml");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "open-exchange-rates.json");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "mapping-ISOLatin1Accent.txt");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "old_synonyms.txt");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "synonyms.txt");
-      AbstractZkTestCase.putConfig("conf2", zkClient, solrhome, "elevate.xml");
-      zkClient.close();
-    }
-  }
-  
-  protected String getSolrXml() {
-    return "solr.xml";
-  }
 
-  
-  public CollectionsAPIDistributedZkTest() {
-    sliceCount = 2;
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(4)
+        .addConfig("conf", configset("cloud-minimal"))
+        .addConfig("conf2", configset("cloud-minimal-jmx"))
+        .configure();
   }
-  
-  @Override
-  protected void setDistributedParams(ModifiableSolrParams params) {
 
-    if (r.nextBoolean()) {
-      // don't set shards, let that be figured out from the cloud state
-    } else {
-      // use shard ids rather than physical locations
-      StringBuilder sb = new StringBuilder();
-      for (int i = 0; i < getShardCount(); i++) {
-        if (i > 0)
-          sb.append(',');
-        sb.append("shard" + (i + 3));
-      }
-      params.set("shards", sb.toString());
-    }
+  @Before
+  public void clearCluster() throws Exception {
+    cluster.deleteAllCollections();
   }
 
   @Test
-  @ShardsFixed(num = 4)
-  public void test() throws Exception {
-    waitForRecoveriesToFinish(false); // we need to fix no core tests still
-    testNodesUsedByCreate();
-    testNoConfigSetExist();
-    testCollectionsAPI();
-    testCollectionsAPIAddRemoveStress();
-    testErrorHandling();
-    testNoCollectionSpecified();
-    deletePartiallyCreatedCollection();
-    deleteCollectionRemovesStaleZkCollectionsNode();
-    clusterPropTest();
-    // last
-    deleteCollectionWithDownNodes();
-    addReplicaTest();
+  public void testCreationAndDeletion() throws Exception {
+
+    String collectionName = "created_and_deleted";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+        .contains(collectionName));
+
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+
+
   }
 
-  private void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
-    
-    // we can use this client because we just want base url
-    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
+  @Test
+  public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
     
     String collectionName = "out_of_sync_collection";
+
+    // manually create a collections zknode
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
     
-    List<Integer> numShardsNumReplicaList = new ArrayList<>();
-    numShardsNumReplicaList.add(2);
-    numShardsNumReplicaList.add(1);
-    
-    
-    cloudClient.getZkStateReader().getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-    
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
-    params.set("name", collectionName);
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    
-    // there are remnants of the collection in zk, should work
-    makeRequest(baseUrl, request);
-    
-    assertCollectionNotExists(collectionName, 45);
-    
-    assertFalse(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
 
   }
 
-  private void deletePartiallyCreatedCollection() throws Exception {
-    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-    String collectionName = "halfdeletedcollection";
+  @Test
+  public void deletePartiallyCreatedCollection() throws Exception {
+
+    final String collectionName = "halfdeletedcollection";
+
+    // create a core that simulates something left over from a partially-deleted collection
     Create createCmd = new Create();
     createCmd.setCoreName("halfdeletedcollection_shard1_replica1");
     createCmd.setCollection(collectionName);
+    createCmd.setCollectionConfigName("conf");
     String dataDir = createTempDir().toFile().getAbsolutePath();
     createCmd.setDataDir(dataDir);
     createCmd.setNumShards(2);
-    if (secondConfigSet) {
-      createCmd.setCollectionConfigName("conf1");
-    }
 
-    makeRequest(baseUrl, createCmd);
+    createCmd.process(cluster.getSolrClient());
 
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
-    params.set("name", collectionName);
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
 
-    makeRequest(baseUrl, request);
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
 
-    assertCollectionNotExists(collectionName, 45);
-    
-    // now creating that collection should work
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", collectionName);
-    params.set("numShards", 2);
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    makeRequest(baseUrl, request);
   }
-  
-  private void deleteCollectionOnlyInZk() throws Exception {
-    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-    String collectionName = "onlyinzk";
 
-    cloudClient.getZkStateReader().getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+  @Test
+  public void deleteCollectionOnlyInZk() throws Exception {
 
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
-    params.set("name", collectionName);
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
+    final String collectionName = "onlyinzk";
 
-    makeRequest(baseUrl, request);
+    // create the collections node, but nothing else
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
 
-    assertCollectionNotExists(collectionName, 45);
+    // delete via API - should remove collections node
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
     
     // now creating that collection should work
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", collectionName);
-    params.set("numShards", 2);
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    makeRequest(baseUrl, request);
-    
-    waitForRecoveriesToFinish(collectionName, false);
-    
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
-    params.set("name", collectionName);
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
 
-    makeRequest(baseUrl, request);
   }
-  
-  private void deleteCollectionWithUnloadedCore() throws Exception {
-    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-    
-    String collectionName = "corealreadyunloaded";
-    try (SolrClient client = createNewSolrClient("", baseUrl)) {
-      createCollection(null, collectionName,  2, 1, 2, client, null, "conf1");
-    }
-    waitForRecoveriesToFinish(collectionName, false);
 
+  @Test
+  public void testBadActionNames() throws Exception {
+
+    // try a bad action
     ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
+    params.set("action", "BADACTION");
+    String collectionName = "badactioncollection";
     params.set("name", collectionName);
-    QueryRequest request = new QueryRequest(params);
+    params.set("numShards", 2);
+    final QueryRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
 
-    NamedList<Object> result = makeRequest(baseUrl, request);
-    System.out.println("result:" + result);
-    Object failure = result.get("failure");
-    assertNull("We expect no failures", failure);
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
 
-    assertCollectionNotExists(collectionName, 45);
-    
-    // now creating that collection should work
-    params = new ModifiableSolrParams();
+  }
+
+  @Test
+  public void testMissingRequiredParameters() {
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", collectionName);
     params.set("numShards", 2);
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    makeRequest(baseUrl, request);
-    
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
-    params.set("name", collectionName);
-    request = new QueryRequest(params);
+    // missing required collection parameter
+    final SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
 
-    makeRequest(baseUrl, request);
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
   }
-  
-  
-  private void deleteCollectionWithDownNodes() throws Exception {
-    String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-    // now try to remove a collection when a couple of its nodes are down
-    if (secondConfigSet) {
-      try (SolrClient client = createNewSolrClient("", baseUrl)) {
-        createCollection(null, "halfdeletedcollection2", 3, 3, 6, client, null, "conf2");
-      }
-    } else {
-      try (SolrClient client = createNewSolrClient("", baseUrl)) {
-        createCollection(null, "halfdeletedcollection2", 3, 3, 6, client, null);
-      }
-    }
-    
-    waitForRecoveriesToFinish("halfdeletedcollection2", false);
-    
-    // stop a couple nodes
-    ChaosMonkey.stop(jettys.get(0));
-    ChaosMonkey.stop(jettys.get(1));
-    
-    // wait for leaders to settle out
-    for (int i = 1; i < 4; i++) {
-      cloudClient.getZkStateReader().getLeaderRetry("halfdeletedcollection2", "shard" + i, 30000);
-    }
-    
-    baseUrl = getBaseUrl((HttpSolrClient) clients.get(2));
-    
-    // remove a collection
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
-    params.set("name", "halfdeletedcollection2");
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    
-    makeRequest(baseUrl, request);
 
-    TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
-    while (cloudClient.getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2")) {
-      if (timeout.hasTimedOut()) {
-        throw new AssertionError("Timeout waiting to see removed collection leave clusterstate");
-      }
-      
-      Thread.sleep(200);
-    }
+  @Test
+  public void testTooManyReplicas() {
 
-    assertFalse("Still found collection that should be gone", cloudClient.getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2"));
+    CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
 
-  }
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(req);
+    });
 
-  private NamedList<Object> makeRequest(String baseUrl, SolrRequest request, int socketTimeout)
-      throws SolrServerException, IOException {
-    try (SolrClient client = createNewSolrClient("", baseUrl)) {
-      ((HttpSolrClient) client).setSoTimeout(socketTimeout);
-      return client.request(request);
-    }
   }
 
-  private NamedList<Object> makeRequest(String baseUrl, SolrRequest request)
-      throws SolrServerException, IOException {
-    try (SolrClient client = createNewSolrClient("", baseUrl)) {
-      ((HttpSolrClient) client).setSoTimeout(30000);
-      return client.request(request);
-    }
-  }
+  @Test
+  public void testMissingNumShards() {
 
-  private void testErrorHandling() throws Exception {
-    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-    
-    // try a bad action
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", "BADACTION");
-    String collectionName = "badactioncollection";
-    params.set("name", collectionName);
-    params.set("numShards", 2);
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    boolean gotExp = false;
-    try {
-      makeRequest(baseUrl, request);
-    } catch (SolrException e) {
-      gotExp = true;
-    }
-    assertTrue(gotExp);
-    
-    
-    // leave out required param name
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("numShards", 2);
-    collectionName = "collection";
-    // No Name
-    // params.set("name", collectionName);
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    gotExp = false;
-    try {
-      makeRequest(baseUrl, request);
-    } catch (SolrException e) {
-      gotExp = true;
-    }
-    assertTrue(gotExp);
-    
-    // Too many replicas
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    collectionName = "collection";
-    params.set("name", collectionName);
-    params.set("numShards", 2);
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    params.set(REPLICATION_FACTOR, 10);
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    gotExp = false;
-    try {
-      makeRequest(baseUrl, request);
-    } catch (SolrException e) {
-      gotExp = true;
-    }
-    assertTrue(gotExp);
-    
     // No numShards should fail
-    params = new ModifiableSolrParams();
+    ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("action", CollectionAction.CREATE.toString());
-    collectionName = "acollection";
-    params.set("name", collectionName);
+    params.set("name", "acollection");
     params.set(REPLICATION_FACTOR, 10);
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    request = new QueryRequest(params);
+    params.set("collection.configName", "conf");
+
+    final SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
-    gotExp = false;
-    try {
-      makeRequest(baseUrl, request);
-    } catch (SolrException e) {
-      gotExp = true;
-    }
-    assertTrue(gotExp);
-    
-    // 0 numShards should fail
-    params = new ModifiableSolrParams();
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testZeroNumShards() {
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("action", CollectionAction.CREATE.toString());
-    collectionName = "acollection";
-    params.set("name", collectionName);
+    params.set("name", "acollection");
     params.set(REPLICATION_FACTOR, 10);
     params.set("numShards", 0);
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    request = new QueryRequest(params);
+    params.set("collection.configName", "conf");
+
+    final SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
-    gotExp = false;
-    try {
-      makeRequest(baseUrl, request);
-    } catch (SolrException e) {
-      gotExp = true;
-    }
-    assertTrue(gotExp);
-    
-    // Fail on one node
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testCreateShouldFailOnExistingCore() throws Exception {
     
     // first we make a core with the core name the collections api
     // will try and use - this will cause our mock fail
@@ -510,43 +277,33 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     String dataDir = createTempDir().toFile().getAbsolutePath();
     createCmd.setDataDir(dataDir);
     createCmd.setNumShards(1);
-    if (secondConfigSet) {
-      createCmd.setCollectionConfigName("conf1");
+    createCmd.setCollectionConfigName("conf");
+
+    try (SolrClient client = cluster.getJettySolrRunner(0).newClient()) {
+      client.request(createCmd);
     }
-    makeRequest(baseUrl, createCmd);
-    
+
     createCmd = new Create();
     createCmd.setCoreName("halfcollection_shard1_replica1");
     createCmd.setCollection("halfcollectionblocker2");
     dataDir = createTempDir().toFile().getAbsolutePath();
     createCmd.setDataDir(dataDir);
     createCmd.setNumShards(1);
-    if (secondConfigSet) {
-      createCmd.setCollectionConfigName("conf1");
-    }
-    makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
-    
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    collectionName = "halfcollection";
-    params.set("name", collectionName);
-    params.set("numShards", 2);
-    params.set("wt", "xml");
-    
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
+    createCmd.setCollectionConfigName("conf");
+
+    try (SolrClient client = cluster.getJettySolrRunner(1).newClient()) {
+      client.request(createCmd);
     }
+
+    String nn1 = cluster.getJettySolrRunner(0).getNodeName();
+    String nn2 = cluster.getJettySolrRunner(1).getNodeName();
+
+    CollectionAdminResponse resp = CollectionAdminRequest.createCollection("halfcollection", "conf", 2, 1)
+        .setCreateNodeSet(nn1 + "," + nn2)
+        .process(cluster.getSolrClient());
     
-    String nn1 = jettys.get(0).getCoreContainer().getZkController().getNodeName();
-    String nn2 =  jettys.get(1).getCoreContainer().getZkController().getNodeName();
-    
-    params.set(OverseerCollectionMessageHandler.CREATE_NODE_SET, nn1 + "," + nn2);
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    NamedList<Object> resp = makeRequest(baseUrl, request, 60000);
-    
-    SimpleOrderedMap success = (SimpleOrderedMap) resp.get("success");
-    SimpleOrderedMap failure = (SimpleOrderedMap) resp.get("failure");
+    SimpleOrderedMap success = (SimpleOrderedMap) resp.getResponse().get("success");
+    SimpleOrderedMap failure = (SimpleOrderedMap) resp.getResponse().get("failure");
 
     assertNotNull(resp.toString(), success);
     assertNotNull(resp.toString(), failure);
@@ -555,10 +312,14 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     String val2 = failure.getVal(0).toString();
     assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
   }
-  
-  private void testNoCollectionSpecified() throws Exception {
-    assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
-    assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
+
+  @Test
+  public void testNoCollectionSpecified() throws Exception {
+
+    // TODO - should we remove this behaviour?
+
+    assertFalse(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
+    assertFalse(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
     
     // try and create a SolrCore with no collection name
     Create createCmd = new Create();
@@ -567,26 +328,28 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     String dataDir = createTempDir().toFile().getAbsolutePath();
     createCmd.setDataDir(dataDir);
     createCmd.setNumShards(1);
-    if (secondConfigSet) {
-      createCmd.setCollectionConfigName("conf1");
-    }
+    createCmd.setCollectionConfigName("conf");
 
-    makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
+    cluster.getSolrClient().request(createCmd);
     
     // try and create a SolrCore with no collection name
     createCmd.setCollection(null);
     createCmd.setCoreName("corewithnocollection2");
 
-    makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
+    cluster.getSolrClient().request(createCmd);
     
     // in both cases, the collection should have default to the core name
-    cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection");
-    cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection2");
-    assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
-    assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
+    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("corewithnocollection");
+    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("corewithnocollection2");
+    assertTrue(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
+    assertTrue(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
   }
 
-  private void testNoConfigSetExist() throws Exception {
+  @Test
+  public void testNoConfigSetExist() throws Exception {
+
+    final CloudSolrClient cloudClient = cluster.getSolrClient();
+
     assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection3"));
 
     // try and create a SolrCore with no collection name
@@ -597,14 +360,11 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     createCmd.setDataDir(dataDir);
     createCmd.setNumShards(1);
     createCmd.setCollectionConfigName("conf123");
-    boolean gotExp = false;
-    try {
-      makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);
-    } catch (SolrException e) {
-      gotExp = true;
-    }
 
-    assertTrue(gotExp);
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(createCmd);
+    });
+
     TimeUnit.MILLISECONDS.sleep(200);
     // in both cases, the collection should have default to the core name
     cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection3");
@@ -618,401 +378,162 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     }
     assertEquals("replicaCount", 0, replicaCount);
 
-    CollectionAdminRequest.List list = new CollectionAdminRequest.List();
-    CollectionAdminResponse res = new CollectionAdminResponse();
-        res.setResponse(makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), list));
-    List<String> collections = (List<String>) res.getResponse().get("collections");
-    assertTrue(collections.contains("corewithnocollection3"));
+    // TODO - WTF? shouldn't this *not* contain the collection?
+    assertTrue(CollectionAdminRequest.listCollections(cloudClient).contains("corewithnocollection3"));
+
   }
 
-  private void testNodesUsedByCreate() throws Exception {
-    // we can use this client because we just want base url
-    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-    
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
+  @Test
+  public void testCoresAreDistributedAcrossNodes() throws Exception {
 
-    params.set("numShards", 2);
-    params.set(REPLICATION_FACTOR, 2);
-    String collectionName = "nodes_used_collection";
+    CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
+        .process(cluster.getSolrClient());
 
-    params.set("name", collectionName);
-    
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    makeRequest(baseUrl, request);
-    
-    List<Integer> numShardsNumReplicaList = new ArrayList<>();
-    numShardsNumReplicaList.add(2);
-    numShardsNumReplicaList.add(2);
-    checkForCollection("nodes_used_collection", numShardsNumReplicaList , null);
+    Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
 
     List<String> createNodeList = new ArrayList<>();
+    createNodeList.addAll(liveNodes);
 
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState()
-        .getLiveNodes();
-    
-    for (String node : liveNodes) {
-      createNodeList.add(node);
-    }
-
-    DocCollection col = cloudClient.getZkStateReader().getClusterState().getCollection("nodes_used_collection");
-    Collection<Slice> slices = col.getSlices();
-    for (Slice slice : slices) {
-      Collection<Replica> replicas = slice.getReplicas();
-      for (Replica replica : replicas) {
+    DocCollection collection = getCollectionState("nodes_used_collection");
+    for (Slice slice : collection.getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
         createNodeList.remove(replica.getNodeName());
       }
     }
-    assertEquals(createNodeList.toString(), 1, createNodeList.size());
+
+    assertEquals(createNodeList.toString(), 0, createNodeList.size());
+
+  }
+
+  @Test
+  public void testDeleteNonExistentCollection() throws Exception {
+
+    SolrException e = expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
+    });
+
+    // create another collection should still work
+    CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
+        .process(cluster.getSolrClient());
+    waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
+        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+  }
+
+  @Test
+  public void testSpecificConfigsets() throws Exception {
+    CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
+    byte[] data = zkClient().getData(ZkStateReader.COLLECTIONS_ZKNODE + "/" + "withconfigset2", null, null, true);
+    assertNotNull(data);
+    ZkNodeProps props = ZkNodeProps.load(data);
+    String configName = props.getStr(ZkController.CONFIGNAME_PROP);
+    assertEquals("conf2", configName);
+  }
+
+  @Test
+  public void testMaxNodesPerShard() throws Exception {
+
+    // test maxShardsPerNode
+    int numLiveNodes = cluster.getJettySolrRunners().size();
+    int numShards = (numLiveNodes/2) + 1;
+    int replicationFactor = 2;
+    int maxShardsPerNode = 1;
+
+    SolrException e = expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
+          .process(cluster.getSolrClient());
+    });
 
   }
 
-  private void testCollectionsAPI() throws Exception {
+  @Test
+  public void testCreateNodeSet() throws Exception {
+
+    JettySolrRunner jetty1 = cluster.getRandomJetty(random());
+    JettySolrRunner jetty2 = cluster.getRandomJetty(random());
 
-    boolean disableLegacy = random().nextBoolean();
-    CloudSolrClient client1 = null;
+    List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
 
-    if (disableLegacy) {
-      log.info("legacyCloud=false");
-      client1 = createCloudClient(null);
-      setClusterProp(client1, ZkStateReader.LEGACY_CLOUD, "false");
+    CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
+        .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState("nodeset_collection");
+    for (Replica replica : collectionState.getReplicas()) {
+      String replicaUrl = replica.getCoreUrl();
+      boolean matchingJetty = false;
+      for (String jettyUrl : baseUrls) {
+        if (replicaUrl.startsWith(jettyUrl))
+          matchingJetty = true;
+      }
+      if (matchingJetty == false)
+        fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
     }
 
-    // TODO: fragile - because we dont pass collection.confName, it will only
-    // find a default if a conf set with a name matching the collection name is found, or 
-    // if there is only one conf set. That and the fact that other tests run first in this
-    // env make this pretty fragile
-    
+  }
+
+  @Test
+  public void testCollectionsAPI() throws Exception {
+
     // create new collections rapid fire
-    Map<String,List<Integer>> collectionInfos = new HashMap<>();
     int cnt = random().nextInt(TEST_NIGHTLY ? 6 : 1) + 1;
-    
+    CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
+
     for (int i = 0; i < cnt; i++) {
-      int numShards = TestUtil.nextInt(random(), 0, getShardCount()) + 1;
-      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
-      int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
-          .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
 
-      
-      CloudSolrClient client = null;
-      try {
-        if (i == 0) {
-          // Test if we can create a collection through CloudSolrServer where
-          // you havnt set default-collection
-          // This is nice because you want to be able to create you first
-          // collection using CloudSolrServer, and in such case there is
-          // nothing reasonable to set as default-collection
-          client = createCloudClient(null);
-        } else if (i == 1) {
-          // Test if we can create a collection through CloudSolrServer where
-          // you have set default-collection to a non-existing collection
-          // This is nice because you want to be able to create you first
-          // collection using CloudSolrServer, and in such case there is
-          // nothing reasonable to set as default-collection, but you might want
-          // to use the same CloudSolrServer throughout the entire
-          // lifetime of your client-application, so it is nice to be able to
-          // set a default-collection on this CloudSolrServer once and for all
-          // and use this CloudSolrServer to create the collection
-          client = createCloudClient("awholynewcollection_" + i);
-        }
-        if (secondConfigSet) {
-          createCollection(collectionInfos, "awholynewcollection_" + i,
-              numShards, replicationFactor, maxShardsPerNode, client, null, "conf2");
-        } else {
-          createCollection(collectionInfos, "awholynewcollection_" + i,
-              numShards, replicationFactor, maxShardsPerNode, client, null);
-        }
-      } finally {
-        if (client != null) client.close();
-      }
-    }
-    
-    Set<Entry<String,List<Integer>>> collectionInfosEntrySet = collectionInfos.entrySet();
-    for (Entry<String,List<Integer>> entry : collectionInfosEntrySet) {
-      String collection = entry.getKey();
-      List<Integer> list = entry.getValue();
-      checkForCollection(collection, list, null);
-      
-      String url = getUrlFromZk(collection);
+      int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
+      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
+      int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
 
-      try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
-        // poll for a second - it can take a moment before we are ready to serve
-        waitForNon403or404or503(collectionClient);
-      }
-    }
-    
-    // sometimes we restart one of the jetty nodes
-    if (random().nextBoolean()) {
-      JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
-      ChaosMonkey.stop(jetty);
-      log.info("============ Restarting jetty");
-      ChaosMonkey.start(jetty);
-      
-      for (Entry<String,List<Integer>> entry : collectionInfosEntrySet) {
-        String collection = entry.getKey();
-        List<Integer> list = entry.getValue();
-        checkForCollection(collection, list, null);
-        
-        String url = getUrlFromZk(collection);
-        
-        try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
-          // poll for a second - it can take a moment before we are ready to serve
-          waitForNon403or404or503(collectionClient);
-        }
-      }
+      createRequests[i]
+          = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
+          .setMaxShardsPerNode(maxShardsPerNode);
+      createRequests[i].processAsync(cluster.getSolrClient());
     }
 
-    // sometimes we restart zookeeper
-    if (random().nextBoolean()) {
-      zkServer.shutdown();
-      log.info("============ Restarting zookeeper");
-      zkServer = new ZkTestServer(zkServer.getZkDir(), zkServer.getPort());
-      zkServer.run();
-    }
-    
-    // sometimes we cause a connection loss - sometimes it will hit the overseer
-    if (random().nextBoolean()) {
-      JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
-      ChaosMonkey.causeConnectionLoss(jetty);
-    }
-    
-    ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    for (int j = 0; j < cnt; j++) {
-      waitForRecoveriesToFinish("awholynewcollection_" + j, zkStateReader, false);
-      
-      if (secondConfigSet) {
-        // let's see if they are using the second config set
-        byte[] data = zkStateReader.getZkClient()
-            .getData(
-                ZkStateReader.COLLECTIONS_ZKNODE + "/" + "awholynewcollection_"
-                    + j, null, null, true);
-        assertNotNull(data);
-        ZkNodeProps props = ZkNodeProps.load(data);
-        String configName = props.getStr(ZkController.CONFIGNAME_PROP);
-        assertEquals("conf2", configName);
-        
-      }
+    for (int i = 0; i < cnt; i++) {
+      String collectionName = "awhollynewcollection_" + i;
+      final int j = i;
+      waitForState("Expected to see collection " + collectionName, collectionName,
+          (n, c) -> {
+            CollectionAdminRequest.Create req = createRequests[j];
+            return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
+          });
     }
-    
-    checkInstanceDirs(jettys.get(0)); 
-    
-    List<String> collectionNameList = new ArrayList<>();
-    collectionNameList.addAll(collectionInfos.keySet());
-    String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));
-    
-    String url = getUrlFromZk(collectionName);
 
-    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
+    cluster.injectChaos(random());
 
-      // lets try and use the solrj client to index a couple documents
-      SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1,
-          "humpty dumpy sat on a wall");
-      SolrInputDocument doc2 = getDoc(id, 7, i1, -600, tlong, 600, t1,
-          "humpty dumpy3 sat on a walls");
-      SolrInputDocument doc3 = getDoc(id, 8, i1, -600, tlong, 600, t1,
-          "humpty dumpy2 sat on a walled");
+    for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
+      checkInstanceDirs(cluster.getJettySolrRunner(i));
+    }
 
-      collectionClient.add(doc1);
+    String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
 
-      collectionClient.add(doc2);
+    new UpdateRequest()
+        .add("id", "6")
+        .add("id", "7")
+        .add("id", "8")
+        .commit(cluster.getSolrClient(), collectionName);
+    assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
 
-      collectionClient.add(doc3);
+    checkNoTwoShardsUseTheSameIndexDir();
+  }
 
-      collectionClient.commit();
+  @Test
+  public void testCollectionReload() throws Exception {
 
-      assertEquals(3, collectionClient.query(new SolrQuery("*:*")).getResults().getNumFound());
-    }
+    final String collectionName = "reloaded_collection";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
 
-    // lets try a collection reload
-    
     // get core open times
-    Map<String,Long> urlToTimeBefore = new HashMap<>();
+    Map<String, Long> urlToTimeBefore = new HashMap<>();
     collectStartTimes(collectionName, urlToTimeBefore);
     assertTrue(urlToTimeBefore.size() > 0);
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.RELOAD.toString());
-    params.set("name", collectionName);
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    
-    // we can use this client because we just want base url
-    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
-    
-    makeRequest(baseUrl, request);
+
+    CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
 
     // reloads make take a short while
     boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
     assertTrue("some core start times did not change on reload", allTimesAreCorrect);
-    
-    
-    waitForRecoveriesToFinish("awholynewcollection_" + (cnt - 1), zkStateReader, false);
-    
-    // remove a collection
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
-    params.set("name", collectionName);
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
- 
-    makeRequest(baseUrl, request);
-    
-    // ensure its out of the state
-    assertCollectionNotExists(collectionName, 45);
-    
-    //collectionNameList.remove(collectionName);
-
-    // remove an unknown collection
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.DELETE.toString());
-    params.set("name", "unknown_collection");
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
- 
-    boolean exp = false;
-    try {
-      makeRequest(baseUrl, request);
-    } catch (SolrException e) {
-      exp = true;
-    }
-    assertTrue("Expected exception", exp);
-    
-    // create another collection should still work
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-
-    params.set("numShards", 1);
-    params.set(REPLICATION_FACTOR, 2);
-    collectionName = "acollectionafterbaddelete";
-
-    params.set("name", collectionName);
-    if (secondConfigSet) {
-      params.set("collection.configName", "conf1");
-    }
-    request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    makeRequest(baseUrl, request);
-    
-    List<Integer> list = new ArrayList<>(2);
-    list.add(1);
-    list.add(2);
-    checkForCollection(collectionName, list, null);
-    
-    url = getUrlFromZk(collectionName);
-    
-    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
-      // poll for a second - it can take a moment before we are ready to serve
-      waitForNon403or404or503(collectionClient);
-    }
-
-    for (int j = 0; j < cnt; j++) {
-      waitForRecoveriesToFinish(collectionName, zkStateReader, false);
-    }
-
-    // test maxShardsPerNode
-    int numLiveNodes = getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size();
-    int numShards = (numLiveNodes/2) + 1;
-    int replicationFactor = 2;
-    int maxShardsPerNode = 1;
-    collectionInfos = new HashMap<>();
-    try (CloudSolrClient client = createCloudClient("awholynewcollection_" + cnt)) {
-      exp = false;
-      try {
-        createCollection(collectionInfos, "awholynewcollection_" + cnt,
-            numShards, replicationFactor, maxShardsPerNode, client, null, "conf1");
-      } catch (SolrException e) {
-        exp = true;
-      }
-      assertTrue("expected exception", exp);
-    }
-
-    
-    // Test createNodeSet
-    numLiveNodes = getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size();
-    List<String> createNodeList = new ArrayList<>();
-    int numOfCreateNodes = numLiveNodes/2;
-    assertFalse("createNodeSet test is pointless with only " + numLiveNodes + " nodes running", numOfCreateNodes == 0);
-    int i = 0;
-    for (String liveNode : getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes()) {
-      if (i < numOfCreateNodes) {
-        createNodeList.add(liveNode);
-        i++;
-      } else {
-        break;
-      }
-    }
-    maxShardsPerNode = 2;
-    numShards = createNodeList.size() * maxShardsPerNode;
-    replicationFactor = 1;
-    collectionInfos = new HashMap<>();
-
-    try (SolrClient client = createCloudClient("awholynewcollection_" + (cnt+1))) {
-      CollectionAdminResponse res = createCollection(collectionInfos, "awholynewcollection_" + (cnt+1), numShards, replicationFactor, maxShardsPerNode, client, StrUtils.join(createNodeList, ','), "conf1");
-      assertTrue(res.isSuccess());
-    }
-    checkForCollection(collectionInfos.keySet().iterator().next(), collectionInfos.entrySet().iterator().next().getValue(), createNodeList);
-    
-    checkNoTwoShardsUseTheSameIndexDir();
-    if(disableLegacy) {
-      setClusterProp(client1, ZkStateReader.LEGACY_CLOUD, null);
-      client1.close();
-    }
-  }
-  
-  private void testCollectionsAPIAddRemoveStress() throws Exception {
-    
-    class CollectionThread extends Thread {
-      
-      public CollectionThread(String name) {
-        super(name);
-      }
-      
-      public void run() {
-        // create new collections rapid fire
-        Map<String,List<Integer>> collectionInfos = new HashMap<>();
-        int cnt = random().nextInt(TEST_NIGHTLY ? 13 : 1) + 1;
-        
-        for (int i = 0; i < cnt; i++) {
-          String collectionName = "awholynewstresscollection_" + getName() + "_" + i;
-          int numShards = TestUtil.nextInt(random(), 0, getShardCount() * 2) + 1;
-          int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
-          int maxShardsPerNode = (((numShards * 2 * replicationFactor) / getCommonCloudSolrClient()
-              .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-          try (CloudSolrClient client = createCloudClient(i == 1 ? collectionName : null)) {
-
-            createCollection(collectionInfos, collectionName,
-                numShards, replicationFactor, maxShardsPerNode, client, null,
-                "conf1");
-
-            // remove collection
-            CollectionAdminRequest.Delete delete = new CollectionAdminRequest.Delete()
-                    .setCollectionName(collectionName);
-            client.request(delete);
-          } catch (SolrServerException | IOException e) {
-            e.printStackTrace();
-            throw new RuntimeException(e);
-          }
-        }
-      }
-    }
-    List<Thread> threads = new ArrayList<>();
-    int numThreads = TEST_NIGHTLY ? 6 : 2;
-    for (int i = 0; i < numThreads; i++) {
-      CollectionThread thread = new CollectionThread("collection" + i);
-      threads.add(thread);
-    }
-    
-    for (Thread thread : threads) {
-      thread.start();
-    }
-    for (Thread thread : threads) {
-      thread.join();
-    }
   }
 
   private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
@@ -1024,7 +545,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
       Path instancedir = (Path) core.getStatistics().get("instanceDir");
       assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
 
-      Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve("cores").resolve(core.getName());
+      Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
 
       assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
 
@@ -1061,23 +582,14 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     return allTimesAreCorrect;
   }
 
-  private void collectStartTimes(String collectionName,
-      Map<String,Long> urlToTime) throws SolrServerException, IOException {
-    ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader()
-        .getClusterState();
-//    Map<String,DocCollection> collections = clusterState.getCollectionStates();
-    if (clusterState.hasCollection(collectionName)) {
-      Map<String,Slice> slices = clusterState.getSlicesMap(collectionName);
-
-      Iterator<Entry<String,Slice>> it = slices.entrySet().iterator();
-      while (it.hasNext()) {
-        Entry<String,Slice> sliceEntry = it.next();
-        Map<String,Replica> sliceShards = sliceEntry.getValue().getReplicasMap();
-        Iterator<Entry<String,Replica>> shardIt = sliceShards.entrySet()
-            .iterator();
-        while (shardIt.hasNext()) {
-          Entry<String,Replica> shardEntry = shardIt.next();
-          ZkCoreNodeProps coreProps = new ZkCoreNodeProps(shardEntry.getValue());
+  private void collectStartTimes(String collectionName, Map<String,Long> urlToTime)
+      throws SolrServerException, IOException {
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    if (collectionState != null) {
+      for (Slice shard : collectionState) {
+        for (Replica replica : shard) {
+          ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
           CoreAdminResponse mcr;
           try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
             mcr = CoreAdminRequest.getStatus(coreProps.getCoreName(), server);
@@ -1087,59 +599,9 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
         }
       }
     } else {
-      throw new IllegalArgumentException("Could not find collection in :"
-          + clusterState.getCollectionsMap());
-    }
-  }
-
-  private String getUrlFromZk(String collection) {
-    ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
-    Map<String,Slice> slices = clusterState.getSlicesMap(collection);
-    
-    if (slices == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection:" + collection);
-    }
-    
-    for (Map.Entry<String,Slice> entry : slices.entrySet()) {
-      Slice slice = entry.getValue();
-      Map<String,Replica> shards = slice.getReplicasMap();
-      Set<Map.Entry<String,Replica>> shardEntries = shards.entrySet();
-      for (Map.Entry<String,Replica> shardEntry : shardEntries) {
-        final ZkNodeProps node = shardEntry.getValue();
-        if (clusterState.liveNodesContain(node.getStr(ZkStateReader.NODE_NAME_PROP))) {
-          return ZkCoreNodeProps.getCoreUrl(node.getStr(ZkStateReader.BASE_URL_PROP), collection); //new ZkCoreNodeProps(node).getCoreUrl();
-        }
-      }
+      throw new IllegalArgumentException("Could not find collection " + collectionName);
     }
-    
-    throw new RuntimeException("Could not find a live node for collection:" + collection);
   }
-
-/*  private void waitForNon403or404or503(HttpSolrServer collectionClient)
-      throws Exception {
-    SolrException exp = null;
-    long timeoutAt = System.currentTimeMillis() + 30000;
-    
-    while (System.currentTimeMillis() < timeoutAt) {
-      boolean missing = false;
-
-      try {
-        collectionClient.query(new SolrQuery("*:*"));
-      } catch (SolrException e) {
-        if (!(e.code() == 403 || e.code() == 503 || e.code() == 404)) {
-          throw e;
-        }
-        exp = e;
-        missing = true;
-      }
-      if (!missing) {
-        return;
-      }
-      Thread.sleep(50);
-    }
-
-    fail("Could not find the new collection - " + exp.code() + " : " + collectionClient.getBaseURL());
-  }*/
   
   private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
     Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
@@ -1189,142 +651,70 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
 
   }
 
-  private void addReplicaTest() throws Exception {
+  @Test
+  public void addReplicaTest() throws Exception {
     String collectionName = "addReplicaColl";
-    try (CloudSolrClient client = createCloudClient(null)) {
-      createCollection(collectionName, client, 2, 2);
-      String newReplicaName = Assign.assignNode(client.getZkStateReader().getClusterState().getCollection(collectionName));
-      ArrayList<String> nodeList = new ArrayList<>(client.getZkStateReader().getClusterState().getLiveNodes());
-      Collections.shuffle(nodeList, random());
-
-      Replica newReplica = doAddReplica(collectionName, "shard1",
-          Assign.assignNode(client.getZkStateReader().getClusterState().getCollection(collectionName)),
-          nodeList.get(0), client, null);
-
-      log.info("newReplica {},\n{} ", newReplica, client.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)));
-
-      assertEquals("Replica should be created on the right node",
-          client.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)), newReplica.getStr(ZkStateReader.BASE_URL_PROP));
-
-      Properties props = new Properties();
-      String instancePathStr = createTempDir().toString();
-      props.put(CoreAdminParams.INSTANCE_DIR, instancePathStr); //Use name via the property.instanceDir method
-      newReplica = doAddReplica(collectionName, "shard2",
-          Assign.assignNode(client.getZkStateReader().getClusterState().getCollection(collectionName)),
-          null, client, props);
-      assertNotNull(newReplica);
-
-      try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
-        CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
-        NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
-        String instanceDirStr = (String) coreStatus.get("instanceDir");
-        assertEquals(Paths.get(instanceDirStr).toString(), instancePathStr);
-      }
 
-      //Test to make sure we can't create another replica with an existing core_name of that collection
-      String coreName = newReplica.getStr(CORE_NAME_PROP);
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", "addreplica");
-      params.set("collection", collectionName);
-      params.set("shard", "shard1");
-      params.set("name", coreName);
-      QueryRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      try {
-        client.request(request);
-        fail("AddReplica call should not have been successful");
-      } catch (SolrException e) {
-        assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
-      }
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+        .setMaxShardsPerNode(4)
+        .process(cluster.getSolrClient());
 
+    ArrayList<String> nodeList
+        = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+    Collections.shuffle(nodeList, random());
 
-      // Check that specifying property.name works. DO NOT remove this when the "name" property is deprecated
-      // for ADDREPLICA, this is "property.name". See SOLR-7132
-      props = new Properties();
-      props.put(CoreAdminParams.NAME, "propertyDotName");
+    String newReplicaName = Assign.assignNode(getCollectionState(collectionName));
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(nodeList.get(0))
+        .process(cluster.getSolrClient());
 
-      newReplica = doAddReplica(collectionName, "shard1",
-          Assign.assignNode(client.getZkStateReader().getClusterState().getCollection(collectionName)),
-          nodeList.get(0), client, props);
-      assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
-    }
-  }
-
-  private Replica doAddReplica(String collectionName, String shard, String newReplicaName, String node,
-                               CloudSolrClient client, Properties props) throws IOException, SolrServerException {
-    CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica();
+    Replica newReplica = getCollectionState(collectionName).getReplica(newReplicaName);
 
-    addReplica.setCollectionName(collectionName);
-    addReplica.setShardName(shard);
-    if (node != null) {
-      addReplica.setNode(node);
-    }
-    if (props != null) {
-      addReplica.setProperties(props);
-    }
-    client.request(addReplica);
-    TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
-    Replica newReplica = null;
+    assertEquals("Replica should be created on the right node",
+        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
+        newReplica.getStr(ZkStateReader.BASE_URL_PROP));
 
-    for (; ! timeout.hasTimedOut(); ) {
-      Slice slice = client.getZkStateReader().getClusterState().getSlice(collectionName, shard);
-      newReplica = slice.getReplica(newReplicaName);
-    }
+    newReplicaName = Assign.assignNode(getCollectionState(collectionName));
+    Path instancePath = createTempDir();
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
+        .process(cluster.getSolrClient());
 
+    newReplica = getCollectionState(collectionName).getReplica(newReplicaName);
     assertNotNull(newReplica);
-    return newReplica;
-  }
-  @Override
-  protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException, IOException {
 
-    if (r.nextBoolean())
-      return super.queryServer(params);
+    try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
+      NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
+      String instanceDirStr = (String) coreStatus.get("instanceDir");
+      assertEquals(instancePath.toString(), instanceDirStr);
+    }
 
-    if (r.nextBoolean())
-      params.set("collection",DEFAULT_COLLECTION);
+    // Test that we cannot create another replica reusing an existing core name in this collection
+    String coreName = newReplica.getStr(CORE_NAME_PROP);
+    SolrException e = expectThrows(SolrException.class, () -> {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", "addreplica");
+      params.set("collection", collectionName);
+      params.set("shard", "shard1");
+      params.set("name", coreName);
+      QueryRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      cluster.getSolrClient().request(request);
+    });
 
-    QueryResponse rsp = getCommonCloudSolrClient().query(params);
-    return rsp;
-  }
+    assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
 
-  protected void createCollection(String COLL_NAME, CloudSolrClient client,int replicationFactor , int numShards ) throws Exception {
-    int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
-        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-    Map<String, Object> props = makeMap(
-        REPLICATION_FACTOR, replicationFactor,
-        MAX_SHARDS_PER_NODE, maxShardsPerNode,
-        NUM_SLICES, numShards);
-    Map<String,List<Integer>> collectionInfos = new HashMap<>();
-    createCollection(collectionInfos, COLL_NAME, props, client, "conf1");
-    assertAllActive(COLL_NAME, getCommonCloudSolrClient().getZkStateReader());
-    
-  }
-  
-  private void clusterPropTest() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      assertTrue("cluster property not set", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, "false"));
-      assertTrue("cluster property not unset ", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, null));
-    }
-  }
+    // Check that specifying property.name works. DO NOT remove this when the "name" property is deprecated
+    // for ADDREPLICA, this is "property.name". See SOLR-7132
+    newReplicaName = Assign.assignNode(getCollectionState(collectionName));
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty(CoreAdminParams.NAME, "propertyDotName")
+        .process(cluster.getSolrClient());
 
-  public static boolean setClusterProp(CloudSolrClient client, String name , String val) throws SolrServerException, IOException, InterruptedException {
-    Map m = makeMap(
-        "action", CollectionAction.CLUSTERPROP.toLower(),
-        "name",name);
+    newReplica = getCollectionState(collectionName).getReplica(newReplicaName);
+    assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
 
-    if(val != null) m.put("val", val);
-    SolrRequest request = new QueryRequest(new MapSolrParams(m));
-    request.setPath("/admin/collections");
-    client.request(request);
-
-    TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
-    boolean changed = false;
-    while(! timeout.hasTimedOut()){
-      Thread.sleep(10);
-      changed = Objects.equals(val,client.getZkStateReader().getClusterProperty(name, (String) null));
-      if(changed) break;
-    }
-    return changed;
   }
+
 }
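
For readers following the cutover, a minimal, self-contained sketch of the fluent CollectionAdminRequest calls the rewritten addReplicaTest now relies on (not part of the commit). The configset name "conf" matches the one used in the test above; the class name, the SolrClient/node parameters, and the instance directory path are illustrative assumptions only.

    import java.io.IOException;

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.common.params.CoreAdminParams;

    public class AddReplicaSketch {

      // Creates a 2-shard x 2-replica collection and then grows shard1,
      // mirroring the request builders used in the test above.
      public static void createAndGrow(SolrClient client, String nodeName)
          throws SolrServerException, IOException {

        // Create the collection against an existing configset named "conf".
        CollectionAdminRequest.createCollection("addReplicaColl", "conf", 2, 2)
            .setMaxShardsPerNode(4)
            .process(client);

        // Add a replica to shard1, pinned to a specific live node.
        CollectionAdminRequest.addReplicaToShard("addReplicaColl", "shard1")
            .setNode(nodeName)
            .process(client);

        // Add another replica, overriding the instance directory through a
        // core property ("/var/solr/extra-instance" is illustrative only).
        CollectionAdminRequest.addReplicaToShard("addReplicaColl", "shard1")
            .withProperty(CoreAdminParams.INSTANCE_DIR, "/var/solr/extra-instance")
            .process(client);
      }
    }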

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java b/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
index 989e1af..df7a2e2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CreateCollectionCleanupTest.java
@@ -77,8 +77,7 @@ public class CreateCollectionCleanupTest extends SolrCloudTestCase {
     assertFalse(rsp.isSuccess());
 
     // Confirm using LIST that the collection does not exist
-    CollectionAdminRequest.List list = CollectionAdminRequest.listCollections();
-    rsp = list.process(cloudClient);
-    assertFalse(((ArrayList) rsp.getResponse().get("collections")).contains("foo"));
+    assertFalse(CollectionAdminRequest.listCollections(cloudClient).contains("foo"));
+
   }
 }
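
As a quick illustration of the LIST helper used in that assertion: CollectionAdminRequest.listCollections(client) returns the collection names directly, so an existence check reduces to a contains() call. A minimal sketch, assuming any running SolrClient; the class and method names are made up for the example.

    import java.io.IOException;
    import java.util.List;

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class ListCollectionsSketch {

      // Returns true if a collection with the given name is visible via LIST.
      public static boolean exists(SolrClient client, String name)
          throws SolrServerException, IOException {
        List<String> collections = CollectionAdminRequest.listCollections(client);
        return collections.contains(name);
      }
    }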