Posted to commits@lucene.apache.org by da...@apache.org on 2018/01/23 10:30:45 UTC

[15/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to its own package

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
deleted file mode 100644
index daa267d..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-@Slow
-public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(3)
-        .addConfig("conf", configset("cloud-minimal"))
-        .configure();
-  }
-
-  @Before
-  public void deleteCollections() throws Exception {
-    cluster.deleteAllCollections();
-  }
-
-  @Test
-  public void testAddTooManyReplicas() throws Exception {
-    final String collectionName = "TooManyReplicasInSeveralFlavors";
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .setMaxShardsPerNode(1)
-        .process(cluster.getSolrClient());
-
-    // I have two replicas, one for each shard
-
-    // Curiously, we should be able to add a bunch of replicas if we specify the node, even more than
-    // maxShardsPerNode allows.
-    // Just grab the first node any way we can, to use for the "node" parameter.
-    String nodeName = getAllNodeNames(collectionName).get(0);
-
-    // Add a replica using the "node" parameter (no "too many replicas check")
-    // this node should have 2 replicas on it
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .setNode(nodeName)
-        .process(cluster.getSolrClient());
-
-    // Three replicas so far, should be able to create another one "normally"
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .process(cluster.getSolrClient());
-
-    // This one should fail though, no "node" parameter specified
-    Exception e = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-          .process(cluster.getSolrClient());
-    });
-
-    assertTrue("Should have gotten the right error message back",
-          e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-
-    // Oddly, the next request should succeed because specifying property.name skips the check for nodes being "full up"
-    // TODO: Isn't this a bug?
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty("name", "bogus2")
-        .setNode(nodeName)
-        .process(cluster.getSolrClient());
-
-    DocCollection collectionState = getCollectionState(collectionName);
-    Slice slice = collectionState.getSlice("shard1");
-    Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
-    assertNotNull("Should have found a replica named 'bogus2'", replica);
-    assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
-
-    // Shard1 should have 4 replicas
-    assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
-
-    // And let's fail one more time, to ensure that the math doesn't do weird stuff if we have more replicas
-    // than simple calcs would indicate.
-    Exception e2 = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-          .process(cluster.getSolrClient());
-    });
-
-    assertTrue("Should have gotten the right error message back",
-        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
-    waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
-      for (Replica r : c.getReplicas()) {
-        if (r.getState() != Replica.State.ACTIVE)
-          return false;
-      }
-      return true;
-    });
-  }
-
-  @Test
-  public void testAddShard() throws Exception {
-
-    String collectionName = "TooManyReplicasWhenAddingShards";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
-        .setMaxShardsPerNode(2)
-        .process(cluster.getSolrClient());
-
-    // We have three nodes and maxShardsPerNode is set to 2. Therefore, we should be able to add 2 shards each with
-    // two replicas, but fail on the third.
-    CollectionAdminRequest.createShard(collectionName, "shard1")
-        .process(cluster.getSolrClient());
-
-    // Add a second extra shard; this fills the cluster to its maxShardsPerNode capacity
-    CollectionAdminRequest.createShard(collectionName, "shard2")
-        .process(cluster.getSolrClient());
-
-    // Adding a third shard should now fail, since it would exceed maxShardsPerNode
-    Exception e = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createShard(collectionName, "shard3")
-          .process(cluster.getSolrClient());
-    });
-    assertTrue("Should have gotten the right error message back",
-        e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
-    List<String> nodes = getAllNodeNames(collectionName);
-
-    CollectionAdminRequest.createShard(collectionName, "shard4")
-        .setNodeSet(StringUtils.join(nodes, ","))
-        .process(cluster.getSolrClient());
-
-    // And just for yucks, ensure we fail the "regular" one again.
-    Exception e2 = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createShard(collectionName, "shard5")
-          .process(cluster.getSolrClient());
-    });
-    assertTrue("Should have gotten the right error message back",
-        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // And finally, ensure that all the replicas we expect are present. We should have shards shardstart, 1, 2 and 4,
-    // each with exactly two replicas
-    waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
-      return DocCollection.isFullyActive(n, c, 4, 2);
-    });
-    Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
-    assertEquals("There should be exaclty four slices", slices.size(), 4);
-    assertNotNull("shardstart should exist", slices.get("shardstart"));
-    assertNotNull("shard1 should exist", slices.get("shard1"));
-    assertNotNull("shard2 should exist", slices.get("shard2"));
-    assertNotNull("shard4 should exist", slices.get("shard4"));
-    assertEquals("Shardstart should have exactly 2 replicas", 2, slices.get("shardstart").getReplicas().size());
-    assertEquals("Shard1 should have exactly 2 replicas", 2, slices.get("shard1").getReplicas().size());
-    assertEquals("Shard2 should have exactly 2 replicas", 2, slices.get("shard2").getReplicas().size());
-    assertEquals("Shard4 should have exactly 2 replicas", 2, slices.get("shard4").getReplicas().size());
-
-  }
-
-  @Test
-  public void testDownedShards() throws Exception {
-    String collectionName = "TooManyReplicasWhenAddingDownedNode";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
-        .setMaxShardsPerNode(2)
-        .process(cluster.getSolrClient());
-
-    // Shut down a Jetty, I really don't care which
-    JettySolrRunner jetty = cluster.getRandomJetty(random());
-    String deadNode = jetty.getBaseUrl().toString();
-    cluster.stopJettySolrRunner(jetty);
-
-    try {
-
-      // Adding a replica on a dead node should fail
-      Exception e1 = expectThrows(Exception.class, () -> {
-        CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
-            .setNode(deadNode)
-            .process(cluster.getSolrClient());
-      });
-      assertTrue("Should have gotten a message about shard not currently active: " + e1.toString(),
-          e1.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
-
-      // Creating a shard with a nodeSet containing the dead node should also fail
-      Exception e2 = expectThrows(Exception.class, () -> {
-        CollectionAdminRequest.createShard(collectionName, "shard1")
-            .setNodeSet(deadNode)
-            .process(cluster.getSolrClient());
-      });
-
-      assertTrue("Should have gotten a message about shard not currently active: " + e2.toString(),
-          e2.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
-    }
-    finally {
-      cluster.startJettySolrRunner(jetty);
-    }
-  }
-
-  private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
-    DocCollection state = getCollectionState(collectionName);
-    return state.getReplicas().stream().map(Replica::getNodeName).distinct().collect(Collectors.toList());
-  }
-
-}

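A note on the behaviour the deleted test above pins down: the Collections API skips the maxShardsPerNode check whenever an explicit "node" parameter (or a createNodeSet) is given. A minimal SolrJ sketch of that quirk, assuming a SolrCloud cluster with ZooKeeper at localhost:9983 — the address, config name and node name below are illustrative, not taken from the diff:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class AddReplicaSketch {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:9983").build()) {
          // The capacity check applies here: 2 shards, 1 replica each,
          // at most 1 shard per node.
          CollectionAdminRequest.createCollection("demo", "conf", 2, 1)
              .setMaxShardsPerNode(1)
              .process(client);
          // With setNode(...) the replica is placed on the named node even
          // if that node is already "full" under maxShardsPerNode.
          CollectionAdminRequest.addReplicaToShard("demo", "shard1")
              .setNode("127.0.0.1:8983_solr")  // hypothetical node name
              .process(client);
        }
      }
    }
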
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
deleted file mode 100644
index c3dc44b..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests the Cloud Collections API.
- */
-@Slow
-public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
-
-  private static final int MAX_TIMEOUT_SECONDS = 60;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(2)
-        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-        .configure();
-  }
-
-  @Test
-  public void testSolrJAPICalls() throws Exception {
-
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    RequestStatusState state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("Recreating a collection with the same should have failed.", RequestStatusState.FAILED, state);
-
-    state = CollectionAdminRequest.addReplicaToShard("testasynccollectioncreation", "shard1")
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("Add replica did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.splitShard("testasynccollectioncreation")
-        .setShardName("shard1")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS * 2);
-    assertEquals("Shard split did not complete. Last recorded state: " + state, RequestStatusState.COMPLETED, state);
-
-  }
-
-  @Test
-  public void testAsyncRequests() throws Exception {
-
-    final String collection = "testAsyncOperations";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    RequestStatusState state = CollectionAdminRequest.createCollection(collection,"conf1",1,1)
-        .setRouterName("implicit")
-        .setShards("shard1")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
-
-    //Add a few documents to shard1
-    int numDocs = TestUtil.nextInt(random(), 10, 100);
-    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
-    for (int i=0; i<numDocs; i++) {
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.addField("id", i);
-      doc.addField("_route_", "shard1");
-      docs.add(doc);
-    }
-    client.add(collection, docs);
-    client.commit(collection);
-
-    SolrQuery query = new SolrQuery("*:*");
-    query.set("shards", "shard1");
-    assertEquals(numDocs, client.query(collection, query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.reloadCollection(collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("ReloadCollection did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.createShard(collection,"shard2")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
-
-    //Add a doc to shard2 to make sure shard2 was created properly
-    SolrInputDocument doc = new SolrInputDocument();
-    doc.addField("id", numDocs + 1);
-    doc.addField("_route_", "shard2");
-    client.add(collection, doc);
-    client.commit(collection);
-    query = new SolrQuery("*:*");
-    query.set("shards", "shard2");
-    assertEquals(1, client.query(collection, query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.deleteShard(collection,"shard2").processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteShard did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("AddReplica did not complete", RequestStatusState.COMPLETED, state);
-
-    // the cloudClient watch might take a couple of seconds to reflect the new replica;
-    // re-read the cluster state on each iteration, since Slice objects are immutable snapshots
-    int count = 0;
-    Slice shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
-    while (shard1.getReplicas().size() != 2) {
-      if (count++ > 1000) fail("2nd Replica not reflecting in the cluster state");
-      Thread.sleep(100);
-      shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
-    }
-
-    state = CollectionAdminRequest.createAlias("myalias",collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateAlias did not complete", RequestStatusState.COMPLETED, state);
-
-    query = new SolrQuery("*:*");
-    query.set("shards", "shard1");
-    assertEquals(numDocs, client.query("myalias", query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.deleteAlias("myalias")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteAlias did not complete", RequestStatusState.COMPLETED, state);
-
-    try {
-      client.query("myalias", query);
-      fail("Alias should not exist");
-    } catch (SolrException e) {
-      //expected
-    }
-
-    Replica replica = shard1.getReplicas().iterator().next();
-    for (String liveNode : client.getZkStateReader().getClusterState().getLiveNodes()) {
-      if (!replica.getNodeName().equals(liveNode)) {
-        state = new CollectionAdminRequest.MoveReplica(collection, replica.getName(), liveNode)
-            .processAndWait(client, MAX_TIMEOUT_SECONDS);
-        assertSame("MoveReplica did not complete", RequestStatusState.COMPLETED, state);
-        break;
-      }
-    }
-
-    shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
-    String replicaName = shard1.getReplicas().iterator().next().getName();
-    state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteReplica did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.deleteCollection(collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteCollection did not complete", RequestStatusState.COMPLETED, state);
-  }
-
-}

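Every call in the deleted async test follows one idiom: processAndWait() submits the admin request with an async id and polls the request status until it reaches a terminal state or the timeout lapses. A hedged fragment of that idiom — the collection and config names are illustrative, and 'client' is a connected CloudSolrClient as in the test:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;

    static void createAsync(CloudSolrClient client) throws Exception {
      RequestStatusState state = CollectionAdminRequest
          .createCollection("asyncdemo", "conf1", 1, 1)
          .processAndWait(client, 60);  // timeout in seconds
      // The test asserts COMPLETED; FAILED means the operation errored out.
      if (state != RequestStatusState.COMPLETED) {
        throw new IllegalStateException("create did not complete: " + state);
      }
    }
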
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
deleted file mode 100644
index 5615918..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ /dev/null
@@ -1,684 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import javax.management.MBeanServer;
-import javax.management.MBeanServerFactory;
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.lang.management.ManagementFactory;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.collect.ImmutableList;
-import org.apache.commons.io.IOUtils;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.CoreStatus;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrInfoBean.Category;
-import org.apache.solr.util.LogLevel;
-import org.apache.solr.util.TestInjection;
-import org.apache.solr.util.TimeOut;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-
-/**
- * Tests the Cloud Collections API.
- */
-@Slow
-public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @BeforeClass
-  public static void beforeCollectionsAPIDistributedZkTest() {
-    // we don't want this test to have zk timeouts
-    System.setProperty("zkClientTimeout", "240000");
-    TestInjection.randomDelayInCoreCreation = "true:20";
-    System.setProperty("validateAfterInactivity", "200");
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    String solrXml = IOUtils.toString(CollectionsAPIDistributedZkTest.class.getResourceAsStream("/solr/solr-jmxreporter.xml"), "UTF-8");
-    configureCluster(4)
-        .addConfig("conf", configset("cloud-minimal"))
-        .addConfig("conf2", configset("cloud-minimal-jmx"))
-        .withSolrXml(solrXml)
-        .configure();
-  }
-
-  @Before
-  public void clearCluster() throws Exception {
-    try {
-      cluster.deleteAllCollections();
-    } finally {
-      System.clearProperty("zkClientTimeout");
-    }
-  }
-
-  @Test
-  public void testCreationAndDeletion() throws Exception {
-
-    String collectionName = "created_and_deleted";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-                  .contains(collectionName));
-
-    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-        .contains(collectionName));
-
-    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
-
-
-  }
-
-  @Test
-  public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
-    
-    String collectionName = "out_of_sync_collection";
-
-    // manually create a collections zknode
-    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-
-    CollectionAdminRequest.deleteCollection(collectionName)
-        .process(cluster.getSolrClient());
-
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-                  .contains(collectionName));
-    
-    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
-
-  }
-
-  @Test
-  public void deletePartiallyCreatedCollection() throws Exception {
-
-    final String collectionName = "halfdeletedcollection";
-
-    assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    String dataDir = createTempDir().toFile().getAbsolutePath();
-    // create a core that simulates something left over from a partially-deleted collection
-    assertTrue(CollectionAdminRequest
-        .addReplicaToShard(collectionName, "shard1")
-        .setDataDir(dataDir)
-        .process(cluster.getSolrClient()).isSuccess());
-
-    CollectionAdminRequest.deleteCollection(collectionName)
-        .process(cluster.getSolrClient());
-
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .process(cluster.getSolrClient());
-
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-  }
-
-  @Test
-  public void deleteCollectionOnlyInZk() throws Exception {
-
-    final String collectionName = "onlyinzk";
-
-    // create the collections node, but nothing else
-    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-
-    // delete via API - should remove collections node
-    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-    
-    // now creating that collection should work
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .process(cluster.getSolrClient());
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-  }
-
-  @Test
-  public void testBadActionNames() throws Exception {
-
-    // try a bad action
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", "BADACTION");
-    String collectionName = "badactioncollection";
-    params.set("name", collectionName);
-    params.set("numShards", 2);
-    final QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testMissingRequiredParameters() {
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("numShards", 2);
-    // missing required collection parameter
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-  }
-
-  @Test
-  public void testTooManyReplicas() {
-
-    CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(req);
-    });
-
-  }
-
-  @Test
-  public void testMissingNumShards() {
-
-    // Omitting numShards should fail
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", "acollection");
-    params.set(REPLICATION_FACTOR, 10);
-    params.set("collection.configName", "conf");
-
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testZeroNumShards() {
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", "acollection");
-    params.set(REPLICATION_FACTOR, 10);
-    params.set("numShards", 0);
-    params.set("collection.configName", "conf");
-
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testCreateShouldFailOnExistingCore() throws Exception {
-    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
-        .setNode(cluster.getJettySolrRunner(0).getNodeName())
-        .setCoreName("halfcollection_shard1_replica_n1")
-        .process(cluster.getSolrClient()).isSuccess());
-
-    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
-        .setNode(cluster.getJettySolrRunner(1).getNodeName())
-        .setCoreName("halfcollection_shard1_replica_n1")
-        .process(cluster.getSolrClient()).isSuccess());
-
-    String nn1 = cluster.getJettySolrRunner(0).getNodeName();
-    String nn2 = cluster.getJettySolrRunner(1).getNodeName();
-
-    CollectionAdminResponse resp = CollectionAdminRequest.createCollection("halfcollection", "conf", 2, 1)
-        .setCreateNodeSet(nn1 + "," + nn2)
-        .process(cluster.getSolrClient());
-    
-    SimpleOrderedMap success = (SimpleOrderedMap) resp.getResponse().get("success");
-    SimpleOrderedMap failure = (SimpleOrderedMap) resp.getResponse().get("failure");
-
-    assertNotNull(resp.toString(), success);
-    assertNotNull(resp.toString(), failure);
-    
-    String val1 = success.getVal(0).toString();
-    String val2 = failure.getVal(0).toString();
-    assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
-  }
-
-  @Test
-  public void testNoConfigSetExist() throws Exception {
-
-    expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
-          .process(cluster.getSolrClient());
-    });
-
-    TimeUnit.MILLISECONDS.sleep(1000);
-    // the failed create should not have left a collection behind
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
-  }
-
-  @Test
-  public void testCoresAreDistributedAcrossNodes() throws Exception {
-
-    CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
-        .process(cluster.getSolrClient());
-
-    Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
-
-    List<String> createNodeList = new ArrayList<>();
-    createNodeList.addAll(liveNodes);
-
-    DocCollection collection = getCollectionState("nodes_used_collection");
-    for (Slice slice : collection.getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        createNodeList.remove(replica.getNodeName());
-      }
-    }
-
-    assertEquals(createNodeList.toString(), 0, createNodeList.size());
-
-  }
-
-  @Test
-  public void testDeleteNonExistentCollection() throws Exception {
-
-    SolrException e = expectThrows(SolrException.class, () -> {
-      CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
-    });
-
-    // creating another collection should still work
-    CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
-        .process(cluster.getSolrClient());
-    waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
-  }
-
-  @Test
-  public void testSpecificConfigsets() throws Exception {
-    CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
-    byte[] data = zkClient().getData(ZkStateReader.COLLECTIONS_ZKNODE + "/" + "withconfigset2", null, null, true);
-    assertNotNull(data);
-    ZkNodeProps props = ZkNodeProps.load(data);
-    String configName = props.getStr(ZkController.CONFIGNAME_PROP);
-    assertEquals("conf2", configName);
-  }
-
-  @Test
-  public void testMaxNodesPerShard() throws Exception {
-
-    // test maxShardsPerNode
-    int numLiveNodes = cluster.getJettySolrRunners().size();
-    int numShards = (numLiveNodes/2) + 1;
-    int replicationFactor = 2;
-    int maxShardsPerNode = 1;
-
-    SolrException e = expectThrows(SolrException.class, () -> {
-      CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
-          .process(cluster.getSolrClient());
-    });
-
-  }
-
-  @Test
-  public void testCreateNodeSet() throws Exception {
-
-    JettySolrRunner jetty1 = cluster.getRandomJetty(random());
-    JettySolrRunner jetty2 = cluster.getRandomJetty(random());
-
-    List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
-
-    CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
-        .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
-        .process(cluster.getSolrClient());
-
-    DocCollection collectionState = getCollectionState("nodeset_collection");
-    for (Replica replica : collectionState.getReplicas()) {
-      String replicaUrl = replica.getCoreUrl();
-      boolean matchingJetty = false;
-      for (String jettyUrl : baseUrls) {
-        if (replicaUrl.startsWith(jettyUrl))
-          matchingJetty = true;
-      }
-      if (!matchingJetty)
-        fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
-    }
-
-  }
-
-  @Test
-  public void testCollectionsAPI() throws Exception {
-
-    // create new collections rapid fire
-    int cnt = random().nextInt(TEST_NIGHTLY ? 3 : 1) + 1;
-    CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
-
-    for (int i = 0; i < cnt; i++) {
-
-      int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
-      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
-      int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
-
-      createRequests[i]
-          = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
-          .setMaxShardsPerNode(maxShardsPerNode);
-      createRequests[i].processAsync(cluster.getSolrClient());
-    }
-
-    for (int i = 0; i < cnt; i++) {
-      String collectionName = "awhollynewcollection_" + i;
-      final int j = i;
-      waitForState("Expected to see collection " + collectionName, collectionName,
-          (n, c) -> {
-            CollectionAdminRequest.Create req = createRequests[j];
-            return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
-          });
-    }
-
-    cluster.injectChaos(random());
-
-    for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
-      checkInstanceDirs(cluster.getJettySolrRunner(i));
-    }
-
-    String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
-
-    new UpdateRequest()
-        .add("id", "6")
-        .add("id", "7")
-        .add("id", "8")
-        .commit(cluster.getSolrClient(), collectionName);
-    TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (!timeOut.hasTimedOut()) {
-      try {
-        long numFound = cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound();
-        assertEquals(3, numFound);
-        break;
-      } catch (Exception e) {
-        // Query node can have stale clusterstate
-        log.info("Error when query " + collectionName, e);
-        Thread.sleep(500);
-      }
-    }
-    if (timeOut.hasTimedOut()) {
-      fail("Timeout on query " + collectionName);
-    }
-
-    checkNoTwoShardsUseTheSameIndexDir();
-  }
-
-  @Test
-  public void testCollectionReload() throws Exception {
-
-    final String collectionName = "reloaded_collection";
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
-
-    // get core open times
-    Map<String, Long> urlToTimeBefore = new HashMap<>();
-    collectStartTimes(collectionName, urlToTimeBefore);
-    assertTrue(urlToTimeBefore.size() > 0);
-
-    CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
-
-    // reloads may take a short while
-    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
-    assertTrue("some core start times did not change on reload", allTimesAreCorrect);
-  }
-
-  private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
-    CoreContainer cores = jetty.getCoreContainer();
-    Collection<SolrCore> theCores = cores.getCores();
-    for (SolrCore core : theCores) {
-
-      // look for core props file
-      Path instancedir = (Path) core.getResourceLoader().getInstancePath();
-      assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
-
-      Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
-
-      assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
-
-    }
-  }
-
-  private boolean waitForReloads(String collectionName, Map<String,Long> urlToTimeBefore) throws SolrServerException, IOException {
-
-
-    TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-
-    boolean allTimesAreCorrect = false;
-    while (! timeout.hasTimedOut()) {
-      Map<String,Long> urlToTimeAfter = new HashMap<>();
-      collectStartTimes(collectionName, urlToTimeAfter);
-      
-      boolean retry = false;
-      Set<Entry<String,Long>> entries = urlToTimeBefore.entrySet();
-      for (Entry<String,Long> entry : entries) {
-        Long beforeTime = entry.getValue();
-        Long afterTime = urlToTimeAfter.get(entry.getKey());
-        assertNotNull(afterTime);
-        if (afterTime <= beforeTime) {
-          retry = true;
-          break;
-        }
-
-      }
-      if (!retry) {
-        allTimesAreCorrect = true;
-        break;
-      }
-    }
-    return allTimesAreCorrect;
-  }
-
-  private void collectStartTimes(String collectionName, Map<String,Long> urlToTime)
-      throws SolrServerException, IOException {
-
-    DocCollection collectionState = getCollectionState(collectionName);
-    if (collectionState != null) {
-      for (Slice shard : collectionState) {
-        for (Replica replica : shard) {
-          ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
-          CoreStatus coreStatus;
-          try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
-            coreStatus = CoreAdminRequest.getCoreStatus(coreProps.getCoreName(), false, server);
-          }
-          long before = coreStatus.getCoreStartTime().getTime();
-          urlToTime.put(coreProps.getCoreUrl(), before);
-        }
-      }
-    } else {
-      throw new IllegalArgumentException("Could not find collection " + collectionName);
-    }
-  }
-  
-  private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
-    Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
-    
-    List<MBeanServer> servers = new LinkedList<>();
-    servers.add(ManagementFactory.getPlatformMBeanServer());
-    servers.addAll(MBeanServerFactory.findMBeanServer(null));
-    for (final MBeanServer server : servers) {
-      Set<ObjectName> mbeans = new HashSet<>();
-      mbeans.addAll(server.queryNames(null, null));
-      for (final ObjectName mbean : mbeans) {
-
-        try {
-          Map<String, String> props = mbean.getKeyPropertyList();
-          String category = props.get("category");
-          String name = props.get("name");
-          if ((category != null && category.equals(Category.CORE.toString())) &&
-              (name != null && name.equals("indexDir"))) {
-            String indexDir = server.getAttribute(mbean, "Value").toString();
-            String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
-            if (!indexDirToShardNamesMap.containsKey(indexDir)) {
-              indexDirToShardNamesMap.put(indexDir, new HashSet<>());
-            }
-            indexDirToShardNamesMap.get(indexDir).add(key);
-          }
-        } catch (Exception e) {
-          // ignore, just continue - probably a "Value" attribute
-          // not found
-        }
-      }
-    }
-    
-    assertTrue(
-        "Something is broken in the assert for no shards using the same indexDir - probably something was changed in the attributes published in the MBean of "
-            + SolrCore.class.getSimpleName() + " : " + indexDirToShardNamesMap,
-        indexDirToShardNamesMap.size() > 0);
-    for (Entry<String,Set<String>> entry : indexDirToShardNamesMap.entrySet()) {
-      if (entry.getValue().size() > 1) {
-        fail("We have shards using the same indexDir. E.g. shards "
-            + entry.getValue().toString() + " all use indexDir "
-            + entry.getKey());
-      }
-    }
-
-  }
-
-  @Test
-  @LogLevel("org.apache.solr.cloud=DEBUG")
-  public void addReplicaTest() throws Exception {
-    String collectionName = "addReplicaColl";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
-        .setMaxShardsPerNode(4)
-        .process(cluster.getSolrClient());
-
-    ArrayList<String> nodeList
-        = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
-    Collections.shuffle(nodeList, random());
-
-    CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .setNode(nodeList.get(0))
-        .process(cluster.getSolrClient());
-    Replica newReplica = grabNewReplica(response, getCollectionState(collectionName));
-
-    assertEquals("Replica should be created on the right node",
-        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
-        newReplica.getStr(ZkStateReader.BASE_URL_PROP));
-
-    Path instancePath = createTempDir();
-    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
-        .process(cluster.getSolrClient());
-    newReplica = grabNewReplica(response, getCollectionState(collectionName));
-    assertNotNull(newReplica);
-
-    try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
-      CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
-      NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
-      String instanceDirStr = (String) coreStatus.get("instanceDir");
-      assertEquals(instanceDirStr, instancePath.toString());
-    }
-
-    //Test to make sure we can't create another replica with a core name that already exists in the collection
-    String coreName = newReplica.getStr(CORE_NAME_PROP);
-    SolrException e = expectThrows(SolrException.class, () -> {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", "addreplica");
-      params.set("collection", collectionName);
-      params.set("shard", "shard1");
-      params.set("name", coreName);
-      QueryRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      cluster.getSolrClient().request(request);
-    });
-
-    assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
-
-    // Check that specifying property.name works. DO NOT remove this when the "name" property is deprecated
-    // for ADDREPLICA; what this exercises is "property.name". See SOLR-7132
-    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty(CoreAdminParams.NAME, "propertyDotName")
-        .process(cluster.getSolrClient());
-
-    newReplica = grabNewReplica(response, getCollectionState(collectionName));
-    assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
-
-  }
-
-  private Replica grabNewReplica(CollectionAdminResponse response, DocCollection docCollection) {
-    String replicaName = response.getCollectionCoresStatus().keySet().iterator().next();
-    Optional<Replica> optional = docCollection.getReplicas().stream()
-        .filter(replica -> replicaName.equals(replica.getCoreName()))
-        .findAny();
-    if (optional.isPresent()) {
-      return optional.get();
-    }
-    throw new AssertionError("Can not find " + replicaName + " from " + docCollection);
-  }
-
-}

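The round-trip the test above exercises repeatedly — create a collection, verify via LIST, delete, verify it is gone — reduces to a few SolrJ calls. A sketch under the same assumptions as the earlier fragment ('client' is a connected CloudSolrClient; names are illustrative):

    // Create, then confirm the collection shows up in LIST.
    CollectionAdminRequest.createCollection("created_and_deleted", "conf", 1, 1)
        .process(client);
    boolean exists = CollectionAdminRequest.listCollections(client)
        .contains("created_and_deleted");   // true after a successful create

    // Delete, then confirm it is gone from LIST.
    CollectionAdminRequest.deleteCollection("created_and_deleted")
        .process(client);
    exists = CollectionAdminRequest.listCollections(client)
        .contains("created_and_deleted");   // false once the delete completes
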
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
deleted file mode 100644
index 57d38cd..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.lucene.util.LuceneTestCase.Nightly;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Nightly
-public class ConcurrentDeleteAndCreateCollectionTest extends SolrTestCaseJ4 {
-  
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private MiniSolrCloudCluster solrCluster;
-  
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    solrCluster = new MiniSolrCloudCluster(1, createTempDir(), buildJettyConfig("/solr"));
-  }
-  
-  @Override
-  @After
-  public void tearDown() throws Exception {
-    solrCluster.shutdown();
-    super.tearDown();
-  }
-  
-  public void testConcurrentCreateAndDeleteDoesNotFail() {
-    final AtomicReference<Exception> failure = new AtomicReference<>();
-    final int timeToRunSec = 30;
-    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[10];
-    for (int i = 0; i < threads.length; i++) {
-      final String collectionName = "collection" + i;
-      uploadConfig(configset("configset-2"), collectionName);
-      final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
-      final SolrClient solrClient = getHttpSolrClient(baseUrl);
-      threads[i] = new CreateDeleteSearchCollectionThread("create-delete-search-" + i, collectionName, collectionName, 
-          timeToRunSec, solrClient, failure);
-    }
-    
-    startAll(threads);
-    joinAll(threads);
-    
-    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
-  }
-  
-  public void testConcurrentCreateAndDeleteOverTheSameConfig() {
-    final String configName = "testconfig";
-    uploadConfig(configset("configset-2"), configName); // upload config once, to be used by all collections
-    final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
-    final AtomicReference<Exception> failure = new AtomicReference<>();
-    final int timeToRunSec = 30;
-    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[2];
-    for (int i = 0; i < threads.length; i++) {
-      final String collectionName = "collection" + i;
-      final SolrClient solrClient = getHttpSolrClient(baseUrl);
-      threads[i] = new CreateDeleteCollectionThread("create-delete-" + i, collectionName, configName,
-                                                    timeToRunSec, solrClient, failure);
-    }
-
-    startAll(threads);
-    joinAll(threads);
-
-    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
-  }
-  
-  private void uploadConfig(Path configDir, String configName) {
-    try {
-      solrCluster.uploadConfigSet(configDir, configName);
-    } catch (IOException | KeeperException | InterruptedException e) {
-      throw new RuntimeException(e);
-    }
-  }
-  
-  private void joinAll(final CreateDeleteCollectionThread[] threads) {
-    for (CreateDeleteCollectionThread t : threads) {
-      try {
-        t.joinAndClose();
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt(); // restore the interrupt flag rather than clearing it
-        throw new RuntimeException(e);
-      }
-    }
-  }
-  
-  private void startAll(final Thread[] threads) {
-    for (Thread t : threads) {
-      t.start();
-    }
-  }
-  
-  private static class CreateDeleteCollectionThread extends Thread {
-    protected final String collectionName;
-    protected final String configName;
-    protected final long timeToRunSec;
-    protected final SolrClient solrClient;
-    protected final AtomicReference<Exception> failure;
-    
-    public CreateDeleteCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
-        SolrClient solrClient, AtomicReference<Exception> failure) {
-      super(name);
-      this.collectionName = collectionName;
-      this.timeToRunSec = timeToRunSec;
-      this.solrClient = solrClient;
-      this.failure = failure;
-      this.configName = configName;
-    }
-    
-    @Override
-    public void run() {
-      final TimeOut timeout = new TimeOut(timeToRunSec, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-      while (! timeout.hasTimedOut() && failure.get() == null) {
-        doWork();
-      }
-    }
-    
-    protected void doWork() {
-      createCollection();
-      deleteCollection();
-    }
-    
-    protected void addFailure(Exception e) {
-      log.error("Add Failure", e);
-      synchronized (failure) {
-        if (failure.get() != null) {
-          failure.get().addSuppressed(e);
-        } else {
-          failure.set(e);
-        }
-      }
-    }
-    
-    private void createCollection() {
-      try {
-        final CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName,configName,1,1)
-                .process(solrClient);
-        if (response.getStatus() != 0) {
-          addFailure(new RuntimeException("failed to create collection " + collectionName));
-        }
-      } catch (Exception e) {
-        addFailure(e);
-      }
-      
-    }
-    
-    private void deleteCollection() {
-      try {
-        final CollectionAdminRequest.Delete deleteCollectionRequest
-          = CollectionAdminRequest.deleteCollection(collectionName);
-        final CollectionAdminResponse response = deleteCollectionRequest.process(solrClient);
-        if (response.getStatus() != 0) {
-          addFailure(new RuntimeException("failed to delete collection " + collectionName));
-        }
-      } catch (Exception e) {
-        addFailure(e);
-      }
-    }
-    
-    public void joinAndClose() throws InterruptedException {
-      try {
-        super.join(60000);
-      } finally {
-        IOUtils.closeQuietly(solrClient);
-      }
-    }
-  }
-  
-  private static class CreateDeleteSearchCollectionThread extends CreateDeleteCollectionThread {
-
-    public CreateDeleteSearchCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
-        SolrClient solrClient, AtomicReference<Exception> failure) {
-      super(name, collectionName, configName, timeToRunSec, solrClient, failure);
-    }
-    
-    @Override
-    protected void doWork() {
-      super.doWork();
-      searchNonExistingCollection();
-    }
-    
-    private void searchNonExistingCollection() {
-      try {
-        solrClient.query(collectionName, new SolrQuery("*"));
-      } catch (Exception e) {
-        if (!e.getMessage().contains("not found") && !e.getMessage().contains("Can not find")) {
-          addFailure(e);
-        }
-      }
-    }
-    
-  }
-  
-}

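The worker threads in the deleted test above all share one loop shape: keep calling doWork() until either a TimeOut expires or some thread has recorded a failure. A condensed sketch of that pattern, using the same TimeOut/TimeSource utilities the test imports:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicReference;
    import org.apache.solr.common.util.TimeSource;
    import org.apache.solr.util.TimeOut;

    // Shared failure slot: the first exception wins, later ones are
    // attached as suppressed exceptions (see addFailure above).
    AtomicReference<Exception> failure = new AtomicReference<>();
    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    while (!timeout.hasTimedOut() && failure.get() == null) {
      // doWork(): create the collection, delete it, and (in the search
      // variant) query it to check a deleted collection really is gone.
    }
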
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
deleted file mode 100644
index 63a3272..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.Map;
-
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.params.ShardParams._ROUTE_;
-
-/**
- * Tests the Custom Sharding API.
- */
-public class CustomCollectionTest extends SolrCloudTestCase {
-
-  private static final int NODE_COUNT = 4;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(NODE_COUNT)
-        .addConfig("conf", configset("cloud-dynamic"))
-        .configure();
-  }
-
-  @Before
-  public void ensureClusterEmpty() throws Exception {
-    cluster.deleteAllCollections();
-  }
-
-  @Test
-  public void testCustomCollectionsAPI() throws Exception {
-
-    final String collection = "implicitcoll";
-    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
-    int numShards = 3;
-    int maxShardsPerNode = (((numShards + 1) * replicationFactor) / NODE_COUNT) + 1;
-
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .process(cluster.getSolrClient());
-
-    DocCollection coll = getCollectionState(collection);
-    assertEquals("implicit", ((Map) coll.get(DOC_ROUTER)).get("name"));
-    assertNotNull(coll.getStr(REPLICATION_FACTOR));
-    assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
-    assertNull("A shard of a Collection configured with implicit router must have null range",
-        coll.getSlice("a").getRange());
-
-    new UpdateRequest()
-        .add("id", "6")
-        .add("id", "7")
-        .add("id", "8")
-        .withRoute("a")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-
-    cluster.getSolrClient().deleteByQuery(collection, "*:*");
-    cluster.getSolrClient().commit(collection, true, true);
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-
-    new UpdateRequest()
-        .add("id", "9")
-        .add("id", "10")
-        .add("id", "11")
-        .withRoute("c")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
-
-    // Test CREATESHARD
-    CollectionAdminRequest.createShard(collection, "x")
-        .process(cluster.getSolrClient());
-    waitForState("Expected shard 'x' to be active", collection, (n, c) -> {
-      if (c.getSlice("x") == null)
-        return false;
-      for (Replica r : c.getSlice("x")) {
-        if (r.getState() != Replica.State.ACTIVE)
-          return false;
-      }
-      return true;
-    });
-
-    new UpdateRequest()
-        .add("id", "66", _ROUTE_, "x")
-        .commit(cluster.getSolrClient(), collection);
-    // TODO - the local state is cached and causes the request to fail with 'unknown shard'
-    // assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "x")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testRouteFieldForImplicitRouter() throws Exception {
-
-    int numShards = 4;
-    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
-    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
-    String shard_fld = "shard_s";
-
-    final String collection = "withShardField";
-
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .setRouterField(shard_fld)
-        .process(cluster.getSolrClient());
-
-    new UpdateRequest()
-        .add("id", "6", shard_fld, "a")
-        .add("id", "7", shard_fld, "a")
-        .add("id", "8", shard_fld, "b")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(2, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testRouteFieldForHashRouter() throws Exception {
-    String collectionName = "routeFieldColl";
-    int numShards = 4;
-    int replicationFactor = 2;
-    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
-    String shard_fld = "shard_s";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .setRouterField(shard_fld)
-        .process(cluster.getSolrClient());
-
-    new UpdateRequest()
-        .add("id", "6", shard_fld, "a")
-        .add("id", "7", shard_fld, "a")
-        .add("id", "8", shard_fld, "b")
-        .commit(cluster.getSolrClient(), collectionName);
-
-    assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(2, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
-
-
-    cluster.getSolrClient().deleteByQuery(collectionName, "*:*");
-    cluster.getSolrClient().commit(collectionName);
-
-    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "100", shard_fld, "c!doc1"));
-    cluster.getSolrClient().commit(collectionName);
-    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c!")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testCreateShardRepFactor() throws Exception  {
-    final String collectionName = "testCreateShardRepFactor";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
-        .process(cluster.getSolrClient());
-
-    CollectionAdminRequest.createShard(collectionName, "x")
-        .process(cluster.getSolrClient());
-
-    waitForState("Not enough active replicas in shard 'x'", collectionName, (n, c) -> {
-      return c.getSlice("x").getReplicas().size() == 1;
-    });
-
-  }
-
-}
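
The routing tests above exercise Solr's two document-routing modes: with the implicit router, the route value (or route field) names a shard directly, while the default compositeId router hashes a "key!" prefix such as "c!doc1", so that _route_=c! targets the owning shard. A hedged SolrJ sketch of the compositeId case (the ZK address and collection name are placeholders, and the CloudSolrClient builder call may differ slightly between Solr versions):

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.SolrInputDocument;

    public class CompositeIdRoutingExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:9983")   // placeholder ZK address
            .build()) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "c!doc1");     // "c!" prefix: compositeId hashes on "c"
          client.add("routeFieldColl", doc);
          client.commit("routeFieldColl");

          // Restrict the query to the shard that owns route key "c".
          SolrQuery q = new SolrQuery("*:*");
          q.setParam("_route_", "c!");
          long hits = client.query("routeFieldColl", q).getResults().getNumFound();
          System.out.println("hits on route c!: " + hits);
        }
      }
    }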

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 8a46808..2775a0c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -17,8 +17,18 @@
 package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.Set;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
@@ -32,6 +42,7 @@ import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.cloud.Overseer.LeaderStatus;
 import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.DocRouter;
@@ -65,7 +76,18 @@ import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.isNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
 
@@ -572,7 +594,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
       }
     }
     
-    if (random().nextBoolean()) Collections.shuffle(createNodeList, OverseerCollectionMessageHandler.RANDOM);
+    if (random().nextBoolean()) Collections.shuffle(createNodeList, random());
 
     underTest = new OverseerCollectionConfigSetProcessorToBeTested(zkStateReaderMock,
         "1234", shardHandlerFactoryMock, ADMIN_PATH, workQueueMock, runningMapMock,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
index 54b66a0..9a86912 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.response.SolrResponseBase;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CommonAdminParams;
 import org.apache.solr.common.params.CommonParams;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java b/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
deleted file mode 100644
index a3fbb32..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-// Collects useful operations for testing the assignment of properties to individual replicas.
-// Could probably be expanded to do something creative with random slices and shards,
-// but for now this will do.
-public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
-
-  public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
-    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    for (int idx = 0; idx < paramsIn.length; idx += 2) {
-      params.set(paramsIn[idx], paramsIn[idx + 1]);
-    }
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    return client.request(request);
-  }
-
-  public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
-                                String property)
-      throws KeeperException, InterruptedException {
-    ClusterState clusterState = null;
-    Replica replica = null;
-    for (int idx = 0; idx < 300; ++idx) {
-      clusterState = client.getZkStateReader().getClusterState();
-      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
-      if (replica == null) {
-        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
-      }
-      if (StringUtils.isBlank(replica.getProperty(property))) return;
-      Thread.sleep(100);
-    }
-    fail("Property " + property + " not set correctly for collection/replica pair: " +
-        collectionName + "/" + replicaName + ". Replica props: " + replica.getProperties().toString() +
-        ". Cluster state is " + clusterState.toString());
-
-  }
-
-  // Verify that the given property on the named collection/replica pair reaches the
-  // expected value, retrying while the Overseer writes the updated state to ZK.
-  public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
-                         String replicaName, String property, String val)
-      throws InterruptedException, KeeperException {
-    Replica replica = null;
-    ClusterState clusterState = null;
-
-    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
-      clusterState = client.getZkStateReader().getClusterState();
-      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
-      if (replica == null) {
-        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
-      }
-      if (StringUtils.equals(val, replica.getProperty(property))) return;
-      Thread.sleep(100);
-    }
-
-    fail("Property '" + property + "' with value " + replica.getProperty(property) +
-        " not set correctly for collection/replica pair: " + collectionName + "/" + replicaName + " property map is " +
-        replica.getProperties().toString() + ".");
-
-  }
-
-  // Verify that
-  // 1> the property is set on only one replica in each slice, and
-  // 2> the property is balanced evenly across all the nodes hosting the collection.
-  public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
-                                    String property) throws KeeperException, InterruptedException {
-    verifyUnique(client, collectionName, property, true);
-  }
-
-  public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
-                            String property) throws KeeperException, InterruptedException {
-    verifyUnique(client, collectionName, property, false);
-  }
-
-  public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
-      throws KeeperException, InterruptedException {
-
-    DocCollection col = null;
-    for (int idx = 0; idx < 300; ++idx) {
-      ClusterState clusterState = client.getZkStateReader().getClusterState();
-
-      col = clusterState.getCollectionOrNull(collectionName);
-      if (col == null) {
-        fail("Could not find collection " + collectionName);
-      }
-      Map<String, Integer> counts = new HashMap<>();
-      Set<String> uniqueNodes = new HashSet<>();
-      boolean allSlicesHaveProp = true;
-      boolean badSlice = false;
-      for (Slice slice : col.getSlices()) {
-        boolean thisSliceHasProp = false;
-        int propCount = 0;
-        for (Replica replica : slice.getReplicas()) {
-          uniqueNodes.add(replica.getNodeName());
-          String propVal = replica.getProperty(property);
-          if (StringUtils.isNotBlank(propVal)) {
-            ++propCount;
-            if (counts.containsKey(replica.getNodeName()) == false) {
-              counts.put(replica.getNodeName(), 0);
-            }
-            int count = counts.get(replica.getNodeName());
-            thisSliceHasProp = true;
-            counts.put(replica.getNodeName(), count + 1);
-          }
-        }
-        badSlice = badSlice || (propCount > 1);
-        allSlicesHaveProp = allSlicesHaveProp && thisSliceHasProp;
-      }
-      if (balanced == false && badSlice == false) {
-        return;
-      }
-      if (allSlicesHaveProp && balanced) {
-        // Check that the properties are evenly distributed.
-        int minProps = col.getSlices().size() / uniqueNodes.size();
-        int maxProps = minProps;
-
-        if (col.getSlices().size() % uniqueNodes.size() > 0) {
-          ++maxProps;
-        }
-        boolean doSleep = false;
-        for (Map.Entry<String, Integer> ent : counts.entrySet()) {
-          if (ent.getValue() != minProps && ent.getValue() != maxProps) {
-            doSleep = true;
-          }
-        }
-
-        if (doSleep == false) {
-          assertTrue("We really shouldn't be calling this if there is no node with the property " + property,
-              counts.size() > 0);
-          return;
-        }
-      }
-      Thread.sleep(100);
-    }
-    fail("Collection " + collectionName + " does not have roles evenly distributed. Collection is: " + col.toString());
-  }
-
-}
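
All three verify* helpers in the file above share one idiom: poll the ZK-backed cluster state up to 300 times with a 100 ms sleep (roughly a 30-second budget) until the expected condition appears. A generic sketch of that polling loop (the helper name and signature are illustrative, not from the original file):

    import java.util.function.BooleanSupplier;

    public final class PollUtil {

      // Re-check `condition` every `intervalMs` milliseconds until it holds
      // or `attempts` runs out; returns whether the condition was ever met.
      public static boolean waitFor(BooleanSupplier condition, int attempts, long intervalMs)
          throws InterruptedException {
        for (int i = 0; i < attempts; i++) {
          if (condition.getAsBoolean()) {
            return true;
          }
          Thread.sleep(intervalMs);
        }
        return false;
      }

      public static void main(String[] args) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + 500;
        // Mirrors the tests' budget: 300 attempts x 100 ms ~= 30 seconds.
        boolean ok = waitFor(() -> System.currentTimeMillis() > deadline, 300, 100);
        System.out.println("condition met: " + ok);
      }
    }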