Posted to commits@lucene.apache.org by va...@apache.org on 2018/01/16 19:05:09 UTC

[03/15] lucene-solr:master: SOLR-11817: Move Collections API classes to their own package

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
new file mode 100644
index 0000000..eff0d8e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests the asynchronous variants of the Cloud Collections API.
+ */
+@Slow
+public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
+
+  private static final int MAX_TIMEOUT_SECONDS = 60;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+        .configure();
+  }
+
+  @Test
+  public void testSolrJAPICalls() throws Exception {
+
+    final CloudSolrClient client = cluster.getSolrClient();
+
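+    // processAndWait() submits the request with an async id, then polls the
+    // REQUESTSTATUS API until the task reaches a terminal state or the timeout
+    // elapses; the assertions below therefore check the reported terminal state.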
+    RequestStatusState state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("Recreating a collection with the same should have failed.", RequestStatusState.FAILED, state);
+
+    state = CollectionAdminRequest.addReplicaToShard("testasynccollectioncreation", "shard1")
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("Add replica did not complete", RequestStatusState.COMPLETED, state);
+
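+    // a split rewrites the shard's index into sub-shards, so allow twice the
+    // usual timeout before treating the operation as failed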
+    state = CollectionAdminRequest.splitShard("testasynccollectioncreation")
+        .setShardName("shard1")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS * 2);
+    assertEquals("Shard split did not complete. Last recorded state: " + state, RequestStatusState.COMPLETED, state);
+
+  }
+
+  @Test
+  public void testAsyncRequests() throws Exception {
+
+    final String collection = "testAsyncOperations";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    RequestStatusState state = CollectionAdminRequest.createCollection(collection,"conf1",1,1)
+        .setRouterName("implicit")
+        .setShards("shard1")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
+
+    //Add a few documents to shard1
+    int numDocs = TestUtil.nextInt(random(), 10, 100);
+    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
+    for (int i=0; i<numDocs; i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", i);
+      doc.addField("_route_", "shard1");
+      docs.add(doc);
+    }
+    client.add(collection, docs);
+    client.commit(collection);
+
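+    // With the implicit router the _route_ field names the target shard directly,
+    // so a query restricted to shards=shard1 should see every document added above.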
+    SolrQuery query = new SolrQuery("*:*");
+    query.set("shards", "shard1");
+    assertEquals(numDocs, client.query(collection, query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.reloadCollection(collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("ReloadCollection did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.createShard(collection,"shard2")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
+
+    //Add a doc to shard2 to make sure shard2 was created properly
+    SolrInputDocument doc = new SolrInputDocument();
+    doc.addField("id", numDocs + 1);
+    doc.addField("_route_", "shard2");
+    client.add(collection, doc);
+    client.commit(collection);
+    query = new SolrQuery("*:*");
+    query.set("shards", "shard2");
+    assertEquals(1, client.query(collection, query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.deleteShard(collection,"shard2").processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteShard did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("AddReplica did not complete", RequestStatusState.COMPLETED, state);
+
+    //the cloudClient's cluster state watch may take a couple of seconds to see the new replica
+    Slice shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
+    int count = 0;
+    while (shard1.getReplicas().size() != 2) {
+      if (count++ > 1000) {
+        fail("2nd Replica not reflecting in the cluster state");
+      }
+      Thread.sleep(100);
+    }
+
+    state = CollectionAdminRequest.createAlias("myalias",collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateAlias did not complete", RequestStatusState.COMPLETED, state);
+
+    query = new SolrQuery("*:*");
+    query.set("shards", "shard1");
+    assertEquals(numDocs, client.query("myalias", query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.deleteAlias("myalias")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteAlias did not complete", RequestStatusState.COMPLETED, state);
+
+    try {
+      client.query("myalias", query);
+      fail("Alias should not exist");
+    } catch (SolrException e) {
+      //expected
+    }
+
+    Replica replica = shard1.getReplicas().iterator().next();
+    for (String liveNode : client.getZkStateReader().getClusterState().getLiveNodes()) {
+      if (!replica.getNodeName().equals(liveNode)) {
+        state = new CollectionAdminRequest.MoveReplica(collection, replica.getName(), liveNode)
+            .processAndWait(client, MAX_TIMEOUT_SECONDS);
+        assertSame("MoveReplica did not complete", RequestStatusState.COMPLETED, state);
+        break;
+      }
+    }
+
+    shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
+    String replicaName = shard1.getReplicas().iterator().next().getName();
+    state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteReplica did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.deleteCollection(collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteCollection did not complete", RequestStatusState.COMPLETED, state);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
new file mode 100644
index 0000000..213e554
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
@@ -0,0 +1,686 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import javax.management.MBeanServer;
+import javax.management.MBeanServerFactory;
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.lang.management.ManagementFactory;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.commons.io.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrInfoBean.Category;
+import org.apache.solr.util.LogLevel;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+
+/**
+ * Tests the Cloud Collections API.
+ */
+@Slow
+public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void beforeCollectionsAPIDistributedZkTest() {
+    // we don't want this test to have zk timeouts
+    System.setProperty("zkClientTimeout", "240000");
+    TestInjection.randomDelayInCoreCreation = "true:20";
+    System.setProperty("validateAfterInactivity", "200");
+  }
+
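+  // Note: the custom solr.xml (solr-jmxreporter.xml) presumably enables the JMX
+  // metrics reporter; the MBean inspection in checkNoTwoShardsUseTheSameIndexDir()
+  // depends on the per-core beans it publishes.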
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    String solrXml = IOUtils.toString(CollectionsAPIDistributedZkTest.class.getResourceAsStream("/solr/solr-jmxreporter.xml"), "UTF-8");
+    configureCluster(4)
+        .addConfig("conf", configset("cloud-minimal"))
+        .addConfig("conf2", configset("cloud-minimal-jmx"))
+        .withSolrXml(solrXml)
+        .configure();
+  }
+
+  @Before
+  public void clearCluster() throws Exception {
+    try {
+      cluster.deleteAllCollections();
+    } finally {
+      System.clearProperty("zkClientTimeout");
+    }
+  }
+
+  @Test
+  public void testCreationAndDeletion() throws Exception {
+
+    String collectionName = "created_and_deleted";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+        .contains(collectionName));
+
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+
+  }
+
+  @Test
+  public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
+    
+    String collectionName = "out_of_sync_collection";
+
+    // manually create a collections zknode
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+    
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+
+  }
+
+  @Test
+  public void deletePartiallyCreatedCollection() throws Exception {
+
+    final String collectionName = "halfdeletedcollection";
+
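+    // an empty createNodeSet registers the collection without placing any
+    // replicas, deliberately leaving it only partially created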
+    assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    String dataDir = createTempDir().toFile().getAbsolutePath();
+    // create a core that simulates something left over from a partially-created collection
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard(collectionName, "shard1")
+        .setDataDir(dataDir)
+        .process(cluster.getSolrClient()).isSuccess());
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+  }
+
+  @Test
+  public void deleteCollectionOnlyInZk() throws Exception {
+
+    final String collectionName = "onlyinzk";
+
+    // create the collections node, but nothing else
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    // delete via API - should remove collections node
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+    
+    // now creating that collection should work
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+  }
+
+  @Test
+  public void testBadActionNames() throws Exception {
+
+    // try a bad action
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", "BADACTION");
+    String collectionName = "badactioncollection";
+    params.set("name", collectionName);
+    params.set("numShards", 2);
+    final QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testMissingRequiredParameters() {
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("numShards", 2);
+    // missing required collection parameter
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+  }
+
+  @Test
+  public void testTooManyReplicas() {
+
+    CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(req);
+    });
+
+  }
+
+  @Test
+  public void testMissingNumShards() {
+
+    // No numShards should fail
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("name", "acollection");
+    params.set(REPLICATION_FACTOR, 10);
+    params.set("collection.configName", "conf");
+
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testZeroNumShards() {
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("name", "acollection");
+    params.set(REPLICATION_FACTOR, 10);
+    params.set("numShards", 0);
+    params.set("collection.configName", "conf");
+
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testCreateShouldFailOnExistingCore() throws Exception {
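+    // Pre-create cores whose names collide with the ones "halfcollection" would
+    // use, so the CREATE below succeeds on one node and fails on the other.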
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
+        .setNode(cluster.getJettySolrRunner(0).getNodeName())
+        .setCoreName("halfcollection_shard1_replica_n1")
+        .process(cluster.getSolrClient()).isSuccess());
+
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
+        .setNode(cluster.getJettySolrRunner(1).getNodeName())
+        .setCoreName("halfcollection_shard1_replica_n1")
+        .process(cluster.getSolrClient()).isSuccess());
+
+    String nn1 = cluster.getJettySolrRunner(0).getNodeName();
+    String nn2 = cluster.getJettySolrRunner(1).getNodeName();
+
+    CollectionAdminResponse resp = CollectionAdminRequest.createCollection("halfcollection", "conf", 2, 1)
+        .setCreateNodeSet(nn1 + "," + nn2)
+        .process(cluster.getSolrClient());
+    
+    SimpleOrderedMap success = (SimpleOrderedMap) resp.getResponse().get("success");
+    SimpleOrderedMap failure = (SimpleOrderedMap) resp.getResponse().get("failure");
+
+    assertNotNull(resp.toString(), success);
+    assertNotNull(resp.toString(), failure);
+    
+    String val1 = success.getVal(0).toString();
+    String val2 = failure.getVal(0).toString();
+    assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
+  }
+
+  @Test
+  public void testNoConfigSetExist() throws Exception {
+
+    expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
+          .process(cluster.getSolrClient());
+    });
+
+    TimeUnit.MILLISECONDS.sleep(1000);
+    // the collection should not have been created
+    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
+  }
+
+  @Test
+  public void testCoresAreDistributedAcrossNodes() throws Exception {
+
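+    // 2 shards x 2 replicas on a 4-node cluster: every live node should end up
+    // hosting a core, so removing each replica's node should empty the list.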
+    CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
+        .process(cluster.getSolrClient());
+
+    Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+
+    List<String> createNodeList = new ArrayList<>();
+    createNodeList.addAll(liveNodes);
+
+    DocCollection collection = getCollectionState("nodes_used_collection");
+    for (Slice slice : collection.getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
+        createNodeList.remove(replica.getNodeName());
+      }
+    }
+
+    assertEquals(createNodeList.toString(), 0, createNodeList.size());
+
+  }
+
+  @Test
+  public void testDeleteNonExistentCollection() throws Exception {
+
+    SolrException e = expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
+    });
+
+    // create another collection should still work
+    CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
+        .process(cluster.getSolrClient());
+    waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
+        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+  }
+
+  @Test
+  public void testSpecificConfigsets() throws Exception {
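+    // the configset name is stored as a property on the collection's znode,
+    // so it can be read straight back out of ZooKeeper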
+    CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
+    byte[] data = zkClient().getData(ZkStateReader.COLLECTIONS_ZKNODE + "/" + "withconfigset2", null, null, true);
+    assertNotNull(data);
+    ZkNodeProps props = ZkNodeProps.load(data);
+    String configName = props.getStr(ZkController.CONFIGNAME_PROP);
+    assertEquals("conf2", configName);
+  }
+
+  @Test
+  public void testMaxNodesPerShard() throws Exception {
+
+    // test maxShardsPerNode
+    int numLiveNodes = cluster.getJettySolrRunners().size();
+    int numShards = (numLiveNodes/2) + 1;
+    int replicationFactor = 2;
+    int maxShardsPerNode = 1;
+
+    SolrException e = expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
+          .process(cluster.getSolrClient());
+    });
+
+  }
+
+  @Test
+  public void testCreateNodeSet() throws Exception {
+
+    JettySolrRunner jetty1 = cluster.getRandomJetty(random());
+    JettySolrRunner jetty2 = cluster.getRandomJetty(random());
+
+    List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
+
+    CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
+        .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState("nodeset_collection");
+    for (Replica replica : collectionState.getReplicas()) {
+      String replicaUrl = replica.getCoreUrl();
+      boolean matchingJetty = false;
+      for (String jettyUrl : baseUrls) {
+        if (replicaUrl.startsWith(jettyUrl))
+          matchingJetty = true;
+      }
+      if (!matchingJetty)
+        fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
+    }
+
+  }
+
+  @Test
+  public void testCollectionsAPI() throws Exception {
+
+    // create new collections rapid fire
+    int cnt = random().nextInt(TEST_NIGHTLY ? 3 : 1) + 1;
+    CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
+
+    for (int i = 0; i < cnt; i++) {
+
+      int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
+      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
+      int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
+
+      createRequests[i]
+          = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
+          .setMaxShardsPerNode(maxShardsPerNode);
+      createRequests[i].processAsync(cluster.getSolrClient());
+    }
+
+    for (int i = 0; i < cnt; i++) {
+      String collectionName = "awhollynewcollection_" + i;
+      final int j = i;
+      waitForState("Expected to see collection " + collectionName, collectionName,
+          (n, c) -> {
+            CollectionAdminRequest.Create req = createRequests[j];
+            return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
+          });
+    }
+
+    cluster.injectChaos(random());
+
+    for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
+      checkInstanceDirs(cluster.getJettySolrRunner(i));
+    }
+
+    String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
+
+    new UpdateRequest()
+        .add("id", "6")
+        .add("id", "7")
+        .add("id", "8")
+        .commit(cluster.getSolrClient(), collectionName);
+    TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+    while (!timeOut.hasTimedOut()) {
+      try {
+        long numFound = cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound();
+        assertEquals(3, numFound);
+        break;
+      } catch (Exception e) {
+        // the query node may briefly hold stale cluster state
+        log.info("Error querying " + collectionName, e);
+        Thread.sleep(500);
+      }
+    }
+    if (timeOut.hasTimedOut()) {
+      fail("Timeout on query " + collectionName);
+    }
+
+    checkNoTwoShardsUseTheSameIndexDir();
+  }
+
+  @Test
+  public void testCollectionReload() throws Exception {
+
+    final String collectionName = "reloaded_collection";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
+
+    // get core open times
+    Map<String, Long> urlToTimeBefore = new HashMap<>();
+    collectStartTimes(collectionName, urlToTimeBefore);
+    assertTrue(urlToTimeBefore.size() > 0);
+
+    CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
+
+    // reloads may take a short while
+    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
+    assertTrue("some core start times did not change on reload", allTimesAreCorrect);
+  }
+
+  private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
+    CoreContainer cores = jetty.getCoreContainer();
+    Collection<SolrCore> theCores = cores.getCores();
+    for (SolrCore core : theCores) {
+
+      // look for core props file
+      Path instancedir = (Path) core.getResourceLoader().getInstancePath();
+      assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
+
+      Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
+
+      assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
+
+    }
+  }
+
+  private boolean waitForReloads(String collectionName, Map<String,Long> urlToTimeBefore) throws SolrServerException, IOException {
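+    // a reload reopens each core and bumps its start time, so poll until every
+    // core reports a start time strictly newer than the pre-reload snapshot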
+
+    TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+
+    boolean allTimesAreCorrect = false;
+    while (! timeout.hasTimedOut()) {
+      Map<String,Long> urlToTimeAfter = new HashMap<>();
+      collectStartTimes(collectionName, urlToTimeAfter);
+      
+      boolean retry = false;
+      Set<Entry<String,Long>> entries = urlToTimeBefore.entrySet();
+      for (Entry<String,Long> entry : entries) {
+        Long beforeTime = entry.getValue();
+        Long afterTime = urlToTimeAfter.get(entry.getKey());
+        assertNotNull(afterTime);
+        if (afterTime <= beforeTime) {
+          retry = true;
+          break;
+        }
+
+      }
+      if (!retry) {
+        allTimesAreCorrect = true;
+        break;
+      }
+    }
+    return allTimesAreCorrect;
+  }
+
+  private void collectStartTimes(String collectionName, Map<String,Long> urlToTime)
+      throws SolrServerException, IOException {
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    if (collectionState != null) {
+      for (Slice shard : collectionState) {
+        for (Replica replica : shard) {
+          ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
+          CoreStatus coreStatus;
+          try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
+            coreStatus = CoreAdminRequest.getCoreStatus(coreProps.getCoreName(), false, server);
+          }
+          long before = coreStatus.getCoreStartTime().getTime();
+          urlToTime.put(coreProps.getCoreUrl(), before);
+        }
+      }
+    } else {
+      throw new IllegalArgumentException("Could not find collection " + collectionName);
+    }
+  }
+  
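+  // Walks every registered MBeanServer looking for the per-core "indexDir"
+  // attributes published over JMX, and fails if two cores report the same
+  // directory, which would mean two shards share an index.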
+  private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
+    Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
+    
+    List<MBeanServer> servers = new LinkedList<>();
+    servers.add(ManagementFactory.getPlatformMBeanServer());
+    servers.addAll(MBeanServerFactory.findMBeanServer(null));
+    for (final MBeanServer server : servers) {
+      Set<ObjectName> mbeans = new HashSet<>();
+      mbeans.addAll(server.queryNames(null, null));
+      for (final ObjectName mbean : mbeans) {
+
+        try {
+          Map<String, String> props = mbean.getKeyPropertyList();
+          String category = props.get("category");
+          String name = props.get("name");
+          if (Category.CORE.toString().equals(category) && "indexDir".equals(name)) {
+            String indexDir = server.getAttribute(mbean, "Value").toString();
+            String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
+            if (!indexDirToShardNamesMap.containsKey(indexDir)) {
+              indexDirToShardNamesMap.put(indexDir, new HashSet<>());
+            }
+            indexDirToShardNamesMap.get(indexDir).add(key);
+          }
+        } catch (Exception e) {
+          // ignore, just continue - probably a "Value" attribute
+          // not found
+        }
+      }
+    }
+    
+    assertTrue(
+        "Something is broken in the assert for no shards using the same indexDir - probably something was changed in the attributes published in the MBean of "
+            + SolrCore.class.getSimpleName() + " : " + indexDirToShardNamesMap,
+        indexDirToShardNamesMap.size() > 0);
+    for (Entry<String,Set<String>> entry : indexDirToShardNamesMap.entrySet()) {
+      if (entry.getValue().size() > 1) {
+        fail("We have shards using the same indexDir. E.g. shards "
+            + entry.getValue().toString() + " all use indexDir "
+            + entry.getKey());
+      }
+    }
+
+  }
+
+  @Test
+  @LogLevel("org.apache.solr.cloud=DEBUG")
+  public void addReplicaTest() throws Exception {
+    String collectionName = "addReplicaColl";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+        .setMaxShardsPerNode(4)
+        .process(cluster.getSolrClient());
+
+    ArrayList<String> nodeList
+        = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+    Collections.shuffle(nodeList, random());
+
+    CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(nodeList.get(0))
+        .process(cluster.getSolrClient());
+    Replica newReplica = grabNewReplica(response, getCollectionState(collectionName));
+
+    assertEquals("Replica should be created on the right node",
+        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
+        newReplica.getStr(ZkStateReader.BASE_URL_PROP));
+
+    Path instancePath = createTempDir();
+    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
+        .process(cluster.getSolrClient());
+    newReplica = grabNewReplica(response, getCollectionState(collectionName));
+    assertNotNull(newReplica);
+
+    try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
+      NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
+      String instanceDirStr = (String) coreStatus.get("instanceDir");
+      assertEquals(instancePath.toString(), instanceDirStr);
+    }
+
+    //Test to make sure we can't create another replica with an existing core_name of that collection
+    String coreName = newReplica.getStr(CORE_NAME_PROP);
+    SolrException e = expectThrows(SolrException.class, () -> {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", "addreplica");
+      params.set("collection", collectionName);
+      params.set("shard", "shard1");
+      params.set("name", coreName);
+      QueryRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      cluster.getSolrClient().request(request);
+    });
+
+    assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
+
+    // Check that specifying property.name works. DO NOT remove this when the "name" property is
+    // deprecated for ADDREPLICA; this exercises "property.name". See SOLR-7132.
+    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty(CoreAdminParams.NAME, "propertyDotName")
+        .process(cluster.getSolrClient());
+
+    newReplica = grabNewReplica(response, getCollectionState(collectionName));
+    assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
+
+  }
+
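+  // The ADDREPLICA response only reports the new core's name, so map it back to
+  // a Replica by matching core names in the current cluster state.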
+  private Replica grabNewReplica(CollectionAdminResponse response, DocCollection docCollection) {
+    String replicaName = response.getCollectionCoresStatus().keySet().iterator().next();
+    Optional<Replica> optional = docCollection.getReplicas().stream()
+        .filter(replica -> replicaName.equals(replica.getCoreName()))
+        .findAny();
+    if (optional.isPresent()) {
+      return optional.get();
+    }
+    throw new AssertionError("Can not find " + replicaName + " from " + docCollection);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
new file mode 100644
index 0000000..1d0036e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.nio.file.Path;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.lucene.util.LuceneTestCase.Nightly;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.cloud.MiniSolrCloudCluster;
+import org.apache.solr.common.util.IOUtils;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Nightly
+public class ConcurrentDeleteAndCreateCollectionTest extends SolrTestCaseJ4 {
+  
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  
+  private MiniSolrCloudCluster solrCluster;
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    solrCluster = new MiniSolrCloudCluster(1, createTempDir(), buildJettyConfig("/solr"));
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    solrCluster.shutdown();
+    super.tearDown();
+  }
+  
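+  // Each worker thread loops create/delete against its own collection for a fixed
+  // wall-clock budget; the first failure wins the AtomicReference and any later
+  // ones are attached as suppressed exceptions (see addFailure()).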
+  public void testConcurrentCreateAndDeleteDoesNotFail() {
+    final AtomicReference<Exception> failure = new AtomicReference<>();
+    final int timeToRunSec = 30;
+    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[10];
+    for (int i = 0; i < threads.length; i++) {
+      final String collectionName = "collection" + i;
+      uploadConfig(configset("configset-2"), collectionName);
+      final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
+      final SolrClient solrClient = getHttpSolrClient(baseUrl);
+      threads[i] = new CreateDeleteSearchCollectionThread("create-delete-search-" + i, collectionName, collectionName, 
+          timeToRunSec, solrClient, failure);
+    }
+    
+    startAll(threads);
+    joinAll(threads);
+    
+    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
+  }
+  
+  public void testConcurrentCreateAndDeleteOverTheSameConfig() {
+    final String configName = "testconfig";
+    uploadConfig(configset("configset-2"), configName); // upload config once, to be used by all collections
+    final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
+    final AtomicReference<Exception> failure = new AtomicReference<>();
+    final int timeToRunSec = 30;
+    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[2];
+    for (int i = 0; i < threads.length; i++) {
+      final String collectionName = "collection" + i;
+      final SolrClient solrClient = getHttpSolrClient(baseUrl);
+      threads[i] = new CreateDeleteCollectionThread("create-delete-" + i, collectionName, configName,
+                                                    timeToRunSec, solrClient, failure);
+    }
+
+    startAll(threads);
+    joinAll(threads);
+
+    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
+  }
+  
+  private void uploadConfig(Path configDir, String configName) {
+    try {
+      solrCluster.uploadConfigSet(configDir, configName);
+    } catch (IOException | KeeperException | InterruptedException e) {
+      throw new RuntimeException(e);
+    }
+  }
+  
+  private void joinAll(final CreateDeleteCollectionThread[] threads) {
+    for (CreateDeleteCollectionThread t : threads) {
+      try {
+        t.joinAndClose();
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new RuntimeException(e);
+      }
+    }
+  }
+  
+  private void startAll(final Thread[] threads) {
+    for (Thread t : threads) {
+      t.start();
+    }
+  }
+  
+  private static class CreateDeleteCollectionThread extends Thread {
+    protected final String collectionName;
+    protected final String configName;
+    protected final long timeToRunSec;
+    protected final SolrClient solrClient;
+    protected final AtomicReference<Exception> failure;
+    
+    public CreateDeleteCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
+        SolrClient solrClient, AtomicReference<Exception> failure) {
+      super(name);
+      this.collectionName = collectionName;
+      this.timeToRunSec = timeToRunSec;
+      this.solrClient = solrClient;
+      this.failure = failure;
+      this.configName = configName;
+    }
+    
+    @Override
+    public void run() {
+      final TimeOut timeout = new TimeOut(timeToRunSec, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+      while (! timeout.hasTimedOut() && failure.get() == null) {
+        doWork();
+      }
+    }
+    
+    protected void doWork() {
+      createCollection();
+      deleteCollection();
+    }
+    
+    protected void addFailure(Exception e) {
+      log.error("Add Failure", e);
+      synchronized (failure) {
+        if (failure.get() != null) {
+          failure.get().addSuppressed(e);
+        } else {
+          failure.set(e);
+        }
+      }
+    }
+    
+    private void createCollection() {
+      try {
+        final CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName,configName,1,1)
+                .process(solrClient);
+        if (response.getStatus() != 0) {
+          addFailure(new RuntimeException("failed to create collection " + collectionName));
+        }
+      } catch (Exception e) {
+        addFailure(e);
+      }
+      
+    }
+    
+    private void deleteCollection() {
+      try {
+        final CollectionAdminRequest.Delete deleteCollectionRequest
+          = CollectionAdminRequest.deleteCollection(collectionName);
+        final CollectionAdminResponse response = deleteCollectionRequest.process(solrClient);
+        if (response.getStatus() != 0) {
+          addFailure(new RuntimeException("failed to delete collection " + collectionName));
+        }
+      } catch (Exception e) {
+        addFailure(e);
+      }
+    }
+    
+    public void joinAndClose() throws InterruptedException {
+      try {
+        super.join(60000);
+      } finally {
+        IOUtils.closeQuietly(solrClient);
+      }
+    }
+  }
+  
+  private static class CreateDeleteSearchCollectionThread extends CreateDeleteCollectionThread {
+
+    public CreateDeleteSearchCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
+        SolrClient solrClient, AtomicReference<Exception> failure) {
+      super(name, collectionName, configName, timeToRunSec, solrClient, failure);
+    }
+    
+    @Override
+    protected void doWork() {
+      super.doWork();
+      searchNonExistingCollection();
+    }
+    
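+    // a query can race a concurrent re-create, so only errors other than
+    // "collection not found" count as real failures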
+    private void searchNonExistingCollection() {
+      try {
+        solrClient.query(collectionName, new SolrQuery("*"));
+      } catch (Exception e) {
+        if (!e.getMessage().contains("not found") && !e.getMessage().contains("Can not find")) {
+          addFailure(e);
+        }
+      }
+    }
+    
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
new file mode 100644
index 0000000..654c7e9
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.Map;
+
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.params.ShardParams._ROUTE_;
+
+/**
+ * Tests the Custom Sharding API.
+ */
+public class CustomCollectionTest extends SolrCloudTestCase {
+
+  private static final int NODE_COUNT = 4;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(NODE_COUNT)
+        .addConfig("conf", configset("cloud-dynamic"))
+        .configure();
+  }
+
+  @Before
+  public void ensureClusterEmpty() throws Exception {
+    cluster.deleteAllCollections();
+  }
+
+  @Test
+  public void testCustomCollectionsAPI() throws Exception {
+
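+    // createCollectionWithImplicitRouter names the shards explicitly (a,b,c) and
+    // routes documents by their _route_ value rather than by hash range; the
+    // (numShards + 1) in the maxShardsPerNode computation leaves head-room for
+    // the extra shard "x" created further down.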
+    final String collection = "implicitcoll";
+    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
+    int numShards = 3;
+    int maxShardsPerNode = (((numShards + 1) * replicationFactor) / NODE_COUNT) + 1;
+
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .process(cluster.getSolrClient());
+
+    DocCollection coll = getCollectionState(collection);
+    assertEquals("implicit", ((Map) coll.get(DOC_ROUTER)).get("name"));
+    assertNotNull(coll.getStr(REPLICATION_FACTOR));
+    assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
+    assertNull("A shard of a Collection configured with implicit router must have null range",
+        coll.getSlice("a").getRange());
+
+    new UpdateRequest()
+        .add("id", "6")
+        .add("id", "7")
+        .add("id", "8")
+        .withRoute("a")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+
+    cluster.getSolrClient().deleteByQuery(collection, "*:*");
+    cluster.getSolrClient().commit(collection, true, true);
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+
+    new UpdateRequest()
+        .add("id", "9")
+        .add("id", "10")
+        .add("id", "11")
+        .withRoute("c")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
+
+    //Testing CREATESHARD
+    CollectionAdminRequest.createShard(collection, "x")
+        .process(cluster.getSolrClient());
+    waitForState("Expected shard 'x' to be active", collection, (n, c) -> {
+      if (c.getSlice("x") == null)
+        return false;
+      for (Replica r : c.getSlice("x")) {
+        if (r.getState() != Replica.State.ACTIVE)
+          return false;
+      }
+      return true;
+    });
+
+    new UpdateRequest()
+        .add("id", "66", _ROUTE_, "x")
+        .commit(cluster.getSolrClient(), collection);
+    // TODO - the local state is cached and causes the request to fail with 'unknown shard'
+    // assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "x")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testRouteFieldForImplicitRouter() throws Exception {
+
+    int numShards = 4;
+    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
+    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
+    String shard_fld = "shard_s";
+
+    final String collection = "withShardField";
+
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .setRouterField(shard_fld)
+        .process(cluster.getSolrClient());
+
+    new UpdateRequest()
+        .add("id", "6", shard_fld, "a")
+        .add("id", "7", shard_fld, "a")
+        .add("id", "8", shard_fld, "b")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(2, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testRouteFieldForHashRouter() throws Exception {
+    String collectionName = "routeFieldColl";
+    int numShards = 4;
+    int replicationFactor = 2;
+    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
+    String shard_fld = "shard_s";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .setRouterField(shard_fld)
+        .process(cluster.getSolrClient());
+
+    new UpdateRequest()
+        .add("id", "6", shard_fld, "a")
+        .add("id", "7", shard_fld, "a")
+        .add("id", "8", shard_fld, "b")
+        .commit(cluster.getSolrClient(), collectionName);
+
+    assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(2, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
+
+    cluster.getSolrClient().deleteByQuery(collectionName, "*:*");
+    cluster.getSolrClient().commit(collectionName);
+
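+    // With a router.field set, a value like "c!doc1" is hashed with composite-id
+    // semantics, so a query with _ROUTE_ set to "c!" should reach the slice that
+    // received the document.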
+    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "100", shard_fld, "c!doc1"));
+    cluster.getSolrClient().commit(collectionName);
+    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c!")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testCreateShardRepFactor() throws Exception  {
+    final String collectionName = "testCreateShardRepFactor";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
+        .process(cluster.getSolrClient());
+
+    CollectionAdminRequest.createShard(collectionName, "x")
+        .process(cluster.getSolrClient());
+
+    waitForState("Not enough active replicas in shard 'x'", collectionName, (n, c) -> {
+      return c.getSlice("x").getReplicas().size() == 1;
+    });
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
new file mode 100644
index 0000000..ae83ebf
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Metric;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.util.BadHdfsThreadsFilter;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+@Nightly
+@ThreadLeakFilters(defaultFilters = true, filters = {
+    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
+})
+public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
+
+  private static MiniDFSCluster dfsCluster;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
+    System.setProperty("tests.hdfs.numdatanodes", "1");
+
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
+
+    ZkConfigManager configManager = new ZkConfigManager(zkClient());
+    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
+    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf2");
+
+    System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
+  }
+
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    cluster.shutdown(); // need to close before the MiniDFSCluster
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+    System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    System.clearProperty("tests.hdfs.numdatanodes");
+    System.clearProperty("solr.hdfs.home");
+  }
+
+  @Test
+  public void moveReplicaTest() throws Exception {
+    cluster.waitForAllNodes(5000);
+    String coll = "movereplicatest_coll";
+
+    CloudSolrClient cloudClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
+    create.setMaxShardsPerNode(2);
+    cloudClient.request(create);
+
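+    // Index a few documents so the moved replica has index data to carry over.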
+    for (int i = 0; i < 10; i++) {
+      cloudClient.add(coll, sdoc("id",String.valueOf(i)));
+      cloudClient.commit(coll);
+    }
+
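+    // Pick a random slice and, within it, a non-leader replica to move.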
+    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
+    Collections.shuffle(slices, random());
+    Slice slice = null;
+    Replica replica = null;
+    for (Slice s : slices) {
+      slice = s;
+      for (Replica r : s.getReplicas()) {
+        if (s.getLeader() != r) {
+          replica = r;
+        }
+      }
+    }
+    String dataDir = getDataDir(replica);
+
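+    // Choose any live node other than the one currently hosting the replica.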
+    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    ArrayList<String> l = new ArrayList<>(liveNodes);
+    Collections.shuffle(l, random());
+    String targetNode = null;
+    for (String node : l) { // iterate the shuffled list so the target node is chosen at random
+      if (!replica.getNodeName().equals(node)) {
+        targetNode = node;
+        break;
+      }
+    }
+    assertNotNull(targetNode);
+
+    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
+    moveReplica.process(cloudClient);
+
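+    // After the move the source node should host no cores, while the target
+    // node hosts two.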
+    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
+    checkNumOfCores(cloudClient, targetNode, 2);
+
+    waitForState("Wait for recovery finish failed",coll, clusterShape(2,2));
+    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
+    boolean found = false;
+    for (Replica newReplica : slice.getReplicas()) {
+      if (getDataDir(newReplica).equals(dataDir)) {
+        found = true;
+      }
+    }
+    assertTrue(found);
+
+    // data dir is reused so replication will be skipped
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
+      List<String> registryNames = manager.registryNames().stream()
+          .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
+      for (String registry : registryNames) {
+        Map<String, Metric> metrics = manager.registry(registry).getMetrics();
+        Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
+        if (counter != null) {
+          assertEquals(0, counter.getCount());
+        }
+      }
+    }
+  }
+
+  private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
+    assertEquals(nodeName + " does not have expected number of cores", expectedCores, getNumOfCores(cloudClient, nodeName));
+  }
+
+  private int getNumOfCores(CloudSolrClient cloudClient, String nodeName) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
+      return status.getCoreStatus().size();
+    }
+  }
+
+  private String getDataDir(Replica replica) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(replica.getBaseUrl())) {
+      CoreStatus status = CoreAdminRequest.getCoreStatus(replica.getCoreName(), coreclient);
+      return status.getDataDirectory();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
new file mode 100644
index 0000000..6f7e717
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+// Collect useful operations for testing assigning properties to individual replicas
+// Could probably expand this to do something creative with getting random slices
+// and shards, but for now this will do.
+public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
+
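+  // Sends a Collections API request built from alternating key/value pairs.
+  // For example (illustrative parameter values only):
+  //   doPropertyAction(client,
+  //       "action", "ADDREPLICAPROP",
+  //       "collection", "coll1", "shard", "shard1", "replica", "core_node1",
+  //       "property", "preferredLeader", "property.value", "true");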
+  public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
+    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    for (int idx = 0; idx < paramsIn.length; idx += 2) {
+      params.set(paramsIn[idx], paramsIn[idx + 1]);
+    }
+    QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    return client.request(request);
+  }
+
+  public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
+                                String property)
+      throws KeeperException, InterruptedException {
+    ClusterState clusterState = null;
+    Replica replica = null;
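+    // Keep trying while Overseer writes the ZK state for up to 30 seconds.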
+    for (int idx = 0; idx < 300; ++idx) {
+      clusterState = client.getZkStateReader().getClusterState();
+      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
+      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
+      if (replica == null) {
+        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
+      }
+      if (StringUtils.isBlank(replica.getProperty(property))) return;
+      Thread.sleep(100);
+    }
+    fail("Property " + property + " not set correctly for collection/replica pair: " +
+        collectionName + "/" + replicaName + ". Replica props: " + replica.getProperties().toString() +
+        ". Cluster state is " + clusterState.toString());
+
+  }
+
+  // Verify that the given replica of the given collection reports the expected
+  // value for the property, retrying while the Overseer writes the ZK state.
+  public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
+                         String replicaName, String property, String val)
+      throws InterruptedException, KeeperException {
+    Replica replica = null;
+    ClusterState clusterState = null;
+
+    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
+      clusterState = client.getZkStateReader().getClusterState();
+      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
+      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
+      if (replica == null) {
+        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
+      }
+      if (StringUtils.equals(val, replica.getProperty(property))) return;
+      Thread.sleep(100);
+    }
+
+    fail("Property '" + property + "' with value " + replica.getProperty(property) +
+        " not set correctly for collection/replica pair: " + collectionName + "/" + replicaName + " property map is " +
+        replica.getProperties().toString() + ".");
+
+  }
+
+  // Verify that
+  // 1> the property is only set once in all the replicas in a slice.
+  // 2> the property is balanced evenly across all the nodes hosting the collection.
+  public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
+                                    String property) throws KeeperException, InterruptedException {
+    verifyUnique(client, collectionName, property, true);
+  }
+
+  public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
+                            String property) throws KeeperException, InterruptedException {
+    verifyUnique(client, collectionName, property, false);
+  }
+
+  public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
+      throws KeeperException, InterruptedException {
+
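+    // Poll the cluster state for up to 30 seconds. A slice may have the property
+    // on at most one replica; when 'balanced' is true the property must also be
+    // spread evenly across the nodes hosting the collection.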
+    DocCollection col = null;
+    for (int idx = 0; idx < 300; ++idx) {
+      ClusterState clusterState = client.getZkStateReader().getClusterState();
+
+      col = clusterState.getCollectionOrNull(collectionName);
+      if (col == null) {
+        fail("Could not find collection " + collectionName);
+      }
+      Map<String, Integer> counts = new HashMap<>();
+      Set<String> uniqueNodes = new HashSet<>();
+      boolean allSlicesHaveProp = true;
+      boolean badSlice = false;
+      for (Slice slice : col.getSlices()) {
+        boolean thisSliceHasProp = false;
+        int propCount = 0;
+        for (Replica replica : slice.getReplicas()) {
+          uniqueNodes.add(replica.getNodeName());
+          String propVal = replica.getProperty(property);
+          if (StringUtils.isNotBlank(propVal)) {
+            ++propCount;
+            thisSliceHasProp = true;
+            counts.merge(replica.getNodeName(), 1, Integer::sum);
+          }
+        }
+        badSlice = badSlice || (propCount > 1);
+        allSlicesHaveProp = allSlicesHaveProp && thisSliceHasProp;
+      }
+      if (!balanced && !badSlice) {
+        return;
+      }
+      if (allSlicesHaveProp && balanced) {
+        // Check that the properties are evenly distributed.
+        int minProps = col.getSlices().size() / uniqueNodes.size();
+        int maxProps = minProps;
+
+        if (col.getSlices().size() % uniqueNodes.size() > 0) {
+          ++maxProps;
+        }
+        boolean doSleep = false;
+        for (Map.Entry<String, Integer> ent : counts.entrySet()) {
+          if (ent.getValue() != minProps && ent.getValue() != maxProps) {
+            doSleep = true;
+          }
+        }
+
+        if (!doSleep) {
+          assertTrue("We really shouldn't be calling this if there is no node with the property " + property,
+              counts.size() > 0);
+          return;
+        }
+      }
+      Thread.sleep(100);
+    }
+    fail("Collection " + collectionName + " does not have roles evenly distributed. Collection is: " + col.toString());
+  }
+
+}