Posted to commits@lucene.apache.org by va...@apache.org on 2018/01/16 19:05:07 UTC

[01/15] lucene-solr:master: SOLR-11817: Move Collections API classes to its own package

Repository: lucene-solr
Updated Branches:
  refs/heads/master e2bba98df -> a3c4f7388


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
new file mode 100644
index 0000000..840f774
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test of the Collections API with the MiniSolrCloudCluster.
+ */
+@LuceneTestCase.Slow
+public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static final int numShards = 2;
+  private static final int numReplicas = 2;
+  private static final int maxShardsPerNode = 1;
+  private static final int nodeCount = 5;
+  private static final String configName = "solrCloudCollectionConfig";
+  private static final Map<String,String> collectionProperties  // ensure indexes survive core shutdown
+      = Collections.singletonMap("solr.directoryFactory", "solr.StandardDirectoryFactory");
+
+  @Override
+  public void setUp() throws Exception {
+    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
+    super.setUp();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    cluster.shutdown();
+    super.tearDown();
+  }
+
+  private void createCollection(String collectionName, String createNodeSet) throws Exception {
+    if (random().nextBoolean()) { // process asynchronously
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .setCreateNodeSet(createNodeSet)
+          .setProperties(collectionProperties)
+          .processAndWait(cluster.getSolrClient(), 30);
+    }
+    else {
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .setCreateNodeSet(createNodeSet)
+          .setProperties(collectionProperties)
+          .process(cluster.getSolrClient());
+    }
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish
+        (collectionName, cluster.getSolrClient().getZkStateReader(), true, true, 330);
+  }
+
+  @Test
+  public void testCollectionCreateSearchDelete() throws Exception {
+    final CloudSolrClient client = cluster.getSolrClient();
+    final String collectionName = "testcollection";
+
+    assertNotNull(cluster.getZkServer());
+    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
+    assertEquals(nodeCount, jettys.size());
+    for (JettySolrRunner jetty : jettys) {
+      assertTrue(jetty.isRunning());
+    }
+
+    // shut down a server
+    JettySolrRunner stoppedServer = cluster.stopJettySolrRunner(0);
+    assertTrue(stoppedServer.isStopped());
+    assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
+
+    // create a server
+    JettySolrRunner startedServer = cluster.startJettySolrRunner();
+    assertTrue(startedServer.isRunning());
+    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
+
+    // create collection
+    createCollection(collectionName, null);
+
+    // modify/query collection
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
+    assertEquals(1, rsp.getResults().getNumFound());
+
+    // remove a server not hosting any replicas
+    ZkStateReader zkStateReader = client.getZkStateReader();
+    zkStateReader.forceUpdateCollection(collectionName);
+    ClusterState clusterState = zkStateReader.getClusterState();
+    Map<String,JettySolrRunner> jettyMap = new HashMap<>();
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
+      jettyMap.put(key, jetty);
+    }
+    Collection<Slice> slices = clusterState.getCollection(collectionName).getSlices();
+    // remove every server that hosts a replica; what remains are replica-free nodes
+    for (Slice slice : slices) {
+      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
+      for (Replica replica : slice.getReplicas()) {
+        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
+      }
+    }
+    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
+    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
+    jettys = cluster.getJettySolrRunners();
+    for (int i = 0; i < jettys.size(); ++i) {
+      if (jettys.get(i).equals(jettyToStop)) {
+        cluster.stopJettySolrRunner(i);
+        assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
+      }
+    }
+
+    // re-create a server (to restore the original node count)
+    startedServer = cluster.startJettySolrRunner(jettyToStop);
+    assertTrue(startedServer.isRunning());
+    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
+
+    // create it again
+    createCollection(collectionName, null);
+
+    // check that there's no left-over state
+    assertEquals(0, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+
+    // modify/query collection
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    assertEquals(1, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+  }
+
+  @Test
+  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
+
+    final String collectionName = "testSolrCloudCollectionWithoutCores";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    assertNotNull(cluster.getZkServer());
+    assertFalse(cluster.getJettySolrRunners().isEmpty());
+
+    // create collection
+    createCollection(collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY);
+
+    // check the collection's corelessness
+    int coreCount = 0;
+    DocCollection docCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
+    for (Map.Entry<String,Slice> entry : docCollection.getSlicesMap().entrySet()) {
+      coreCount += entry.getValue().getReplicasMap().entrySet().size();
+    }
+    assertEquals(0, coreCount);
+
+    // delete the collection
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
+  }
+
+  @Test
+  public void testStopAllStartAll() throws Exception {
+
+    final String collectionName = "testStopAllStartAllCollection";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    assertNotNull(cluster.getZkServer());
+    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
+    assertEquals(nodeCount, jettys.size());
+    for (JettySolrRunner jetty : jettys) {
+      assertTrue(jetty.isRunning());
+    }
+
+    final SolrQuery query = new SolrQuery("*:*");
+    final SolrInputDocument doc = new SolrInputDocument();
+
+    // create collection
+    createCollection(collectionName, null);
+
+    ZkStateReader zkStateReader = client.getZkStateReader();
+
+    // modify collection
+    final int numDocs = 1 + random().nextInt(10);
+    for (int ii = 1; ii <= numDocs; ++ii) {
+      doc.setField("id", ""+ii);
+      client.add(collectionName, doc);
+      if (ii*2 == numDocs) client.commit(collectionName);
+    }
+    client.commit(collectionName);
+
+    // query collection
+    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
+
+    // the test itself
+    zkStateReader.forceUpdateCollection(collectionName);
+    final ClusterState clusterState = zkStateReader.getClusterState();
+
+    final Set<Integer> leaderIndices = new HashSet<>();
+    final Set<Integer> followerIndices = new HashSet<>();
+    {
+      final Map<String,Boolean> shardLeaderMap = new HashMap<>();
+      for (final Slice slice : clusterState.getCollection(collectionName).getSlices()) {
+        for (final Replica replica : slice.getReplicas()) {
+          shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
+        }
+        shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
+      }
+      for (int ii = 0; ii < jettys.size(); ++ii) {
+        final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
+        final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
+        final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
+        if (Boolean.TRUE.equals(isLeader)) {
+          leaderIndices.add(ii);
+        } else if (Boolean.FALSE.equals(isLeader)) {
+          followerIndices.add(ii);
+        } // else neither leader nor follower i.e. node without a replica (for our collection)
+      }
+    }
+    final List<Integer> leaderIndicesList = new ArrayList<>(leaderIndices);
+    final List<Integer> followerIndicesList = new ArrayList<>(followerIndices);
+
+    // first stop the followers (in no particular order)
+    Collections.shuffle(followerIndicesList, random());
+    for (Integer ii : followerIndicesList) {
+      if (!leaderIndices.contains(ii)) {
+        cluster.stopJettySolrRunner(jettys.get(ii));
+      }
+    }
+
+    // then stop the leaders (again in no particular order)
+    Collections.shuffle(leaderIndicesList, random());
+    for (Integer ii : leaderIndicesList) {
+      cluster.stopJettySolrRunner(jettys.get(ii));
+    }
+
+    // calculate restart order
+    final List<Integer> restartIndicesList = new ArrayList<>();
+    Collections.shuffle(leaderIndicesList, random());
+    restartIndicesList.addAll(leaderIndicesList);
+    Collections.shuffle(followerIndicesList, random());
+    restartIndicesList.addAll(followerIndicesList);
+    if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());
+
+    // and then restart jettys in that order
+    for (Integer ii : restartIndicesList) {
+      final JettySolrRunner jetty = jettys.get(ii);
+      if (!jetty.isRunning()) {
+        cluster.startJettySolrRunner(jetty);
+        assertTrue(jetty.isRunning());
+      }
+    }
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
+
+    zkStateReader.forceUpdateCollection(collectionName);
+
+    // re-query collection
+    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
+  }
+}
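The createCollection() helper above exercises both ways SolrJ can drive the Collections API: synchronous process(), and asynchronous processAndWait(), which submits the operation under an async id and polls REQUESTSTATUS until a terminal state or the timeout. A minimal standalone sketch, assuming a reachable cluster; the ZooKeeper address and config set name are illustrative, not part of this commit:

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;

    public class CreateCollectionSketch {
      public static void main(String[] args) throws Exception {
        // "localhost:9983" and "myconf" are hypothetical placeholders.
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          RequestStatusState state = CollectionAdminRequest
              .createCollection("testcollection", "myconf", 2, 2)
              .setMaxShardsPerNode(1)
              .processAndWait(client, 30);   // poll REQUESTSTATUS for up to 30s
          if (state != RequestStatusState.COMPLETED) {
            throw new IllegalStateException("create did not complete: " + state);
          }
        }
      }
    }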

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
new file mode 100644
index 0000000..58ac17d
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.backup.BackupManager;
+import org.apache.solr.core.backup.repository.HdfsBackupRepository;
+import org.apache.solr.util.BadHdfsThreadsFilter;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.core.backup.BackupManager.BACKUP_NAME_PROP;
+import static org.apache.solr.core.backup.BackupManager.BACKUP_PROPS_FILE;
+import static org.apache.solr.core.backup.BackupManager.COLLECTION_NAME_PROP;
+import static org.apache.solr.core.backup.BackupManager.CONFIG_STATE_DIR;
+import static org.apache.solr.core.backup.BackupManager.ZK_STATE_DIR;
+
+/**
+ * This class implements the tests for HDFS integration for Solr backup/restore capability.
+ */
+@ThreadLeakFilters(defaultFilters = true, filters = {
+    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
+})
+public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
+  public static final String SOLR_XML = "<solr>\n" +
+      "\n" +
+      "  <str name=\"shareSchema\">${shareSchema:false}</str>\n" +
+      "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n" +
+      "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n" +
+      "\n" +
+      "  <shardHandlerFactory name=\"shardHandlerFactory\" class=\"HttpShardHandlerFactory\">\n" +
+      "    <str name=\"urlScheme\">${urlScheme:}</str>\n" +
+      "    <int name=\"socketTimeout\">${socketTimeout:90000}</int>\n" +
+      "    <int name=\"connTimeout\">${connTimeout:15000}</int>\n" +
+      "  </shardHandlerFactory>\n" +
+      "\n" +
+      "  <solrcloud>\n" +
+      "    <str name=\"host\">127.0.0.1</str>\n" +
+      "    <int name=\"hostPort\">${hostPort:8983}</int>\n" +
+      "    <str name=\"hostContext\">${hostContext:solr}</str>\n" +
+      "    <int name=\"zkClientTimeout\">${solr.zkclienttimeout:30000}</int>\n" +
+      "    <bool name=\"genericCoreNodeNames\">${genericCoreNodeNames:true}</bool>\n" +
+      "    <int name=\"leaderVoteWait\">10000</int>\n" +
+      "    <int name=\"distribUpdateConnTimeout\">${distribUpdateConnTimeout:45000}</int>\n" +
+      "    <int name=\"distribUpdateSoTimeout\">${distribUpdateSoTimeout:340000}</int>\n" +
+      "  </solrcloud>\n" +
+      "  \n" +
+      "  <backup>\n" +
+      "    <repository  name=\"hdfs\" class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\"> \n" +
+      "      <str name=\"location\">${solr.hdfs.default.backup.path}</str>\n" +
+      "      <str name=\"solr.hdfs.home\">${solr.hdfs.home:}</str>\n" +
+      "      <str name=\"solr.hdfs.confdir\">${solr.hdfs.confdir:}</str>\n" +
+      "    </repository>\n" +
+      "  </backup>\n" +
+      "  \n" +
+      "</solr>\n";
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static MiniDFSCluster dfsCluster;
+  private static String hdfsUri;
+  private static FileSystem fs;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
+    hdfsUri = HdfsTestUtil.getURI(dfsCluster);
+    try {
+      URI uri = new URI(hdfsUri);
+      Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      fs = FileSystem.get(uri, conf);
+
+      if (fs instanceof DistributedFileSystem) {
+        // Make sure dfs is not in safe mode
+        while (((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
+          log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
+          try {
+            Thread.sleep(5000);
+          } catch (InterruptedException e) {
+            Thread.interrupted();
+            // continue
+          }
+        }
+      }
+
+      fs.mkdirs(new org.apache.hadoop.fs.Path("/backup"));
+    } catch (IOException | URISyntaxException e) {
+      throw new RuntimeException(e);
+    }
+
+    System.setProperty("solr.hdfs.default.backup.path", "/backup");
+    System.setProperty("solr.hdfs.home", hdfsUri + "/solr");
+    useFactory("solr.StandardDirectoryFactory");
+
+    configureCluster(NUM_SHARDS) // nodes
+    .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+    .withSolrXml(SOLR_XML)
+    .configure();
+  }
+
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    System.clearProperty("solr.hdfs.home");
+    System.clearProperty("solr.hdfs.default.backup.path");
+    System.clearProperty("test.build.data");
+    System.clearProperty("test.cache.data");
+    IOUtils.closeQuietly(fs);
+    fs = null;
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+
+  @Override
+  public String getCollectionName() {
+    return "hdfsbackuprestore";
+  }
+
+  @Override
+  public String getBackupRepoName() {
+    return "hdfs";
+  }
+
+  @Override
+  public String getBackupLocation() {
+    return null;
+  }
+
+  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
+    String backupName = "configonlybackup";
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+        .setRepositoryName(getBackupRepoName())
+        .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
+    backup.process(solrClient);
+
+    Map<String,String> params = new HashMap<>();
+    params.put("location", "/backup");
+    params.put("solr.hdfs.home", hdfsUri + "/solr");
+
+    HdfsBackupRepository repo = new HdfsBackupRepository();
+    repo.init(new NamedList<>(params));
+    BackupManager mgr = new BackupManager(repo, solrClient.getZkStateReader());
+
+    URI baseLoc = repo.createURI("/backup");
+
+    Properties props = mgr.readBackupProperties(baseLoc, backupName);
+    assertNotNull(props);
+    assertEquals(collectionName, props.getProperty(COLLECTION_NAME_PROP));
+    assertEquals(backupName, props.getProperty(BACKUP_NAME_PROP));
+    assertEquals(configName, props.getProperty(COLL_CONF));
+
+    DocCollection collectionState = mgr.readCollectionState(baseLoc, backupName, collectionName);
+    assertNotNull(collectionState);
+    assertEquals(collectionName, collectionState.getName());
+
+    URI configDirLoc = repo.resolve(baseLoc, backupName, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
+    assertTrue(repo.exists(configDirLoc));
+
+    Collection<String> expected = Arrays.asList(BACKUP_PROPS_FILE, ZK_STATE_DIR);
+    URI backupLoc = repo.resolve(baseLoc, backupName);
+    String[] dirs = repo.listAll(backupLoc);
+    for (String d : dirs) {
+      assertTrue(expected.contains(d));
+    }
+  }
+
+}
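The testConfigBackupOnly() logic above verifies what a config-only backup writes into the "hdfs" repository declared in SOLR_XML. The same repository also serves restores; a hedged sketch of the corresponding SolrJ calls, assuming a CloudSolrClient named solrClient (the restore target name is illustrative):

    // Config-only backup into the "hdfs" repository from solr.xml.
    CollectionAdminRequest.backupCollection("hdfsbackuprestore", "configonlybackup")
        .setRepositoryName("hdfs")
        .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY)
        .process(solrClient);

    // Restoring from the same repository re-creates the collection from the
    // backed-up ZK state (plus index files, when a full backup was taken).
    CollectionAdminRequest.restoreCollection("hdfsbackuprestore_restored", "configonlybackup")
        .setRepositoryName("hdfs")
        .process(solrClient);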

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
new file mode 100644
index 0000000..587b9b1
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import org.junit.BeforeClass;
+
+/**
+ * This class implements the tests for local file-system integration for Solr backup/restore capability.
+ * Note that Solr backup/restore still requires a "shared" file-system; it's just that in this case
+ * the file-system is exposed via the local file-system API.
+ */
+public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
+  private static String backupLocation;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    configureCluster(NUM_SHARDS) // nodes
+        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+        .configure();
+
+    boolean whitespacesInPath = random().nextBoolean();
+    if (whitespacesInPath) {
+      backupLocation = createTempDir("my backup").toAbsolutePath().toString();
+    } else {
+      backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
+    }
+  }
+
+  @Override
+  public String getCollectionName() {
+    return "backuprestore";
+  }
+
+  @Override
+  public String getBackupRepoName() {
+    return null;
+  }
+
+  @Override
+  public String getBackupLocation() {
+    return backupLocation;
+  }
+}
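Because the local file-system repository is the default, these tests only need to supply a backup location; every node must be able to see it, which is what the shared-file-system caveat in the javadoc means in practice. A hedged sketch of a backup/restore pair against such a location, assuming a CloudSolrClient named client (path and names are illustrative):

    String location = "/mnt/shared/backups";   // must be readable and writable by every node

    CollectionAdminRequest.backupCollection("backuprestore", "mybackup")
        .setLocation(location)
        .process(client);

    CollectionAdminRequest.restoreCollection("backuprestore_restored", "mybackup")
        .setLocation(location)
        .process(client);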

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
new file mode 100644
index 0000000..d327aec
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+
+@Slow
+public class TestReplicaProperties extends ReplicaPropertiesBase {
+
+  public static final String COLLECTION_NAME = "testcollection";
+
+  public TestReplicaProperties() {
+    schemaString = "schema15.xml";      // we need a string id
+    sliceCount = 2;
+  }
+
+  @Test
+  @ShardsFixed(num = 4)
+  public void test() throws Exception {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
+      // shards, replicationfactor, maxreplicaspernode
+      int shards = random().nextInt(7);
+      if (shards < 2) shards = 2;
+      int rFactor = random().nextInt(4);
+      if (rFactor < 2) rFactor = 2;
+      createCollection(null, COLLECTION_NAME, shards, rFactor, shards * rFactor + 1, client, null, "conf1");
+    }
+
+    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
+    waitForRecoveriesToFinish(COLLECTION_NAME, false);
+
+    listCollection();
+
+    clusterAssignPropertyTest();
+  }
+
+  private void listCollection() throws IOException, SolrServerException {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.LIST.toString());
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      List<String> collections = (List<String>) rsp.get("collections");
+      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
+      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
+      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
+    }
+  }
+
+
+  private void clusterAssignPropertyTest() throws Exception {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "property", "preferredLeader");
+      } catch (SolrException se) {
+        assertTrue("Should have seen missing required parameter 'collection' error",
+            se.getMessage().contains("Missing required parameter: collection"));
+      }
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "preferredLeader");
+
+      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "preferredleader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "property.newunique",
+          "shardUnique", "true");
+      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "property.newunique");
+
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "collection", COLLECTION_NAME,
+            "property", "whatever",
+            "shardUnique", "false");
+        fail("Should have thrown an exception here.");
+      } catch (SolrException se) {
+        assertTrue("Should have gotten a specific error message here",
+            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the " +
+                "property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
+      }
+      // Should be able to set non-unique-per-slice values in several places.
+      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
+      List<String> sliceList = new ArrayList<>(slices.keySet());
+      String c1_s1 = sliceList.get(0);
+      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
+      String c1_s1_r1 = replicasList.get(0);
+      String c1_s1_r2 = replicasList.get(1);
+
+      addProperty(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "bogus1",
+          "property.value", "true");
+
+      addProperty(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r2,
+          "property", "property.bogus1",
+          "property.value", "whatever");
+
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "collection", COLLECTION_NAME,
+            "property", "bogus1",
+            "shardUnique", "false");
+        fail("Should have thrown parameter error here");
+      } catch (SolrException se) {
+        assertTrue("Should have caught specific exception ",
+            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the property be " +
+                "pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
+      }
+
+      // Should have no effect despite the "shardUnique" param being set.
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "property.bogus1",
+          "shardUnique", "true");
+
+      verifyPropertyVal(client, COLLECTION_NAME,
+          c1_s1_r1, "bogus1", "true");
+      verifyPropertyVal(client, COLLECTION_NAME,
+          c1_s1_r2, "property.bogus1", "whatever");
+
+      // At this point we've assigned a preferred leader. Make it happen and check that all the nodes that are
+      // leaders _also_ have the preferredLeader property set.
+
+
+      NamedList<Object> res = doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.REBALANCELEADERS.toString(),
+          "collection", COLLECTION_NAME);
+
+      verifyLeaderAssignment(client, COLLECTION_NAME);
+
+    }
+  }
+
+  private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
+      throws InterruptedException, KeeperException {
+    String lastFailMsg = "";
+    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
+      lastFailMsg = "";
+      ClusterState clusterState = client.getZkStateReader().getClusterState();
+      for (Slice slice : clusterState.getCollection(collectionName).getSlices()) {
+        Boolean foundLeader = false;
+        Boolean foundPreferred = false;
+        for (Replica replica : slice.getReplicas()) {
+          Boolean isLeader = replica.getBool("leader", false);
+          Boolean isPreferred = replica.getBool("property.preferredleader", false);
+          if (isLeader != isPreferred) {
+            lastFailMsg = "Replica should NOT have preferredLeader != leader. Preferred: " + isPreferred.toString() +
+                " leader is " + isLeader.toString();
+          }
+          if (foundLeader && isLeader) {
+            lastFailMsg = "There should only be a single leader in _any_ shard! Replica " + replica.getName() +
+                " is the second leader in slice " + slice.getName();
+          }
+          if (foundPreferred && isPreferred) {
+            lastFailMsg = "There should only be a single preferredLeader in _any_ shard! Replica " + replica.getName() +
+                " is the second preferredLeader in slice " + slice.getName();
+          }
+          foundLeader = foundLeader ? foundLeader : isLeader;
+          foundPreferred = foundPreferred ? foundPreferred : isPreferred;
+        }
+      }
+      if (lastFailMsg.length() == 0) return;
+      Thread.sleep(100);
+    }
+    fail(lastFailMsg);
+  }
+
+  private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
+    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    for (int idx = 0; idx < paramsIn.length; idx += 2) {
+      params.set(paramsIn[idx], paramsIn[idx + 1]);
+    }
+    QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    client.request(request);
+
+  }
+}
+
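The property manipulation above goes through raw /admin/collections parameters; the typed SolrJ helpers express the same ADDREPLICAPROP and BALANCESHARDUNIQUE operations more compactly. A hedged sketch, assuming a CloudSolrClient named client (shard and replica names are illustrative):

    // Set an arbitrary property on one replica ...
    CollectionAdminRequest
        .addReplicaProperty("testcollection", "shard1", "core_node1", "bogus1", "true")
        .process(client);

    // ... and spread "preferredLeader" so exactly one replica per shard carries it.
    CollectionAdminRequest
        .balanceReplicaProperty("testcollection", "preferredLeader")
        .process(client);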

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
new file mode 100644
index 0000000..3d32d6c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.junit.Test;
+
+public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
+
+  public static final int MAX_WAIT_TIMEOUT_SECONDS = 90;
+
+  public TestRequestStatusCollectionAPI() {
+    schemaString = "schema15.xml";      // we need a string id
+  }
+
+  @Test
+  public void test() throws Exception {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection2");
+    params.set("numShards", 2);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1000");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    // Wait for the request to complete.
+
+    NamedList r = null;
+    NamedList status = null;
+    String message = null;
+
+    params = new ModifiableSolrParams();
+
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1000");
+
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("found [1000] in completed tasks", message);
+
+    // Check for a random (hopefully non-existent) request id
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "9999999");
+    try {
+      r = sendRequest(params);
+      status = (NamedList) r.get("status");
+      message = (String) status.get("msg");
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("Did not find [9999999] in any tasks queue", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.SPLITSHARD.toString());
+    params.set("collection", "collection2");
+    params.set("shard", "shard1");
+    params.set(CommonAdminParams.ASYNC, "1001");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    // Wait for the request to complete.
+    params = new ModifiableSolrParams();
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1001");
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("found [1001] in completed tasks", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection2");
+    params.set("numShards", 2);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1002");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    params = new ModifiableSolrParams();
+
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1002");
+
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+
+    assertEquals("found [1002] in failed tasks", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection3");
+    params.set("numShards", 1);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1002");
+    try {
+      r = sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("Task with the same requestid already exists.", r.get("error"));
+  }
+
+  /**
+   * Helper method to send a status request with a given retry limit and return
+   * the status message (or null) from the response.
+   */
+  private String sendStatusRequestWithRetry(ModifiableSolrParams params, int maxCounter)
+      throws SolrServerException, IOException{
+    String message = null;
+    while (maxCounter-- > 0) {
+      final NamedList r = sendRequest(params);
+      final NamedList status = (NamedList) r.get("status");
+      final RequestStatusState state = RequestStatusState.fromKey((String) status.get("state"));
+      message = (String) status.get("msg");
+
+      if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
+        return message;
+      }
+
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) { // ignore and keep polling
+      }
+
+    }
+    // Retries exhausted; return the last status message observed (possibly null).
+    return message;
+  }
+
+  protected NamedList sendRequest(ModifiableSolrParams params) throws SolrServerException, IOException {
+    SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.getSolrClient()).getBaseURL();
+    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
+
+    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 15000)) {
+      return baseServer.request(request);
+    }
+
+  }
+}
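sendStatusRequestWithRetry() above polls REQUESTSTATUS through raw parameters; the same loop can be written against the typed SolrJ request. A hedged sketch, assuming a SolrClient named client (the async id matches the test's first request):

    // Poll the status of async request "1000" until a terminal state or timeout.
    RequestStatusState state = RequestStatusState.SUBMITTED;
    for (int i = 0; i < MAX_WAIT_TIMEOUT_SECONDS; i++) {
      state = CollectionAdminRequest.requestStatus("1000")
          .process(client)
          .getRequestStatus();
      if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
        break;
      }
      Thread.sleep(1000);
    }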

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
index a842a87..2310a14 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
@@ -80,7 +80,7 @@ import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.REQUESTID;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID;
 
 /**
  * Simulated {@link SolrCloudManager}.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index 22f9fb9..86de8ff 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -47,11 +47,11 @@ import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.cloud.ActionThrottle;
-import org.apache.solr.cloud.AddReplicaCmd;
-import org.apache.solr.cloud.Assign;
-import org.apache.solr.cloud.CreateCollectionCmd;
-import org.apache.solr.cloud.CreateShardCmd;
-import org.apache.solr.cloud.SplitShardCmd;
+import org.apache.solr.cloud.api.collections.AddReplicaCmd;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.CreateCollectionCmd;
+import org.apache.solr.cloud.api.collections.CreateShardCmd;
+import org.apache.solr.cloud.api.collections.SplitShardCmd;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
 import org.apache.solr.cloud.overseer.ZkWriteCommand;
@@ -730,7 +730,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   }
 
   /**
-   * Move replica. This uses a similar algorithm as {@link org.apache.solr.cloud.MoveReplicaCmd#moveNormalReplica(ClusterState, NamedList, String, String, DocCollection, Replica, Slice, int, boolean)}.
+   * Move replica. This uses a similar algorithm as {@link org.apache.solr.cloud.api.collections.MoveReplicaCmd#moveNormalReplica(ClusterState, NamedList, String, String, DocCollection, Replica, Slice, int, boolean)}.
    * @param message operation details
    * @param results operation results.
    */
@@ -909,7 +909,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   }
 
   /**
-   * Delete a shard. This uses a similar algorithm as {@link org.apache.solr.cloud.DeleteShardCmd}
+   * Delete a shard. This uses a similar algorithm as {@link org.apache.solr.cloud.api.collections.DeleteShardCmd}
    * @param message operation details
    * @param results operation results
    */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
index c242809..cd45c15 100644
--- a/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
@@ -43,7 +43,7 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.AbstractZkTestCase;
 import org.apache.solr.cloud.ChaosMonkey;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -74,9 +74,8 @@ import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.NUM_SLICES;
 import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_PROPS;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@@ -448,9 +447,9 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
     for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
       if (entry.getValue() != null) params.set(entry.getKey(), String.valueOf(entry.getValue()));
     }
-    Integer numShards = (Integer) collectionProps.get(NUM_SLICES);
+    Integer numShards = (Integer) collectionProps.get(OverseerCollectionMessageHandler.NUM_SLICES);
     if (numShards == null) {
-      String shardNames = (String) collectionProps.get(SHARDS_PROP);
+      String shardNames = (String) collectionProps.get(OverseerCollectionMessageHandler.SHARDS_PROP);
       numShards = StrUtils.splitSmart(shardNames, ',').size();
     }
     Integer replicationFactor = (Integer) collectionProps.get(REPLICATION_FACTOR);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
deleted file mode 100644
index 58d499b..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.hdfs;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.carrotsearch.randomizedtesting.annotations.Nightly;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import com.codahale.metrics.Counter;
-import com.codahale.metrics.Metric;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.CoreStatus;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.cloud.CollectionsAPIDistributedZkTest;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-@Slow
-@Nightly
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
-})
-public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
-
-  private static MiniDFSCluster dfsCluster;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
-    System.setProperty("tests.hdfs.numdatanodes", "1");
-   
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
-
-    ZkConfigManager configManager = new ZkConfigManager(zkClient());
-    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
-    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf2");
-
-    System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
-  }
-
-  @AfterClass
-  public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
-    System.clearProperty("solr.hdfs.blockcache.blocksperbank");
-    System.clearProperty("tests.hdfs.numdatanodes");
-    System.clearProperty("solr.hdfs.home");
-  }
-
-  @Test
-  public void moveReplicaTest() throws Exception {
-    cluster.waitForAllNodes(5000);
-    String coll = "movereplicatest_coll";
-
-    CloudSolrClient cloudClient = cluster.getSolrClient();
-
-    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
-    create.setMaxShardsPerNode(2);
-    cloudClient.request(create);
-
-    for (int i = 0; i < 10; i++) {
-      cloudClient.add(coll, sdoc("id",String.valueOf(i)));
-      cloudClient.commit(coll);
-    }
-
-    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
-    Collections.shuffle(slices, random());
-    Slice slice = null;
-    Replica replica = null;
-    for (Slice s : slices) {
-      slice = s;
-      for (Replica r : s.getReplicas()) {
-        if (s.getLeader() != r) {
-          replica = r;
-        }
-      }
-    }
-    String dataDir = getDataDir(replica);
-
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
-    ArrayList<String> l = new ArrayList<>(liveNodes);
-    Collections.shuffle(l, random());
-    String targetNode = null;
-    for (String node : liveNodes) {
-      if (!replica.getNodeName().equals(node)) {
-        targetNode = node;
-        break;
-      }
-    }
-    assertNotNull(targetNode);
-
-    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
-    moveReplica.process(cloudClient);
-
-    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
-    checkNumOfCores(cloudClient, targetNode, 2);
-
-    waitForState("Wait for recovery finish failed",coll, clusterShape(2,2));
-    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
-    boolean found = false;
-    for (Replica newReplica : slice.getReplicas()) {
-      if (getDataDir(newReplica).equals(dataDir)) {
-        found = true;
-      }
-    }
-    assertTrue(found);
-
-
-    // data dir is reused so replication will be skipped
-    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
-      List<String> registryNames = manager.registryNames().stream()
-          .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
-      for (String registry : registryNames) {
-        Map<String, Metric> metrics = manager.registry(registry).getMetrics();
-        Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
-        if (counter != null) {
-          assertEquals(0, counter.getCount());
-        }
-      }
-    }
-  }
-
-
-  private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
-    assertEquals(nodeName + " does not have expected number of cores", expectedCores, getNumOfCores(cloudClient, nodeName));
-  }
-
-  private int getNumOfCores(CloudSolrClient cloudClient, String nodeName) throws IOException, SolrServerException {
-    try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
-      CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
-      return status.getCoreStatus().size();
-    }
-  }
-
-  private String getDataDir(Replica replica) throws IOException, SolrServerException {
-    try (HttpSolrClient coreclient = getHttpSolrClient(replica.getBaseUrl())) {
-      CoreStatus status = CoreAdminRequest.getCoreStatus(replica.getCoreName(), coreclient);
-      return status.getDataDirectory();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index 9f0ff20..b031393 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -56,6 +56,7 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.CoreAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
@@ -96,9 +97,6 @@ import org.noggit.JSONWriter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
 import static org.apache.solr.common.util.Utils.makeMap;
 
 /**
@@ -174,7 +172,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     }
   }
 
-  static class CloudSolrServerClient {
+  public static class CloudSolrServerClient {
     SolrClient solrClient;
     String shardName;
     int port;
@@ -186,6 +184,10 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
       this.solrClient = client;
     }
 
+    public SolrClient getSolrClient() {
+      return solrClient;
+    }
+
     @Override
     public int hashCode() {
       final int prime = 31;
@@ -1621,9 +1623,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
       if(entry.getValue() !=null) params.set(entry.getKey(), String.valueOf(entry.getValue()));
     }
-    Integer numShards = (Integer) collectionProps.get(NUM_SLICES);
+    Integer numShards = (Integer) collectionProps.get(OverseerCollectionMessageHandler.NUM_SLICES);
     if(numShards==null){
-      String shardNames = (String) collectionProps.get(SHARDS_PROP);
+      String shardNames = (String) collectionProps.get(OverseerCollectionMessageHandler.SHARDS_PROP);
       numShards = StrUtils.splitSmart(shardNames,',').size();
     }
     Integer numNrtReplicas = (Integer) collectionProps.get(ZkStateReader.NRT_REPLICAS);
@@ -1685,12 +1687,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     int numTlogReplicas = useTlogReplicas()?replicationFactor:0;
     return createCollection(collectionInfos, collectionName,
         Utils.makeMap(
-        NUM_SLICES, numShards,
-        ZkStateReader.NRT_REPLICAS, numNrtReplicas,
-        ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
-        ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        CREATE_NODE_SET, createNodeSetStr,
-        ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
+            OverseerCollectionMessageHandler.NUM_SLICES, numShards,
+            ZkStateReader.NRT_REPLICAS, numNrtReplicas,
+            ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
+            ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
+            OverseerCollectionMessageHandler.CREATE_NODE_SET, createNodeSetStr,
+            ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
         client, configSetName);
   }
 
@@ -1701,12 +1703,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     int numTlogReplicas = useTlogReplicas()?replicationFactor:0;
     return createCollection(collectionInfos, collectionName,
         Utils.makeMap(
-        NUM_SLICES, numShards,
-        ZkStateReader.NRT_REPLICAS, numNrtReplicas,
-        ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
-        ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        CREATE_NODE_SET, createNodeSetStr,
-        ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
+            OverseerCollectionMessageHandler.NUM_SLICES, numShards,
+            ZkStateReader.NRT_REPLICAS, numNrtReplicas,
+            ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
+            ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
+            OverseerCollectionMessageHandler.CREATE_NODE_SET, createNodeSetStr,
+            ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
         client, configName);
   }
 
@@ -1905,7 +1907,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
         ZkStateReader.NRT_REPLICAS, numNrtReplicas,
         ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
         ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        NUM_SLICES, numShards);
+        OverseerCollectionMessageHandler.NUM_SLICES, numShards);
     Map<String,List<Integer>> collectionInfos = new HashMap<>();
     createCollection(collectionInfos, collName, props, client);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 7f4f0cb..360632c 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -419,7 +419,7 @@ public class MiniSolrCloudCluster {
     return jetty;
   }
 
-  protected JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
+  public JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
     jetty.stop();
     return jetty;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java b/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
index 90eea94..ac1c5e1 100644
--- a/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
+++ b/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
@@ -73,7 +73,7 @@ public class MiniSolrCloudClusterTest extends LuceneTestCase {
 
     MiniSolrCloudCluster cluster = new MiniSolrCloudCluster(3, createTempDir(), JettyConfig.builder().build()) {
       @Override
-      protected JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
+      public JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
         JettySolrRunner j = super.stopJettySolrRunner(jetty);
         if (jettyIndex.incrementAndGet() == 2)
           throw new IOException("Fake IOException on shutdown!");


[05/15] lucene-solr:master: SOLR-11817: Move Collections API classes to it's own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
deleted file mode 100644
index bb99799..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ /dev/null
@@ -1,1015 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.HashBasedRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.TestInjection;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-
-@Slow
-public class ShardSplitTest extends BasicDistributedZkTest {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String SHARD1_0 = SHARD1 + "_0";
-  public static final String SHARD1_1 = SHARD1 + "_1";
-
-  public ShardSplitTest() {
-    schemaString = "schema15.xml";      // we need a string id
-  }
-
-  @Override
-  public void distribSetUp() throws Exception {
-    super.distribSetUp();
-    useFactory(null);
-  }
-
-  @Test
-  public void test() throws Exception {
-
-    waitForThingsToLevelOut(15);
-
-    if (usually()) {
-      log.info("Using legacyCloud=false for cluster");
-      CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
-          .process(cloudClient);
-    }
-    incompleteOrOverlappingCustomRangeTest();
-    splitByUniqueKeyTest();
-    splitByRouteFieldTest();
-    splitByRouteKeyTest();
-
-    // todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
-    // and the new sub-shards don't have any.
-    waitForRecoveriesToFinish(true);
-    //waitForThingsToLevelOut(15);
-  }
-
-  /*
-  Creates a collection with replicationFactor=1 and splits a shard. Restarts the sub-shard leader node,
-  adds a replica, and ensures the doc count matches between the leader and the replica.
-   */
-  public void testSplitStaticIndexReplication() throws Exception {
-    waitForThingsToLevelOut(15);
-
-    DocCollection defCol = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-    Replica replica = defCol.getReplicas().get(0);
-    String nodeName = replica.getNodeName();
-
-    String collectionName = "testSplitStaticIndexReplication";
-    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
-    create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
-    create.setCreateNodeSet(nodeName); // we want to create the leader on a fixed node so that we know which one to restart later
-    create.process(cloudClient);
-    try (CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), true, cloudClient.getLbClient().getHttpClient())) {
-      client.setDefaultCollection(collectionName);
-      StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, client, "i1", true);
-      try {
-        thread.start();
-        Thread.sleep(1000); // give the indexer some time to do its work
-        thread.safeStop();
-        thread.join();
-        client.commit();
-        controlClient.commit();
-
-        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
-        splitShard.setShardName(SHARD1);
-        String asyncId = splitShard.processAsync(client);
-        RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId).waitFor(client, 120);
-        if (state == RequestStatusState.COMPLETED)  {
-          waitForRecoveriesToFinish(collectionName, true);
-          // let's wait to see parent shard become inactive
-          CountDownLatch latch = new CountDownLatch(1);
-          client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
-            @Override
-            public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-              Slice parent = collectionState.getSlice(SHARD1);
-              Slice slice10 = collectionState.getSlice(SHARD1_0);
-              Slice slice11 = collectionState.getSlice(SHARD1_1);
-              if (slice10 != null && slice11 != null &&
-                  parent.getState() == Slice.State.INACTIVE &&
-                  slice10.getState() == Slice.State.ACTIVE &&
-                  slice11.getState() == Slice.State.ACTIVE) {
-                latch.countDown();
-                return true; // removes the watch
-              }
-              return false;
-            }
-          });
-          latch.await(1, TimeUnit.MINUTES);
-          if (latch.getCount() != 0)  {
-            // sanity check
-            fail("Sub-shards did not become active even after waiting for 1 minute");
-          }
-
-          int liveNodeCount = client.getZkStateReader().getClusterState().getLiveNodes().size();
-
-          // restart the sub-shard leader node
-          boolean restarted = false;
-          for (JettySolrRunner jetty : jettys) {
-            int port = jetty.getBaseUrl().getPort();
-            if (replica.getStr(BASE_URL_PROP).contains(":" + port))  {
-              ChaosMonkey.kill(jetty);
-              ChaosMonkey.start(jetty);
-              restarted = true;
-              break;
-            }
-          }
-          if (!restarted) {
-            // sanity check
-            fail("We could not find a jetty to kill for replica: " + replica.getCoreUrl());
-          }
-
-          // add a new replica for the sub-shard
-          CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(collectionName, SHARD1_0);
-          // use the control client because it is less likely to be the node being restarted,
-          // which avoids test flakiness caused by NoHttpResponseExceptions
-          String control_collection = client.getZkStateReader().getClusterState().getCollection("control_collection").getReplicas().get(0).getStr(BASE_URL_PROP);
-          try (HttpSolrClient control = new HttpSolrClient.Builder(control_collection).withHttpClient(client.getLbClient().getHttpClient()).build())  {
-            state = addReplica.processAndWait(control, 30);
-          }
-          if (state == RequestStatusState.COMPLETED)  {
-            CountDownLatch newReplicaLatch = new CountDownLatch(1);
-            client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
-              @Override
-              public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-                if (liveNodes.size() != liveNodeCount)  {
-                  return false;
-                }
-                Slice slice = collectionState.getSlice(SHARD1_0);
-                if (slice.getReplicas().size() == 2)  {
-                  if (slice.getReplicas().stream().noneMatch(r -> r.getState() == Replica.State.RECOVERING)) {
-                    // we see replicas and none of them are recovering
-                    newReplicaLatch.countDown();
-                    return true;
-                  }
-                }
-                return false;
-              }
-            });
-            newReplicaLatch.await(30, TimeUnit.SECONDS);
-            // check consistency of the sub-shard replicas explicitly because the checkShardConsistency
-            // method doesn't handle new shards/replicas well
-            ClusterState clusterState = client.getZkStateReader().getClusterState();
-            DocCollection collection = clusterState.getCollection(collectionName);
-            int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
-            assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
-          } else  {
-            fail("Adding a replica to sub-shard did not complete even after waiting for 30 seconds!. Saw state = " + state.getKey());
-          }
-        } else {
-          fail("We expected shard split to succeed on a static index but it didn't. Found state = " + state.getKey());
-        }
-      } finally {
-        thread.safeStop();
-        thread.join();
-      }
-    }
-  }
-
-  private int assertConsistentReplicas(Slice shard) throws SolrServerException, IOException {
-    long numFound = Long.MIN_VALUE;
-    int count = 0;
-    for (Replica replica : shard.getReplicas()) {
-      HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
-          .withHttpClient(cloudClient.getLbClient().getHttpClient()).build();
-      QueryResponse response = client.query(new SolrQuery("q", "*:*", "distrib", "false"));
-      log.info("Found numFound={} on replica: {}", response.getResults().getNumFound(), replica.getCoreUrl());
-      if (numFound == Long.MIN_VALUE)  {
-        numFound = response.getResults().getNumFound();
-      } else  {
-        assertEquals("Shard " + shard.getName() + " replicas do not have same number of documents", numFound, response.getResults().getNumFound());
-      }
-      count++;
-    }
-    return count;
-  }
-
-  /**
-   * Used to test that we can split a shard when a previous split event
-   * left sub-shards in construction or recovery state.
-   *
-   * See SOLR-9439
-   */
-  @Test
-  public void testSplitAfterFailedSplit() throws Exception {
-    waitForThingsToLevelOut(15);
-
-    TestInjection.splitFailureBeforeReplicaCreation = "true:100"; // we definitely want split to fail
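-    // the "true:100" value means the injected failure fires on 100% of attempts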
-    try {
-      try {
-        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-        splitShard.setShardName(SHARD1);
-        splitShard.process(cloudClient);
-        fail("Shard split was not supposed to succeed after failure injection!");
-      } catch (Exception e) {
-        // expected
-      }
-
-      // assert that sub-shards cores exist and sub-shard is in construction state
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      zkStateReader.forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-      ClusterState state = zkStateReader.getClusterState();
-      DocCollection collection = state.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-
-      Slice shard10 = collection.getSlice(SHARD1_0);
-      assertEquals(Slice.State.CONSTRUCTION, shard10.getState());
-      assertEquals(1, shard10.getReplicas().size());
-
-      Slice shard11 = collection.getSlice(SHARD1_1);
-      assertEquals(Slice.State.CONSTRUCTION, shard11.getState());
-      assertEquals(1, shard11.getReplicas().size());
-
-      // lets retry the split
-      TestInjection.reset(); // let the split succeed
-      try {
-        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-        splitShard.setShardName(SHARD1);
-        splitShard.process(cloudClient);
-        // Yay!
-      } catch (Exception e) {
-        log.error("Shard split failed", e);
-        fail("Shard split did not succeed after a previous failed split attempt left sub-shards in construction state");
-      }
-
-    } finally {
-      TestInjection.reset();
-    }
-  }
-
-  @Test
-  public void testSplitWithChaosMonkey() throws Exception {
-    waitForThingsToLevelOut(15);
-
-    List<StoppableIndexingThread> indexers = new ArrayList<>();
-    try {
-      for (int i = 0; i < 1; i++) {
-        StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, cloudClient, String.valueOf(i), true);
-        indexers.add(thread);
-        thread.start();
-      }
-      Thread.sleep(1000); // give the indexers some time to do their work
-    } catch (Exception e) {
-      log.error("Error in test", e);
-    } finally {
-      for (StoppableIndexingThread indexer : indexers) {
-        indexer.safeStop();
-        indexer.join();
-      }
-    }
-
-    cloudClient.commit();
-    controlClient.commit();
-
-    AtomicBoolean stop = new AtomicBoolean();
-    AtomicBoolean killed = new AtomicBoolean(false);
-    Runnable monkey = new Runnable() {
-      @Override
-      public void run() {
-        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-        zkStateReader.registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {
-          @Override
-          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-            if (stop.get()) {
-              return true; // abort and remove the watch
-            }
-            Slice slice = collectionState.getSlice(SHARD1_0);
-            if (slice != null && slice.getReplicas().size() > 1) {
-              // ensure that only one watcher invocation thread can kill!
-              if (killed.compareAndSet(false, true))  {
-                log.info("Monkey thread found 2 replicas for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
-                CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
-                try {
-                  Thread.sleep(1000 + random().nextInt(500));
-                  ChaosMonkey.kill(cjetty);
-                  stop.set(true);
-                  return true;
-                } catch (Exception e) {
-                  log.error("Monkey unable to kill jetty at port " + cjetty.jetty.getLocalPort(), e);
-                }
-              }
-            }
-            log.info("Monkey thread found only one replica for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
-            return false;
-          }
-        });
-      }
-    };
-
-    Thread monkeyThread = null;
-    monkeyThread = new Thread(monkey);
-    monkeyThread.start();
-    try {
-      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-      splitShard.setShardName(SHARD1);
-      String asyncId = splitShard.processAsync(cloudClient);
-      RequestStatusState splitStatus = null;
-      try {
-        splitStatus = CollectionAdminRequest.requestStatus(asyncId).waitFor(cloudClient, 120);
-      } catch (Exception e) {
-        log.warn("Failed to get request status, maybe because the overseer node was shutdown by monkey", e);
-      }
-
-      // we don't care whether the split failed, because we are injecting faults and failure is likely;
-      // in any case we want to assert that all docs that were indexed are available in SolrCloud,
-      // and that if the split succeeded then all replicas of each sub-shard are consistent
-      // (i.e. have the same numdocs)
-
-      log.info("Shard split request state is COMPLETED");
-      stop.set(true);
-      monkeyThread.join();
-      Set<String> addFails = new HashSet<>();
-      Set<String> deleteFails = new HashSet<>();
-      for (StoppableIndexingThread indexer : indexers) {
-        addFails.addAll(indexer.getAddFails());
-        deleteFails.addAll(indexer.getDeleteFails());
-      }
-
-      CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
-      log.info("Starting shard1 leader jetty at port {}", cjetty.jetty.getLocalPort());
-      ChaosMonkey.start(cjetty.jetty);
-      cloudClient.getZkStateReader().forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-      log.info("Current collection state: {}", printClusterStateInfo(AbstractDistribZkTestBase.DEFAULT_COLLECTION));
-
-      boolean replicaCreationsFailed = false;
-      if (splitStatus == RequestStatusState.FAILED)  {
-        // either one or more replica creation failed (because it may have been created on the same parent shard leader node)
-        // or the split may have failed while trying to soft-commit *after* all replicas have been created
-        // the latter counts as a successful switch even if the API doesn't say so
-        // so we must find a way to distinguish between the two
-        // an easy way to do that is to look at the sub-shard replicas and check if the replica core actually exists
-        // instead of existing solely inside the cluster state
-        DocCollection collectionState = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-        Slice slice10 = collectionState.getSlice(SHARD1_0);
-        Slice slice11 = collectionState.getSlice(SHARD1_1);
-        if (slice10 != null && slice11 != null) {
-          for (Replica replica : slice10) {
-            if (!doesReplicaCoreExist(replica)) {
-              replicaCreationsFailed = true;
-              break;
-            }
-          }
-          for (Replica replica : slice11) {
-            if (!doesReplicaCoreExist(replica)) {
-              replicaCreationsFailed = true;
-              break;
-            }
-          }
-        }
-      }
-
-      // true if sub-shard states switch to 'active' eventually
-      AtomicBoolean areSubShardsActive = new AtomicBoolean(false);
-
-      if (!replicaCreationsFailed)  {
-        // all sub-shard replicas were created successfully so all cores must recover eventually
-        waitForRecoveriesToFinish(AbstractDistribZkTestBase.DEFAULT_COLLECTION, true);
-        // let's wait for the overseer to switch shard states
-        CountDownLatch latch = new CountDownLatch(1);
-        cloudClient.getZkStateReader().registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {
-          @Override
-          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-            Slice parent = collectionState.getSlice(SHARD1);
-            Slice slice10 = collectionState.getSlice(SHARD1_0);
-            Slice slice11 = collectionState.getSlice(SHARD1_1);
-            if (slice10 != null && slice11 != null &&
-                parent.getState() == Slice.State.INACTIVE &&
-                slice10.getState() == Slice.State.ACTIVE &&
-                slice11.getState() == Slice.State.ACTIVE) {
-              areSubShardsActive.set(true);
-              latch.countDown();
-              return true; // removes the watch
-            } else if (slice10 != null && slice11 != null &&
-                parent.getState() == Slice.State.ACTIVE &&
-                slice10.getState() == Slice.State.RECOVERY_FAILED &&
-                slice11.getState() == Slice.State.RECOVERY_FAILED) {
-              areSubShardsActive.set(false);
-              latch.countDown();
-              return true;
-            }
-            return false;
-          }
-        });
-
-        latch.await(2, TimeUnit.MINUTES);
-
-        if (latch.getCount() != 0)  {
-          // sanity check
-          fail("We think that split was successful but sub-shard states were not updated even after 2 minutes.");
-        }
-      }
-
-      cloudClient.commit(); // for visibility of results on sub-shards
-
-      checkShardConsistency(true, true, addFails, deleteFails);
-      long ctrlDocs = controlClient.query(new SolrQuery("*:*")).getResults().getNumFound();
-      // ensure we have added more than 0 docs
-      long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
-      assertTrue("Found " + ctrlDocs + " control docs", cloudClientDocs > 0);
-      assertEquals("Found " + ctrlDocs + " control docs and " + cloudClientDocs + " cloud docs", ctrlDocs, cloudClientDocs);
-
-      // check consistency of the sub-shard replicas explicitly because the checkShardConsistency
-      // method doesn't handle new shards/replicas well
-      if (areSubShardsActive.get()) {
-        ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-        DocCollection collection = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
-        int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
-        assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
-        numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_1));
-        assertEquals("We should have checked consistency for exactly 2 replicas of shard1_1", 2, numReplicasChecked);
-      }
-    } finally {
-      stop.set(true);
-      monkeyThread.join();
-    }
-  }
-
-  private boolean doesReplicaCoreExist(Replica replica) throws IOException {
-    try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getStr(BASE_URL_PROP))
-        .withHttpClient(cloudClient.getLbClient().getHttpClient()).build())  {
-      String coreName = replica.getCoreName();
-      try {
-        CoreAdminResponse status = CoreAdminRequest.getStatus(coreName, client);
-        if (status.getCoreStatus(coreName) == null || status.getCoreStatus(coreName).size() == 0) {
-          return false;
-        }
-      } catch (Exception e) {
-        log.warn("Error gettting core status of replica " + replica + ". Perhaps it does not exist!", e);
-        return false;
-      }
-    }
-    return true;
-  }
-
-  @Test
-  public void testSplitShardWithRule() throws Exception {
-    waitForThingsToLevelOut(15);
-
-    if (usually()) {
-      log.info("Using legacyCloud=false for cluster");
-      CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
-          .process(cloudClient);
-    }
-
-    log.info("Starting testSplitShardWithRule");
-    String collectionName = "shardSplitWithRule";
-    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2)
-        .setRule("shard:*,replica:<2,node:*");
-    CollectionAdminResponse response = createRequest.process(cloudClient);
-    assertEquals(0, response.getStatus());
-
-    CollectionAdminRequest.SplitShard splitShardRequest = CollectionAdminRequest.splitShard(collectionName)
-        .setShardName("shard1");
-    response = splitShardRequest.process(cloudClient);
-    assertEquals(String.valueOf(response.getErrorMessages()), 0, response.getStatus());
-  }
-
-  private void incompleteOrOverlappingCustomRangeTest() throws Exception  {
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
-    Slice shard1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(SHARD1);
-    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
-
-    List<DocRouter.Range> subRanges = new ArrayList<>();
-    List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
-
-    // test with only one range
-    subRanges.add(ranges.get(0));
-    try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
-      fail("Shard splitting with just one custom hash range should not succeed");
-    } catch (HttpSolrClient.RemoteSolrException e) {
-      log.info("Expected exception:", e);
-    }
-    subRanges.clear();
-
-    // test with ranges with a hole in between them
-    subRanges.add(ranges.get(3)); // order shouldn't matter
-    subRanges.add(ranges.get(0));
-    try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
-      fail("Shard splitting with missing hashes in between given ranges should not succeed");
-    } catch (HttpSolrClient.RemoteSolrException e) {
-      log.info("Expected exception:", e);
-    }
-    subRanges.clear();
-
-    // test with overlapping ranges
-    subRanges.add(ranges.get(0));
-    subRanges.add(ranges.get(1));
-    subRanges.add(ranges.get(2));
-    subRanges.add(new DocRouter.Range(ranges.get(3).min - 15, ranges.get(3).max));
-    try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
-      fail("Shard splitting with overlapping ranges should not succeed");
-    } catch (HttpSolrClient.RemoteSolrException e) {
-      log.info("Expected exception:", e);
-    }
-    subRanges.clear();
-  }
-
-  private void splitByUniqueKeyTest() throws Exception {
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
-    Slice shard1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(SHARD1);
-    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
-    List<DocRouter.Range> subRanges = new ArrayList<>();
-    if (usually())  {
-      List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
-      // 75% of range goes to shard1_0 and the rest to shard1_1
-      subRanges.add(new DocRouter.Range(ranges.get(0).min, ranges.get(2).max));
-      subRanges.add(ranges.get(3));
-    } else  {
-      subRanges = router.partitionRange(2, shard1Range);
-    }
-    final List<DocRouter.Range> ranges = subRanges;
-    final int[] docCounts = new int[ranges.size()];
-    int numReplicas = shard1.getReplicas().size();
-
-    del("*:*");
-    for (int id = 0; id <= 100; id++) {
-      String shardKey = "" + (char)('a' + (id % 26)); // See comment in ShardRoutingTest for hash distribution
-      indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id), id);
-    }
-    commit();
-
-    Thread indexThread = new Thread() {
-      @Override
-      public void run() {
-        Random random = random();
-        int max = atLeast(random, 401);
-        int sleep = atLeast(random, 25);
-        log.info("SHARDSPLITTEST: Going to add " + max + " number of docs at 1 doc per " + sleep + "ms");
-        Set<String> deleted = new HashSet<>();
-        for (int id = 101; id < max; id++) {
-          try {
-            indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
-            Thread.sleep(sleep);
-            if (usually(random))  {
-              String delId = String.valueOf(random.nextInt(id - 101 + 1) + 101);
-              if (deleted.contains(delId))  continue;
-              try {
-                deleteAndUpdateCount(router, ranges, docCounts, delId);
-                deleted.add(delId);
-              } catch (Exception e) {
-                log.error("Exception while deleting docs", e);
-              }
-            }
-          } catch (Exception e) {
-            log.error("Exception while adding doc id = " + id, e);
-            // do not select this id for deletion ever
-            deleted.add(String.valueOf(id));
-          }
-        }
-      }
-    };
-    indexThread.start();
-
-    try {
-      for (int i = 0; i < 3; i++) {
-        try {
-          splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
-          log.info("Layout after split: \n");
-          printLayout();
-          break;
-        } catch (HttpSolrClient.RemoteSolrException e) {
-          if (e.code() != 500)  {
-            throw e;
-          }
-          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
-          if (i == 2) {
-            fail("SPLITSHARD was not successful even after three tries");
-          }
-        }
-      }
-    } finally {
-      try {
-        indexThread.join();
-      } catch (InterruptedException e) {
-        log.error("Indexing thread interrupted", e);
-      }
-    }
-
-    waitForRecoveriesToFinish(true);
-    checkDocCountsAndShardStates(docCounts, numReplicas);
-  }
-
-
-  public void splitByRouteFieldTest() throws Exception  {
-    log.info("Starting testSplitWithRouteField");
-    String collectionName = "routeFieldColl";
-    int numShards = 4;
-    int replicationFactor = 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
-        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
-    String shard_fld = "shard_s";
-    try (CloudSolrClient client = createCloudClient(null)) {
-      Map<String, Object> props = Utils.makeMap(
-          REPLICATION_FACTOR, replicationFactor,
-          MAX_SHARDS_PER_NODE, maxShardsPerNode,
-          NUM_SLICES, numShards,
-          "router.field", shard_fld);
-
-      createCollection(collectionInfos, collectionName, props, client);
-    }
-
-    List<Integer> list = collectionInfos.get(collectionName);
-    checkForCollection(collectionName, list, null);
-
-    waitForRecoveriesToFinish(false);
-
-    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
-
-    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
-
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-      final DocRouter router = clusterState.getCollection(collectionName).getRouter();
-      Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
-      DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
-      final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
-      final int[] docCounts = new int[ranges.size()];
-
-      for (int i = 100; i <= 200; i++) {
-        String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
-
-        collectionClient.add(getDoc(id, i, "n_ti", i, shard_fld, shardKey));
-        int idx = getHashRangeIdx(router, ranges, shardKey);
-        if (idx != -1) {
-          docCounts[idx]++;
-        }
-      }
-
-      for (int i = 0; i < docCounts.length; i++) {
-        int docCount = docCounts[i];
-        log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
-      }
-
-      collectionClient.commit();
-
-      for (int i = 0; i < 3; i++) {
-        try {
-          splitShard(collectionName, SHARD1, null, null);
-          break;
-        } catch (HttpSolrClient.RemoteSolrException e) {
-          if (e.code() != 500) {
-            throw e;
-          }
-          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
-          if (i == 2) {
-            fail("SPLITSHARD was not successful even after three tries");
-          }
-        }
-      }
-
-      waitForRecoveriesToFinish(collectionName, false);
-
-      assertEquals(docCounts[0], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_0")).getResults().getNumFound());
-      assertEquals(docCounts[1], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_1")).getResults().getNumFound());
-    }
-  }
-
-  private void splitByRouteKeyTest() throws Exception {
-    log.info("Starting splitByRouteKeyTest");
-    String collectionName = "splitByRouteKeyTest";
-    int numShards = 4;
-    int replicationFactor = 2;
-    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
-        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
-
-    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      Map<String, Object> props = Utils.makeMap(
-          REPLICATION_FACTOR, replicationFactor,
-          MAX_SHARDS_PER_NODE, maxShardsPerNode,
-          NUM_SLICES, numShards);
-
-      createCollection(collectionInfos, collectionName, props, client);
-    }
-
-    List<Integer> list = collectionInfos.get(collectionName);
-    checkForCollection(collectionName, list, null);
-
-    waitForRecoveriesToFinish(false);
-
-    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
-
-    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
-
-      String splitKey = "b!";
-
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-      final DocRouter router = clusterState.getCollection(collectionName).getRouter();
-      Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
-      DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
-      final List<DocRouter.Range> ranges = ((CompositeIdRouter) router).partitionRangeByKey(splitKey, shard1Range);
-      final int[] docCounts = new int[ranges.size()];
-
-      int uniqIdentifier = (1 << 12);
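-      // sentinel n_ti value assigned only to docs whose route key matches splitKey, queried for below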
-      int splitKeyDocCount = 0;
-      for (int i = 100; i <= 200; i++) {
-        String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
-
-        String idStr = shardKey + "!" + i;
-        collectionClient.add(getDoc(id, idStr, "n_ti", (shardKey + "!").equals(splitKey) ? uniqIdentifier : i));
-        int idx = getHashRangeIdx(router, ranges, idStr);
-        if (idx != -1) {
-          docCounts[idx]++;
-        }
-        if (splitKey.equals(shardKey + "!"))
-          splitKeyDocCount++;
-      }
-
-      for (int i = 0; i < docCounts.length; i++) {
-        int docCount = docCounts[i];
-        log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
-      }
-      log.info("Route key doc count = {}", splitKeyDocCount);
-
-      collectionClient.commit();
-
-      for (int i = 0; i < 3; i++) {
-        try {
-          splitShard(collectionName, null, null, splitKey);
-          break;
-        } catch (HttpSolrClient.RemoteSolrException e) {
-          if (e.code() != 500) {
-            throw e;
-          }
-          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
-          if (i == 2) {
-            fail("SPLITSHARD was not successful even after three tries");
-          }
-        }
-      }
-
-      waitForRecoveriesToFinish(collectionName, false);
-      SolrQuery solrQuery = new SolrQuery("*:*");
-      assertEquals("DocCount on shard1_0 does not match", docCounts[0], collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
-      assertEquals("DocCount on shard1_1 does not match", docCounts[1], collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
-      assertEquals("DocCount on shard1_2 does not match", docCounts[2], collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
-
-      solrQuery = new SolrQuery("n_ti:" + uniqIdentifier);
-      assertEquals("shard1_0 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
-      assertEquals("Wrong number of docs on shard1_1 for route key: " + splitKey, splitKeyDocCount, collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
-      assertEquals("shard1_2 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
-    }
-  }
-
-  protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws Exception {
-    ClusterState clusterState = null;
-    Slice slice1_0 = null, slice1_1 = null;
-    int i = 0;
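-    // poll the cluster state for up to 10 x 500ms until both sub-shards report ACTIVE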
-    for (i = 0; i < 10; i++) {
-      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      clusterState = zkStateReader.getClusterState();
-      slice1_0 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice("shard1_0");
-      slice1_1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice("shard1_1");
-      if (slice1_0.getState() == Slice.State.ACTIVE && slice1_1.getState() == Slice.State.ACTIVE) {
-        break;
-      }
-      Thread.sleep(500);
-    }
-
-    log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
-
-    assertNotNull("Cluster state does not contain shard1_0", slice1_0);
-    assertNotNull("Cluster state does not contain shard1_0", slice1_1);
-    assertSame("shard1_0 is not active", Slice.State.ACTIVE, slice1_0.getState());
-    assertSame("shard1_1 is not active", Slice.State.ACTIVE, slice1_1.getState());
-    assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
-    assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
-
-    commit();
-
-    // can't use checkShardConsistency because it insists on jettys and clients for each shard
-    checkSubShardConsistency(SHARD1_0);
-    checkSubShardConsistency(SHARD1_1);
-
-    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
-    query.set("distrib", false);
-
-    ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
-    QueryResponse response;
-    try (HttpSolrClient shard1_0Client = getHttpSolrClient(shard1_0.getCoreUrl())) {
-      response = shard1_0Client.query(query);
-    }
-    long shard10Count = response.getResults().getNumFound();
-
-    ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
-        AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
-    QueryResponse response2;
-    try (HttpSolrClient shard1_1Client = getHttpSolrClient(shard1_1.getCoreUrl())) {
-      response2 = shard1_1Client.query(query);
-    }
-    long shard11Count = response2.getResults().getNumFound();
-
-    logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
-
-    assertEquals("Wrong doc count on shard1_0. See SOLR-5309", docCounts[0], shard10Count);
-    assertEquals("Wrong doc count on shard1_1. See SOLR-5309", docCounts[1], shard11Count);
-  }
-
-  protected void checkSubShardConsistency(String shard) throws SolrServerException, IOException {
-    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
-    query.set("distrib", false);
-
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    Slice slice = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(shard);
-    long[] numFound = new long[slice.getReplicasMap().size()];
-    int c = 0;
-    for (Replica replica : slice.getReplicas()) {
-      String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
-      QueryResponse response;
-      try (HttpSolrClient client = getHttpSolrClient(coreUrl)) {
-        response = client.query(query);
-      }
-      numFound[c++] = response.getResults().getNumFound();
-      log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
-      assertTrue("Shard: " + shard + " Replica: " + coreUrl + " has 0 docs", response.getResults().getNumFound() > 0);
-    }
-    for (int i = 0; i < slice.getReplicasMap().size(); i++) {
-      assertEquals(shard + " is not consistent", numFound[0], numFound[i]);
-    }
-  }
-
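-  // Issues a raw SPLITSHARD request against the Collections API; shardId, subRanges and splitKey are each optional.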
-  protected void splitShard(String collection, String shardId, List<DocRouter.Range> subRanges, String splitKey) throws SolrServerException, IOException {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
-    params.set("collection", collection);
-    if (shardId != null)  {
-      params.set("shard", shardId);
-    }
-    if (subRanges != null)  {
-      StringBuilder ranges = new StringBuilder();
-      for (int i = 0; i < subRanges.size(); i++) {
-        DocRouter.Range subRange = subRanges.get(i);
-        ranges.append(subRange.toString());
-        if (i < subRanges.size() - 1)
-          ranges.append(",");
-      }
-      params.set("ranges", ranges.toString());
-    }
-    if (splitKey != null) {
-      params.set("split.key", splitKey);
-    }
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
-        .getBaseURL();
-    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
-
-    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 30000, 60000 * 5)) {
-      baseServer.request(request);
-    }
-  }
-
-  protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
-    index("id", id, "n_ti", n);
-
-    int idx = getHashRangeIdx(router, ranges, id);
-    if (idx != -1)  {
-      docCounts[idx]++;
-    }
-  }
-
-  protected void deleteAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) throws Exception {
-    controlClient.deleteById(id);
-    cloudClient.deleteById(id);
-
-    int idx = getHashRangeIdx(router, ranges, id);
-    if (idx != -1)  {
-      docCounts[idx]--;
-    }
-  }
-
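-  // Returns the index of the range containing the id's hash, or -1 if no range matches.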
-  public static int getHashRangeIdx(DocRouter router, List<DocRouter.Range> ranges, String id) {
-    int hash = 0;
-    if (router instanceof HashBasedRouter) {
-      HashBasedRouter hashBasedRouter = (HashBasedRouter) router;
-      hash = hashBasedRouter.sliceHash(id, null, null, null);
-    }
-    for (int i = 0; i < ranges.size(); i++) {
-      DocRouter.Range range = ranges.get(i);
-      if (range.includes(hash))
-        return i;
-    }
-    return -1;
-  }
-
-  protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
-    for (int i = 0; i < docCounts.length; i++) {
-      int docCount = docCounts[i];
-      log.info("Expected docCount for shard1_{} = {}", i, docCount);
-    }
-
-    log.info("Actual docCount for shard1_0 = {}", shard10Count);
-    log.info("Actual docCount for shard1_1 = {}", shard11Count);
-    Map<String, String> idVsVersion = new HashMap<>();
-    Map<String, SolrDocument> shard10Docs = new HashMap<>();
-    Map<String, SolrDocument> shard11Docs = new HashMap<>();
-    for (int i = 0; i < response.getResults().size(); i++) {
-      SolrDocument document = response.getResults().get(i);
-      idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString());
-      SolrDocument old = shard10Docs.put(document.getFieldValue("id").toString(), document);
-      if (old != null) {
-        log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_0. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
-      }
-    }
-    for (int i = 0; i < response2.getResults().size(); i++) {
-      SolrDocument document = response2.getResults().get(i);
-      String value = document.getFieldValue("id").toString();
-      String version = idVsVersion.get(value);
-      if (version != null) {
-        log.error("DUPLICATE: ID: " + value + " , shard1_0Version: " + version + " shard1_1Version:" + document.getFieldValue("_version_"));
-      }
-      SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document);
-      if (old != null) {
-        log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_1. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
-      }
-    }
-  }
-
-  @Override
-  protected SolrClient createNewSolrClient(String collection, String baseUrl) {
-    return super.createNewSolrClient(collection, baseUrl, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
-  }
-
-  @Override
-  protected SolrClient createNewSolrClient(int port) {
-    return super.createNewSolrClient(port, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
-  }
-
-  @Override
-  protected CloudSolrClient createCloudClient(String defaultCollection) {
-    CloudSolrClient client = super.createCloudClient(defaultCollection);
-    return client;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java b/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java
deleted file mode 100644
index 3e18ce0..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/SimpleCollectionCreateDeleteTest.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.junit.Test;
-
-public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestBase {
-
-  public SimpleCollectionCreateDeleteTest() {
-    sliceCount = 1;
-  }
-
-  @Test
-  @ShardsFixed(num = 1)
-  public void test() throws Exception {
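-    // Create and delete a collection on the overseer node itself, then re-create it on a different node.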
-    String overseerNode = OverseerCollectionConfigSetProcessor.getLeaderNode(cloudClient.getZkStateReader().getZkClient());
-    String notOverseerNode = null;
-    for (CloudJettyRunner cloudJetty : cloudJettys) {
-      if (!overseerNode.equals(cloudJetty.nodeName)) {
-        notOverseerNode = cloudJetty.nodeName;
-        break;
-      }
-    }
-    String collectionName = "SimpleCollectionCreateDeleteTest";
-    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
-            .setCreateNodeSet(overseerNode)
-            .setStateFormat(2);
-
-    NamedList<Object> request = create.process(cloudClient).getResponse();
-
-    if (request.get("success") != null) {
-      assertTrue(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
-
-      CollectionAdminRequest delete = CollectionAdminRequest.deleteCollection(collectionName);
-      cloudClient.request(delete);
-
-      assertFalse(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
-
-      // create collection again on a node other than the overseer leader
-      create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
-              .setCreateNodeSet(notOverseerNode)
-              .setStateFormat(2);
-      request = create.process(cloudClient).getResponse();
-      assertTrue("Collection creation should not have failed", request.get("success") != null);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
deleted file mode 100644
index cf4111e..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestCollectionAPI.java
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.google.common.collect.Lists;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.V2Request;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Test;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARD_UNIQUE;
-
-public class TestCollectionAPI extends ReplicaPropertiesBase {
-
-  public static final String COLLECTION_NAME = "testcollection";
-  public static final String COLLECTION_NAME1 = "testcollection1";
-
-  public TestCollectionAPI() {
-    schemaString = "schema15.xml";      // we need a string id
-    sliceCount = 2;
-  }
-
-  @Test
-  @ShardsFixed(num = 2)
-  public void test() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      CollectionAdminRequest.Create req;
-      if (useTlogReplicas()) {
-        req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1", 2, 0, 1, 1);
-      } else {
-        req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1", 2, 1, 0, 1);
-      }
-      req.setMaxShardsPerNode(2);
-      setV2(req);
-      client.request(req);
-      assertV2CallsCount();
-      createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
-    }
-
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME1, 1);
-    waitForRecoveriesToFinish(COLLECTION_NAME, false);
-    waitForRecoveriesToFinish(COLLECTION_NAME1, false);
-
-    listCollection();
-    clusterStatusNoCollection();
-    clusterStatusWithCollection();
-    clusterStatusWithCollectionAndShard();
-    clusterStatusWithRouteKey();
-    clusterStatusAliasTest();
-    clusterStatusRolesTest();
-    clusterStatusBadCollectionTest();
-    replicaPropTest();
-    clusterStatusZNodeVersion();
-    testClusterStateMigration();
-    testCollectionCreationCollectionNameValidation();
-    testCollectionCreationShardNameValidation();
-    testAliasCreationNameValidation();
-    testShardCreationNameValidation();
-  }
-
-  private void clusterStatusWithCollectionAndShard() throws IOException, SolrServerException {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", COLLECTION_NAME);
-      params.set("shard", SHARD1);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertNotNull(collections.get(COLLECTION_NAME));
-      assertEquals(1, collections.size());
-      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
-      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
-      assertEquals(1, shardStatus.size());
-      Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD1);
-      assertNotNull(selectedShardStatus);
-
-    }
-  }
-
-
-  private void listCollection() throws IOException, SolrServerException {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.LIST.toString());
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      List<String> collections = (List<String>) rsp.get("collections");
-      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
-      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
-      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
-      assertTrue(COLLECTION_NAME1 + " was not found in list", collections.contains(COLLECTION_NAME1));
-    }
-
-  }
-
-  private void clusterStatusNoCollection() throws Exception {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertNotNull(collections.get(COLLECTION_NAME1));
-      assertEquals(4, collections.size());
-
-      List<String> liveNodes = (List<String>) cluster.get("live_nodes");
-      assertNotNull("Live nodes should not be null", liveNodes);
-      assertFalse(liveNodes.isEmpty());
-    }
-
-  }
-
-  private void clusterStatusWithCollection() throws IOException, SolrServerException {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", COLLECTION_NAME);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertEquals(1, collections.size());
-      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
-      assertNotNull(collection);
-      assertEquals("conf1", collection.get("configName"));
-//      assertEquals("1", collection.get("nrtReplicas"));
-    }
-  }
-
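-  // CLUSTERSTATUS exposes the collection state's znodeVersion; adding a replica must bump it.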
-  private void clusterStatusZNodeVersion() throws Exception {
-    String cname = "clusterStatusZNodeVersion";
-    try (CloudSolrClient client = createCloudClient(null)) {
-      setV2(CollectionAdminRequest.createCollection(cname, "conf1", 1, 1).setMaxShardsPerNode(1)).process(client);
-      assertV2CallsCount();
-      waitForRecoveriesToFinish(cname, true);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", cname);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertEquals(1, collections.size());
-      Map<String, Object> collection = (Map<String, Object>) collections.get(cname);
-      assertNotNull(collection);
-      assertEquals("conf1", collection.get("configName"));
-      Integer znodeVersion = (Integer) collection.get("znodeVersion");
-      assertNotNull(znodeVersion);
-
-      CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(cname, "shard1");
-      setV2(addReplica);
-      addReplica.process(client);
-      assertV2CallsCount();
-      waitForRecoveriesToFinish(cname, true);
-
-      rsp = client.request(request);
-      cluster = (NamedList<Object>) rsp.get("cluster");
-      collections = (NamedList<Object>) cluster.get("collections");
-      collection = (Map<String, Object>) collections.get(cname);
-      Integer newVersion = (Integer) collection.get("znodeVersion");
-      assertNotNull(newVersion);
-      assertTrue(newVersion > znodeVersion);
-    }
-  }
-
-  private static long totalexpectedV2Calls;
-
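-  // Randomly routes the request through the V2 API (sometimes with the binary payload format)
-  // and tracks the expected V2 call count so assertV2CallsCount() can verify it.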
-  public static SolrRequest setV2(SolrRequest req) {
-    if (V2Request.v2Calls.get() == null) V2Request.v2Calls.set(new AtomicLong());
-    totalexpectedV2Calls = V2Request.v2Calls.get().get();
-    if (random().nextBoolean()) {
-      req.setUseV2(true);
-      req.setUseBinaryV2(random().nextBoolean());
-      totalexpectedV2Calls++;
-    }
-    return req;
-  }
-
-  public static void assertV2CallsCount() {
-    assertEquals(totalexpectedV2Calls, V2Request.v2Calls.get().get());
-  }
-
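-  // CLUSTERSTATUS with a _route_ param should report only the shard that owns that route key.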
-  private void clusterStatusWithRouteKey() throws IOException, SolrServerException {
-    try (CloudSolrClient client = createCloudClient(DEFAULT_COLLECTION)) {
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.addField("id", "a!123"); // goes to shard2. see ShardRoutingTest for details
-      client.add(doc);
-      client.commit();
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", DEFAULT_COLLECTION);
-      params.set(ShardParams._ROUTE_, "a!");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertNotNull(collections.get(DEFAULT_COLLECTION));
-      assertEquals(1, collections.size());
-      Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
-      assertEquals("conf1", collection.get("configName"));
-      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
-      assertEquals(1, shardStatus.size());
-      Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD2);
-      assertNotNull(selectedShardStatus);
-    }
-  }
-
-  private void clusterStatusAliasTest() throws Exception  {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
-      params.set("name", "myalias");
-      params.set("collections", DEFAULT_COLLECTION + "," + COLLECTION_NAME);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      client.request(request);
-      params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", DEFAULT_COLLECTION);
-      request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-
-
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      Map<String, String> aliases = (Map<String, String>) cluster.get("aliases");
-      assertNotNull("Aliases should not be null", aliases);
-      assertEquals("Alias: myalias not found in cluster status",
-          DEFAULT_COLLECTION + "," + COLLECTION_NAME, aliases.get("myalias"));
-
-      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
-      assertNotNull("Collections should not be null in cluster state", collections);
-      assertNotNull(collections.get(DEFAULT_COLLECTION));
-      Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
-      assertEquals("conf1", collection.get("configName"));
-      List<String> collAlias = (List<String>) collection.get("aliases");
-      assertEquals("Aliases not found", Lists.newArrayList("myalias"), collAlias);
-    }
-  }
-
-  private void clusterStatusRolesTest() throws Exception  {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-      Replica replica = client.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.ADDROLE.toString());
-      params.set("node", replica.getNodeName());
-      params.set("role", "overseer");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      client.request(request);
-
-      params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", DEFAULT_COLLECTION);
-      request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-      assertNotNull("Cluster state should not be null", cluster);
-      Map<String, Object> roles = (Map<String, Object>) cluster.get("roles");
-      assertNotNull("Role information should not be null", roles);
-      List<String> overseer = (List<String>) roles.get("overseer");
-      assertNotNull(overseer);
-      assertEquals(1, overseer.size());
-      assertTrue(overseer.contains(replica.getNodeName()));
-    }
-  }
-
-  private void clusterStatusBadCollectionTest() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
-      params.set("collection", "bad_collection_name");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail("Collection does not exist. An exception should be thrown");
-      } catch (SolrException e) {
-        //expected
-        assertTrue(e.getMessage().contains("Collection: bad_collection_name not found"));
-      }
-    }
-  }
-
-  private void replicaPropTest() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
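-      // Naming convention below: cN_sM_rK = replica K of slice (shard) M in collection N.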
-      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
-      List<String> sliceList = new ArrayList<>(slices.keySet());
-      String c1_s1 = sliceList.get(0);
-      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
-      String c1_s1_r1 = replicasList.get(0);
-      String c1_s1_r2 = replicasList.get(1);
-
-      String c1_s2 = sliceList.get(1);
-      replicasList = new ArrayList<>(slices.get(c1_s2).getReplicasMap().keySet());
-      String c1_s2_r1 = replicasList.get(0);
-      String c1_s2_r2 = replicasList.get(1);
-
-
-      slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME1).getSlicesMap();
-      sliceList = new ArrayList<>(slices.keySet());
-      String c2_s1 = sliceList.get(0);
-      replicasList = new ArrayList<>(slices.get(c2_s1).getReplicasMap().keySet());
-      String c2_s1_r1 = replicasList.get(0);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString());
-
-      // Ensure we get errors back when required parameters are omitted
-
-      missingParamsError(client, params);
-      params.set("collection", COLLECTION_NAME);
-      missingParamsError(client, params);
-      params.set("shard", c1_s1);
-      missingParamsError(client, params);
-      params.set("replica", c1_s1_r1);
-      missingParamsError(client, params);
-      params.set("property", "preferredLeader");
-      missingParamsError(client, params);
-      params.set("property.value", "true");
-
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      client.request(request);
-
-      // The above should have set exactly one preferredleader...
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r2,
-          "property", "preferredLeader",
-          "property.value", "true");
-      // The preferred leader property for shard1 should have switched to the other replica.
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s2,
-          "replica", c1_s2_r1,
-          "property", "preferredLeader",
-          "property.value", "true");
-
-      // Now we should have a preferred leader in both shards...
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME1,
-          "shard", c2_s1,
-          "replica", c2_s1_r1,
-          "property", "preferredLeader",
-          "property.value", "true");
-
-      // Now we should have three preferred leaders.
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME1, c2_s1_r1, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME1,
-          "shard", c2_s1,
-          "replica", c2_s1_r1,
-          "property", "preferredLeader");
-
-      // Now we should have two preferred leaders.
-      // But first we have to wait for the overseer to finish the action
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      // Try adding an arbitrary property to one that has the leader property
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "testprop",
-          "property.value", "true");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r2,
-          "property", "prop",
-          "property.value", "silly");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "testprop",
-          "property.value", "nonsense",
-          SHARD_UNIQUE, "true");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "nonsense");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "property.testprop",
-          "property.value", "true",
-          SHARD_UNIQUE, "false");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "property.testprop");
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "testprop");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-            "collection", COLLECTION_NAME,
-            "shard", c1_s1,
-            "replica", c1_s1_r1,
-            "property", "preferredLeader",
-            "property.value", "true",
-            SHARD_UNIQUE, "false");
-        fail("Should have thrown an exception, setting shardUnique=false is not allowed for 'preferredLeader'.");
-      } catch (SolrException se) {
-        assertTrue("Should have received a specific error message",
-            se.getMessage().contains("with the shardUnique parameter set to something other than 'true'"));
-      }
-
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "testprop");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
-      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
-
-      Map<String, String> origProps = getProps(client, COLLECTION_NAME, c1_s1_r1,
-          "state", "core", "node_name", "base_url");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "state",
-          "property.value", "state_bad");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "core",
-          "property.value", "core_bad");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "node_name",
-          "property.value", "node_name_bad");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "base_url",
-          "property.value", "base_url_bad");
-
-      // The above should have created new properties rather than overwriting the replica's actual values.
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "state", "state_bad");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "core", "core_bad");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "node_name", "node_name_bad");
-      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "base_url", "base_url_bad");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "state");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "core");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "node_name");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "base_url");
-
-      // They better not have been changed!
-      for (Map.Entry<String, String> ent : origProps.entrySet()) {
-        verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, ent.getKey(), ent.getValue());
-      }
-
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "state");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "core");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "node_name");
-      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "base_url");
-
-    }
-  }
-
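-  // Creates a stateFormat=1 collection, migrates it to stateFormat=2, and verifies the documents survive.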
-  private void testClusterStateMigration() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-
-      CollectionAdminRequest.createCollection("testClusterStateMigration","conf1",1,1).setStateFormat(1).process(client);
-
-      waitForRecoveriesToFinish("testClusterStateMigration", true);
-
-      assertEquals(1, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
-
-      for (int i = 0; i < 10; i++) {
-        SolrInputDocument doc = new SolrInputDocument();
-        doc.addField("id", "id_" + i);
-        client.add("testClusterStateMigration", doc);
-      }
-      client.commit("testClusterStateMigration");
-
-      CollectionAdminRequest.migrateCollectionFormat("testClusterStateMigration").process(client);
-
-      client.getZkStateReader().forceUpdateCollection("testClusterStateMigration");
-
-      assertEquals(2, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
-
-      QueryResponse response = client.query("testClusterStateMigration", new SolrQuery("*:*"));
-      assertEquals(10, response.getResults().getNumFound());
-    }
-  }
-  
-  private void testCollectionCreationCollectionNameValidation() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
-      params.set("name", "invalid@name#with$weird%characters");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail();
-      } catch (RemoteSolrException e) {
-        final String errorMessage = e.getMessage();
-        assertTrue(errorMessage.contains("Invalid collection"));
-        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
-        assertTrue(errorMessage.contains("collection names must consist entirely of"));
-      }
-    }
-  }
-  
-  private void testCollectionCreationShardNameValidation() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
-      params.set("name", "valid_collection_name");
-      params.set("router.name", "implicit");
-      params.set("numShards", "1");
-      params.set("shards", "invalid@name#with$weird%characters");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail();
-      } catch (RemoteSolrException e) {
-        final String errorMessage = e.getMessage();
-        assertTrue(errorMessage.contains("Invalid shard"));
-        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
-        assertTrue(errorMessage.contains("shard names must consist entirely of"));
-      }
-    }
-  }
-  
-  private void testAliasCreationNameValidation() throws Exception{
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
-      params.set("name", "invalid@name#with$weird%characters");
-      params.set("collections", COLLECTION_NAME);
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail();
-      } catch (RemoteSolrException e) {
-        final String errorMessage = e.getMessage();
-        assertTrue(errorMessage.contains("Invalid alias"));
-        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
-        assertTrue(errorMessage.contains("alias names must consist entirely of"));
-      }
-    }
-  }
-
-  private void testShardCreationNameValidation() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-      // Create a collection w/ implicit router
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
-      params.set("name", "valid_collection_name");
-      params.set("shards", "a");
-      params.set("router.name", "implicit");
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      client.request(request);
-
-      params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.CREATESHARD.toString());
-      params.set("collection", "valid_collection_name");
-      params.set("shard", "invalid@name#with$weird%characters");
-
-      request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      try {
-        client.request(request);
-        fail();
-      } catch (RemoteSolrException e) {
-        final String errorMessage = e.getMessage();
-        assertTrue(errorMessage.contains("Invalid shard"));
-        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
-        assertTrue(errorMessage.contains("shard names must consist entirely of"));
-      }
-    }
-  }
-
-  // Returns a map containing every requested key; values may be blank if the property is unset.
-  private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
-      throws KeeperException, InterruptedException {
-
-    client.getZkStateReader().forceUpdateCollection(collectionName);
-    ClusterState clusterState = client.getZkStateReader().getClusterState();
-    final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-    if (docCollection == null || docCollection.getReplica(replicaName) == null) {
-      fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
-    }
-    Replica replica = docCollection.getReplica(replicaName);
-    Map<String, String> propMap = new HashMap<>();
-    for (String prop : props) {
-      propMap.put(prop, replica.getProperty(prop));
-    }
-    return propMap;
-  }
-  private void missingParamsError(CloudSolrClient client, ModifiableSolrParams origParams)
-      throws IOException, SolrServerException {
-
-    SolrRequest request;
-    try {
-      request = new QueryRequest(origParams);
-      request.setPath("/admin/collections");
-      client.request(request);
-      fail("Should have thrown a SolrException due to lack of a required parameter.");
-    } catch (SolrException se) {
-      assertTrue("Should have gotten a specific message back mentioning 'missing required parameter'. Got: " + se.getMessage(),
-          se.getMessage().toLowerCase(Locale.ROOT).contains("missing required parameter:"));
-    }
-  }
-}


[13/15] lucene-solr:master: SOLR-11817: Move Collections API classes to its own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteSnapshotCmd.java
deleted file mode 100644
index 765f4b9..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteSnapshotCmd.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the functionality of deleting a collection level snapshot.
- */
-public class DeleteSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteSnapshotCmd (OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName =  message.getStr(COLLECTION_PROP);
-    String commitName =  message.getStr(CoreAdminParams.COMMIT_NAME);
-    String asyncId = message.getStr(ASYNC);
-    Map<String, String> requestMap = new HashMap<>();
-    NamedList shardRequestResults = new NamedList();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    SolrZkClient zkClient = ocmh.overseer.getZkController().getZkClient();
-
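-    // Look up the snapshot metadata in ZK, send DELETESNAPSHOT core-admin requests to each
-    // live replica that may still hold the snapshot, then reconcile the results below.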
-    Optional<CollectionSnapshotMetaData> meta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
-    if (!meta.isPresent()) { // Snapshot not found. Nothing to do.
-      return;
-    }
-
-    log.info("Deleting a snapshot for collection={} with commitName={}", collectionName, commitName);
-
-    Set<String> existingCores = new HashSet<>();
-    for (Slice s : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica r : s.getReplicas()) {
-        existingCores.add(r.getCoreName());
-      }
-    }
-
-    Set<String> coresWithSnapshot = new HashSet<>();
-    for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
-      if (existingCores.contains(m.getCoreName())) {
-        coresWithSnapshot.add(m.getCoreName());
-      }
-    }
-
-    log.info("Existing cores with snapshot for collection={} are {}", collectionName, existingCores);
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        if (replica.getState() == State.DOWN) {
-          continue; // Since replica is down - no point sending a request.
-        }
-
-        // Note - when a snapshot is found in the in_progress state, it is the result of an overseer
-        // failure while handling snapshot creation. Since we don't know the exact set of
-        // replicas to contact at this point, we try on all replicas.
-        if (meta.get().getStatus() == SnapshotStatus.InProgress || coresWithSnapshot.contains(replica.getCoreName())) {
-          String coreName = replica.getStr(CORE_NAME_PROP);
-
-          ModifiableSolrParams params = new ModifiableSolrParams();
-          params.set(CoreAdminParams.ACTION, CoreAdminAction.DELETESNAPSHOT.toString());
-          params.set(NAME, slice.getName());
-          params.set(CORE_NAME_PROP, coreName);
-          params.set(CoreAdminParams.COMMIT_NAME, commitName);
-
-          log.info("Sending deletesnapshot request to core={} with commitName={}", coreName, commitName);
-          ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-        }
-      }
-    }
-
-    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
-    NamedList success = (NamedList) shardRequestResults.get("success");
-    List<CoreSnapshotMetaData> replicas = new ArrayList<>();
-    if (success != null) {
-      for ( int i = 0 ; i < success.size() ; i++) {
-        NamedList resp = (NamedList)success.getVal(i);
-        // Unfortunately async processing logic doesn't provide the "core" name automatically.
-        String coreName = (String)resp.get("core");
-        coresWithSnapshot.remove(coreName);
-      }
-    }
-
-    if (!coresWithSnapshot.isEmpty()) { // One or more failures.
-      log.warn("Failed to delete a snapshot for collection {} with commitName = {}. Snapshot could not be deleted for following cores {}",
-          collectionName, commitName, coresWithSnapshot);
-
-      List<CoreSnapshotMetaData> replicasWithSnapshot = new ArrayList<>();
-      for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
-        if (coresWithSnapshot.contains(m.getCoreName())) {
-          replicasWithSnapshot.add(m);
-        }
-      }
-
-      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
-      // which cores still contain the named snapshot.
-      CollectionSnapshotMetaData newResult = new CollectionSnapshotMetaData(meta.get().getName(), SnapshotStatus.Failed,
-          meta.get().getCreationDate(), replicasWithSnapshot);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, newResult);
-      log.info("Saved snapshot information for collection={} with commitName={} in Zookeeper as follows", collectionName, commitName,
-          Utils.toJSON(newResult));
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to delete snapshot on cores " + coresWithSnapshot);
-
-    } else {
-      // Delete the ZK path so that we eliminate the references of this snapshot from collection level meta-data.
-      SolrSnapshotManager.deleteCollectionLevelSnapshot(zkClient, collectionName, commitName);
-      log.info("Deleted Zookeeper snapshot metdata for collection={} with commitName={}", collectionName, commitName);
-      log.info("Successfully deleted snapshot for collection={} with commitName={}", collectionName, commitName);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
index 2faf6e9..953023f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
@@ -28,6 +28,7 @@ import java.util.Random;
 import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
 import org.apache.solr.cloud.overseer.SliceMutator;
@@ -39,8 +40,8 @@ import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARD_UNIQUE;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
 
 // Class to encapsulate processing replica properties that have at most one replica hosting a property per slice.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/LeaderRecoveryWatcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/LeaderRecoveryWatcher.java b/solr/core/src/java/org/apache/solr/cloud/LeaderRecoveryWatcher.java
deleted file mode 100644
index 1eb4873..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/LeaderRecoveryWatcher.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.Set;
-
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-
-/**
- * We use this watcher to wait for any eligible replica in a shard to become active so that it can become a leader.
- */
-public class LeaderRecoveryWatcher implements CollectionStateWatcher {
-  String collectionId;
-  String shardId;
-  String replicaId;
-  String targetCore;
-  SolrCloseableLatch latch;
-
-  /**
-   * Watch for recovery of a replica
-   *
-   * @param collectionId   collection name
-   * @param shardId        shard id
-   * @param replicaId      source replica name (coreNodeName)
-   * @param targetCore     specific target core name - if null then any active replica will do
-   * @param latch countdown when recovered
-   */
-  LeaderRecoveryWatcher(String collectionId, String shardId, String replicaId, String targetCore, SolrCloseableLatch latch) {
-    this.collectionId = collectionId;
-    this.shardId = shardId;
-    this.replicaId = replicaId;
-    this.targetCore = targetCore;
-    this.latch = latch;
-  }
-
-  @Override
-  public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-    if (collectionState == null) { // collection has been deleted - don't wait
-      latch.countDown();
-      return true;
-    }
-    Slice slice = collectionState.getSlice(shardId);
-    if (slice == null) { // shard has been removed - don't wait
-      latch.countDown();
-      return true;
-    }
-    for (Replica replica : slice.getReplicas()) {
-      // Check whether another replica exists - it doesn't have to be the one we're moving.
-      // As long as it's active and can become a leader, we don't have to wait for
-      // recovery of the specific replica we've just added.
-      if (!replica.getName().equals(replicaId)) {
-        if (replica.getType().equals(Replica.Type.PULL)) { // not eligible for leader election
-          continue;
-        }
-        // check its state
-        String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-        if (targetCore != null && !targetCore.equals(coreName)) {
-          continue;
-        }
-        if (replica.isActive(liveNodes)) { // recovered - stop waiting
-          latch.countDown();
-          return true;
-        }
-      }
-    }
-    // set the watch again to wait for the new replica to recover
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java b/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
deleted file mode 100644
index 02fdb5c..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/MigrateCmd.java
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.RoutingRule;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.update.SolrIndexSplitter;
-import org.apache.solr.util.TimeOut;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.Utils.makeMap;
-
-public class MigrateCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public MigrateCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String sourceCollectionName = message.getStr("collection");
-    String splitKey = message.getStr("split.key");
-    String targetCollectionName = message.getStr("target.collection");
-    int timeout = message.getInt("forward.timeout", 10 * 60) * 1000;
-
-    DocCollection sourceCollection = clusterState.getCollection(sourceCollectionName);
-    if (sourceCollection == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown source collection: " + sourceCollectionName);
-    }
-    DocCollection targetCollection = clusterState.getCollection(targetCollectionName);
-    if (targetCollection == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown target collection: " + sourceCollectionName);
-    }
-    if (!(sourceCollection.getRouter() instanceof CompositeIdRouter)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source collection must use a compositeId router");
-    }
-    if (!(targetCollection.getRouter() instanceof CompositeIdRouter)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target collection must use a compositeId router");
-    }
-    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
-    CompositeIdRouter targetRouter = (CompositeIdRouter) targetCollection.getRouter();
-    Collection<Slice> sourceSlices = sourceRouter.getSearchSlicesSingle(splitKey, null, sourceCollection);
-    if (sourceSlices.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "No active slices available in source collection: " + sourceCollection + "for given split.key: " + splitKey);
-    }
-    Collection<Slice> targetSlices = targetRouter.getSearchSlicesSingle(splitKey, null, targetCollection);
-    if (targetSlices.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "No active slices available in target collection: " + targetCollection + "for given split.key: " + splitKey);
-    }
-
-    String asyncId = null;
-    if (message.containsKey(ASYNC) && message.get(ASYNC) != null)
-      asyncId = message.getStr(ASYNC);
-
-    for (Slice sourceSlice : sourceSlices) {
-      for (Slice targetSlice : targetSlices) {
-        log.info("Migrating source shard: {} to target shard: {} for split.key = " + splitKey, sourceSlice, targetSlice);
-        migrateKey(clusterState, sourceCollection, sourceSlice, targetCollection, targetSlice, splitKey,
-            timeout, results, asyncId, message);
-      }
-    }
-  }
-
-  private void migrateKey(ClusterState clusterState, DocCollection sourceCollection, Slice sourceSlice,
-                          DocCollection targetCollection, Slice targetSlice,
-                          String splitKey, int timeout,
-                          NamedList results, String asyncId, ZkNodeProps message) throws Exception {
-    String tempSourceCollectionName = "split_" + sourceSlice.getName() + "_temp_" + targetSlice.getName();
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    if (clusterState.hasCollection(tempSourceCollectionName)) {
-      log.info("Deleting temporary collection: " + tempSourceCollectionName);
-      Map<String, Object> props = makeMap(
-          Overseer.QUEUE_OPERATION, DELETE.toLower(),
-          NAME, tempSourceCollectionName);
-
-      try {
-        ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-        clusterState = zkStateReader.getClusterState();
-      } catch (Exception e) {
-        log.warn("Unable to clean up existing temporary collection: " + tempSourceCollectionName, e);
-      }
-    }
-
-    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
-    DocRouter.Range keyHashRange = sourceRouter.keyHashRange(splitKey);
-
-    ShardHandlerFactory shardHandlerFactory = ocmh.shardHandlerFactory;
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-    log.info("Hash range for split.key: {} is: {}", splitKey, keyHashRange);
-    // intersect source range, keyHashRange and target range
-    // this is the range that has to be split from source and transferred to target
-    DocRouter.Range splitRange = ocmh.intersect(targetSlice.getRange(), ocmh.intersect(sourceSlice.getRange(), keyHashRange));
-    if (splitRange == null) {
-      log.info("No common hashes between source shard: {} and target shard: {}", sourceSlice.getName(), targetSlice.getName());
-      return;
-    }
-    log.info("Common hash range between source shard: {} and target shard: {} = " + splitRange, sourceSlice.getName(), targetSlice.getName());
-
-    Replica targetLeader = zkStateReader.getLeaderRetry(targetCollection.getName(), targetSlice.getName(), 10000);
-    // For tracking async calls.
-    Map<String, String> requestMap = new HashMap<>();
-
-    log.info("Asking target leader node: " + targetLeader.getNodeName() + " core: "
-        + targetLeader.getStr("core") + " to buffer updates");
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTBUFFERUPDATES.toString());
-    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to buffer updates", asyncId, requestMap);
-
-    ZkNodeProps m = new ZkNodeProps(
-        Overseer.QUEUE_OPERATION, OverseerAction.ADDROUTINGRULE.toLower(),
-        COLLECTION_PROP, sourceCollection.getName(),
-        SHARD_ID_PROP, sourceSlice.getName(),
-        "routeKey", SolrIndexSplitter.getRouteKey(splitKey) + "!",
-        "range", splitRange.toString(),
-        "targetCollection", targetCollection.getName(),
-        "expireAt", RoutingRule.makeExpiryAt(timeout));
-    log.info("Adding routing rule: " + m);
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-    // wait for a while until we see the new rule
-    log.info("Waiting to see routing rule updated in clusterstate");
-    TimeOut waitUntil = new TimeOut(60, TimeUnit.SECONDS, timeSource);
-    boolean added = false;
-    while (!waitUntil.hasTimedOut()) {
-      waitUntil.sleep(100);
-      sourceCollection = zkStateReader.getClusterState().getCollection(sourceCollection.getName());
-      sourceSlice = sourceCollection.getSlice(sourceSlice.getName());
-      Map<String, RoutingRule> rules = sourceSlice.getRoutingRules();
-      if (rules != null) {
-        RoutingRule rule = rules.get(SolrIndexSplitter.getRouteKey(splitKey) + "!");
-        if (rule != null && rule.getRouteRanges().contains(splitRange)) {
-          added = true;
-          break;
-        }
-      }
-    }
-    if (!added) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not add routing rule: " + m);
-    }
-
-    log.info("Routing rule added successfully");
-
-    // Create temp core on source shard
-    Replica sourceLeader = zkStateReader.getLeaderRetry(sourceCollection.getName(), sourceSlice.getName(), 10000);
-
-    // create a temporary collection with just one node on the shard leader
-    String configName = zkStateReader.readConfigName(sourceCollection.getName());
-    Map<String, Object> props = makeMap(
-        Overseer.QUEUE_OPERATION, CREATE.toLower(),
-        NAME, tempSourceCollectionName,
-        NRT_REPLICAS, 1,
-        NUM_SLICES, 1,
-        COLL_CONF, configName,
-        CREATE_NODE_SET, sourceLeader.getNodeName());
-    if (asyncId != null) {
-      String internalAsyncId = asyncId + Math.abs(System.nanoTime());
-      props.put(ASYNC, internalAsyncId);
-    }
-
-    log.info("Creating temporary collection: " + props);
-    ocmh.commandMap.get(CREATE).call(clusterState, new ZkNodeProps(props), results);
-    // refresh cluster state
-    clusterState = zkStateReader.getClusterState();
-    Slice tempSourceSlice = clusterState.getCollection(tempSourceCollectionName).getSlices().iterator().next();
-    Replica tempSourceLeader = zkStateReader.getLeaderRetry(tempSourceCollectionName, tempSourceSlice.getName(), 120000);
-
-    String tempCollectionReplica1 = tempSourceLeader.getCoreName();
-    String coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
-        sourceLeader.getNodeName(), tempCollectionReplica1);
-    // wait for the replicas to be seen as active on temp source leader
-    log.info("Asking source leader to wait for: " + tempCollectionReplica1 + " to be alive on: " + sourceLeader.getNodeName());
-    CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
-    cmd.setCoreName(tempCollectionReplica1);
-    cmd.setNodeName(sourceLeader.getNodeName());
-    cmd.setCoreNodeName(coreNodeName);
-    cmd.setState(Replica.State.ACTIVE);
-    cmd.setCheckLive(true);
-    cmd.setOnlyIfLeader(true);
-    // we don't want this to happen asynchronously
-    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), new ModifiableSolrParams(cmd.getParams()), shardHandler, null, null);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection leader" +
-        " or timed out waiting for it to come up", asyncId, requestMap);
-
-    log.info("Asking source leader to split index");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
-    params.set(CoreAdminParams.CORE, sourceLeader.getStr("core"));
-    params.add(CoreAdminParams.TARGET_CORE, tempSourceLeader.getStr("core"));
-    params.set(CoreAdminParams.RANGES, splitRange.toString());
-    params.set("split.key", splitKey);
-
-    String tempNodeName = sourceLeader.getNodeName();
-
-    ocmh.sendShardRequest(tempNodeName, params, shardHandler, asyncId, requestMap);
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to invoke SPLIT core admin command", asyncId, requestMap);
-
-    log.info("Creating a replica of temporary collection: {} on the target leader node: {}",
-        tempSourceCollectionName, targetLeader.getNodeName());
-    String tempCollectionReplica2 = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(),
-        zkStateReader.getClusterState().getCollection(tempSourceCollectionName), tempSourceSlice.getName(), Replica.Type.NRT);
-    props = new HashMap<>();
-    props.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-    props.put(COLLECTION_PROP, tempSourceCollectionName);
-    props.put(SHARD_ID_PROP, tempSourceSlice.getName());
-    props.put("node", targetLeader.getNodeName());
-    props.put(CoreAdminParams.NAME, tempCollectionReplica2);
-    // copy over property params:
-    for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
-        props.put(key, message.getStr(key));
-      }
-    }
-    // add async param
-    if (asyncId != null) {
-      props.put(ASYNC, asyncId);
-    }
-    ((AddReplicaCmd)ocmh.commandMap.get(ADDREPLICA)).addReplica(clusterState, new ZkNodeProps(props), results, null);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create replica of " +
-        "temporary collection in target leader node.", asyncId, requestMap);
-
-    coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
-        targetLeader.getNodeName(), tempCollectionReplica2);
-    // wait for the replicas to be seen as active on temp source leader
-    log.info("Asking temp source leader to wait for: " + tempCollectionReplica2 + " to be alive on: " + targetLeader.getNodeName());
-    cmd = new CoreAdminRequest.WaitForState();
-    cmd.setCoreName(tempSourceLeader.getStr("core"));
-    cmd.setNodeName(targetLeader.getNodeName());
-    cmd.setCoreNodeName(coreNodeName);
-    cmd.setState(Replica.State.ACTIVE);
-    cmd.setCheckLive(true);
-    cmd.setOnlyIfLeader(true);
-    params = new ModifiableSolrParams(cmd.getParams());
-
-    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection" +
-        " replica or timed out waiting for them to come up", asyncId, requestMap);
-
-    log.info("Successfully created replica of temp source collection on target leader node");
-
-    log.info("Requesting merge of temp source collection replica to target leader");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.MERGEINDEXES.toString());
-    params.set(CoreAdminParams.CORE, targetLeader.getStr("core"));
-    params.set(CoreAdminParams.SRC_CORE, tempCollectionReplica2);
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-    String msg = "MIGRATE failed to merge " + tempCollectionReplica2 + " to "
-        + targetLeader.getStr("core") + " on node: " + targetLeader.getNodeName();
-    ocmh.processResponses(results, shardHandler, true, msg, asyncId, requestMap);
-
-    log.info("Asking target leader to apply buffered updates");
-    params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
-
-    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to apply buffered updates",
-        asyncId, requestMap);
-
-    try {
-      log.info("Deleting temporary collection: " + tempSourceCollectionName);
-      props = makeMap(
-          Overseer.QUEUE_OPERATION, DELETE.toLower(),
-          NAME, tempSourceCollectionName);
-      ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-    } catch (Exception e) {
-      log.error("Unable to delete temporary collection: " + tempSourceCollectionName
-          + ". Please remove it manually", e);
-    }
-  }
-}
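The command deleted here (and re-added under org.apache.solr.cloud.api.collections elsewhere in this commit) is driven by the Collections API MIGRATE action; call() reads the collection, split.key, target.collection and forward.timeout params. A hedged SolrJ sketch of such a request, using only the generic request API; the zkHost and all names below are hypothetical:

    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class MigrateRequestExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:9983").build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("action", "MIGRATE");
          params.set("collection", "sourceCollection");
          params.set("target.collection", "targetCollection");
          params.set("split.key", "customerA!");  // compositeId route key
          params.set("forward.timeout", "600");   // seconds; the command defaults to 10 minutes
          client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params));
        }
      }
    }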

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
deleted file mode 100644
index 44493ec..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/MoveReplicaCmd.java
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Locale;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.update.UpdateLog;
-import org.apache.solr.util.TimeOut;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.*;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE;
-import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-
-public class MoveReplicaCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public MoveReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    moveReplica(ocmh.zkStateReader.getClusterState(), message, results);
-  }
-
-  private void moveReplica(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    log.debug("moveReplica() : {}", Utils.toJSONString(message));
-    ocmh.checkRequired(message, COLLECTION_PROP, CollectionParams.TARGET_NODE);
-    String collection = message.getStr(COLLECTION_PROP);
-    String targetNode = message.getStr(CollectionParams.TARGET_NODE);
-    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    boolean inPlaceMove = message.getBool(IN_PLACE_MOVE, true);
-    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
-
-    String async = message.getStr(ASYNC);
-
-    DocCollection coll = clusterState.getCollection(collection);
-    if (coll == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
-    }
-    if (!clusterState.getLiveNodes().contains(targetNode)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target node: " + targetNode + " not in live nodes: " + clusterState.getLiveNodes());
-    }
-    Replica replica = null;
-    if (message.containsKey(REPLICA_PROP)) {
-      String replicaName = message.getStr(REPLICA_PROP);
-      replica = coll.getReplica(replicaName);
-      if (replica == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Collection: " + collection + " replica: " + replicaName + " does not exist");
-      }
-    } else {
-      String sourceNode = message.getStr(CollectionParams.SOURCE_NODE, message.getStr(CollectionParams.FROM_NODE));
-      if (sourceNode == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + CollectionParams.SOURCE_NODE +
-            " or '" + CollectionParams.FROM_NODE + "' is a required param");
-      }
-      String shardId = message.getStr(SHARD_ID_PROP);
-      if (shardId == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + SHARD_ID_PROP + "' is a required param");
-      }
-      Slice slice = clusterState.getCollection(collection).getSlice(shardId);
-      List<Replica> sliceReplicas = new ArrayList<>(slice.getReplicas());
-      Collections.shuffle(sliceReplicas, RANDOM);
-      // this picks up a single random replica from the sourceNode
-      for (Replica r : sliceReplicas) {
-        if (r.getNodeName().equals(sourceNode)) {
-          replica = r;
-        }
-      }
-      if (replica == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Collection: " + collection + " node: " + sourceNode + " does not have any replica belonging to shard: " + shardId);
-      }
-    }
-
-    log.info("Replica will be moved to node {}: {}", targetNode, replica);
-    Slice slice = null;
-    for (Slice s : coll.getSlices()) {
-      if (s.getReplicas().contains(replica)) {
-        slice = s;
-      }
-    }
-    assert slice != null;
-    Object dataDir = replica.get("dataDir");
-    boolean isSharedFS = replica.getBool(ZkStateReader.SHARED_STORAGE_PROP, false) && dataDir != null;
-
-    if (isSharedFS && inPlaceMove) {
-      log.debug("-- moveHdfsReplica");
-      moveHdfsReplica(clusterState, results, dataDir.toString(), targetNode, async, coll, replica, slice, timeout, waitForFinalState);
-    } else {
-      log.debug("-- moveNormalReplica (inPlaceMove=" + inPlaceMove + ", isSharedFS=" + isSharedFS);
-      moveNormalReplica(clusterState, results, targetNode, async, coll, replica, slice, timeout, waitForFinalState);
-    }
-  }
-
-  private void moveHdfsReplica(ClusterState clusterState, NamedList results, String dataDir, String targetNode, String async,
-                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
-    String skipCreateReplicaInClusterState = "true";
-    if (clusterState.getLiveNodes().contains(replica.getNodeName())) {
-      skipCreateReplicaInClusterState = "false";
-      ZkNodeProps removeReplicasProps = new ZkNodeProps(
-          COLLECTION_PROP, coll.getName(),
-          SHARD_ID_PROP, slice.getName(),
-          REPLICA_PROP, replica.getName()
-      );
-      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_DATA_DIR, false);
-      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_INDEX, false);
-      if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
-      NamedList deleteResult = new NamedList();
-      ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
-      if (deleteResult.get("failure") != null) {
-        String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
-            coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
-        log.warn(errorString);
-        results.add("failure", errorString);
-        return;
-      }
-
-      TimeOut timeOut = new TimeOut(20L, TimeUnit.SECONDS, timeSource);
-      while (!timeOut.hasTimedOut()) {
-        coll = ocmh.zkStateReader.getClusterState().getCollection(coll.getName());
-        if (coll.getReplica(replica.getName()) != null) {
-          timeOut.sleep(100);
-        } else {
-          break;
-        }
-      }
-      if (timeOut.hasTimedOut()) {
-        results.add("failure", "Still see deleted replica in clusterstate!");
-        return;
-      }
-
-    }
-
-    String ulogDir = replica.getStr(CoreAdminParams.ULOG_DIR);
-    ZkNodeProps addReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        CoreAdminParams.NODE, targetNode,
-        CoreAdminParams.CORE_NODE_NAME, replica.getName(),
-        CoreAdminParams.NAME, replica.getCoreName(),
-        WAIT_FOR_FINAL_STATE, String.valueOf(waitForFinalState),
-        SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, skipCreateReplicaInClusterState,
-        CoreAdminParams.ULOG_DIR, ulogDir.substring(0, ulogDir.lastIndexOf(UpdateLog.TLOG_NAME)),
-        CoreAdminParams.DATA_DIR, dataDir);
-    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-    NamedList addResult = new NamedList();
-    try {
-      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, addResult, null);
-    } catch (Exception e) {
-      // fatal error - try rolling back
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
-      results.add("failure", errorString);
-      log.warn("Error adding replica " + addReplicasProps + " - trying to roll back...", e);
-      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
-      NamedList rollback = new NamedList();
-      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
-      if (rollback.get("failure") != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent: " + rollback.get("failure"));
-      }
-      return;
-    }
-    if (addResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-      log.debug("--- trying to roll back...");
-      // try to roll back
-      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
-      NamedList rollback = new NamedList();
-      try {
-        ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent!", e);
-      }
-      if (rollback.get("failure") != null) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
-            + ", collection may be inconsistent! Failure: " + rollback.get("failure"));
-      }
-      return;
-    } else {
-      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
-          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), replica.getCoreName(), targetNode);
-      results.add("success", successString);
-    }
-  }
-
-  private void moveNormalReplica(ClusterState clusterState, NamedList results, String targetNode, String async,
-                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
-    String newCoreName = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), coll, slice.getName(), replica.getType());
-    ZkNodeProps addReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        CoreAdminParams.NODE, targetNode,
-        CoreAdminParams.NAME, newCoreName);
-    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-    NamedList addResult = new NamedList();
-    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(1, ocmh);
-    ActiveReplicaWatcher watcher = null;
-    ZkNodeProps props = ocmh.addReplica(clusterState, addReplicasProps, addResult, null);
-    log.debug("props " + props);
-    if (replica.equals(slice.getLeader()) || waitForFinalState) {
-      watcher = new ActiveReplicaWatcher(coll.getName(), null, Collections.singletonList(newCoreName), countDownLatch);
-      log.debug("-- registered watcher " + watcher);
-      ocmh.zkStateReader.registerCollectionStateWatcher(coll.getName(), watcher);
-    }
-    if (addResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-          " on node=%s, failure=", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-      if (watcher != null) { // unregister
-        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
-      }
-      return;
-    }
-    // wait for the other replica to be active if the source replica was a leader
-    if (watcher != null) {
-      try {
-        log.debug("Waiting for leader's replica to recover.");
-        if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
-          String errorString = String.format(Locale.ROOT, "Timed out waiting for leader's replica to recover, collection=%s shard=%s" +
-              " on node=%s", coll.getName(), slice.getName(), targetNode);
-          log.warn(errorString);
-          results.add("failure", errorString);
-          return;
-        } else {
-          log.debug("Replica " + watcher.getActiveReplicas() + " is active - deleting the source...");
-        }
-      } finally {
-        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
-      }
-    }
-
-    ZkNodeProps removeReplicasProps = new ZkNodeProps(
-        COLLECTION_PROP, coll.getName(),
-        SHARD_ID_PROP, slice.getName(),
-        REPLICA_PROP, replica.getName());
-    if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
-    NamedList deleteResult = new NamedList();
-    ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
-    if (deleteResult.get("failure") != null) {
-      String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
-          coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
-      log.warn(errorString);
-      results.add("failure", errorString);
-    } else {
-      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
-          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), newCoreName, targetNode);
-      results.add("success", successString);
-    }
-  }
-}
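Client-side, this command corresponds to the MOVEREPLICA action, which supplies the collection, replica (or shard plus sourceNode) and targetNode params validated in moveReplica() above. A minimal SolrJ sketch; the MoveReplica request constructor is assumed from the SolrJ API of this era, and all names are hypothetical:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class MoveReplicaRequestExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:9983").build()) {
          // Move the named replica of "myCollection" onto the target node.
          CollectionAdminRequest.MoveReplica move =
              new CollectionAdminRequest.MoveReplica("myCollection", "core_node3", "node2:8983_solr");
          move.process(client); // returns once the Overseer reports success or failure
        }
      }
    }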

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/Overseer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index ee5fb18..edf3838 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import com.codahale.metrics.Timer;
 import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.cloud.autoscaling.OverseerTriggerThread;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
index 570843a..e8d85ce 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionConfigSetProcessor.java
@@ -19,6 +19,7 @@ package org.apache.solr.cloud;
 import java.io.IOException;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.handler.component.ShardHandler;


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
new file mode 100644
index 0000000..8cfd0bd
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAliasCreateCollectionCmd.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.OverseerSolrResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.Aliases;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.handler.admin.CollectionsHandler;
+import org.apache.solr.request.LocalSolrQueryRequest;
+import org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor;
+import org.apache.solr.util.TimeZoneUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_FIELD_METADATA;
+import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_INTERVAL_METADATA;
+
+/**
+ * For "routed aliases", creates another collection and adds it to the alias. In some cases it will not
+ * add a new collection; if a collection is created, the collection creation info is returned.
+ *
+ * Note: this logic is within an Overseer because we want to leverage the mutual exclusion
+ * property afforded by the lock it obtains on the alias name.
+ * @since 7.3
+ */
+public class RoutedAliasCreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public static final String IF_MOST_RECENT_COLL_NAME = "ifMostRecentCollName";
+
+  public static final String COLL_METAPREFIX = "collection-create.";
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public RoutedAliasCreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  /* TODO:
+  There are a few classes related to time routed alias processing.  We need to share some logic better.
+   */
+
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    //---- PARSE PRIMARY MESSAGE PARAMS
+    // important that we use NAME for the alias as that is what the Overseer will get a lock on before calling us
+    final String aliasName = message.getStr(NAME);
+    // the client believes this is the mostRecent collection name.  We assert this if provided.
+    final String ifMostRecentCollName = message.getStr(IF_MOST_RECENT_COLL_NAME); // optional
+
+    // TODO collection param (or intervalDateMath override?), useful for data capped collections
+
+    //---- PARSE ALIAS INFO FROM ZK
+    final ZkStateReader.AliasesManager aliasesHolder = ocmh.zkStateReader.aliasesHolder;
+    final Aliases aliases = aliasesHolder.getAliases();
+    final Map<String, String> aliasMetadata = aliases.getCollectionAliasMetadata(aliasName);
+    if (aliasMetadata == null) {
+      throw newAliasMustExistException(aliasName); // if it did exist, we'd have a non-null map
+    }
+
+    String routeField = aliasMetadata.get(ROUTER_FIELD_METADATA);
+    if (routeField == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "This command only works on time routed aliases.  Expected alias metadata not found.");
+    }
+    String intervalDateMath = aliasMetadata.getOrDefault(ROUTER_INTERVAL_METADATA, "+1DAY");
+    TimeZone intervalTimeZone = TimeZoneUtils.parseTimezone(aliasMetadata.get(CommonParams.TZ));
+
+    //TODO this is ugly; how can we organize the code related to this feature better?
+    final List<Map.Entry<Instant, String>> parsedCollections =
+        TimeRoutedAliasUpdateProcessor.parseCollections(aliasName, aliases, () -> newAliasMustExistException(aliasName));
+
+    //---- GET MOST RECENT COLL
+    final Map.Entry<Instant, String> mostRecentEntry = parsedCollections.get(0);
+    final Instant mostRecentCollTimestamp = mostRecentEntry.getKey();
+    final String mostRecentCollName = mostRecentEntry.getValue();
+    if (ifMostRecentCollName != null) {
+      if (!mostRecentCollName.equals(ifMostRecentCollName)) {
+        // Possibly due to race conditions in URPs on multiple leaders calling us at the same time
+        String msg = IF_MOST_RECENT_COLL_NAME + " expected " + ifMostRecentCollName + " but it's " + mostRecentCollName;
+        if (parsedCollections.stream().map(Map.Entry::getValue).noneMatch(ifMostRecentCollName::equals)) {
+          msg += ". Furthermore this collection isn't in the list of collections referenced by the alias.";
+        }
+        log.info(msg);
+        results.add("message", msg);
+        return;
+      }
+    } else if (mostRecentCollTimestamp.isAfter(Instant.now())) {
+      final String msg = "Most recent collection is in the future, so we won't create another.";
+      log.info(msg);
+      results.add("message", msg);
+      return;
+    }
+
+    //---- COMPUTE NEXT COLLECTION NAME
+    final Instant nextCollTimestamp = TimeRoutedAliasUpdateProcessor.computeNextCollTimestamp(mostRecentCollTimestamp, intervalDateMath, intervalTimeZone);
+    assert nextCollTimestamp.isAfter(mostRecentCollTimestamp);
+    final String createCollName = TimeRoutedAliasUpdateProcessor.formatCollectionNameFromInstant(aliasName, nextCollTimestamp);
+
+    //---- CREATE THE COLLECTION
+    // Map alias metadata starting with a prefix to a create-collection API request
+    final ModifiableSolrParams createReqParams = new ModifiableSolrParams();
+    for (Map.Entry<String, String> e : aliasMetadata.entrySet()) {
+      if (e.getKey().startsWith(COLL_METAPREFIX)) {
+        createReqParams.set(e.getKey().substring(COLL_METAPREFIX.length()), e.getValue());
+      }
+    }
+    if (createReqParams.get(COLL_CONF) == null) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "We require an explicit " + COLL_CONF );
+    }
+    createReqParams.set(NAME, createCollName);
+    createReqParams.set("property." + TimeRoutedAliasUpdateProcessor.TIME_PARTITION_ALIAS_NAME_CORE_PROP, aliasName);
+    // a CollectionOperation reads params and produces a message (Map) that is supposed to be sent to the Overseer.
+    //   Although we could create the Map without it, there are a fair amount of rules we don't want to reproduce.
+    final Map<String, Object> createMsgMap = CollectionsHandler.CollectionOperation.CREATE_OP.execute(
+        new LocalSolrQueryRequest(null, createReqParams),
+        null,
+        ocmh.overseer.getCoreContainer().getCollectionsHandler());
+    createMsgMap.put(Overseer.QUEUE_OPERATION, "create");
+    // Since we are running in the Overseer here, send the message directly to the Overseer CreateCollectionCmd
+    ocmh.commandMap.get(CollectionParams.CollectionAction.CREATE).call(clusterState, new ZkNodeProps(createMsgMap), results);
+
+    CollectionsHandler.waitForActiveCollection(createCollName, null, ocmh.overseer.getCoreContainer(), new OverseerSolrResponse(results));
+
+    //TODO delete some of the oldest collection(s) ?
+
+    //---- UPDATE THE ALIAS
+    aliasesHolder.applyModificationAndExportToZk(curAliases -> {
+      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
+      if (curTargetCollections.contains(createCollName)) {
+        return curAliases;
+      } else {
+        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
+        // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the first collection in a list
+        newTargetCollections.add(createCollName);
+        newTargetCollections.addAll(curTargetCollections);
+        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
+      }
+    });
+
+  }
+
+  private SolrException newAliasMustExistException(String aliasName) {
+    return new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+        "Alias " + aliasName + " does not exist.");
+  }
+
+}
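The COLL_METAPREFIX loop above copies every alias metadata key that starts with "collection-create." onto the create-collection request with the prefix stripped. A standalone sketch of that mapping, with hypothetical metadata values:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.solr.common.params.ModifiableSolrParams;

    public class MetaPrefixMappingExample {
      public static void main(String[] args) {
        Map<String, String> aliasMetadata = new HashMap<>();
        aliasMetadata.put("collection-create.collection.configName", "myConfig");
        aliasMetadata.put("collection-create.numShards", "2");
        aliasMetadata.put("router.field", "timestamp_dt"); // no prefix, so not copied

        ModifiableSolrParams createReqParams = new ModifiableSolrParams();
        for (Map.Entry<String, String> e : aliasMetadata.entrySet()) {
          if (e.getKey().startsWith("collection-create.")) {
            createReqParams.set(e.getKey().substring("collection-create.".length()), e.getValue());
          }
        }
        // createReqParams now holds collection.configName=myConfig and numShards=2
        System.out.println(createReqParams);
      }
    }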

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
new file mode 100644
index 0000000..03e7430
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -0,0 +1,540 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.PlainIdRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.util.TestInjection;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+
+public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public SplitShardCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    split(state, message, results);
+  }
+
+  public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
+    String collectionName = message.getStr(CoreAdminParams.COLLECTION);
+
+    log.info("Split shard invoked");
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    zkStateReader.forceUpdateCollection(collectionName);
+    AtomicReference<String> slice = new AtomicReference<>();
+    slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
+
+    String splitKey = message.getStr("split.key");
+    DocCollection collection = clusterState.getCollection(collectionName);
+
+    PolicyHelper.SessionWrapper sessionWrapper = null;
+
+    Slice parentSlice = getParentSlice(clusterState, collectionName, slice, splitKey);
+
+    // find the leader for the shard
+    Replica parentShardLeader = null;
+    try {
+      parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice.get(), 10000);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "Interrupted while looking up leader for shard: " + slice.get(), e);
+    }
+
+    // let's record the ephemeralOwner of the parent leader node
+    Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
+    if (leaderZnodeStat == null)  {
+      // we just got to know the leader but its live node is gone already!
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
+    }
+
+    List<DocRouter.Range> subRanges = new ArrayList<>();
+    List<String> subSlices = new ArrayList<>();
+    List<String> subShardNames = new ArrayList<>();
+
+    String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames);
+
+    try {
+
+      boolean oldShardsDeleted = false;
+      for (String subSlice : subSlices) {
+        Slice oSlice = collection.getSlice(subSlice);
+        if (oSlice != null) {
+          final Slice.State state = oSlice.getState();
+          if (state == Slice.State.ACTIVE) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
+          } else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
+            // delete the shards
+            log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
+            Map<String, Object> propMap = new HashMap<>();
+            propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
+            propMap.put(COLLECTION_PROP, collectionName);
+            propMap.put(SHARD_ID_PROP, subSlice);
+            ZkNodeProps m = new ZkNodeProps(propMap);
+            try {
+              ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
+            } catch (Exception e) {
+              throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
+                  e);
+            }
+
+            oldShardsDeleted = true;
+          }
+        }
+      }
+
+      if (oldShardsDeleted) {
+        // refresh the locally cached cluster state
+        // we know we have the latest because otherwise deleteshard would have failed
+        clusterState = zkStateReader.getClusterState();
+        collection = clusterState.getCollection(collectionName);
+      }
+
+      final String asyncId = message.getStr(ASYNC);
+      Map<String, String> requestMap = new HashMap<>();
+      String nodeName = parentShardLeader.getNodeName();
+
+      for (int i = 0; i < subRanges.size(); i++) {
+        String subSlice = subSlices.get(i);
+        String subShardName = subShardNames.get(i);
+        DocRouter.Range subRange = subRanges.get(i);
+
+        log.info("Creating slice " + subSlice + " of collection " + collectionName + " on " + nodeName);
+
+        Map<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
+        propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
+        propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
+        propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
+        propMap.put("shard_parent_node", parentShardLeader.getNodeName());
+        propMap.put("shard_parent_zk_session", leaderZnodeStat.getEphemeralOwner());
+        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+
+        // wait until we are able to see the new shard in cluster state
+        ocmh.waitForNewShard(collectionName, subSlice);
+
+        // refresh cluster state
+        clusterState = zkStateReader.getClusterState();
+
+        log.info("Adding replica " + subShardName + " as part of slice " + subSlice + " of collection " + collectionName
+            + " on " + nodeName);
+        propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+        propMap.put(COLLECTION_PROP, collectionName);
+        propMap.put(SHARD_ID_PROP, subSlice);
+        propMap.put("node", nodeName);
+        propMap.put(CoreAdminParams.NAME, subShardName);
+        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+        // copy over property params:
+        for (String key : message.keySet()) {
+          if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+            propMap.put(key, message.getStr(key));
+          }
+        }
+        // add async param
+        if (asyncId != null) {
+          propMap.put(ASYNC, asyncId);
+        }
+        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), results, null);
+      }
+
+      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard leaders", asyncId, requestMap);
+
+      for (String subShardName : subShardNames) {
+        // wait for parent leader to acknowledge the sub-shard core
+        log.info("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
+        String coreNodeName = ocmh.waitForCoreNodeName(collectionName, nodeName, subShardName);
+        CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
+        cmd.setCoreName(subShardName);
+        cmd.setNodeName(nodeName);
+        cmd.setCoreNodeName(coreNodeName);
+        cmd.setState(Replica.State.ACTIVE);
+        cmd.setCheckLive(true);
+        cmd.setOnlyIfLeader(true);
+
+        ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
+        ocmh.sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
+      }
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD timed out waiting for subshard leaders to come up",
+          asyncId, requestMap);
+
+      log.info("Successfully created all sub-shards for collection " + collectionName + " parent shard: " + slice
+          + " on: " + parentShardLeader);
+
+      log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice " + slice + " of collection "
+          + collectionName + " on " + parentShardLeader);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
+      params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
+      for (int i = 0; i < subShardNames.size(); i++) {
+        String subShardName = subShardNames.get(i);
+        params.add(CoreAdminParams.TARGET_CORE, subShardName);
+      }
+      params.set(CoreAdminParams.RANGES, rangesStr);
+
+      ocmh.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to invoke SPLIT core admin command", asyncId,
+          requestMap);
+
+      log.info("Index on shard: " + nodeName + " split into two successfully");
+
+      // apply buffered updates on sub-shards
+      for (int i = 0; i < subShardNames.size(); i++) {
+        String subShardName = subShardNames.get(i);
+
+        log.info("Applying buffered updates on : " + subShardName);
+
+        params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
+        params.set(CoreAdminParams.NAME, subShardName);
+
+        ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
+      }
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking sub shard leaders" +
+          " to apply buffered updates", asyncId, requestMap);
+
+      log.info("Successfully applied buffered updates on : " + subShardNames);
+
+      // Replica creation for the new Slices
+
+      // look at the replication factor and see if it matches reality
+      // if it does not, find best nodes to create more cores
+
+      // TODO: Have the replication factor decided some other way instead of inheriting the parent shard's replica count
+
+      int repFactor = parentSlice.getReplicas().size();
+
+      // We need to look at every node and see how many cores it serves,
+      // then add our new cores to the existing nodes serving the fewest cores,
+      // but (for now) require that each core goes on a distinct node.
+
+      // TODO: add smarter options that look at the current number of cores per node?
+      // For now we just pick randomly.
+      Set<String> nodes = clusterState.getLiveNodes();
+      List<String> nodeList = new ArrayList<>(nodes.size());
+      nodeList.addAll(nodes);
+
+      // TODO: Have maxShardsPerNode param for this operation?
+
+      // Remove the node that hosts the parent shard for replica creation.
+      nodeList.remove(nodeName);
+
+      // TODO: change this to handle sharding a slice into > 2 sub-shards.
+
+      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(ocmh.cloudManager,
+          clusterState,
+          new ArrayList<>(clusterState.getLiveNodes()),
+          collectionName,
+          new ZkNodeProps(collection.getProperties()),
+          subSlices, repFactor - 1, 0, 0);
+      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
+
+      List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
+
+      for (ReplicaPosition replicaPosition : replicaPositions) {
+        String sliceName = replicaPosition.shard;
+        String subShardNodeName = replicaPosition.node;
+        String solrCoreName = collectionName + "_" + sliceName + "_replica" + (replicaPosition.index);
+
+        log.info("Creating replica shard " + solrCoreName + " as part of slice " + sliceName + " of collection "
+            + collectionName + " on " + subShardNodeName);
+
+        ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
+            ZkStateReader.COLLECTION_PROP, collectionName,
+            ZkStateReader.SHARD_ID_PROP, sliceName,
+            ZkStateReader.CORE_NAME_PROP, solrCoreName,
+            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
+            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(subShardNodeName),
+            ZkStateReader.NODE_NAME_PROP, subShardNodeName,
+            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+
+        HashMap<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+        propMap.put(COLLECTION_PROP, collectionName);
+        propMap.put(SHARD_ID_PROP, sliceName);
+        propMap.put("node", subShardNodeName);
+        propMap.put(CoreAdminParams.NAME, solrCoreName);
+        // copy over property params:
+        for (String key : message.keySet()) {
+          if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+            propMap.put(key, message.getStr(key));
+          }
+        }
+        // add async param
+        if (asyncId != null) {
+          propMap.put(ASYNC, asyncId);
+        }
+        // special flag param to instruct addReplica not to create the replica in cluster state again
+        propMap.put(OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, "true");
+
+        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+
+        replicas.add(propMap);
+      }
+
+      assert TestInjection.injectSplitFailureBeforeReplicaCreation();
+
+      long ephemeralOwner = leaderZnodeStat.getEphemeralOwner();
+      // compare against the ephemeralOwner of the parent leader node
+      leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
+      if (leaderZnodeStat == null || ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
+        // put sub-shards in recovery_failed state
+        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+        Map<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        for (String subSlice : subSlices) {
+          propMap.put(subSlice, Slice.State.RECOVERY_FAILED.toString());
+        }
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(Utils.toJSON(m));
+
+        if (leaderZnodeStat == null)  {
+          // the leader is not live anymore, fail the split!
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
+        } else if (ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
+          // there's a new leader, fail the split!
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+              "The zk session id for the shard leader node: " + parentShardLeader.getNodeName() + " has changed from "
+                  + ephemeralOwner + " to " + leaderZnodeStat.getEphemeralOwner() + ". This can cause data loss so we must abort the split");
+        }
+      }
+
+      // we must set the slice state into recovery before actually creating the replica cores
+      // this ensures that the logic inside Overseer to update sub-shard state to 'active'
+      // always gets a chance to execute. See SOLR-7673
+
+      if (repFactor == 1) {
+        // switch sub shard states to 'active'
+        log.info("Replication factor is 1 so switching shard states");
+        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+        Map<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        propMap.put(slice.get(), Slice.State.INACTIVE.toString());
+        for (String subSlice : subSlices) {
+          propMap.put(subSlice, Slice.State.ACTIVE.toString());
+        }
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(Utils.toJSON(m));
+      } else {
+        log.info("Requesting shard state be set to 'recovery'");
+        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+        Map<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        for (String subSlice : subSlices) {
+          propMap.put(subSlice, Slice.State.RECOVERY.toString());
+        }
+        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(Utils.toJSON(m));
+      }
+
+      // now actually create replica cores on sub shard nodes
+      for (Map<String, Object> replica : replicas) {
+        ocmh.addReplica(clusterState, new ZkNodeProps(replica), results, null);
+      }
+
+      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard replicas", asyncId, requestMap);
+
+      log.info("Successfully created all replica shards for all sub-slices " + subSlices);
+
+      ocmh.commit(results, slice.get(), parentShardLeader);
+
+      return true;
+    } catch (SolrException e) {
+      throw e;
+    } catch (Exception e) {
+      log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
+    } finally {
+      if (sessionWrapper != null) sessionWrapper.release();
+    }
+  }
+
+  public static Slice getParentSlice(ClusterState clusterState, String collectionName, AtomicReference<String> slice, String splitKey) {
+    DocCollection collection = clusterState.getCollection(collectionName);
+    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
+
+    Slice parentSlice;
+
+    if (slice.get() == null) {
+      if (router instanceof CompositeIdRouter) {
+        Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
+        if (searchSlices.isEmpty()) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
+        }
+        if (searchSlices.size() > 1) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+              "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
+        }
+        parentSlice = searchSlices.iterator().next();
+        slice.set(parentSlice.getName());
+        log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
+      } else {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
+                + router.getClass().getName());
+      }
+    } else {
+      parentSlice = collection.getSlice(slice.get());
+    }
+
+    if (parentSlice == null) {
+      // no chance of the collection being null because ClusterState#getCollection(String) would have thrown
+      // an exception already
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
+    }
+    return parentSlice;
+  }
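+
+  // Illustration (hypothetical ids): with the CompositeIdRouter, documents like
+  // "customerA!doc1" and "customerA!doc2" share the hash bits derived from
+  // "customerA", which is why a split.key resolves to exactly one parent slice.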
+
+  public static String fillRanges(SolrCloudManager cloudManager, ZkNodeProps message, DocCollection collection, Slice parentSlice,
+                                List<DocRouter.Range> subRanges, List<String> subSlices, List<String> subShardNames) {
+    String splitKey = message.getStr("split.key");
+    DocRouter.Range range = parentSlice.getRange();
+    if (range == null) {
+      range = new PlainIdRouter().fullRange();
+    }
+    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
+
+    String rangesStr = message.getStr(CoreAdminParams.RANGES);
+    if (rangesStr != null) {
+      String[] ranges = rangesStr.split(",");
+      if (ranges.length < 2) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
+      } else {
+        for (int i = 0; i < ranges.length; i++) {
+          String r = ranges[i];
+          try {
+            subRanges.add(DocRouter.DEFAULT.fromString(r));
+          } catch (Exception e) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
+          }
+          if (!subRanges.get(i).isSubsetOf(range)) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
+          }
+        }
+        List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
+        Collections.sort(temp);
+        if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+              "Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
+        }
+        for (int i = 1; i < temp.size(); i++) {
+          if (temp.get(i - 1).max + 1 != temp.get(i).min) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specified hash ranges: " + rangesStr
+                + " either overlap with each other or " + "do not cover the entire range of parent shard: " + range);
+          }
+        }
+      }
+    } else if (splitKey != null) {
+      if (router instanceof CompositeIdRouter) {
+        CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
+        List<DocRouter.Range> tmpSubRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
+        if (tmpSubRanges.size() == 1) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey
+              + " has a hash range that is exactly equal to hash range of shard: " + parentSlice.getName());
+        }
+        for (DocRouter.Range subRange : tmpSubRanges) {
+          if (subRange.min == subRange.max) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
+          }
+        }
+        subRanges.addAll(tmpSubRanges);
+        log.info("Partitioning parent shard " + parentSlice.getName() + " range: " + parentSlice.getRange() + " yields: " + subRanges);
+        rangesStr = "";
+        for (int i = 0; i < subRanges.size(); i++) {
+          DocRouter.Range subRange = subRanges.get(i);
+          rangesStr += subRange.toString();
+          if (i < subRanges.size() - 1) rangesStr += ',';
+        }
+      }
+    } else {
+      // todo: fixed to two partitions?
+      subRanges.addAll(router.partitionRange(2, range));
+    }
+
+    for (int i = 0; i < subRanges.size(); i++) {
+      String subSlice = parentSlice.getName() + "_" + i;
+      subSlices.add(subSlice);
+      String subShardName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), collection, subSlice, Replica.Type.NRT);
+      subShardNames.add(subShardName);
+    }
+    return rangesStr;
+  }
+}
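
A minimal SolrJ sketch of driving the SPLITSHARD command implemented above (a
sketch only: the ZK address, collection and shard names are illustrative).
When neither "ranges" nor "split.key" is supplied, fillRanges() bisects the
parent hash range, e.g. the full range [80000000,7fffffff] becomes
[80000000,ffffffff] and [0,7fffffff]:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.RequestStatusState;

public class SplitShardExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // Ask the overseer to split shard1; sub-shard cores are first created
      // on the parent leader's node, then additional replicas are assigned.
      CollectionAdminRequest.SplitShard split =
          CollectionAdminRequest.splitShard("mycollection")
              .setShardName("shard1");
      // processAndWait submits the request async and polls REQUESTSTATUS
      RequestStatusState state = split.processAndWait(client, 300);
      System.out.println("SPLITSHARD finished with state: " + state);
    }
  }
}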

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
new file mode 100644
index 0000000..60da61a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/UtilizeNodeCmd.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.params.AutoScalingParams.NODE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class UtilizeNodeCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public UtilizeNodeCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ocmh.checkRequired(message, NODE);
+    String nodeName = message.getStr(NODE);
+    String async = message.getStr(ASYNC);
+    AutoScalingConfig autoScalingConfig = ocmh.overseer.getSolrCloudManager().getDistribStateManager().getAutoScalingConfig();
+
+    // first look for autoscaling suggestions (computed from policy violations)
+    // that propose moving a replica to this node
+    List<ZkNodeProps> requests = new ArrayList<>();
+    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(autoScalingConfig, ocmh.overseer.getSolrCloudManager());
+    for (Suggester.SuggestionInfo suggestionInfo : suggestions) {
+      log.info("op: " + suggestionInfo.getOperation());
+      String coll = null;
+      List<String> pieces = StrUtils.splitSmart(suggestionInfo.getOperation().getPath(), '/');
+      if (pieces.size() > 2) { // the operation path looks like /c/<collection>/..., so the collection is at index 2
+        coll = pieces.get(2);
+      } else {
+        continue;
+      }
+      log.info("coll: " + coll);
+      if (suggestionInfo.getOperation() instanceof V2Request) {
+        String targetNode = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/targetNode");
+        if (Objects.equals(targetNode, nodeName)) {
+          String replica = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/replica");
+          requests.add(new ZkNodeProps(COLLECTION_PROP, coll,
+              CollectionParams.TARGET_NODE, targetNode,
+              ASYNC, async,
+              REPLICA_PROP, replica));
+        }
+      }
+    }
+    executeAll(requests);
+    PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getSession(ocmh.overseer.getSolrCloudManager());
+    Policy.Session session = sessionWrapper.get();
+    for (;;) {
+      Suggester suggester = session.getSuggester(MOVEREPLICA)
+          .hint(Suggester.Hint.TARGET_NODE, nodeName);
+      session = suggester.getSession();
+      SolrRequest request = suggester.getSuggestion();
+      if (request == null) break;
+      requests.add(new ZkNodeProps(COLLECTION_PROP, request.getParams().get(COLLECTION_PROP),
+          CollectionParams.TARGET_NODE, request.getParams().get(CollectionParams.TARGET_NODE),
+          REPLICA_PROP, request.getParams().get(REPLICA_PROP),
+          ASYNC, request.getParams().get(ASYNC)));
+    }
+    sessionWrapper.returnSession(session);
+    try {
+      executeAll(requests);
+    } finally {
+      sessionWrapper.release();
+    }
+  }
+
+  private void executeAll(List<ZkNodeProps> requests) throws Exception {
+    if (requests.isEmpty()) return;
+    for (ZkNodeProps props : requests) {
+      NamedList result = new NamedList();
+      ocmh.commandMap.get(MOVEREPLICA)
+          .call(ocmh.overseer.getSolrCloudManager().getClusterStateProvider().getClusterState(),
+              props,
+              result);
+    }
+    requests.clear();
+  }
+
+}
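
A hedged sketch of invoking UTILIZENODE from a client (node name and ZK
address are illustrative; GenericSolrRequest is used here because it maps
one-to-one onto the "action" and "node" parameters checked above):

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class UtilizeNodeExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "UTILIZENODE");       // dispatched to UtilizeNodeCmd
      params.set("node", "127.0.0.1:8984_solr"); // the node to move replicas onto
      GenericSolrRequest req = new GenericSolrRequest(
          SolrRequest.METHOD.GET, "/admin/collections", params);
      // each computed MOVEREPLICA is executed by the overseer before returning
      System.out.println(client.request(req));
    }
  }
}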

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
new file mode 100644
index 0000000..651d4fe
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+/** 
+ * Package related to internal implementations of the SolrCloud collections api
+ */
+package org.apache.solr.cloud.api.collections;
+
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
index 55d6a7e..e5303de 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
@@ -26,7 +26,7 @@ import java.util.Map;
 
 import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index dbcdd3d..f2c9a2f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -17,7 +17,6 @@
 package org.apache.solr.cloud.overseer;
 
 import java.lang.invoke.MethodHandles;
-
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
@@ -31,9 +30,9 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.Assign;
 import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -45,7 +44,6 @@ import org.apache.solr.common.util.Utils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
 import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
 import static org.apache.solr.cloud.overseer.CollectionMutator.checkKeyExistence;
 import static org.apache.solr.common.params.CommonParams.NAME;
@@ -113,7 +111,7 @@ public class ReplicaMutator {
     String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
     String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
     String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-    if (StringUtils.startsWith(property, COLL_PROP_PREFIX) == false) {
+    if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
       property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
     }
     property = property.toLowerCase(Locale.ROOT);
@@ -177,7 +175,7 @@ public class ReplicaMutator {
     String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
     String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
     String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-    if (StringUtils.startsWith(property, COLL_PROP_PREFIX) == false) {
+    if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
       property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
     }
 
@@ -284,7 +282,7 @@ public class ReplicaMutator {
         replicaProps.put(ZkStateReader.REPLICA_TYPE, oldReplica.getType().toString());
         // Move custom props over.
         for (Map.Entry<String, Object> ent : oldReplica.getProperties().entrySet()) {
-          if (ent.getKey().startsWith(COLL_PROP_PREFIX)) {
+          if (ent.getKey().startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
             replicaProps.put(ent.getKey(), ent.getValue());
           }
         }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
index 6718a80..87bf481 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
@@ -16,20 +16,18 @@
  */
 package org.apache.solr.cloud.overseer;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
-import static org.apache.solr.common.util.Utils.makeMap;
-
 import java.lang.invoke.MethodHandles;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.collect.ImmutableSet;
 import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.cloud.Assign;
 import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
@@ -41,12 +39,13 @@ import org.apache.solr.common.cloud.ZkStateReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.ImmutableSet;
+import static org.apache.solr.cloud.overseer.CollectionMutator.checkCollectionKeyExistence;
+import static org.apache.solr.common.util.Utils.makeMap;
 
 public class SliceMutator {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  public static final String PREFERRED_LEADER_PROP = COLL_PROP_PREFIX + "preferredleader";
+  public static final String PREFERRED_LEADER_PROP = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + "preferredleader";
 
   public static final Set<String> SLICE_UNIQUE_BOOLEAN_PROPERTIES = ImmutableSet.of(PREFERRED_LEADER_PROP);
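
Since "preferredleader" is registered here as a slice-unique boolean property,
setting it on one replica makes the overseer clear it from the other replicas
of the same slice. A small SolrJ sketch (collection, shard and replica names
are illustrative):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class PreferredLeaderExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // ADDREPLICAPROP prefixes the name with "property.", which matches
      // PREFERRED_LEADER_PROP = COLL_PROP_PREFIX + "preferredleader"
      CollectionAdminRequest
          .addReplicaProperty("mycollection", "shard1", "core_node2",
              "preferredleader", "true")
          .process(client);
    }
  }
}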
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 74d4764..56f979d 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -42,7 +42,7 @@ import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard;
 import org.apache.solr.client.solrj.response.RequestStatusState;
 import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
 import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.cloud.OverseerSolrResponse;
 import org.apache.solr.cloud.OverseerTaskQueue;
 import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
@@ -100,17 +100,17 @@ import static org.apache.solr.client.solrj.response.RequestStatusState.NOT_FOUND
 import static org.apache.solr.client.solrj.response.RequestStatusState.RUNNING;
 import static org.apache.solr.client.solrj.response.RequestStatusState.SUBMITTED;
 import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.REQUESTID;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARD_UNIQUE;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.NUM_SLICES;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_ACTIVE_NODES;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARDS_PROP;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SHARD_UNIQUE;
 import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
 import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
 import static org.apache.solr.common.cloud.DocCollection.RULE;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
index bc242ba..6f71acc 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/TimeRoutedAliasUpdateProcessor.java
@@ -42,7 +42,7 @@ import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 import org.apache.solr.cloud.Overseer;
-import org.apache.solr.cloud.RoutedAliasCreateCollectionCmd;
+import org.apache.solr.cloud.api.collections.RoutedAliasCreateCollectionCmd;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.Aliases;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
deleted file mode 100644
index a90783a..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-import java.util.TreeMap;
-
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the logic required to test Solr cloud backup/restore capability.
- */
-public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected static final int NUM_SHARDS = 2;//granted we sometimes shard split to get more
-
-  int replFactor;
-  int numTlogReplicas;
-  int numPullReplicas;
-
-  private static long docsSeed; // see indexDocs()
-
-  @BeforeClass
-  public static void createCluster() throws Exception {
-    docsSeed = random().nextLong();
-  }
-
-  /**
-   * @return The name of the collection to use.
-   */
-  public abstract String getCollectionName();
-
-  /**
-   * @return The name of the backup repository to use.
-   */
-  public abstract String getBackupRepoName();
-
-  /**
-   * @return The absolute path for the backup location.
-   *         Could return null.
-   */
-  public abstract String getBackupLocation();
-
-  @Test
-  public void test() throws Exception {
-    boolean isImplicit = random().nextBoolean();
-    boolean doSplitShardOperation = !isImplicit && random().nextBoolean();
-    replFactor = TestUtil.nextInt(random(), 1, 2);
-    numTlogReplicas = TestUtil.nextInt(random(), 0, 1);
-    numPullReplicas = TestUtil.nextInt(random(), 0, 1);
-    
-    CollectionAdminRequest.Create create = isImplicit ?
-      // NOTE: use shard list with same # of shards as NUM_SHARDS; we assume this later
-      CollectionAdminRequest.createCollectionWithImplicitRouter(getCollectionName(), "conf1", "shard1,shard2", replFactor, numTlogReplicas, numPullReplicas) :
-      CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
-    
-    if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
-      create.setMaxShardsPerNode((int)Math.ceil(NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) / cluster.getJettySolrRunners().size()));//just to assert it survives the restoration
-      if (doSplitShardOperation) {
-        create.setMaxShardsPerNode(create.getMaxShardsPerNode() * 2);
-      }
-    }
-    if (random().nextBoolean()) {
-      create.setAutoAddReplicas(true);//just to assert it survives the restoration
-    }
-    Properties coreProps = new Properties();
-    coreProps.put("customKey", "customValue");//just to assert it survives the restoration
-    create.setProperties(coreProps);
-    if (isImplicit) { //implicit router
-      create.setRouterField("shard_s");
-    } else {//composite id router
-      if (random().nextBoolean()) {
-        create.setRouterField("shard_s");
-      }
-    }
-
-    CloudSolrClient solrClient = cluster.getSolrClient();
-    create.process(solrClient);
-
-    indexDocs(getCollectionName());
-
-    if (doSplitShardOperation) {
-      // shard split the first shard
-      int prevActiveSliceCount = getActiveSliceCount(getCollectionName());
-      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(getCollectionName());
-      splitShard.setShardName("shard1");
-      splitShard.process(solrClient);
-      // wait until we see one more active slice...
-      for (int i = 0; getActiveSliceCount(getCollectionName()) != prevActiveSliceCount + 1; i++) {
-        assertTrue(i < 30);
-        Thread.sleep(500);
-      }
-      // issue a hard commit.  Split shard does a soft commit which isn't good enough for the backup/snapshooter to see
-      solrClient.commit(getCollectionName());
-    }
-
-    testBackupAndRestore(getCollectionName());
-    testConfigBackupOnly("conf1", getCollectionName());
-    testInvalidPath(getCollectionName());
-  }
-
-  /**
-   * This test validates the backup of collection configuration using
-   *  {@linkplain CollectionAdminParams#NO_INDEX_BACKUP_STRATEGY}.
-   *
-   * @param configName The config name for the collection to be backed up.
-   * @param collectionName The name of the collection to be backed up.
-   * @throws Exception in case of errors.
-   */
-  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
-    // This is deliberately no-op since we want to run this test only for one of the backup repository
-    // implementation (mainly to avoid redundant test execution). Currently HDFS backup repository test
-    // implements this.
-  }
-
-  // This test verifies the system behavior when the backup location cluster property is configured with an invalid
-  // value for the specified repository (and the default backup location is not configured in solr.xml).
-  private void testInvalidPath(String collectionName) throws Exception {
-    // Execute this test only if the default backup location is NOT configured in solr.xml
-    if (getBackupLocation() == null) {
-      return;
-    }
-
-    String backupName = "invalidbackuprequest";
-    CloudSolrClient solrClient = cluster.getSolrClient();
-
-    ClusterProp req = CollectionAdminRequest.setClusterProperty(CoreAdminParams.BACKUP_LOCATION, "/location/does/not/exist");
-    assertEquals(0, req.process(solrClient).getStatus());
-
-    // Do not specify the backup location.
-    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
-        .setRepositoryName(getBackupRepoName());
-    try {
-      backup.process(solrClient);
-      fail("This request should have failed since the cluster property value for backup location property is invalid.");
-    } catch (SolrException ex) {
-      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
-    }
-
-    String restoreCollectionName = collectionName + "_invalidrequest";
-    CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
-        .setRepositoryName(getBackupRepoName());
-    try {
-      restore.process(solrClient);
-      fail("This request should have failed since the cluster property value for backup location property is invalid.");
-    } catch (SolrException ex) {
-      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
-    }
-  }
-
-  private int getActiveSliceCount(String collectionName) {
-    return cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName).getActiveSlices().size();
-  }
-
-  private void indexDocs(String collectionName) throws Exception {
-    Random random = new Random(docsSeed);// use a constant seed for the whole test run so that we can easily re-index.
-    int numDocs = random.nextInt(100);
-    if (numDocs == 0) {
-      log.info("Indexing ZERO test docs");
-      return;
-    }
-    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
-    for (int i=0; i<numDocs; i++) {
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.addField("id", i);
-      doc.addField("shard_s", "shard" + (1 + random.nextInt(NUM_SHARDS))); // for implicit router
-      docs.add(doc);
-    }
-    CloudSolrClient client = cluster.getSolrClient();
-    client.add(collectionName, docs);// batch
-    client.commit(collectionName);
-  }
-
-  private void testBackupAndRestore(String collectionName) throws Exception {
-    String backupLocation = getBackupLocation();
-    String backupName = "mytestbackup";
-
-    CloudSolrClient client = cluster.getSolrClient();
-    DocCollection backupCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
-
-    Map<String, Integer> origShardToDocCount = getShardToDocCountMap(client, backupCollection);
-    assert origShardToDocCount.isEmpty() == false;
-
-    log.info("Triggering Backup command");
-
-    {
-      CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
-          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
-      if (random().nextBoolean()) {
-        assertEquals(0, backup.process(client).getStatus());
-      } else {
-        assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(client, 30));//async
-      }
-    }
-
-    log.info("Triggering Restore command");
-
-    String restoreCollectionName = collectionName + "_restored";
-    boolean sameConfig = random().nextBoolean();
-
-    {
-      CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
-          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
-
-
-      //explicitly specify the replicationFactor/pullReplicas/nrtReplicas/tlogReplicas .
-      //Value is still the same as the original. maybe test with different values that the original for better test coverage
-      if (random().nextBoolean())  {
-        restore.setReplicationFactor(replFactor);
-      }
-      if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
-        // may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more)
-        restore.setMaxShardsPerNode((int)Math.ceil(backupCollection.getReplicas().size()/cluster.getJettySolrRunners().size()));
-      }
-      
-
-      if (rarely()) { // Try with createNodeSet configuration
-        int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
-        List<String> nodeStrs = new ArrayList<>(nodeSetSize);
-        Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
-        for (int i = 0; i < nodeSetSize ; i++) {
-          nodeStrs.add(iter.next().getNodeName());
-        }
-        restore.setCreateNodeSet(String.join(",", nodeStrs));
-        restore.setCreateNodeSetShuffle(usually());
-        // we need to double maxShardsPerNode value since we reduced number of available nodes by half.
-        if (restore.getMaxShardsPerNode() != null) {
-          restore.setMaxShardsPerNode(restore.getMaxShardsPerNode() * 2);
-        } else {
-          restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
-        }
-      }
-
-      Properties props = new Properties();
-      props.setProperty("customKey", "customVal");
-      restore.setProperties(props);
-
-      if (sameConfig==false) {
-        restore.setConfigName("customConfigName");
-      }
-      if (random().nextBoolean()) {
-        assertEquals(0, restore.process(client).getStatus());
-      } else {
-        assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(client, 30));//async
-      }
-      AbstractDistribZkTestBase.waitForRecoveriesToFinish(
-          restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
-    }
-
-    //Check the number of results are the same
-    DocCollection restoreCollection = client.getZkStateReader().getClusterState().getCollection(restoreCollectionName);
-    assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
-    //Re-index same docs (should be identical docs given same random seed) and test we have the same result.  Helps
-    //  test we reconstituted the hash ranges / doc router.
-    if (!(restoreCollection.getRouter() instanceof ImplicitDocRouter) && random().nextBoolean()) {
-      indexDocs(restoreCollectionName);
-      assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
-    }
-
-    assertEquals(backupCollection.getReplicationFactor(), restoreCollection.getReplicationFactor());
-    assertEquals(backupCollection.getAutoAddReplicas(), restoreCollection.getAutoAddReplicas());
-    assertEquals(backupCollection.getActiveSlices().iterator().next().getReplicas().size(),
-        restoreCollection.getActiveSlices().iterator().next().getReplicas().size());
-    assertEquals(sameConfig ? "conf1" : "customConfigName",
-        cluster.getSolrClient().getZkStateReader().readConfigName(restoreCollectionName));
-
-    Map<String, Integer> numReplicasByNodeName = new HashMap<>();
-    restoreCollection.getReplicas().forEach(x -> {
-      numReplicasByNodeName.put(x.getNodeName(), numReplicasByNodeName.getOrDefault(x.getNodeName(), 0) + 1);
-    });
-    numReplicasByNodeName.forEach((k, v) -> {
-      assertTrue("Node " + k + " has " + v + " replicas. Expected num replicas : " + restoreCollection.getMaxShardsPerNode() ,
-          v <= restoreCollection.getMaxShardsPerNode());
-    });
-
-    assertEquals("Different count of nrtReplicas. Backup collection state=" + backupCollection + "\nRestore " +
-        "collection state=" + restoreCollection, replFactor, restoreCollection.getNumNrtReplicas().intValue());
-    assertEquals("Different count of pullReplicas. Backup collection state=" + backupCollection + "\nRestore" +
-        " collection state=" + restoreCollection, numPullReplicas, restoreCollection.getNumPullReplicas().intValue());
-    assertEquals("Different count of TlogReplica. Backup collection state=" + backupCollection + "\nRestore" +
-        " collection state=" + restoreCollection, numTlogReplicas, restoreCollection.getNumTlogReplicas().intValue());
-
-    assertEquals("Restore collection should use stateFormat=2", 2, restoreCollection.getStateFormat());
-
-
-    // assert added core properties:
-    // DWS: did via manual inspection.
-    // TODO Find the applicable core.properties on the file system but how?
-  }
-
-  private Map<String, Integer> getShardToDocCountMap(CloudSolrClient client, DocCollection docCollection) throws SolrServerException, IOException {
-    Map<String,Integer> shardToDocCount = new TreeMap<>();
-    for (Slice slice : docCollection.getActiveSlices()) {
-      String shardName = slice.getName();
-      try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(slice.getLeader().getCoreUrl()).withHttpClient(client.getHttpClient()).build()) {
-        long docsInShard = leaderClient.query(new SolrQuery("*:*").setParam("distrib", "false"))
-            .getResults().getNumFound();
-        shardToDocCount.put(shardName, (int) docsInShard);
-      }
-    }
-    return shardToDocCount;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
deleted file mode 100644
index cf26de4..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class AssignTest extends SolrTestCaseJ4 {
-  
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-
-  }
-  
-  @Override
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-  
-  @Test
-  public void testAssignNode() throws Exception {
-    assumeWorkingMockito();
-    
-    SolrZkClient zkClient = mock(SolrZkClient.class);
-    Map<String, byte[]> zkClientData = new HashMap<>();
-    when(zkClient.setData(anyString(), any(), anyInt(), anyBoolean())).then(invocation -> {
-        zkClientData.put(invocation.getArgument(0), invocation.getArgument(1));
-        return null;
-      }
-    );
-    when(zkClient.getData(anyString(), any(), any(), anyBoolean())).then(invocation ->
-        zkClientData.get(invocation.getArgument(0)));
-    // TODO: fix this to be independent of ZK
-    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
-    String nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
-    assertEquals("core_node1", nodeName);
-    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
-    assertEquals("core_node1", nodeName);
-    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
-    assertEquals("core_node2", nodeName);
-  }
-
-  @Test
-  public void testIdIsUnique() throws Exception {
-    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
-    ZkTestServer server = new ZkTestServer(zkDir);
-    Object fixedValue = new Object();
-    String[] collections = new String[]{"c1","c2","c3","c4","c5","c6","c7","c8","c9"};
-    Map<String, ConcurrentHashMap<Integer, Object>> collectionUniqueIds = new HashMap<>();
-    for (String c : collections) {
-      collectionUniqueIds.put(c, new ConcurrentHashMap<>());
-    }
-
-    ExecutorService executor = ExecutorUtil.newMDCAwareCachedThreadPool("threadpool");
-    try {
-      server.run();
-
-      try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
-        assertTrue(zkClient.isConnected());
-        zkClient.makePath("/", true);
-        for (String c : collections) {
-          zkClient.makePath("/collections/"+c, true);
-        }
-        // TODO: fix this to be independent of ZK
-        ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
-        List<Future<?>> futures = new ArrayList<>();
-        for (int i = 0; i < 1000; i++) {
-          futures.add(executor.submit(() -> {
-            String collection = collections[random().nextInt(collections.length)];
-            int id = Assign.incAndGetId(stateManager, collection, 0);
-            Object val = collectionUniqueIds.get(collection).put(id, fixedValue);
-            if (val != null) {
-              fail("ZkController do not generate unique id for " + collection);
-            }
-          }));
-        }
-        for (Future<?> future : futures) {
-          future.get();
-        }
-      }
-      assertEquals(1000, (long) collectionUniqueIds.values().stream()
-          .map(ConcurrentHashMap::size)
-          .reduce((m1, m2) -> m1 + m2).get());
-    } finally {
-      server.shutdown();
-      ExecutorUtil.shutdownAndAwaitTermination(executor);
-    }
-  }
-
-
-  @Test
-  public void testBuildCoreName() throws IOException, InterruptedException, KeeperException {
-    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
-    ZkTestServer server = new ZkTestServer(zkDir);
-    server.run();
-    try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
-      zkClient.makePath("/", true);
-      // TODO: fix this to be independent of ZK
-      ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
-      Map<String, Slice> slices = new HashMap<>();
-      slices.put("shard1", new Slice("shard1", new HashMap<>(), null));
-      slices.put("shard2", new Slice("shard2", new HashMap<>(), null));
-
-      DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
-      assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));
-      assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildSolrCoreName(stateManager, docCollection, "shard2", Replica.Type.PULL));
-    } finally {
-      server.shutdown();
-    }
-  }
-  
-}
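
The assertions above pin down the core-name pattern: collection name, shard
name, a replica-type suffix, and a per-collection counter. A minimal sketch
of that construction (coreNameFor is a hypothetical helper; the real
Assign.buildSolrCoreName also consults existing replicas when choosing the
counter, and the TLOG suffix "t" is assumed by analogy with the NRT and PULL
cases asserted above; Replica.Type is org.apache.solr.common.cloud.Replica.Type):

    static String coreNameFor(String collection, String shard,
                              Replica.Type type, int counter) {
      final String suffix;
      switch (type) {
        case NRT:  suffix = "n"; break;   // near-real-time replica
        case TLOG: suffix = "t"; break;   // transaction-log replica (assumed)
        case PULL: suffix = "p"; break;   // pull replica
        default: throw new IllegalArgumentException("Unknown type: " + type);
      }
      return collection + "_" + shard + "_replica_" + suffix + counter;
    }

    // coreNameFor("collection1", "shard1", Replica.Type.NRT, 1)
    //   -> "collection1_shard1_replica_n1", matching the assertion above.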

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 66b7866..2190c80 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -37,6 +37,7 @@ import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
index 1a01386..22862b4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
@@ -24,6 +24,11 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.ElectionContext;
+import org.apache.solr.cloud.LeaderElector;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.api.collections.ShardSplitTest;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
deleted file mode 100644
index e886bb6..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionReloadTest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.util.RetryUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Verifies cluster state remains consistent after collection reload.
- */
-@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
-public class CollectionReloadTest extends SolrCloudTestCase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(1)
-        .addConfig("conf", configset("cloud-minimal"))
-        .configure();
-  }
-  
-  @Test
-  public void testReloadedLeaderStateAfterZkSessionLoss() throws Exception {
-
-    log.info("testReloadedLeaderStateAfterZkSessionLoss initialized OK ... running test logic");
-
-    final String testCollectionName = "c8n_1x1";
-    CollectionAdminRequest.createCollection(testCollectionName, "conf", 1, 1)
-        .process(cluster.getSolrClient());
-
-    Replica leader
-        = cluster.getSolrClient().getZkStateReader().getLeaderRetry(testCollectionName, "shard1", DEFAULT_TIMEOUT);
-
-    long coreStartTime = getCoreStatus(leader).getCoreStartTime().getTime();
-    CollectionAdminRequest.reloadCollection(testCollectionName).process(cluster.getSolrClient());
-
-    RetryUtil.retryUntil("Timed out waiting for core to reload", 30, 1000, TimeUnit.MILLISECONDS, () -> {
-      long restartTime = 0;
-      try {
-        restartTime = getCoreStatus(leader).getCoreStartTime().getTime();
-      } catch (Exception e) {
-        log.warn("Exception getting core start time: {}", e.getMessage());
-        return false;
-      }
-      return restartTime > coreStartTime;
-    });
-
-    final int initialStateVersion = getCollectionState(testCollectionName).getZNodeVersion();
-
-    cluster.expireZkSession(cluster.getReplicaJetty(leader));
-
-    waitForState("Timed out waiting for core to re-register as ACTIVE after session expiry", testCollectionName, (n, c) -> {
-      log.info("Collection state: {}", c.toString());
-      Replica expiredReplica = c.getReplica(leader.getName());
-      return expiredReplica.getState() == Replica.State.ACTIVE && c.getZNodeVersion() > initialStateVersion;
-    });
-
-    log.info("testReloadedLeaderStateAfterZkSessionLoss succeeded ... shutting down now!");
-  }
-}
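
The reload-detection idiom in the deleted test above reduces to polling the
core start time until it moves past its pre-reload value. A minimal sketch of
that step, reusing the helpers visible in the test (getCoreStatus, cluster,
and a leader Replica); this is an illustration of the deleted test's logic,
not new API:

    long before = getCoreStatus(leader).getCoreStartTime().getTime();
    CollectionAdminRequest.reloadCollection("c8n_1x1")
        .process(cluster.getSolrClient());
    RetryUtil.retryUntil("Timed out waiting for core to reload", 30, 1000,
        TimeUnit.MILLISECONDS, () -> {
          try {
            // A reload is observed as a strictly newer core start time.
            return getCoreStatus(leader).getCoreStartTime().getTime() > before;
          } catch (Exception e) {
            return false; // the core may be briefly unavailable mid-reload
          }
        });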


[12/15] lucene-solr:master: SOLR-11817: Move Collections API classes to its own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
deleted file mode 100644
index 426c879..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionMessageHandler.java
+++ /dev/null
@@ -1,1003 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
-import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.UpdateResponse;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrCloseable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.SuppressForbidden;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.handler.component.ShardResponse;
-import org.apache.solr.util.DefaultSolrThreadFactory;
-import org.apache.solr.util.RTimer;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.Utils.makeMap;
-
-/**
- * A {@link OverseerMessageHandler} that handles Collections API related
- * overseer messages.
- */
-public class OverseerCollectionMessageHandler implements OverseerMessageHandler, SolrCloseable {
-
-  public static final String NUM_SLICES = "numShards";
-
-  static final boolean CREATE_NODE_SET_SHUFFLE_DEFAULT = true;
-  public static final String CREATE_NODE_SET_SHUFFLE = CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
-  public static final String CREATE_NODE_SET_EMPTY = "EMPTY";
-  public static final String CREATE_NODE_SET = CollectionAdminParams.CREATE_NODE_SET_PARAM;
-
-  public static final String ROUTER = "router";
-
-  public static final String SHARDS_PROP = "shards";
-
-  public static final String REQUESTID = "requestid";
-
-  public static final String COLL_CONF = "collection.configName";
-
-  public static final String COLL_PROP_PREFIX = "property.";
-
-  public static final String ONLY_IF_DOWN = "onlyIfDown";
-
-  public static final String SHARD_UNIQUE = "shardUnique";
-
-  public static final String ONLY_ACTIVE_NODES = "onlyactivenodes";
-
-  static final String SKIP_CREATE_REPLICA_IN_CLUSTER_STATE = "skipCreateReplicaInClusterState";
-
-  public static final Map<String, Object> COLL_PROPS = Collections.unmodifiableMap(makeMap(
-      ROUTER, DocRouter.DEFAULT_NAME,
-      ZkStateReader.REPLICATION_FACTOR, "1",
-      ZkStateReader.NRT_REPLICAS, "1",
-      ZkStateReader.TLOG_REPLICAS, "0",
-      ZkStateReader.PULL_REPLICAS, "0",
-      ZkStateReader.MAX_SHARDS_PER_NODE, "1",
-      ZkStateReader.AUTO_ADD_REPLICAS, "false",
-      DocCollection.RULE, null,
-      POLICY, null,
-      SNITCH, null));
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  Overseer overseer;
-  ShardHandlerFactory shardHandlerFactory;
-  String adminPath;
-  ZkStateReader zkStateReader;
-  SolrCloudManager cloudManager;
-  String myId;
-  Stats stats;
-  TimeSource timeSource;
-
-  // Tracks collections that are currently being processed by a running task.
-  // This is used for handling mutual exclusion of the tasks (via lockTree below).
-
-  final private LockTree lockTree = new LockTree();
-  ExecutorService tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
-      new SynchronousQueue<>(),
-      new DefaultSolrThreadFactory("OverseerCollectionMessageHandlerThreadFactory"));
-
-  static final Random RANDOM;
-  static {
-    // We try to make things reproducible in the context of our tests by initializing the random instance
-    // based on the current seed
-    String seed = System.getProperty("tests.seed");
-    if (seed == null) {
-      RANDOM = new Random();
-    } else {
-      RANDOM = new Random(seed.hashCode());
-    }
-  }
-
-  final Map<CollectionAction, Cmd> commandMap;
-
-  private volatile boolean isClosed;
-
-  public OverseerCollectionMessageHandler(ZkStateReader zkStateReader, String myId,
-                                        final ShardHandlerFactory shardHandlerFactory,
-                                        String adminPath,
-                                        Stats stats,
-                                        Overseer overseer,
-                                        OverseerNodePrioritizer overseerPrioritizer) {
-    this.zkStateReader = zkStateReader;
-    this.shardHandlerFactory = shardHandlerFactory;
-    this.adminPath = adminPath;
-    this.myId = myId;
-    this.stats = stats;
-    this.overseer = overseer;
-    this.cloudManager = overseer.getSolrCloudManager();
-    this.timeSource = cloudManager.getTimeSource();
-    this.isClosed = false;
-    commandMap = new ImmutableMap.Builder<CollectionAction, Cmd>()
-        .put(REPLACENODE, new ReplaceNodeCmd(this))
-        .put(DELETENODE, new DeleteNodeCmd(this))
-        .put(BACKUP, new BackupCmd(this))
-        .put(RESTORE, new RestoreCmd(this))
-        .put(CREATESNAPSHOT, new CreateSnapshotCmd(this))
-        .put(DELETESNAPSHOT, new DeleteSnapshotCmd(this))
-        .put(SPLITSHARD, new SplitShardCmd(this))
-        .put(ADDROLE, new OverseerRoleCmd(this, ADDROLE, overseerPrioritizer))
-        .put(REMOVEROLE, new OverseerRoleCmd(this, REMOVEROLE, overseerPrioritizer))
-        .put(MOCK_COLL_TASK, this::mockOperation)
-        .put(MOCK_SHARD_TASK, this::mockOperation)
-        .put(MOCK_REPLICA_TASK, this::mockOperation)
-        .put(MIGRATESTATEFORMAT, this::migrateStateFormat)
-        .put(CREATESHARD, new CreateShardCmd(this))
-        .put(MIGRATE, new MigrateCmd(this))
-        .put(CREATE, new CreateCollectionCmd(this))
-        .put(MODIFYCOLLECTION, this::modifyCollection)
-        .put(ADDREPLICAPROP, this::processReplicaAddPropertyCommand)
-        .put(DELETEREPLICAPROP, this::processReplicaDeletePropertyCommand)
-        .put(BALANCESHARDUNIQUE, this::balanceProperty)
-        .put(REBALANCELEADERS, this::processRebalanceLeaders)
-        .put(RELOAD, this::reloadCollection)
-        .put(DELETE, new DeleteCollectionCmd(this))
-        .put(CREATEALIAS, new CreateAliasCmd(this))
-        .put(DELETEALIAS, new DeleteAliasCmd(this))
-        .put(ROUTEDALIAS_CREATECOLL, new RoutedAliasCreateCollectionCmd(this))
-        .put(OVERSEERSTATUS, new OverseerStatusCmd(this))
-        .put(DELETESHARD, new DeleteShardCmd(this))
-        .put(DELETEREPLICA, new DeleteReplicaCmd(this))
-        .put(ADDREPLICA, new AddReplicaCmd(this))
-        .put(MOVEREPLICA, new MoveReplicaCmd(this))
-        .put(UTILIZENODE, new UtilizeNodeCmd(this))
-        .build()
-    ;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public SolrResponse processMessage(ZkNodeProps message, String operation) {
-    log.debug("OverseerCollectionMessageHandler.processMessage : {} , {}", operation, message);
-
-    NamedList results = new NamedList();
-    try {
-      CollectionAction action = getCollectionAction(operation);
-      Cmd command = commandMap.get(action);
-      if (command != null) {
-        command.call(cloudManager.getClusterStateProvider().getClusterState(), message, results);
-      } else {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:"
-            + operation);
-      }
-    } catch (Exception e) {
-      String collName = message.getStr("collection");
-      if (collName == null) collName = message.getStr(NAME);
-
-      if (collName == null) {
-        SolrException.log(log, "Operation " + operation + " failed", e);
-      } else  {
-        SolrException.log(log, "Collection: " + collName + " operation: " + operation
-            + " failed", e);
-      }
-
-      results.add("Operation " + operation + " caused exception:", e);
-      SimpleOrderedMap nl = new SimpleOrderedMap();
-      nl.add("msg", e.getMessage());
-      nl.add("rspCode", e instanceof SolrException ? ((SolrException)e).code() : -1);
-      results.add("exception", nl);
-    }
-    return new OverseerSolrResponse(results);
-  }
-
-  @SuppressForbidden(reason = "Needs currentTimeMillis for mock requests")
-  private void mockOperation(ClusterState state, ZkNodeProps message, NamedList results) throws InterruptedException {
-    //only for test purposes
-    Thread.sleep(message.getInt("sleep", 1));
-    log.info("MOCK_TASK_EXECUTED time {} data {}", System.currentTimeMillis(), Utils.toJSONString(message));
-    results.add("MOCK_FINISHED", System.currentTimeMillis());
-  }
-
-  private CollectionAction getCollectionAction(String operation) {
-    CollectionAction action = CollectionAction.get(operation);
-    if (action == null) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:" + operation);
-    }
-    return action;
-  }
-
-  private void reloadCollection(ClusterState clusterState, ZkNodeProps message, NamedList results) {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.RELOAD.toString());
-
-    String asyncId = message.getStr(ASYNC);
-    Map<String, String> requestMap = null;
-    if (asyncId != null) {
-      requestMap = new HashMap<>();
-    }
-    collectionCmd(message, params, results, Replica.State.ACTIVE, asyncId, requestMap);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processRebalanceLeaders(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, CORE_NAME_PROP, ELECTION_NODE_PROP,
-        CORE_NODE_NAME_PROP, BASE_URL_PROP, REJOIN_AT_HEAD_PROP);
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(COLLECTION_PROP, message.getStr(COLLECTION_PROP));
-    params.set(SHARD_ID_PROP, message.getStr(SHARD_ID_PROP));
-    params.set(REJOIN_AT_HEAD_PROP, message.getStr(REJOIN_AT_HEAD_PROP));
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.REJOINLEADERELECTION.toString());
-    params.set(CORE_NAME_PROP, message.getStr(CORE_NAME_PROP));
-    params.set(CORE_NODE_NAME_PROP, message.getStr(CORE_NODE_NAME_PROP));
-    params.set(ELECTION_NODE_PROP, message.getStr(ELECTION_NODE_PROP));
-    params.set(BASE_URL_PROP, message.getStr(BASE_URL_PROP));
-
-    String baseUrl = message.getStr(BASE_URL_PROP);
-    ShardRequest sreq = new ShardRequest();
-    sreq.nodeName = message.getStr(ZkStateReader.CORE_NAME_PROP);
-    // yes, they must use the same admin handler path everywhere...
-    params.set("qt", adminPath);
-    sreq.purpose = ShardRequest.PURPOSE_PRIVATE;
-    sreq.shards = new String[] {baseUrl};
-    sreq.actualShards = sreq.shards;
-    sreq.params = params;
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-    shardHandler.submit(sreq, baseUrl, sreq.params);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processReplicaAddPropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP, PROPERTY_VALUE_PROP);
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICAPROP.toLower());
-    propMap.putAll(message.getProperties());
-    ZkNodeProps m = new ZkNodeProps(propMap);
-    inQueue.offer(Utils.toJSON(m));
-  }
-
-  private void processReplicaDeletePropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP);
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, DELETEREPLICAPROP.toLower());
-    propMap.putAll(message.getProperties());
-    ZkNodeProps m = new ZkNodeProps(propMap);
-    inQueue.offer(Utils.toJSON(m));
-  }
-
-  private void balanceProperty(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    if (StringUtils.isBlank(message.getStr(COLLECTION_PROP)) || StringUtils.isBlank(message.getStr(PROPERTY_PROP))) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "The '" + COLLECTION_PROP + "' and '" + PROPERTY_PROP +
-              "' parameters are required for the BALANCESHARDUNIQUE operation, no action taken");
-    }
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
-    Map<String, Object> propMap = new HashMap<>();
-    propMap.put(Overseer.QUEUE_OPERATION, BALANCESHARDUNIQUE.toLower());
-    propMap.putAll(message.getProperties());
-    inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-  }
-
-  /**
-   * Walks the tree of collection status to verify that any replica not reporting a "down" status is
-   * on a live node; any replica reporting its status as "active" whose node is not live is
-   * marked as "down". Used by CLUSTERSTATUS.
-   * @param liveNodes List of currently live node names.
-   * @param collectionProps Map of collection status information pulled directly from ZooKeeper.
-   */
-
-  @SuppressWarnings("unchecked")
-  protected void crossCheckReplicaStateWithLiveNodes(List<String> liveNodes, NamedList<Object> collectionProps) {
-    Iterator<Map.Entry<String,Object>> colls = collectionProps.iterator();
-    while (colls.hasNext()) {
-      Map.Entry<String,Object> next = colls.next();
-      Map<String,Object> collMap = (Map<String,Object>)next.getValue();
-      Map<String,Object> shards = (Map<String,Object>)collMap.get("shards");
-      for (Object nextShard : shards.values()) {
-        Map<String,Object> shardMap = (Map<String,Object>)nextShard;
-        Map<String,Object> replicas = (Map<String,Object>)shardMap.get("replicas");
-        for (Object nextReplica : replicas.values()) {
-          Map<String,Object> replicaMap = (Map<String,Object>)nextReplica;
-          if (Replica.State.getState((String) replicaMap.get(ZkStateReader.STATE_PROP)) != Replica.State.DOWN) {
-            // not down, so verify the node is live
-            String node_name = (String)replicaMap.get(ZkStateReader.NODE_NAME_PROP);
-            if (!liveNodes.contains(node_name)) {
-              // node is not live, so this replica is actually down
-              replicaMap.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Get collection status from cluster state.
-   * Can optionally restrict the returned status to a given set of shard names.
-   *
-   *
-   * @param collection collection map parsed from JSON-serialized {@link ClusterState}
-   * @param name  collection name
-   * @param requestedShards a set of shards to be returned in the status.
-   *                        An empty or null value indicates <b>all</b> shards.
-   * @return map of collection properties
-   */
-  @SuppressWarnings("unchecked")
-  private Map<String, Object> getCollectionStatus(Map<String, Object> collection, String name, Set<String> requestedShards) {
-    if (collection == null)  {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
-    }
-    if (requestedShards == null || requestedShards.isEmpty()) {
-      return collection;
-    } else {
-      Map<String, Object> shards = (Map<String, Object>) collection.get("shards");
-      Map<String, Object>  selected = new HashMap<>();
-      for (String selectedShard : requestedShards) {
-        if (!shards.containsKey(selectedShard)) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " shard: " + selectedShard + " not found");
-        }
-        selected.put(selectedShard, shards.get(selectedShard));
-        collection.put("shards", selected);
-      }
-      return collection;
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws Exception {
-    ((DeleteReplicaCmd) commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, message, results, onComplete);
-
-  }
-
-  boolean waitForCoreNodeGone(String collectionName, String shard, String replicaName, int timeoutms) throws InterruptedException {
-    TimeOut timeout = new TimeOut(timeoutms, TimeUnit.MILLISECONDS, timeSource);
-    while (! timeout.hasTimedOut()) {
-      timeout.sleep(100);
-      DocCollection docCollection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (docCollection == null) { // someone already deleted the collection
-        return true;
-      }
-      Slice slice = docCollection.getSlice(shard);
-      if(slice == null || slice.getReplica(replicaName) == null) {
-        return true;
-      }
-    }
-    // replica still exists after the timeout
-    return false;
-  }
-
-  void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws Exception {
-    ZkNodeProps m = new ZkNodeProps(
-        Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
-        ZkStateReader.CORE_NAME_PROP, core,
-        ZkStateReader.NODE_NAME_PROP, replica.getStr(ZkStateReader.NODE_NAME_PROP),
-        ZkStateReader.COLLECTION_PROP, collectionName,
-        ZkStateReader.CORE_NODE_NAME_PROP, replicaName,
-        ZkStateReader.BASE_URL_PROP, replica.getStr(ZkStateReader.BASE_URL_PROP));
-    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-  }
-
-  void checkRequired(ZkNodeProps message, String... props) {
-    for (String prop : props) {
-      if (message.get(prop) == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, StrUtils.join(Arrays.asList(props), ',') + " are required params");
-      }
-    }
-
-  }
-
-  // TODO: shouldn't we remove this in the next release?
-  private void migrateStateFormat(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    final String collectionName = message.getStr(COLLECTION_PROP);
-
-    boolean firstLoop = true;
-    // wait for a while until the state format changes
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    while (! timeout.hasTimedOut()) {
-      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (collection == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + collectionName + " not found");
-      }
-      if (collection.getStateFormat() == 2) {
-        // Done.
-        results.add("success", new SimpleOrderedMap<>());
-        return;
-      }
-
-      if (firstLoop) {
-        // Actually queue the migration command.
-        firstLoop = false;
-        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, MIGRATESTATEFORMAT.toLower(), COLLECTION_PROP, collectionName);
-        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-      }
-      timeout.sleep(100);
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not migrate state format for collection: " + collectionName);
-  }
-
-  void commit(NamedList results, String slice, Replica parentShardLeader) {
-    log.debug("Calling soft commit to make sub shard updates visible");
-    String coreUrl = new ZkCoreNodeProps(parentShardLeader).getCoreUrl();
-    // HttpShardHandler is hard-coded to send a QueryRequest, hence we go direct
-    // and force-open a searcher so that we have documents to show upon switching states
-    UpdateResponse updateResponse = null;
-    try {
-      updateResponse = softCommit(coreUrl);
-      processResponse(results, null, coreUrl, updateResponse, slice, Collections.emptySet());
-    } catch (Exception e) {
-      processResponse(results, e, coreUrl, updateResponse, slice, Collections.emptySet());
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to call distrib softCommit on: " + coreUrl, e);
-    }
-  }
-
-
-  static UpdateResponse softCommit(String url) throws SolrServerException, IOException {
-
-    try (HttpSolrClient client = new HttpSolrClient.Builder(url)
-        .withConnectionTimeout(30000)
-        .withSocketTimeout(120000)
-        .build()) {
-      UpdateRequest ureq = new UpdateRequest();
-      ureq.setParams(new ModifiableSolrParams());
-      ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
-      return ureq.process(client);
-    }
-  }
-
-  String waitForCoreNodeName(String collectionName, String msgNodeName, String msgCore) {
-    int retryCount = 320;
-    while (retryCount-- > 0) {
-      final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
-      if (docCollection != null && docCollection.getSlicesMap() != null) {
-        Map<String,Slice> slicesMap = docCollection.getSlicesMap();
-        for (Slice slice : slicesMap.values()) {
-          for (Replica replica : slice.getReplicas()) {
-            // TODO: for really large clusters, we could 'index' on this
-
-            String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
-            String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-
-            if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
-              return replica.getName();
-            }
-          }
-        }
-      }
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-      }
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
-  }
-
-  void waitForNewShard(String collectionName, String sliceName) throws KeeperException, InterruptedException {
-    log.debug("Waiting for slice {} of collection {} to be available", sliceName, collectionName);
-    RTimer timer = new RTimer();
-    int retryCount = 320;
-    while (retryCount-- > 0) {
-      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-      if (collection == null) {
-        throw new SolrException(ErrorCode.SERVER_ERROR,
-            "Unable to find collection: " + collectionName + " in clusterstate");
-      }
-      Slice slice = collection.getSlice(sliceName);
-      if (slice != null) {
-        log.debug("Waited for {}ms for slice {} of collection {} to be available",
-            timer.getTime(), sliceName, collectionName);
-        return;
-      }
-      Thread.sleep(1000);
-    }
-    throw new SolrException(ErrorCode.SERVER_ERROR,
-        "Could not find new slice " + sliceName + " in collection " + collectionName
-            + " even after waiting for " + timer.getTime() + "ms"
-    );
-  }
-
-  DocRouter.Range intersect(DocRouter.Range a, DocRouter.Range b) {
-    if (a == null || b == null || !a.overlaps(b)) {
-      return null;
-    } else if (a.isSubsetOf(b))
-      return a;
-    else if (b.isSubsetOf(a))
-      return b;
-    else if (b.includes(a.max)) {
-      return new DocRouter.Range(b.min, a.max);
-    } else  {
-      return new DocRouter.Range(a.min, b.max);
-    }
-  }
-
-  void sendShardRequest(String nodeName, ModifiableSolrParams params,
-                        ShardHandler shardHandler, String asyncId,
-                        Map<String, String> requestMap) {
-    sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap, adminPath, zkStateReader);
-
-  }
-
-  public static void sendShardRequest(String nodeName, ModifiableSolrParams params, ShardHandler shardHandler,
-                                      String asyncId, Map<String, String> requestMap, String adminPath,
-                                      ZkStateReader zkStateReader) {
-    if (asyncId != null) {
-      String coreAdminAsyncId = asyncId + Math.abs(System.nanoTime());
-      params.set(ASYNC, coreAdminAsyncId);
-      requestMap.put(nodeName, coreAdminAsyncId);
-    }
-
-    ShardRequest sreq = new ShardRequest();
-    params.set("qt", adminPath);
-    sreq.purpose = 1;
-    String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
-    sreq.shards = new String[]{replica};
-    sreq.actualShards = sreq.shards;
-    sreq.nodeName = nodeName;
-    sreq.params = params;
-
-    shardHandler.submit(sreq, replica, sreq.params);
-  }
-
-  void addPropertyParams(ZkNodeProps message, ModifiableSolrParams params) {
-    // Now add the property.key=value pairs
-    for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
-        params.set(key, message.getStr(key));
-      }
-    }
-  }
-
-  void addPropertyParams(ZkNodeProps message, Map<String, Object> map) {
-    // Now add the property.key=value pairs
-    for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
-        map.put(key, message.getStr(key));
-      }
-    }
-  }
-
-
-  private void modifyCollection(ClusterState clusterState, ZkNodeProps message, NamedList results)
-      throws Exception {
-    
-    final String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    //the rest of the processing is based on writing cluster state properties
-    //remove the property here to avoid any errors down the pipeline due to this property appearing
-    String configName = (String) message.getProperties().remove(COLL_CONF);
-    
-    if(configName != null) {
-      validateConfigOrThrowSolrException(configName);
-      
-      boolean isLegacyCloud =  Overseer.isLegacy(zkStateReader);
-      createConfNode(cloudManager.getDistribStateManager(), configName, collectionName, isLegacyCloud);
-      reloadCollection(null, new ZkNodeProps(NAME, collectionName), results);
-    }
-    
-    overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    boolean areChangesVisible = true;
-    while (!timeout.hasTimedOut()) {
-      DocCollection collection = cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName);
-      areChangesVisible = true;
-      for (Map.Entry<String,Object> updateEntry : message.getProperties().entrySet()) {
-        String updateKey = updateEntry.getKey();
-        if (!updateKey.equals(ZkStateReader.COLLECTION_PROP)
-            && !updateKey.equals(Overseer.QUEUE_OPERATION)
-            && !collection.get(updateKey).equals(updateEntry.getValue())){
-          areChangesVisible = false;
-          break;
-        }
-      }
-      if (areChangesVisible) break;
-      timeout.sleep(100);
-    }
-
-    if (!areChangesVisible)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not modify collection " + message);
-  }
-
-  void cleanupCollection(String collectionName, NamedList results) throws Exception {
-    log.error("Cleaning up collection [" + collectionName + "]." );
-    Map<String, Object> props = makeMap(
-        Overseer.QUEUE_OPERATION, DELETE.toLower(),
-        NAME, collectionName);
-    commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
-  }
-
-  Map<String, Replica> waitToSeeReplicasInState(String collectionName, Collection<String> coreNames) throws InterruptedException {
-    Map<String, Replica> result = new HashMap<>();
-    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-    while (true) {
-      DocCollection coll = zkStateReader.getClusterState().getCollection(collectionName);
-      for (String coreName : coreNames) {
-        if (result.containsKey(coreName)) continue;
-        for (Slice slice : coll.getSlices()) {
-          for (Replica replica : slice.getReplicas()) {
-            if (coreName.equals(replica.getStr(ZkStateReader.CORE_NAME_PROP))) {
-              result.put(coreName, replica);
-              break;
-            }
-          }
-        }
-      }
-      
-      if (result.size() == coreNames.size()) {
-        return result;
-      } else {
-        log.debug("Expecting {} cores but found {}", coreNames, result);
-      }
-      if (timeout.hasTimedOut()) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out waiting to see all replicas: " + coreNames + " in cluster state. Last state: " + coll);
-      }
-      
-      Thread.sleep(100);
-    }
-  }
-
-  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws Exception {
-
-    return ((AddReplicaCmd) commandMap.get(ADDREPLICA)).addReplica(clusterState, message, results, onComplete);
-  }
-
-  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
-                        String asyncId, Map<String, String> requestMap) {
-    processResponses(results, shardHandler, abortOnError, msgOnError, asyncId, requestMap, Collections.emptySet());
-  }
-
-  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
-                                String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
-    //Processes all shard responses
-    ShardResponse srsp;
-    do {
-      srsp = shardHandler.takeCompletedOrError();
-      if (srsp != null) {
-        processResponse(results, srsp, okayExceptions);
-        Throwable exception = srsp.getException();
-        if (abortOnError && exception != null)  {
-          // drain pending requests
-          while (srsp != null)  {
-            srsp = shardHandler.takeCompletedOrError();
-          }
-          throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError, exception);
-        }
-      }
-    } while (srsp != null);
-
-    //If request is async wait for the core admin to complete before returning
-    if (asyncId != null) {
-      waitForAsyncCallsToComplete(requestMap, results);
-      requestMap.clear();
-    }
-  }
-
-
-  void validateConfigOrThrowSolrException(String configName) throws IOException, KeeperException, InterruptedException {
-    boolean isValid = cloudManager.getDistribStateManager().hasData(ZkConfigManager.CONFIGS_ZKNODE + "/" + configName);
-    if (!isValid) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot find the specified config set: " + configName);
-    }
-  }
-
-  /**
-   * This doesn't validate the config (path) itself and is just responsible for creating the confNode.
-   * That check should be done before the config node is created.
-   */
-  public static void createConfNode(DistribStateManager stateManager, String configName, String coll, boolean isLegacyCloud) throws IOException, AlreadyExistsException, BadVersionException, KeeperException, InterruptedException {
-    
-    if (configName != null) {
-      String collDir = ZkStateReader.COLLECTIONS_ZKNODE + "/" + coll;
-      log.debug("creating collections conf node {} ", collDir);
-      byte[] data = Utils.toJSON(makeMap(ZkController.CONFIGNAME_PROP, configName));
-      if (stateManager.hasData(collDir)) {
-        stateManager.setData(collDir, data, -1);
-      } else {
-        stateManager.makePath(collDir, data, CreateMode.PERSISTENT, false);
-      }
-    } else {
-      if(isLegacyCloud){
-        log.warn("Could not obtain config name");
-      } else {
-        throw new SolrException(ErrorCode.BAD_REQUEST,"Unable to get config name");
-      }
-    }
-  }
-  
-  private void collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
-                             NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap) {
-    collectionCmd( message, params, results, stateMatcher, asyncId, requestMap, Collections.emptySet());
-  }
-
-
-  void collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
-                     NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
-    log.info("Executing Collection Cmd : " + params);
-    String collectionName = message.getStr(NAME);
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-
-    ClusterState clusterState = zkStateReader.getClusterState();
-    DocCollection coll = clusterState.getCollection(collectionName);
-    
-    for (Slice slice : coll.getSlices()) {
-      sliceCmd(clusterState, params, stateMatcher, slice, shardHandler, asyncId, requestMap);
-    }
-
-    processResponses(results, shardHandler, false, null, asyncId, requestMap, okayExceptions);
-
-  }
-
-  void sliceCmd(ClusterState clusterState, ModifiableSolrParams params, Replica.State stateMatcher,
-                Slice slice, ShardHandler shardHandler, String asyncId, Map<String, String> requestMap) {
-
-    for (Replica replica : slice.getReplicas()) {
-      if (clusterState.liveNodesContain(replica.getStr(ZkStateReader.NODE_NAME_PROP))
-          && (stateMatcher == null || Replica.State.getState(replica.getStr(ZkStateReader.STATE_PROP)) == stateMatcher)) {
-
-        // For thread safety, make only a simple (shallow) clone of the ModifiableSolrParams
-        ModifiableSolrParams cloneParams = new ModifiableSolrParams();
-        cloneParams.add(params);
-        cloneParams.set(CoreAdminParams.CORE, replica.getStr(ZkStateReader.CORE_NAME_PROP));
-
-        sendShardRequest(replica.getStr(ZkStateReader.NODE_NAME_PROP), cloneParams, shardHandler, asyncId, requestMap);
-      }
-    }
-  }
-  
-  private void processResponse(NamedList results, ShardResponse srsp, Set<String> okayExceptions) {
-    Throwable e = srsp.getException();
-    String nodeName = srsp.getNodeName();
-    SolrResponse solrResponse = srsp.getSolrResponse();
-    String shard = srsp.getShard();
-
-    processResponse(results, e, nodeName, solrResponse, shard, okayExceptions);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void processResponse(NamedList results, Throwable e, String nodeName, SolrResponse solrResponse, String shard, Set<String> okayExceptions) {
-    String rootThrowable = null;
-    if (e instanceof RemoteSolrException) {
-      rootThrowable = ((RemoteSolrException) e).getRootThrowable();
-    }
-
-    if (e != null && (rootThrowable == null || !okayExceptions.contains(rootThrowable))) {
-      log.error("Error from shard: " + shard, e);
-
-      SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
-      if (failure == null) {
-        failure = new SimpleOrderedMap();
-        results.add("failure", failure);
-      }
-
-      failure.add(nodeName, e.getClass().getName() + ":" + e.getMessage());
-
-    } else {
-
-      SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
-      if (success == null) {
-        success = new SimpleOrderedMap();
-        results.add("success", success);
-      }
-
-      success.add(nodeName, solrResponse.getResponse());
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private void waitForAsyncCallsToComplete(Map<String, String> requestMap, NamedList results) {
-    for (String k:requestMap.keySet()) {
-      log.debug("I am Waiting for :{}/{}", k, requestMap.get(k));
-      results.add(requestMap.get(k), waitForCoreAdminAsyncCallToComplete(k, requestMap.get(k)));
-    }
-  }
-
-  private NamedList waitForCoreAdminAsyncCallToComplete(String nodeName, String requestId) {
-    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set(CoreAdminParams.ACTION, CoreAdminAction.REQUESTSTATUS.toString());
-    params.set(CoreAdminParams.REQUESTID, requestId);
-    int counter = 0;
-    ShardRequest sreq;
-    do {
-      sreq = new ShardRequest();
-      params.set("qt", adminPath);
-      sreq.purpose = 1;
-      String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
-      sreq.shards = new String[] {replica};
-      sreq.actualShards = sreq.shards;
-      sreq.params = params;
-
-      shardHandler.submit(sreq, replica, sreq.params);
-
-      ShardResponse srsp;
-      do {
-        srsp = shardHandler.takeCompletedOrError();
-        if (srsp != null) {
-          NamedList results = new NamedList();
-          processResponse(results, srsp, Collections.emptySet());
-          if (srsp.getSolrResponse().getResponse() == null) {
-            NamedList response = new NamedList();
-            response.add("STATUS", "failed");
-            return response;
-          }
-          
-          String r = (String) srsp.getSolrResponse().getResponse().get("STATUS");
-          if (r.equals("running")) {
-            log.debug("The task is still RUNNING, continuing to wait.");
-            try {
-              Thread.sleep(1000);
-            } catch (InterruptedException e) {
-              Thread.currentThread().interrupt();
-            }
-            continue;
-
-          } else if (r.equals("completed")) {
-            log.debug("The task is COMPLETED, returning");
-            return srsp.getSolrResponse().getResponse();
-          } else if (r.equals("failed")) {
-            // TODO: Improve this. Get more information.
-            log.debug("The task is FAILED, returning");
-            return srsp.getSolrResponse().getResponse();
-          } else if (r.equals("notfound")) {
-            log.debug("The task is notfound, retry");
-            if (counter++ < 5) {
-              try {
-                Thread.sleep(1000);
-              } catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
-              }
-              break;
-            }
-            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request for requestId: " + requestId + "" + srsp.getSolrResponse().getResponse().get("STATUS") +
-                "retried " + counter + "times");
-          } else {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request " + srsp.getSolrResponse().getResponse().get("STATUS"));
-          }
-        }
-      } while (srsp != null);
-    } while(true);
-  }
-
-  @Override
-  public String getName() {
-    return "Overseer Collection Message Handler";
-  }
-
-  @Override
-  public String getTimerName(String operation) {
-    return "collection_" + operation;
-  }
-
-  @Override
-  public String getTaskKey(ZkNodeProps message) {
-    return message.containsKey(COLLECTION_PROP) ?
-      message.getStr(COLLECTION_PROP) : message.getStr(NAME);
-  }
-
-
-  private long sessionId = -1;
-  private LockTree.Session lockSession;
-
-  @Override
-  public Lock lockTask(ZkNodeProps message, OverseerTaskProcessor.TaskBatch taskBatch) {
-    if (lockSession == null || sessionId != taskBatch.getId()) {
-      // This is always called in the same thread.
-      // Each batch is supposed to have a new taskBatch,
-      // so if taskBatch changes we must create a new Session.
-      // Also check whether the running tasks are empty; if so, clear lockTree
-      // to ensure that locks are not 'leaked'.
-      if(taskBatch.getRunningTasks() == 0) lockTree.clear();
-      lockSession = lockTree.getSession();
-    }
-    return lockSession.lock(getCollectionAction(message.getStr(Overseer.QUEUE_OPERATION)),
-        Arrays.asList(
-            getTaskKey(message),
-            message.getStr(ZkStateReader.SHARD_ID_PROP),
-            message.getStr(ZkStateReader.REPLICA_PROP))
-
-    );
-  }
-
-
-  @Override
-  public void close() throws IOException {
-    this.isClosed = true;
-    if (tpe != null) {
-      if (!tpe.isShutdown()) {
-        ExecutorUtil.shutdownAndAwaitTermination(tpe);
-      }
-    }
-  }
-
-  @Override
-  public boolean isClosed() {
-    return isClosed;
-  }
-
-  interface Cmd {
-    void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception;
-  }
-}
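
At its core the handler is a command dispatcher: processMessage() resolves
the operation to a CollectionAction, looks up the matching Cmd in commandMap,
and invokes its call() with the cluster state, the overseer message, and a
results NamedList. A minimal sketch of a command written against the Cmd
interface above (MyCollectionCmd is hypothetical; real commands take the
handler in their constructor for access to zkStateReader and friends):

    class MyCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
      @Override
      public void call(ClusterState state, ZkNodeProps message,
                       NamedList results) throws Exception {
        // Whatever is added to `results` ends up in the OverseerSolrResponse
        // returned to the caller of the Collections API.
        String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
        results.add("success", "handled " + collection);
      }
    }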

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/OverseerRoleCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerRoleCmd.java b/solr/core/src/java/org/apache/solr/cloud/OverseerRoleCmd.java
deleted file mode 100644
index 0f450bd..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerRoleCmd.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
-
-public class OverseerRoleCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-  private final CollectionAction operation;
-  private final OverseerNodePrioritizer overseerPrioritizer;
-
-
-
-  public OverseerRoleCmd(OverseerCollectionMessageHandler ocmh, CollectionAction operation, OverseerNodePrioritizer prioritizer) {
-    this.ocmh = ocmh;
-    this.operation = operation;
-    this.overseerPrioritizer = prioritizer;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    SolrZkClient zkClient = zkStateReader.getZkClient();
-    Map roles = null;
-    String node = message.getStr("node");
-
-    String roleName = message.getStr("role");
-    boolean nodeExists = false;
-    if (nodeExists = zkClient.exists(ZkStateReader.ROLES, true)) {
-      roles = (Map) Utils.fromJSON(zkClient.getData(ZkStateReader.ROLES, null, new Stat(), true));
-    } else {
-      roles = new LinkedHashMap(1);
-    }
-
-    List nodeList = (List) roles.get(roleName);
-    if (nodeList == null) roles.put(roleName, nodeList = new ArrayList());
-    if (ADDROLE == operation) {
-      log.info("Overseer role added to {}", node);
-      if (!nodeList.contains(node)) nodeList.add(node);
-    } else if (REMOVEROLE == operation) {
-      log.info("Overseer role removed from {}", node);
-      nodeList.remove(node);
-    }
-
-    if (nodeExists) {
-      zkClient.setData(ZkStateReader.ROLES, Utils.toJSON(roles), true);
-    } else {
-      zkClient.create(ZkStateReader.ROLES, Utils.toJSON(roles), CreateMode.PERSISTENT, true);
-    }
-    // If there are too many nodes this command may time out, and dedicated
-    // overseers are most likely created when there are many nodes. So, do this operation in a separate thread.
-    new Thread(() -> {
-      try {
-        overseerPrioritizer.prioritizeOverseerNodes(ocmh.myId);
-      } catch (Exception e) {
-        log.error("Error in prioritizing Overseer", e);
-      }
-
-    }).start();
-
-  }
-
-}
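
Client-side, the ADDROLE/REMOVEROLE paths above are reached through the
Collections API; a sketch using the SolrJ request builders (assuming a
running CloudSolrClient named client, and "overseer" as the role name, the
only role the prioritizer acts on):

    // Pin the overseer role to a node, then remove it again.
    CollectionAdminRequest.addRole("127.0.0.1:8983_solr", "overseer")
        .process(client);
    CollectionAdminRequest.removeRole("127.0.0.1:8983_solr", "overseer")
        .process(client);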

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/OverseerStatusCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerStatusCmd.java b/solr/core/src/java/org/apache/solr/cloud/OverseerStatusCmd.java
deleted file mode 100644
index aba4872..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerStatusCmd.java
+++ /dev/null
@@ -1,112 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.codahale.metrics.Timer;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.util.stats.MetricUtils;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class OverseerStatusCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public OverseerStatusCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    String leaderNode = OverseerTaskProcessor.getLeaderNode(zkStateReader.getZkClient());
-    results.add("leader", leaderNode);
-    Stat stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/queue", null, stat, true);
-    results.add("overseer_queue_size", stat.getNumChildren());
-    stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/queue-work", null, stat, true);
-    results.add("overseer_work_queue_size", stat.getNumChildren());
-    stat = new Stat();
-    zkStateReader.getZkClient().getData("/overseer/collection-queue-work", null, stat, true);
-    results.add("overseer_collection_queue_size", stat.getNumChildren());
-
-    NamedList overseerStats = new NamedList();
-    NamedList collectionStats = new NamedList();
-    NamedList stateUpdateQueueStats = new NamedList();
-    NamedList workQueueStats = new NamedList();
-    NamedList collectionQueueStats = new NamedList();
-    Stats stats = ocmh.stats;
-    for (Map.Entry<String, Stats.Stat> entry : stats.getStats().entrySet()) {
-      String key = entry.getKey();
-      NamedList<Object> lst = new SimpleOrderedMap<>();
-      if (key.startsWith("collection_"))  {
-        collectionStats.add(key.substring(11), lst);
-        int successes = stats.getSuccessCount(entry.getKey());
-        int errors = stats.getErrorCount(entry.getKey());
-        lst.add("requests", successes);
-        lst.add("errors", errors);
-        List<Stats.FailedOp> failureDetails = stats.getFailureDetails(key);
-        if (failureDetails != null) {
-          List<SimpleOrderedMap<Object>> failures = new ArrayList<>();
-          for (Stats.FailedOp failedOp : failureDetails) {
-            SimpleOrderedMap<Object> fail = new SimpleOrderedMap<>();
-            fail.add("request", failedOp.req.getProperties());
-            fail.add("response", failedOp.resp.getResponse());
-            failures.add(fail);
-          }
-          lst.add("recent_failures", failures);
-        }
-      } else if (key.startsWith("/overseer/queue_"))  {
-        stateUpdateQueueStats.add(key.substring(16), lst);
-      } else if (key.startsWith("/overseer/queue-work_"))  {
-        workQueueStats.add(key.substring(21), lst);
-      } else if (key.startsWith("/overseer/collection-queue-work_"))  {
-        collectionQueueStats.add(key.substring(32), lst);
-      } else  {
-        // overseer stats
-        overseerStats.add(key, lst);
-        int successes = stats.getSuccessCount(entry.getKey());
-        int errors = stats.getErrorCount(entry.getKey());
-        lst.add("requests", successes);
-        lst.add("errors", errors);
-      }
-      Timer timer = entry.getValue().requestTime;
-      MetricUtils.addMetrics(lst, timer);
-    }
-    results.add("overseer_operations", overseerStats);
-    results.add("collection_operations", collectionStats);
-    results.add("overseer_queue", stateUpdateQueueStats);
-    results.add("overseer_internal_queue", workQueueStats);
-    results.add("collection_queue", collectionQueueStats);
-
-  }
-}

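OverseerStatusCmd serves the OVERSEERSTATUS action. Using the same generic SolrJ pattern as the ADDROLE sketch
above, only the parameters change; the response carries the keys assembled by this class, such as "leader",
"overseer_queue_size", and "collection_operations":

  // (imports as in the ADDROLE sketch, plus org.apache.solr.common.util.NamedList)
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionParams.CollectionAction.OVERSEERSTATUS.toString());
  QueryRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  NamedList<Object> status = cloudClient.request(request);
  System.out.println(status.get("leader")); // current overseer leader node
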
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
deleted file mode 100644
index e903091..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/ReplaceNodeCmd.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CollectionStateWatcher;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public ReplaceNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    String source = message.getStr(CollectionParams.SOURCE_NODE, message.getStr("source"));
-    String target = message.getStr(CollectionParams.TARGET_NODE, message.getStr("target"));
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-    if (source == null || target == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sourceNode and targetNode are required params");
-    }
-    String async = message.getStr("async");
-    int timeout = message.getInt("timeout", 10 * 60); // 10 minutes
-    boolean parallel = message.getBool("parallel", false);
-    ClusterState clusterState = zkStateReader.getClusterState();
-
-    if (!clusterState.liveNodesContain(source)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + source + " is not live");
-    }
-    if (!clusterState.liveNodesContain(target)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target Node: " + target + " is not live");
-    }
-    List<ZkNodeProps> sourceReplicas = getReplicasOfNode(source, clusterState);
-    // how many leaders are we moving? for these replicas we have to make sure that either:
-    // * another existing replica can become a leader, or
-    // * we wait until the newly created replica completes recovery (and can become the new leader)
-    // If waitForFinalState=true we wait for all replicas
-    int numLeaders = 0;
-    for (ZkNodeProps props : sourceReplicas) {
-      if (props.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
-        numLeaders++;
-      }
-    }
-    // map of collectionName_coreNodeName to watchers
-    Map<String, CollectionStateWatcher> watchers = new HashMap<>();
-    List<ZkNodeProps> createdReplicas = new ArrayList<>();
-
-    AtomicBoolean anyOneFailed = new AtomicBoolean(false);
-    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(sourceReplicas.size(), ocmh);
-
-    SolrCloseableLatch replicasToRecover = new SolrCloseableLatch(numLeaders, ocmh);
-
-    for (ZkNodeProps sourceReplica : sourceReplicas) {
-      NamedList nl = new NamedList();
-      log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
-      ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, target);
-      if (async != null) msg.getProperties().put(ASYNC, async);
-      final ZkNodeProps addedReplica = ocmh.addReplica(clusterState,
-          msg, nl, () -> {
-            countDownLatch.countDown();
-            if (nl.get("failure") != null) {
-              String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
-                  " on node=%s", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
-              log.warn(errorString);
-              // one replica creation failed. Make the best attempt to
-              // delete all the replicas created so far in the target
-              // and exit
-              synchronized (results) {
-                results.add("failure", errorString);
-                anyOneFailed.set(true);
-              }
-            } else {
-              log.debug("Successfully created replica for collection={} shard={} on node={}",
-                  sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
-            }
-          });
-
-      if (addedReplica != null) {
-        createdReplicas.add(addedReplica);
-        if (sourceReplica.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
-          String shardName = sourceReplica.getStr(SHARD_ID_PROP);
-          String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
-          String collectionName = sourceReplica.getStr(COLLECTION_PROP);
-          String key = collectionName + "_" + replicaName;
-          CollectionStateWatcher watcher;
-          if (waitForFinalState) {
-            watcher = new ActiveReplicaWatcher(collectionName, null,
-                Collections.singletonList(addedReplica.getStr(ZkStateReader.CORE_NAME_PROP)), replicasToRecover);
-          } else {
-            watcher = new LeaderRecoveryWatcher(collectionName, shardName, replicaName,
-                addedReplica.getStr(ZkStateReader.CORE_NAME_PROP), replicasToRecover);
-          }
-          watchers.put(key, watcher);
-          log.debug("--- adding " + key + ", " + watcher);
-          zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
-        } else {
-          log.debug("--- not waiting for " + addedReplica);
-        }
-      }
-    }
-
-    log.debug("Waiting for replicas to be added");
-    if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
-      log.info("Timed out waiting for replicas to be added");
-      anyOneFailed.set(true);
-    } else {
-      log.debug("Finished waiting for replicas to be added");
-    }
-
-    // now wait for leader replicas to recover
-    log.debug("Waiting for {} leader replicas to recover", numLeaders);
-    if (!replicasToRecover.await(timeout, TimeUnit.SECONDS)) {
-      log.info("Timed out waiting for {} leader replicas to recover", replicasToRecover.getCount());
-      anyOneFailed.set(true);
-    } else {
-      log.debug("Finished waiting for leader replicas to recover");
-    }
-    // remove the watchers, we're done either way
-    for (Map.Entry<String, CollectionStateWatcher> e : watchers.entrySet()) {
-      zkStateReader.removeCollectionStateWatcher(e.getKey(), e.getValue());
-    }
-    if (anyOneFailed.get()) {
-      log.info("Failed to create some replicas. Cleaning up all replicas on target node");
-      SolrCloseableLatch cleanupLatch = new SolrCloseableLatch(createdReplicas.size(), ocmh);
-      for (ZkNodeProps createdReplica : createdReplicas) {
-        NamedList deleteResult = new NamedList();
-        try {
-          ocmh.deleteReplica(zkStateReader.getClusterState(), createdReplica.plus("parallel", "true"), deleteResult, () -> {
-            cleanupLatch.countDown();
-            if (deleteResult.get("failure") != null) {
-              synchronized (results) {
-                results.add("failure", "Could not cleanup, because of : " + deleteResult.get("failure"));
-              }
-            }
-          });
-        } catch (KeeperException e) {
-          cleanupLatch.countDown();
-          log.warn("Error deleting replica ", e);
-        } catch (Exception e) {
-          log.warn("Error deleting replica ", e);
-          cleanupLatch.countDown();
-          throw e;
-        }
-      }
-      cleanupLatch.await(5, TimeUnit.MINUTES);
-      return;
-    }
-
-    // Reaching this far means all replicas could be recreated.
-    // Now clean up the replicas on the source node.
-    DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source, async);
-    results.add("success", "REPLACENODE action completed successfully from: " + source + " to: " + target);
-  }
-
-  static List<ZkNodeProps> getReplicasOfNode(String source, ClusterState state) {
-    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
-    for (Map.Entry<String, DocCollection> e : state.getCollectionsMap().entrySet()) {
-      for (Slice slice : e.getValue().getSlices()) {
-        for (Replica replica : slice.getReplicas()) {
-          if (source.equals(replica.getNodeName())) {
-            ZkNodeProps props = new ZkNodeProps(
-                COLLECTION_PROP, e.getKey(),
-                SHARD_ID_PROP, slice.getName(),
-                ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
-                ZkStateReader.REPLICA_PROP, replica.getName(),
-                ZkStateReader.REPLICA_TYPE, replica.getType().name(),
-                ZkStateReader.LEADER_PROP, String.valueOf(replica.equals(slice.getLeader())),
-                CoreAdminParams.NODE, source);
-            sourceReplicas.add(props);
-          }
-        }
-      }
-    }
-    return sourceReplicas;
-  }
-
-}

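ReplaceNodeCmd implements the REPLACENODE action shown above. A sketch in the same style as the earlier
snippets, assuming cloudClient and two live node names (both placeholders); "parallel", "timeout", and
"waitForFinalState" are the optional knobs read by the command:

  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionParams.CollectionAction.REPLACENODE.toString());
  params.set("sourceNode", "127.0.0.1:8983_solr"); // placeholder source node
  params.set("targetNode", "127.0.0.1:7574_solr"); // placeholder target node
  QueryRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  cloudClient.request(request);
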
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
deleted file mode 100644
index 9c9a5c9..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/RestoreCmd.java
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROPS;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.RANDOM;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
-import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public RestoreCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    // TODO maybe we can inherit createCollection's options/code
-
-    String restoreCollectionName = message.getStr(COLLECTION_PROP);
-    String backupName = message.getStr(NAME); // of backup
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    String asyncId = message.getStr(ASYNC);
-    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-    Map<String, String> requestMap = new HashMap<>();
-
-    CoreContainer cc = ocmh.overseer.getZkController().getCoreContainer();
-    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-
-    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-    URI backupPath = repository.resolve(location, backupName);
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    BackupManager backupMgr = new BackupManager(repository, zkStateReader);
-
-    Properties properties = backupMgr.readBackupProperties(location, backupName);
-    String backupCollection = properties.getProperty(BackupManager.COLLECTION_NAME_PROP);
-    DocCollection backupCollectionState = backupMgr.readCollectionState(location, backupName, backupCollection);
-
-    // Get the Solr nodes to restore a collection.
-    final List<String> nodeList = Assign.getLiveOrLiveAndCreateNodeSetList(
-        zkStateReader.getClusterState().getLiveNodes(), message, RANDOM);
-
-    int numShards = backupCollectionState.getActiveSlices().size();
-    
-    int numNrtReplicas = getInt(message, NRT_REPLICAS, backupCollectionState.getNumNrtReplicas(), 0);
-    if (numNrtReplicas == 0) {
-      numNrtReplicas = getInt(message, REPLICATION_FACTOR, backupCollectionState.getReplicationFactor(), 0);
-    }
-    int numTlogReplicas = getInt(message, TLOG_REPLICAS, backupCollectionState.getNumTlogReplicas(), 0);
-    int numPullReplicas = getInt(message, PULL_REPLICAS, backupCollectionState.getNumPullReplicas(), 0);
-    int totalReplicasPerShard = numNrtReplicas + numTlogReplicas + numPullReplicas;
-    
-    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, backupCollectionState.getMaxShardsPerNode());
-    int availableNodeCount = nodeList.size();
-    if ((numShards * totalReplicasPerShard) > (availableNodeCount * maxShardsPerNode)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT, "Solr cloud with %d available nodes is insufficient for"
-              + " restoring a collection with %d shards, %d total replicas per shard, and maxShardsPerNode %d."
-              + " Consider increasing the maxShardsPerNode value or the number of available nodes.",
-              availableNodeCount, numShards, totalReplicasPerShard, maxShardsPerNode));
-    }
-
-    //Upload the configs
-    String configName = (String) properties.get(COLL_CONF);
-    String restoreConfigName = message.getStr(COLL_CONF, configName);
-    if (zkStateReader.getConfigManager().configExists(restoreConfigName)) {
-      log.info("Using existing config {}", restoreConfigName);
-      //TODO add overwrite option?
-    } else {
-      log.info("Uploading config {}", restoreConfigName);
-      backupMgr.uploadConfigDir(location, backupName, configName, restoreConfigName);
-    }
-
-    log.info("Starting restore into collection={} with backup_name={} at location={}", restoreCollectionName, backupName,
-        location);
-
-    //Create core-less collection
-    {
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, CREATE.toString());
-      propMap.put("fromApi", "true"); // mostly true.  Prevents autoCreated=true in the collection state.
-      if (properties.get(STATE_FORMAT) == null) {
-        propMap.put(STATE_FORMAT, "2");
-      }
-
-      // inherit settings from input API, defaulting to the backup's setting.  Ex: replicationFactor
-      for (String collProp : COLL_PROPS.keySet()) {
-        Object val = message.getProperties().getOrDefault(collProp, backupCollectionState.get(collProp));
-        if (val != null) {
-          propMap.put(collProp, val);
-        }
-      }
-
-      propMap.put(NAME, restoreCollectionName);
-      propMap.put(CREATE_NODE_SET, CREATE_NODE_SET_EMPTY); //no cores
-      propMap.put(COLL_CONF, restoreConfigName);
-
-      // router.*
-      @SuppressWarnings("unchecked")
-      Map<String, Object> routerProps = (Map<String, Object>) backupCollectionState.getProperties().get(DocCollection.DOC_ROUTER);
-      for (Map.Entry<String, Object> pair : routerProps.entrySet()) {
-        propMap.put(DocCollection.DOC_ROUTER + "." + pair.getKey(), pair.getValue());
-      }
-
-      Set<String> sliceNames = backupCollectionState.getActiveSlicesMap().keySet();
-      if (backupCollectionState.getRouter() instanceof ImplicitDocRouter) {
-        propMap.put(SHARDS_PROP, StrUtils.join(sliceNames, ','));
-      } else {
-        propMap.put(NUM_SLICES, sliceNames.size());
-        // ClusterStateMutator.createCollection detects that "slices" is in fact a slice structure instead of a
-        //   list of names, and if so uses this instead of building it.  We clear the replica list.
-        Collection<Slice> backupSlices = backupCollectionState.getActiveSlices();
-        Map<String, Slice> newSlices = new LinkedHashMap<>(backupSlices.size());
-        for (Slice backupSlice : backupSlices) {
-          newSlices.put(backupSlice.getName(),
-              new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties()));
-        }
-        propMap.put(SHARDS_PROP, newSlices);
-      }
-
-      ocmh.commandMap.get(CREATE).call(zkStateReader.getClusterState(), new ZkNodeProps(propMap), new NamedList());
-      // note: when createCollection() returns, the collection exists (no race)
-    }
-
-    DocCollection restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
-
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-
-    //Mark all shards in CONSTRUCTION STATE while we restore the data
-    {
-      //TODO might instead createCollection accept an initial state?  Is there a race?
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-      for (Slice shard : restoreCollection.getSlices()) {
-        propMap.put(shard.getName(), Slice.State.CONSTRUCTION.toString());
-      }
-      propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
-      inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-    }
-
-    // TODO how do we leverage the RULE / SNITCH logic in createCollection?
-
-    ClusterState clusterState = zkStateReader.getClusterState();
-
-    List<String> sliceNames = new ArrayList<>();
-    restoreCollection.getSlices().forEach(x -> sliceNames.add(x.getName()));
-    PolicyHelper.SessionWrapper sessionWrapper = null;
-
-    try {
-      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(
-          ocmh.cloudManager, clusterState,
-          nodeList, restoreCollectionName,
-          message, sliceNames,
-          numNrtReplicas, numTlogReplicas, numPullReplicas);
-      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
-      //Create one replica per shard and copy backed up data to it
-      for (Slice slice : restoreCollection.getSlices()) {
-        log.debug("Adding replica for shard={} collection={} ", slice.getName(), restoreCollection);
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD);
-        propMap.put(COLLECTION_PROP, restoreCollectionName);
-        propMap.put(SHARD_ID_PROP, slice.getName());
-
-        if (numNrtReplicas >= 1) {
-          propMap.put(REPLICA_TYPE, Replica.Type.NRT.name());
-        } else if (numTlogReplicas >= 1) {
-          propMap.put(REPLICA_TYPE, Replica.Type.TLOG.name());
-        } else {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Unexpected number of replicas, replicationFactor, " +
-              Replica.Type.NRT + " or " + Replica.Type.TLOG + " must be greater than 0");
-        }
-
-        // Get the first node matching the shard to restore in
-        for (ReplicaPosition replicaPosition : replicaPositions) {
-          if (Objects.equals(replicaPosition.shard, slice.getName())) {
-            propMap.put(CoreAdminParams.NODE, replicaPosition.node);
-            replicaPositions.remove(replicaPosition);
-            break;
-          }
-        }
-
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        ocmh.addPropertyParams(message, propMap);
-
-        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), new NamedList(), null);
-      }
-
-      // refresh the local copy of the collection state
-      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
-
-      //Copy data from backed up index to each replica
-      for (Slice slice : restoreCollection.getSlices()) {
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.RESTORECORE.toString());
-        params.set(NAME, "snapshot." + slice.getName());
-        params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString());
-        params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
-
-        ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
-      }
-      ocmh.processResponses(new NamedList(), shardHandler, true, "Could not restore core", asyncId, requestMap);
-
-      //Mark all shards in ACTIVE STATE
-      {
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
-        for (Slice shard : restoreCollection.getSlices()) {
-          propMap.put(shard.getName(), Slice.State.ACTIVE.toString());
-        }
-        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-      }
-
-      // refresh the local copy of the collection state
-      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
-
-      if (totalReplicasPerShard > 1) {
-        log.info("Adding replicas to restored collection={}", restoreCollection);
-        for (Slice slice : restoreCollection.getSlices()) {
-
-          // Add the remaining replicas for each shard, considering its type
-          int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
-
-          // We already created either an NRT or a TLOG replica as leader
-          if (numNrtReplicas > 0) {
-            createdNrtReplicas++;
-          } else if (numTlogReplicas > 0) {
-            createdTlogReplicas++;
-          }
-
-          for (int i = 1; i < totalReplicasPerShard; i++) {
-            Replica.Type typeToCreate;
-            if (createdNrtReplicas < numNrtReplicas) {
-              createdNrtReplicas++;
-              typeToCreate = Replica.Type.NRT;
-            } else if (createdTlogReplicas < numTlogReplicas) {
-              createdTlogReplicas++;
-              typeToCreate = Replica.Type.TLOG;
-            } else {
-              createdPullReplicas++;
-              typeToCreate = Replica.Type.PULL;
-              assert createdPullReplicas <= numPullReplicas: "Unexpected number of replicas";
-            }
-
-            log.debug("Adding replica for shard={} collection={} of type {} ", slice.getName(), restoreCollection, typeToCreate);
-            HashMap<String, Object> propMap = new HashMap<>();
-            propMap.put(COLLECTION_PROP, restoreCollectionName);
-            propMap.put(SHARD_ID_PROP, slice.getName());
-            propMap.put(REPLICA_TYPE, typeToCreate.name());
-
-            // Get the first node matching the shard to restore in
-            for (ReplicaPosition replicaPosition : replicaPositions) {
-              if (Objects.equals(replicaPosition.shard, slice.getName())) {
-                propMap.put(CoreAdminParams.NODE, replicaPosition.node);
-                replicaPositions.remove(replicaPosition);
-                break;
-              }
-            }
-
-            // add async param
-            if (asyncId != null) {
-              propMap.put(ASYNC, asyncId);
-            }
-            ocmh.addPropertyParams(message, propMap);
-
-            ocmh.addReplica(zkStateReader.getClusterState(), new ZkNodeProps(propMap), results, null);
-          }
-        }
-      }
-
-      log.info("Completed restoring collection={} backupName={}", restoreCollection, backupName);
-    } finally {
-      if (sessionWrapper != null) sessionWrapper.release();
-    }
-  }
-
-  private int getInt(ZkNodeProps message, String propertyName, Integer count, int defaultValue) {
-    Integer value = message.getInt(propertyName, count);
-    return value != null ? value : defaultValue;
-  }
-}

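RestoreCmd drives the RESTORE action. Continuing the generic-request sketches, a restore call looks roughly
like this; the collection name, backup name, and location are all placeholders:

  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionParams.CollectionAction.RESTORE.toString());
  params.set("collection", "restoredCollection"); // placeholder target collection
  params.set("name", "myBackupName");             // placeholder backup name
  params.set("location", "/path/to/backups");     // placeholder backup location
  QueryRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  cloudClient.request(request);
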

[08/15] lucene-solr:master: SOLR-11817: Move Collections API classes to its own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
new file mode 100644
index 0000000..9529ee1
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -0,0 +1,1011 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.SolrResponse;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.cloud.LockTree;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.OverseerMessageHandler;
+import org.apache.solr.cloud.OverseerNodePrioritizer;
+import org.apache.solr.cloud.OverseerSolrResponse;
+import org.apache.solr.cloud.OverseerTaskProcessor;
+import org.apache.solr.cloud.Stats;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrCloseable;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.SuppressForbidden;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.handler.component.ShardHandlerFactory;
+import org.apache.solr.handler.component.ShardRequest;
+import org.apache.solr.handler.component.ShardResponse;
+import org.apache.solr.util.DefaultSolrThreadFactory;
+import org.apache.solr.util.RTimer;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
+import static org.apache.solr.common.cloud.DocCollection.SNITCH;
+import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.util.Utils.makeMap;
+
+/**
+ * An {@link OverseerMessageHandler} that handles Collections API related
+ * overseer messages.
+ */
+public class OverseerCollectionMessageHandler implements OverseerMessageHandler, SolrCloseable {
+
+  public static final String NUM_SLICES = "numShards";
+
+  public static final boolean CREATE_NODE_SET_SHUFFLE_DEFAULT = true;
+  public static final String CREATE_NODE_SET_SHUFFLE = CollectionAdminParams.CREATE_NODE_SET_SHUFFLE_PARAM;
+  public static final String CREATE_NODE_SET_EMPTY = "EMPTY";
+  public static final String CREATE_NODE_SET = CollectionAdminParams.CREATE_NODE_SET_PARAM;
+
+  public static final String ROUTER = "router";
+
+  public static final String SHARDS_PROP = "shards";
+
+  public static final String REQUESTID = "requestid";
+
+  public static final String COLL_CONF = "collection.configName";
+
+  public static final String COLL_PROP_PREFIX = "property.";
+
+  public static final String ONLY_IF_DOWN = "onlyIfDown";
+
+  public static final String SHARD_UNIQUE = "shardUnique";
+
+  public static final String ONLY_ACTIVE_NODES = "onlyactivenodes";
+
+  static final String SKIP_CREATE_REPLICA_IN_CLUSTER_STATE = "skipCreateReplicaInClusterState";
+
+  public static final Map<String, Object> COLL_PROPS = Collections.unmodifiableMap(makeMap(
+      ROUTER, DocRouter.DEFAULT_NAME,
+      ZkStateReader.REPLICATION_FACTOR, "1",
+      ZkStateReader.NRT_REPLICAS, "1",
+      ZkStateReader.TLOG_REPLICAS, "0",
+      ZkStateReader.PULL_REPLICAS, "0",
+      ZkStateReader.MAX_SHARDS_PER_NODE, "1",
+      ZkStateReader.AUTO_ADD_REPLICAS, "false",
+      DocCollection.RULE, null,
+      POLICY, null,
+      SNITCH, null));
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  Overseer overseer;
+  ShardHandlerFactory shardHandlerFactory;
+  String adminPath;
+  ZkStateReader zkStateReader;
+  SolrCloudManager cloudManager;
+  String myId;
+  Stats stats;
+  TimeSource timeSource;
+
+  // Tracks the collections that are currently being processed by a running task.
+  // This is used for handling mutual exclusion of the tasks.
+  private final LockTree lockTree = new LockTree();
+  ExecutorService tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
+      new SynchronousQueue<>(),
+      new DefaultSolrThreadFactory("OverseerCollectionMessageHandlerThreadFactory"));
+
+  protected static final Random RANDOM;
+  static {
+    // We try to make things reproducible in the context of our tests by initializing the random instance
+    // based on the current seed
+    String seed = System.getProperty("tests.seed");
+    if (seed == null) {
+      RANDOM = new Random();
+    } else {
+      RANDOM = new Random(seed.hashCode());
+    }
+  }
+
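+  // Dispatch table mapping each CollectionAction to the Cmd that executes it;
+  // populated in the constructor and consulted by processMessage().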
+  final Map<CollectionAction, Cmd> commandMap;
+
+  private volatile boolean isClosed;
+
+  public OverseerCollectionMessageHandler(ZkStateReader zkStateReader, String myId,
+                                        final ShardHandlerFactory shardHandlerFactory,
+                                        String adminPath,
+                                        Stats stats,
+                                        Overseer overseer,
+                                        OverseerNodePrioritizer overseerPrioritizer) {
+    this.zkStateReader = zkStateReader;
+    this.shardHandlerFactory = shardHandlerFactory;
+    this.adminPath = adminPath;
+    this.myId = myId;
+    this.stats = stats;
+    this.overseer = overseer;
+    this.cloudManager = overseer.getSolrCloudManager();
+    this.timeSource = cloudManager.getTimeSource();
+    this.isClosed = false;
+    commandMap = new ImmutableMap.Builder<CollectionAction, Cmd>()
+        .put(REPLACENODE, new ReplaceNodeCmd(this))
+        .put(DELETENODE, new DeleteNodeCmd(this))
+        .put(BACKUP, new BackupCmd(this))
+        .put(RESTORE, new RestoreCmd(this))
+        .put(CREATESNAPSHOT, new CreateSnapshotCmd(this))
+        .put(DELETESNAPSHOT, new DeleteSnapshotCmd(this))
+        .put(SPLITSHARD, new SplitShardCmd(this))
+        .put(ADDROLE, new OverseerRoleCmd(this, ADDROLE, overseerPrioritizer))
+        .put(REMOVEROLE, new OverseerRoleCmd(this, REMOVEROLE, overseerPrioritizer))
+        .put(MOCK_COLL_TASK, this::mockOperation)
+        .put(MOCK_SHARD_TASK, this::mockOperation)
+        .put(MOCK_REPLICA_TASK, this::mockOperation)
+        .put(MIGRATESTATEFORMAT, this::migrateStateFormat)
+        .put(CREATESHARD, new CreateShardCmd(this))
+        .put(MIGRATE, new MigrateCmd(this))
+        .put(CREATE, new CreateCollectionCmd(this))
+        .put(MODIFYCOLLECTION, this::modifyCollection)
+        .put(ADDREPLICAPROP, this::processReplicaAddPropertyCommand)
+        .put(DELETEREPLICAPROP, this::processReplicaDeletePropertyCommand)
+        .put(BALANCESHARDUNIQUE, this::balanceProperty)
+        .put(REBALANCELEADERS, this::processRebalanceLeaders)
+        .put(RELOAD, this::reloadCollection)
+        .put(DELETE, new DeleteCollectionCmd(this))
+        .put(CREATEALIAS, new CreateAliasCmd(this))
+        .put(DELETEALIAS, new DeleteAliasCmd(this))
+        .put(ROUTEDALIAS_CREATECOLL, new RoutedAliasCreateCollectionCmd(this))
+        .put(OVERSEERSTATUS, new OverseerStatusCmd(this))
+        .put(DELETESHARD, new DeleteShardCmd(this))
+        .put(DELETEREPLICA, new DeleteReplicaCmd(this))
+        .put(ADDREPLICA, new AddReplicaCmd(this))
+        .put(MOVEREPLICA, new MoveReplicaCmd(this))
+        .put(UTILIZENODE, new UtilizeNodeCmd(this))
+        .build();
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public SolrResponse processMessage(ZkNodeProps message, String operation) {
+    log.debug("OverseerCollectionMessageHandler.processMessage : {} , {}", operation, message);
+
+    NamedList results = new NamedList();
+    try {
+      CollectionAction action = getCollectionAction(operation);
+      Cmd command = commandMap.get(action);
+      if (command != null) {
+        command.call(cloudManager.getClusterStateProvider().getClusterState(), message, results);
+      } else {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation: " + operation);
+      }
+    } catch (Exception e) {
+      String collName = message.getStr("collection");
+      if (collName == null) collName = message.getStr(NAME);
+
+      if (collName == null) {
+        SolrException.log(log, "Operation " + operation + " failed", e);
+      } else  {
+        SolrException.log(log, "Collection: " + collName + " operation: " + operation
+            + " failed", e);
+      }
+
+      results.add("Operation " + operation + " caused exception:", e);
+      SimpleOrderedMap nl = new SimpleOrderedMap();
+      nl.add("msg", e.getMessage());
+      nl.add("rspCode", e instanceof SolrException ? ((SolrException)e).code() : -1);
+      results.add("exception", nl);
+    }
+    return new OverseerSolrResponse(results);
+  }
+
+  @SuppressForbidden(reason = "Needs currentTimeMillis for mock requests")
+  private void mockOperation(ClusterState state, ZkNodeProps message, NamedList results) throws InterruptedException {
+    //only for test purposes
+    Thread.sleep(message.getInt("sleep", 1));
+    log.info("MOCK_TASK_EXECUTED time {} data {}", System.currentTimeMillis(), Utils.toJSONString(message));
+    results.add("MOCK_FINISHED", System.currentTimeMillis());
+  }
+
+  private CollectionAction getCollectionAction(String operation) {
+    CollectionAction action = CollectionAction.get(operation);
+    if (action == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation: " + operation);
+    }
+    return action;
+  }
+
+  private void reloadCollection(ClusterState clusterState, ZkNodeProps message, NamedList results) {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminAction.RELOAD.toString());
+
+    String asyncId = message.getStr(ASYNC);
+    Map<String, String> requestMap = null;
+    if (asyncId != null) {
+      requestMap = new HashMap<>();
+    }
+    collectionCmd(message, params, results, Replica.State.ACTIVE, asyncId, requestMap);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void processRebalanceLeaders(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws Exception {
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, CORE_NAME_PROP, ELECTION_NODE_PROP,
+        CORE_NODE_NAME_PROP, BASE_URL_PROP, REJOIN_AT_HEAD_PROP);
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(COLLECTION_PROP, message.getStr(COLLECTION_PROP));
+    params.set(SHARD_ID_PROP, message.getStr(SHARD_ID_PROP));
+    params.set(REJOIN_AT_HEAD_PROP, message.getStr(REJOIN_AT_HEAD_PROP));
+    params.set(CoreAdminParams.ACTION, CoreAdminAction.REJOINLEADERELECTION.toString());
+    params.set(CORE_NAME_PROP, message.getStr(CORE_NAME_PROP));
+    params.set(CORE_NODE_NAME_PROP, message.getStr(CORE_NODE_NAME_PROP));
+    params.set(ELECTION_NODE_PROP, message.getStr(ELECTION_NODE_PROP));
+    params.set(BASE_URL_PROP, message.getStr(BASE_URL_PROP));
+
+    String baseUrl = message.getStr(BASE_URL_PROP);
+    ShardRequest sreq = new ShardRequest();
+    sreq.nodeName = message.getStr(ZkStateReader.CORE_NAME_PROP);
+    // yes, they must use the same admin handler path everywhere...
+    params.set("qt", adminPath);
+    sreq.purpose = ShardRequest.PURPOSE_PRIVATE;
+    sreq.shards = new String[] {baseUrl};
+    sreq.actualShards = sreq.shards;
+    sreq.params = params;
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    shardHandler.submit(sreq, baseUrl, sreq.params);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void processReplicaAddPropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws Exception {
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP, PROPERTY_VALUE_PROP);
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
+    Map<String, Object> propMap = new HashMap<>();
+    propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICAPROP.toLower());
+    propMap.putAll(message.getProperties());
+    ZkNodeProps m = new ZkNodeProps(propMap);
+    inQueue.offer(Utils.toJSON(m));
+  }
+
+  private void processReplicaDeletePropertyCommand(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws Exception {
+    checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP, PROPERTY_PROP);
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
+    Map<String, Object> propMap = new HashMap<>();
+    propMap.put(Overseer.QUEUE_OPERATION, DELETEREPLICAPROP.toLower());
+    propMap.putAll(message.getProperties());
+    ZkNodeProps m = new ZkNodeProps(propMap);
+    inQueue.offer(Utils.toJSON(m));
+  }
+
+  private void balanceProperty(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    if (StringUtils.isBlank(message.getStr(COLLECTION_PROP)) || StringUtils.isBlank(message.getStr(PROPERTY_PROP))) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "The '" + COLLECTION_PROP + "' and '" + PROPERTY_PROP +
+              "' parameters are required for the BALANCESHARDUNIQUE operation, no action taken");
+    }
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkClient);
+    Map<String, Object> propMap = new HashMap<>();
+    propMap.put(Overseer.QUEUE_OPERATION, BALANCESHARDUNIQUE.toLower());
+    propMap.putAll(message.getProperties());
+    inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+  }
+
+  /**
+   * Walks the tree of collection status to verify that any replica not reporting a "down" status
+   * is on a live node; any replica reporting its status as "active" whose node is not live is
+   * marked as "down". Used by CLUSTERSTATUS.
+   * @param liveNodes List of currently live node names.
+   * @param collectionProps Map of collection status information pulled directly from ZooKeeper.
+   */
+  @SuppressWarnings("unchecked")
+  protected void crossCheckReplicaStateWithLiveNodes(List<String> liveNodes, NamedList<Object> collectionProps) {
+    Iterator<Map.Entry<String,Object>> colls = collectionProps.iterator();
+    while (colls.hasNext()) {
+      Map.Entry<String,Object> next = colls.next();
+      Map<String,Object> collMap = (Map<String,Object>)next.getValue();
+      Map<String,Object> shards = (Map<String,Object>)collMap.get("shards");
+      for (Object nextShard : shards.values()) {
+        Map<String,Object> shardMap = (Map<String,Object>)nextShard;
+        Map<String,Object> replicas = (Map<String,Object>)shardMap.get("replicas");
+        for (Object nextReplica : replicas.values()) {
+          Map<String,Object> replicaMap = (Map<String,Object>)nextReplica;
+          if (Replica.State.getState((String) replicaMap.get(ZkStateReader.STATE_PROP)) != Replica.State.DOWN) {
+            // not down, so verify the node is live
+            String node_name = (String)replicaMap.get(ZkStateReader.NODE_NAME_PROP);
+            if (!liveNodes.contains(node_name)) {
+              // node is not live, so this replica is actually down
+              replicaMap.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Get collection status from cluster state.
+   * Can return the status restricted to a given set of shard names.
+   *
+   * @param collection collection map parsed from JSON-serialized {@link ClusterState}
+   * @param name collection name
+   * @param requestedShards a set of shards to be returned in the status.
+   *                        An empty or null value indicates <b>all</b> shards.
+   * @return map of collection properties
+   */
+  @SuppressWarnings("unchecked")
+  private Map<String, Object> getCollectionStatus(Map<String, Object> collection, String name, Set<String> requestedShards) {
+    if (collection == null)  {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
+    }
+    if (requestedShards == null || requestedShards.isEmpty()) {
+      return collection;
+    } else {
+      Map<String, Object> shards = (Map<String, Object>) collection.get("shards");
+      Map<String, Object>  selected = new HashMap<>();
+      for (String selectedShard : requestedShards) {
+        if (!shards.containsKey(selectedShard)) {
+          throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + name + " shard: " + selectedShard + " not found");
+        }
+        selected.put(selectedShard, shards.get(selectedShard));
+        collection.put("shards", selected);
+      }
+      return collection;
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+      throws Exception {
+    ((DeleteReplicaCmd) commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, message, results, onComplete);
+  }
+
+  boolean waitForCoreNodeGone(String collectionName, String shard, String replicaName, int timeoutms) throws InterruptedException {
+    TimeOut timeout = new TimeOut(timeoutms, TimeUnit.MILLISECONDS, timeSource);
+    while (! timeout.hasTimedOut()) {
+      timeout.sleep(100);
+      DocCollection docCollection = zkStateReader.getClusterState().getCollection(collectionName);
+      if (docCollection == null) { // someone already deleted the collection
+        return true;
+      }
+      Slice slice = docCollection.getSlice(shard);
+      if (slice == null || slice.getReplica(replicaName) == null) {
+        return true;
+      }
+    }
+    // replica still exists after the timeout
+    return false;
+  }
+
+  void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws Exception {
+    ZkNodeProps m = new ZkNodeProps(
+        Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
+        ZkStateReader.CORE_NAME_PROP, core,
+        ZkStateReader.NODE_NAME_PROP, replica.getStr(ZkStateReader.NODE_NAME_PROP),
+        ZkStateReader.COLLECTION_PROP, collectionName,
+        ZkStateReader.CORE_NODE_NAME_PROP, replicaName,
+        ZkStateReader.BASE_URL_PROP, replica.getStr(ZkStateReader.BASE_URL_PROP));
+    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+  }
+
+  void checkRequired(ZkNodeProps message, String... props) {
+    for (String prop : props) {
+      if (message.get(prop) == null) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, StrUtils.join(Arrays.asList(props), ',') + " are required params");
+      }
+    }
+  }
+
+  // TODO: should this be removed in the next release?
+  private void migrateStateFormat(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    final String collectionName = message.getStr(COLLECTION_PROP);
+
+    boolean firstLoop = true;
+    // wait for a while until the state format changes
+    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+    while (! timeout.hasTimedOut()) {
+      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
+      if (collection == null) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection: " + collectionName + " not found");
+      }
+      if (collection.getStateFormat() == 2) {
+        // Done.
+        results.add("success", new SimpleOrderedMap<>());
+        return;
+      }
+
+      if (firstLoop) {
+        // Actually queue the migration command.
+        firstLoop = false;
+        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, MIGRATESTATEFORMAT.toLower(), COLLECTION_PROP, collectionName);
+        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+      }
+      timeout.sleep(100);
+    }
+    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not migrate state format for collection: " + collectionName);
+  }
+
+  void commit(NamedList results, String slice, Replica parentShardLeader) {
+    log.debug("Calling soft commit to make sub shard updates visible");
+    String coreUrl = new ZkCoreNodeProps(parentShardLeader).getCoreUrl();
+    // HttpShardHandler is hard-coded to send a QueryRequest, hence we go direct,
+    // and we force open a searcher so that we have documents to show upon switching states
+    UpdateResponse updateResponse = null;
+    try {
+      updateResponse = softCommit(coreUrl);
+      processResponse(results, null, coreUrl, updateResponse, slice, Collections.emptySet());
+    } catch (Exception e) {
+      processResponse(results, e, coreUrl, updateResponse, slice, Collections.emptySet());
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to call distrib softCommit on: " + coreUrl, e);
+    }
+  }
+
+  static UpdateResponse softCommit(String url) throws SolrServerException, IOException {
+
+    try (HttpSolrClient client = new HttpSolrClient.Builder(url)
+        .withConnectionTimeout(30000)
+        .withSocketTimeout(120000)
+        .build()) {
+      UpdateRequest ureq = new UpdateRequest();
+      ureq.setParams(new ModifiableSolrParams());
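+      // editorial gloss of the setAction overload used below: waitFlush=false,
+      // waitSearcher=true, softCommit=true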
+      ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
+      return ureq.process(client);
+    }
+  }
+
+  String waitForCoreNodeName(String collectionName, String msgNodeName, String msgCore) {
+    int retryCount = 320;
+    while (retryCount-- > 0) {
+      final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
+      if (docCollection != null && docCollection.getSlicesMap() != null) {
+        Map<String,Slice> slicesMap = docCollection.getSlicesMap();
+        for (Slice slice : slicesMap.values()) {
+          for (Replica replica : slice.getReplicas()) {
+            // TODO: for really large clusters, we could 'index' on this
+
+            String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
+            String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+
+            if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
+              return replica.getName();
+            }
+          }
+        }
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+    }
+    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
+  }
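+  // Timing note (editorial): 320 retries x 1s sleep gives roughly a 320-second budget
+  // for the coreNodeName to appear in cluster state before giving up.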
+
+  void waitForNewShard(String collectionName, String sliceName) throws KeeperException, InterruptedException {
+    log.debug("Waiting for slice {} of collection {} to be available", sliceName, collectionName);
+    RTimer timer = new RTimer();
+    int retryCount = 320;
+    while (retryCount-- > 0) {
+      DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
+      if (collection == null) {
+        throw new SolrException(ErrorCode.SERVER_ERROR,
+            "Unable to find collection: " + collectionName + " in clusterstate");
+      }
+      Slice slice = collection.getSlice(sliceName);
+      if (slice != null) {
+        log.debug("Waited for {}ms for slice {} of collection {} to be available",
+            timer.getTime(), sliceName, collectionName);
+        return;
+      }
+      Thread.sleep(1000);
+    }
+    throw new SolrException(ErrorCode.SERVER_ERROR,
+        "Could not find new slice " + sliceName + " in collection " + collectionName
+            + " even after waiting for " + timer.getTime() + "ms"
+    );
+  }
+
+  DocRouter.Range intersect(DocRouter.Range a, DocRouter.Range b) {
+    if (a == null || b == null || !a.overlaps(b)) {
+      return null;
+    } else if (a.isSubsetOf(b))
+      return a;
+    else if (b.isSubsetOf(a))
+      return b;
+    else if (b.includes(a.max)) {
+      return new DocRouter.Range(b.min, a.max);
+    } else  {
+      return new DocRouter.Range(a.min, b.max);
+    }
+  }
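+  // Worked example (editorial, hedged): for partially overlapping hash ranges
+  //   a = [0x0000, 0x7fff], b = [0x4000, 0xbfff]
+  // neither is a subset of the other and b.includes(a.max), so the result is
+  //   new DocRouter.Range(b.min, a.max) = [0x4000, 0x7fff], i.e. the common sub-range.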
+
+  void sendShardRequest(String nodeName, ModifiableSolrParams params,
+                        ShardHandler shardHandler, String asyncId,
+                        Map<String, String> requestMap) {
+    sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap, adminPath, zkStateReader);
+  }
+
+  public static void sendShardRequest(String nodeName, ModifiableSolrParams params, ShardHandler shardHandler,
+                                      String asyncId, Map<String, String> requestMap, String adminPath,
+                                      ZkStateReader zkStateReader) {
+    if (asyncId != null) {
+      String coreAdminAsyncId = asyncId + Math.abs(System.nanoTime());
+      params.set(ASYNC, coreAdminAsyncId);
+      requestMap.put(nodeName, coreAdminAsyncId);
+    }
+
+    ShardRequest sreq = new ShardRequest();
+    params.set("qt", adminPath);
+    sreq.purpose = 1;
+    String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
+    sreq.shards = new String[]{replica};
+    sreq.actualShards = sreq.shards;
+    sreq.nodeName = nodeName;
+    sreq.params = params;
+
+    shardHandler.submit(sreq, replica, sreq.params);
+  }
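+  // Illustration (editorial assumption, not from the patch): for asyncId "req-1" the
+  // generated coreAdminAsyncId looks like "req-1123456789012345", unique per submitted
+  // core-admin call so that REQUESTSTATUS can later be polled per node via requestMap.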
+
+  void addPropertyParams(ZkNodeProps message, ModifiableSolrParams params) {
+    // Now add the property.key=value pairs
+    for (String key : message.keySet()) {
+      if (key.startsWith(COLL_PROP_PREFIX)) {
+        params.set(key, message.getStr(key));
+      }
+    }
+  }
+
+  void addPropertyParams(ZkNodeProps message, Map<String, Object> map) {
+    // Now add the property.key=value pairs
+    for (String key : message.keySet()) {
+      if (key.startsWith(COLL_PROP_PREFIX)) {
+        map.put(key, message.getStr(key));
+      }
+    }
+  }
+
+  private void modifyCollection(ClusterState clusterState, ZkNodeProps message, NamedList results)
+      throws Exception {
+    
+    final String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
+    //the rest of the processing is based on writing cluster state properties
+    //remove the property here to avoid any errors down the pipeline due to this property appearing
+    String configName = (String) message.getProperties().remove(COLL_CONF);
+    
+    if (configName != null) {
+      validateConfigOrThrowSolrException(configName);
+      boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
+      createConfNode(cloudManager.getDistribStateManager(), configName, collectionName, isLegacyCloud);
+      reloadCollection(null, new ZkNodeProps(NAME, collectionName), results);
+    }
+    
+    overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
+
+    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+    boolean areChangesVisible = true;
+    while (!timeout.hasTimedOut()) {
+      DocCollection collection = cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName);
+      areChangesVisible = true;
+      for (Map.Entry<String,Object> updateEntry : message.getProperties().entrySet()) {
+        String updateKey = updateEntry.getKey();
+        if (!updateKey.equals(ZkStateReader.COLLECTION_PROP)
+            && !updateKey.equals(Overseer.QUEUE_OPERATION)
+            && !collection.get(updateKey).equals(updateEntry.getValue())){
+          areChangesVisible = false;
+          break;
+        }
+      }
+      if (areChangesVisible) break;
+      timeout.sleep(100);
+    }
+
+    if (!areChangesVisible) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not modify collection " + message);
+    }
+  }
+
+  void cleanupCollection(String collectionName, NamedList results) throws Exception {
+    log.error("Cleaning up collection [{}].", collectionName);
+    Map<String, Object> props = makeMap(
+        Overseer.QUEUE_OPERATION, DELETE.toLower(),
+        NAME, collectionName);
+    commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
+  }
+
+  Map<String, Replica> waitToSeeReplicasInState(String collectionName, Collection<String> coreNames) throws InterruptedException {
+    Map<String, Replica> result = new HashMap<>();
+    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+    while (true) {
+      DocCollection coll = zkStateReader.getClusterState().getCollection(collectionName);
+      for (String coreName : coreNames) {
+        if (result.containsKey(coreName)) continue;
+        for (Slice slice : coll.getSlices()) {
+          for (Replica replica : slice.getReplicas()) {
+            if (coreName.equals(replica.getStr(ZkStateReader.CORE_NAME_PROP))) {
+              result.put(coreName, replica);
+              break;
+            }
+          }
+        }
+      }
+      
+      if (result.size() == coreNames.size()) {
+        return result;
+      } else {
+        log.debug("Expecting {} cores but found {}", coreNames, result);
+      }
+      if (timeout.hasTimedOut()) {
+        throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out waiting to see all replicas: " + coreNames + " in cluster state. Last state: " + coll);
+      }
+      
+      Thread.sleep(100);
+    }
+  }
+
+  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+      throws Exception {
+
+    return ((AddReplicaCmd) commandMap.get(ADDREPLICA)).addReplica(clusterState, message, results, onComplete);
+  }
+
+  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
+                        String asyncId, Map<String, String> requestMap) {
+    processResponses(results, shardHandler, abortOnError, msgOnError, asyncId, requestMap, Collections.emptySet());
+  }
+
+  void processResponses(NamedList results, ShardHandler shardHandler, boolean abortOnError, String msgOnError,
+                                String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
+    //Processes all shard responses
+    ShardResponse srsp;
+    do {
+      srsp = shardHandler.takeCompletedOrError();
+      if (srsp != null) {
+        processResponse(results, srsp, okayExceptions);
+        Throwable exception = srsp.getException();
+        if (abortOnError && exception != null)  {
+          // drain pending requests
+          while (srsp != null)  {
+            srsp = shardHandler.takeCompletedOrError();
+          }
+          throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError, exception);
+        }
+      }
+    } while (srsp != null);
+
+    // If the request is async, wait for the core admin calls to complete before returning
+    if (asyncId != null) {
+      waitForAsyncCallsToComplete(requestMap, results);
+      requestMap.clear();
+    }
+  }
+
+
+  void validateConfigOrThrowSolrException(String configName) throws IOException, KeeperException, InterruptedException {
+    boolean isValid = cloudManager.getDistribStateManager().hasData(ZkConfigManager.CONFIGS_ZKNODE + "/" + configName);
+    if (!isValid) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot find the specified config set: " + configName);
+    }
+  }
+
+  /**
+   * This doesn't validate the config (path) itself and is just responsible for creating the confNode.
+   * That check should be done before the config node is created.
+   */
+  public static void createConfNode(DistribStateManager stateManager, String configName, String coll, boolean isLegacyCloud) throws IOException, AlreadyExistsException, BadVersionException, KeeperException, InterruptedException {
+    
+    if (configName != null) {
+      String collDir = ZkStateReader.COLLECTIONS_ZKNODE + "/" + coll;
+      log.debug("creating collections conf node {} ", collDir);
+      byte[] data = Utils.toJSON(makeMap(ZkController.CONFIGNAME_PROP, configName));
+      if (stateManager.hasData(collDir)) {
+        stateManager.setData(collDir, data, -1);
+      } else {
+        stateManager.makePath(collDir, data, CreateMode.PERSISTENT, false);
+      }
+    } else {
+      if (isLegacyCloud) {
+        log.warn("Could not obtain config name");
+      } else {
+        throw new SolrException(ErrorCode.BAD_REQUEST,"Unable to get config name");
+      }
+    }
+  }
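+  // Sketch of the resulting znode (editorial, hedged): /collections/<coll> with data
+  //   {"configName":"myConfigSet"}
+  // where "myConfigSet" stands in for whatever configName was passed.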
+  
+  private void collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
+                             NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap) {
+    collectionCmd(message, params, results, stateMatcher, asyncId, requestMap, Collections.emptySet());
+  }
+
+
+  void collectionCmd(ZkNodeProps message, ModifiableSolrParams params,
+                     NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
+    log.info("Executing Collection Cmd: {}", params);
+    String collectionName = message.getStr(NAME);
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+
+    ClusterState clusterState = zkStateReader.getClusterState();
+    DocCollection coll = clusterState.getCollection(collectionName);
+    
+    for (Slice slice : coll.getSlices()) {
+      sliceCmd(clusterState, params, stateMatcher, slice, shardHandler, asyncId, requestMap);
+    }
+
+    processResponses(results, shardHandler, false, null, asyncId, requestMap, okayExceptions);
+  }
+
+  void sliceCmd(ClusterState clusterState, ModifiableSolrParams params, Replica.State stateMatcher,
+                Slice slice, ShardHandler shardHandler, String asyncId, Map<String, String> requestMap) {
+
+    for (Replica replica : slice.getReplicas()) {
+      if (clusterState.liveNodesContain(replica.getStr(ZkStateReader.NODE_NAME_PROP))
+          && (stateMatcher == null || Replica.State.getState(replica.getStr(ZkStateReader.STATE_PROP)) == stateMatcher)) {
+
+        // For thread safety, make only a shallow clone of the ModifiableSolrParams
+        ModifiableSolrParams cloneParams = new ModifiableSolrParams();
+        cloneParams.add(params);
+        cloneParams.set(CoreAdminParams.CORE, replica.getStr(ZkStateReader.CORE_NAME_PROP));
+
+        sendShardRequest(replica.getStr(ZkStateReader.NODE_NAME_PROP), cloneParams, shardHandler, asyncId, requestMap);
+      }
+    }
+  }
+  
+  private void processResponse(NamedList results, ShardResponse srsp, Set<String> okayExceptions) {
+    Throwable e = srsp.getException();
+    String nodeName = srsp.getNodeName();
+    SolrResponse solrResponse = srsp.getSolrResponse();
+    String shard = srsp.getShard();
+
+    processResponse(results, e, nodeName, solrResponse, shard, okayExceptions);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void processResponse(NamedList results, Throwable e, String nodeName, SolrResponse solrResponse, String shard, Set<String> okayExceptions) {
+    String rootThrowable = null;
+    if (e instanceof RemoteSolrException) {
+      rootThrowable = ((RemoteSolrException) e).getRootThrowable();
+    }
+
+    if (e != null && (rootThrowable == null || !okayExceptions.contains(rootThrowable))) {
+      log.error("Error from shard: " + shard, e);
+
+      SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
+      if (failure == null) {
+        failure = new SimpleOrderedMap();
+        results.add("failure", failure);
+      }
+
+      failure.add(nodeName, e.getClass().getName() + ":" + e.getMessage());
+
+    } else {
+
+      SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
+      if (success == null) {
+        success = new SimpleOrderedMap();
+        results.add("success", success);
+      }
+
+      success.add(nodeName, solrResponse.getResponse());
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private void waitForAsyncCallsToComplete(Map<String, String> requestMap, NamedList results) {
+    for (Map.Entry<String, String> entry : requestMap.entrySet()) {
+      log.debug("Waiting for async call {}/{} to complete", entry.getKey(), entry.getValue());
+      results.add(entry.getValue(), waitForCoreAdminAsyncCallToComplete(entry.getKey(), entry.getValue()));
+    }
+  }
+
+  private NamedList waitForCoreAdminAsyncCallToComplete(String nodeName, String requestId) {
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminAction.REQUESTSTATUS.toString());
+    params.set(CoreAdminParams.REQUESTID, requestId);
+    int counter = 0;
+    ShardRequest sreq;
+    do {
+      sreq = new ShardRequest();
+      params.set("qt", adminPath);
+      sreq.purpose = 1;
+      String replica = zkStateReader.getBaseUrlForNodeName(nodeName);
+      sreq.shards = new String[] {replica};
+      sreq.actualShards = sreq.shards;
+      sreq.params = params;
+
+      shardHandler.submit(sreq, replica, sreq.params);
+
+      ShardResponse srsp;
+      do {
+        srsp = shardHandler.takeCompletedOrError();
+        if (srsp != null) {
+          NamedList results = new NamedList();
+          processResponse(results, srsp, Collections.emptySet());
+          if (srsp.getSolrResponse().getResponse() == null) {
+            NamedList response = new NamedList();
+            response.add("STATUS", "failed");
+            return response;
+          }
+          
+          String r = (String) srsp.getSolrResponse().getResponse().get("STATUS");
+          if (r.equals("running")) {
+            log.debug("The task is still RUNNING, continuing to wait.");
+            try {
+              Thread.sleep(1000);
+            } catch (InterruptedException e) {
+              Thread.currentThread().interrupt();
+            }
+            continue;
+
+          } else if (r.equals("completed")) {
+            log.debug("The task is COMPLETED, returning");
+            return srsp.getSolrResponse().getResponse();
+          } else if (r.equals("failed")) {
+            // TODO: Improve this. Get more information.
+            log.debug("The task is FAILED, returning");
+            return srsp.getSolrResponse().getResponse();
+          } else if (r.equals("notfound")) {
+            log.debug("The task was not found, retrying");
+            if (counter++ < 5) {
+              try {
+                Thread.sleep(1000);
+              } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+              }
+              break;
+            }
+            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request for requestId: " + requestId
+                + ", status: " + srsp.getSolrResponse().getResponse().get("STATUS") + ", retried " + counter + " times");
+          } else {
+            throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid status request " + srsp.getSolrResponse().getResponse().get("STATUS"));
+          }
+        }
+      } while (srsp != null);
+    } while(true);
+  }
+
+  @Override
+  public String getName() {
+    return "Overseer Collection Message Handler";
+  }
+
+  @Override
+  public String getTimerName(String operation) {
+    return "collection_" + operation;
+  }
+
+  @Override
+  public String getTaskKey(ZkNodeProps message) {
+    return message.containsKey(COLLECTION_PROP) ?
+      message.getStr(COLLECTION_PROP) : message.getStr(NAME);
+  }
+
+
+  private long sessionId = -1;
+  private LockTree.Session lockSession;
+
+  @Override
+  public Lock lockTask(ZkNodeProps message, OverseerTaskProcessor.TaskBatch taskBatch) {
+    if (lockSession == null || sessionId != taskBatch.getId()) {
+      // This is always called in the same thread, and each batch is supposed to
+      // come with a new taskBatch; so if the taskBatch changes we must create a
+      // new Session. Also check whether the running tasks are empty: if so,
+      // clear the lockTree so that locks are not 'leaked'.
+      if (taskBatch.getRunningTasks() == 0) lockTree.clear();
+      lockSession = lockTree.getSession();
+    }
+    return lockSession.lock(getCollectionAction(message.getStr(Overseer.QUEUE_OPERATION)),
+        Arrays.asList(
+            getTaskKey(message),
+            message.getStr(ZkStateReader.SHARD_ID_PROP),
+            message.getStr(ZkStateReader.REPLICA_PROP)));
+  }
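+  // Example (editorial sketch): a DELETEREPLICA message for collection "c1", shard
+  // "shard1", replica "core_node2" locks the path [c1, shard1, core_node2], leaving
+  // unrelated collections (and, depending on the action's lock level, other shards) free.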
+
+
+  @Override
+  public void close() throws IOException {
+    this.isClosed = true;
+    if (tpe != null) {
+      if (!tpe.isShutdown()) {
+        ExecutorUtil.shutdownAndAwaitTermination(tpe);
+      }
+    }
+  }
+
+  @Override
+  public boolean isClosed() {
+    return isClosed;
+  }
+
+  protected interface Cmd {
+    void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
new file mode 100644
index 0000000..16f9327
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerRoleCmd.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.solr.cloud.OverseerNodePrioritizer;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
+
+public class OverseerRoleCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+  private final CollectionAction operation;
+  private final OverseerNodePrioritizer overseerPrioritizer;
+
+
+
+  public OverseerRoleCmd(OverseerCollectionMessageHandler ocmh, CollectionAction operation, OverseerNodePrioritizer prioritizer) {
+    this.ocmh = ocmh;
+    this.operation = operation;
+    this.overseerPrioritizer = prioritizer;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    SolrZkClient zkClient = zkStateReader.getZkClient();
+    Map roles = null;
+    String node = message.getStr("node");
+
+    String roleName = message.getStr("role");
+    boolean nodeExists = zkClient.exists(ZkStateReader.ROLES, true);
+    if (nodeExists) {
+      roles = (Map) Utils.fromJSON(zkClient.getData(ZkStateReader.ROLES, null, new Stat(), true));
+    } else {
+      roles = new LinkedHashMap(1);
+    }
+
+    List nodeList = (List) roles.get(roleName);
+    if (nodeList == null) roles.put(roleName, nodeList = new ArrayList());
+    if (ADDROLE == operation) {
+      log.info("Overseer role added to {}", node);
+      if (!nodeList.contains(node)) nodeList.add(node);
+    } else if (REMOVEROLE == operation) {
+      log.info("Overseer role removed from {}", node);
+      nodeList.remove(node);
+    }
+
+    if (nodeExists) {
+      zkClient.setData(ZkStateReader.ROLES, Utils.toJSON(roles), true);
+    } else {
+      zkClient.create(ZkStateReader.ROLES, Utils.toJSON(roles), CreateMode.PERSISTENT, true);
+    }
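+    // The persisted ZkStateReader.ROLES node then holds JSON of roughly this shape
+    // (illustrative example, node names assumed):
+    //   {"overseer": ["127.0.0.1:8983_solr", "127.0.0.1:7574_solr"]}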
+    // If there are too many nodes this command may time out, and dedicated overseers
+    // are most likely created when there are many nodes. So do this operation in a separate thread.
+    new Thread(() -> {
+      try {
+        overseerPrioritizer.prioritizeOverseerNodes(ocmh.myId);
+      } catch (Exception e) {
+        log.error("Error in prioritizing Overseer", e);
+      }
+
+    }).start();
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
new file mode 100644
index 0000000..6f0bbfd
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import com.codahale.metrics.Timer;
+import org.apache.solr.cloud.OverseerTaskProcessor;
+import org.apache.solr.cloud.Stats;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.util.stats.MetricUtils;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class OverseerStatusCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public OverseerStatusCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    String leaderNode = OverseerTaskProcessor.getLeaderNode(zkStateReader.getZkClient());
+    results.add("leader", leaderNode);
+    Stat stat = new Stat();
+    zkStateReader.getZkClient().getData("/overseer/queue",null, stat, true);
+    results.add("overseer_queue_size", stat.getNumChildren());
+    stat = new Stat();
+    zkStateReader.getZkClient().getData("/overseer/queue-work",null, stat, true);
+    results.add("overseer_work_queue_size", stat.getNumChildren());
+    stat = new Stat();
+    zkStateReader.getZkClient().getData("/overseer/collection-queue-work",null, stat, true);
+    results.add("overseer_collection_queue_size", stat.getNumChildren());
+
+    NamedList overseerStats = new NamedList();
+    NamedList collectionStats = new NamedList();
+    NamedList stateUpdateQueueStats = new NamedList();
+    NamedList workQueueStats = new NamedList();
+    NamedList collectionQueueStats = new NamedList();
+    Stats stats = ocmh.stats;
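+    // The substring offsets below strip the stat-key prefixes registered elsewhere:
+    // "collection_" (11 chars), "/overseer/queue_" (16), "/overseer/queue-work_" (21),
+    // "/overseer/collection-queue-work_" (32).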
+    for (Map.Entry<String, Stats.Stat> entry : stats.getStats().entrySet()) {
+      String key = entry.getKey();
+      NamedList<Object> lst = new SimpleOrderedMap<>();
+      if (key.startsWith("collection_"))  {
+        collectionStats.add(key.substring(11), lst);
+        int successes = stats.getSuccessCount(entry.getKey());
+        int errors = stats.getErrorCount(entry.getKey());
+        lst.add("requests", successes);
+        lst.add("errors", errors);
+        List<Stats.FailedOp> failureDetails = stats.getFailureDetails(key);
+        if (failureDetails != null) {
+          List<SimpleOrderedMap<Object>> failures = new ArrayList<>();
+          for (Stats.FailedOp failedOp : failureDetails) {
+            SimpleOrderedMap<Object> fail = new SimpleOrderedMap<>();
+            fail.add("request", failedOp.req.getProperties());
+            fail.add("response", failedOp.resp.getResponse());
+            failures.add(fail);
+          }
+          lst.add("recent_failures", failures);
+        }
+      } else if (key.startsWith("/overseer/queue_"))  {
+        stateUpdateQueueStats.add(key.substring(16), lst);
+      } else if (key.startsWith("/overseer/queue-work_"))  {
+        workQueueStats.add(key.substring(21), lst);
+      } else if (key.startsWith("/overseer/collection-queue-work_"))  {
+        collectionQueueStats.add(key.substring(32), lst);
+      } else  {
+        // overseer stats
+        overseerStats.add(key, lst);
+        int successes = stats.getSuccessCount(entry.getKey());
+        int errors = stats.getErrorCount(entry.getKey());
+        lst.add("requests", successes);
+        lst.add("errors", errors);
+      }
+      Timer timer = entry.getValue().requestTime;
+      MetricUtils.addMetrics(lst, timer);
+    }
+    results.add("overseer_operations", overseerStats);
+    results.add("collection_operations", collectionStats);
+    results.add("overseer_queue", stateUpdateQueueStats);
+    results.add("overseer_internal_queue", workQueueStats);
+    results.add("collection_queue", collectionQueueStats);
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
new file mode 100644
index 0000000..35d2379
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.solr.cloud.ActiveReplicaWatcher;
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CollectionStateWatcher;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class ReplaceNodeCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public ReplaceNodeCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    String source = message.getStr(CollectionParams.SOURCE_NODE, message.getStr("source"));
+    String target = message.getStr(CollectionParams.TARGET_NODE, message.getStr("target"));
+    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
+    if (source == null || target == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sourceNode and targetNode are required params" );
+    }
+    String async = message.getStr("async");
+    int timeout = message.getInt("timeout", 10 * 60); // 10 minutes
+    boolean parallel = message.getBool("parallel", false);
+    ClusterState clusterState = zkStateReader.getClusterState();
+
+    if (!clusterState.liveNodesContain(source)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + source + " is not live");
+    }
+    if (!clusterState.liveNodesContain(target)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target Node: " + target + " is not live");
+    }
+    List<ZkNodeProps> sourceReplicas = getReplicasOfNode(source, clusterState);
+    // how many leaders are we moving? for these replicas we have to make sure that either:
+    // * another existing replica can become a leader, or
+    // * we wait until the newly created replica completes recovery (and can become the new leader)
+    // If waitForFinalState=true we wait for all replicas
+    int numLeaders = 0;
+    for (ZkNodeProps props : sourceReplicas) {
+      if (props.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
+        numLeaders++;
+      }
+    }
+    // map of collectionName_coreNodeName to watchers
+    Map<String, CollectionStateWatcher> watchers = new HashMap<>();
+    List<ZkNodeProps> createdReplicas = new ArrayList<>();
+
+    AtomicBoolean anyOneFailed = new AtomicBoolean(false);
+    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(sourceReplicas.size(), ocmh);
+
+    SolrCloseableLatch replicasToRecover = new SolrCloseableLatch(numLeaders, ocmh);
+
+    for (ZkNodeProps sourceReplica : sourceReplicas) {
+      NamedList nl = new NamedList();
+      log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+      ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, target);
+      if (async != null) msg.getProperties().put(ASYNC, async);
+      final ZkNodeProps addedReplica = ocmh.addReplica(clusterState,
+          msg, nl, () -> {
+            countDownLatch.countDown();
+            if (nl.get("failure") != null) {
+              String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
+                  " on node=%s", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+              log.warn(errorString);
+              // one replica creation failed. Make the best attempt to
+              // delete all the replicas created so far in the target
+              // and exit
+              synchronized (results) {
+                results.add("failure", errorString);
+                anyOneFailed.set(true);
+              }
+            } else {
+              log.debug("Successfully created replica for collection={} shard={} on node={}",
+                  sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
+            }
+          });
+
+      if (addedReplica != null) {
+        createdReplicas.add(addedReplica);
+        if (sourceReplica.getBool(ZkStateReader.LEADER_PROP, false) || waitForFinalState) {
+          String shardName = sourceReplica.getStr(SHARD_ID_PROP);
+          String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
+          String collectionName = sourceReplica.getStr(COLLECTION_PROP);
+          String key = collectionName + "_" + replicaName;
+          CollectionStateWatcher watcher;
+          if (waitForFinalState) {
+            watcher = new ActiveReplicaWatcher(collectionName, null,
+                Collections.singletonList(addedReplica.getStr(ZkStateReader.CORE_NAME_PROP)), replicasToRecover);
+          } else {
+            watcher = new LeaderRecoveryWatcher(collectionName, shardName, replicaName,
+                addedReplica.getStr(ZkStateReader.CORE_NAME_PROP), replicasToRecover);
+          }
+          watchers.put(key, watcher);
+          log.debug("--- adding " + key + ", " + watcher);
+          zkStateReader.registerCollectionStateWatcher(collectionName, watcher);
+        } else {
+          log.debug("--- not waiting for " + addedReplica);
+        }
+      }
+    }
+
+    log.debug("Waiting for replicas to be added");
+    if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
+      log.info("Timed out waiting for replicas to be added");
+      anyOneFailed.set(true);
+    } else {
+      log.debug("Finished waiting for replicas to be added");
+    }
+
+    // now wait for leader replicas to recover
+    log.debug("Waiting for " + numLeaders + " leader replicas to recover");
+    if (!replicasToRecover.await(timeout, TimeUnit.SECONDS)) {
+      log.info("Timed out waiting for " + replicasToRecover.getCount() + " leader replicas to recover");
+      anyOneFailed.set(true);
+    } else {
+      log.debug("Finished waiting for leader replicas to recover");
+    }
+    // remove the watchers, we're done either way
+    for (Map.Entry<String, CollectionStateWatcher> e : watchers.entrySet()) {
+      zkStateReader.removeCollectionStateWatcher(e.getKey(), e.getValue());
+    }
+    if (anyOneFailed.get()) {
+      log.info("Failed to create some replicas. Cleaning up all replicas on target node");
+      SolrCloseableLatch cleanupLatch = new SolrCloseableLatch(createdReplicas.size(), ocmh);
+      for (ZkNodeProps createdReplica : createdReplicas) {
+        NamedList deleteResult = new NamedList();
+        try {
+          ocmh.deleteReplica(zkStateReader.getClusterState(), createdReplica.plus("parallel", "true"), deleteResult, () -> {
+            cleanupLatch.countDown();
+            if (deleteResult.get("failure") != null) {
+              synchronized (results) {
+                results.add("failure", "Could not cleanup, because of : " + deleteResult.get("failure"));
+              }
+            }
+          });
+        } catch (KeeperException e) {
+          cleanupLatch.countDown();
+          log.warn("Error deleting replica ", e);
+        } catch (Exception e) {
+          log.warn("Error deleting replica ", e);
+          cleanupLatch.countDown();
+          throw e;
+        }
+      }
+      cleanupLatch.await(5, TimeUnit.MINUTES);
+      return;
+    }
+
+
+    // reaching this far means all replicas were recreated successfully,
+    // so now clean up the replicas on the source node
+    DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source, async);
+    results.add("success", "REPLACENODE action completed successfully from: " + source + " to: " + target);
+  }
+
+  static List<ZkNodeProps> getReplicasOfNode(String source, ClusterState state) {
+    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
+    for (Map.Entry<String, DocCollection> e : state.getCollectionsMap().entrySet()) {
+      for (Slice slice : e.getValue().getSlices()) {
+        for (Replica replica : slice.getReplicas()) {
+          if (source.equals(replica.getNodeName())) {
+            ZkNodeProps props = new ZkNodeProps(
+                COLLECTION_PROP, e.getKey(),
+                SHARD_ID_PROP, slice.getName(),
+                ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
+                ZkStateReader.REPLICA_PROP, replica.getName(),
+                ZkStateReader.REPLICA_TYPE, replica.getType().name(),
+                ZkStateReader.LEADER_PROP, String.valueOf(replica.equals(slice.getLeader())),
+                CoreAdminParams.NODE, source);
+            sourceReplicas.add(props);
+          }
+        }
+      }
+    }
+    return sourceReplicas;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
new file mode 100644
index 0000000..09ceb55
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.backup.BackupManager;
+import org.apache.solr.core.backup.repository.BackupRepository;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public RestoreCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    // TODO maybe we can inherit createCollection's options/code
+
+    String restoreCollectionName = message.getStr(COLLECTION_PROP);
+    String backupName = message.getStr(NAME); // of backup
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    String asyncId = message.getStr(ASYNC);
+    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
+    Map<String, String> requestMap = new HashMap<>();
+
+    CoreContainer cc = ocmh.overseer.getCoreContainer();
+    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
+
+    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+    URI backupPath = repository.resolve(location, backupName);
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    BackupManager backupMgr = new BackupManager(repository, zkStateReader);
+
+    Properties properties = backupMgr.readBackupProperties(location, backupName);
+    String backupCollection = properties.getProperty(BackupManager.COLLECTION_NAME_PROP);
+    DocCollection backupCollectionState = backupMgr.readCollectionState(location, backupName, backupCollection);
+
+    // Get the Solr nodes to restore a collection.
+    final List<String> nodeList = Assign.getLiveOrLiveAndCreateNodeSetList(
+        zkStateReader.getClusterState().getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM);
+
+    int numShards = backupCollectionState.getActiveSlices().size();
+    
+    int numNrtReplicas = getInt(message, NRT_REPLICAS, backupCollectionState.getNumNrtReplicas(), 0);
+    if (numNrtReplicas == 0) {
+      numNrtReplicas = getInt(message, REPLICATION_FACTOR, backupCollectionState.getReplicationFactor(), 0);
+    }
+    int numTlogReplicas = getInt(message, TLOG_REPLICAS, backupCollectionState.getNumTlogReplicas(), 0);
+    int numPullReplicas = getInt(message, PULL_REPLICAS, backupCollectionState.getNumPullReplicas(), 0);
+    int totalReplicasPerShard = numNrtReplicas + numTlogReplicas + numPullReplicas;
+    
+    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, backupCollectionState.getMaxShardsPerNode());
+    int availableNodeCount = nodeList.size();
+    if ((numShards * totalReplicasPerShard) > (availableNodeCount * maxShardsPerNode)) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          String.format(Locale.ROOT, "Solr cloud with available number of nodes:%d is insufficient for"
+              + " restoring a collection with %d shards, total replicas per shard %d and maxShardsPerNode %d."
+              + " Consider increasing maxShardsPerNode value OR number of available nodes.",
+              availableNodeCount, numShards, totalReplicasPerShard, maxShardsPerNode));
+    }
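+    // Worked example (editorial): restoring 2 shards with totalReplicasPerShard=3 needs
+    // 6 core slots; with maxShardsPerNode=1 that requires at least 6 live nodes,
+    // otherwise the check above fails fast before any cores are created.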
+
+    //Upload the configs
+    String configName = (String) properties.get(OverseerCollectionMessageHandler.COLL_CONF);
+    String restoreConfigName = message.getStr(OverseerCollectionMessageHandler.COLL_CONF, configName);
+    if (zkStateReader.getConfigManager().configExists(restoreConfigName)) {
+      log.info("Using existing config {}", restoreConfigName);
+      //TODO add overwrite option?
+    } else {
+      log.info("Uploading config {}", restoreConfigName);
+      backupMgr.uploadConfigDir(location, backupName, configName, restoreConfigName);
+    }
+
+    log.info("Starting restore into collection={} with backup_name={} at location={}", restoreCollectionName, backupName,
+        location);
+
+    //Create core-less collection
+    {
+      Map<String, Object> propMap = new HashMap<>();
+      propMap.put(Overseer.QUEUE_OPERATION, CREATE.toString());
+      propMap.put("fromApi", "true"); // mostly true.  Prevents autoCreated=true in the collection state.
+      if (properties.get(STATE_FORMAT) == null) {
+        propMap.put(STATE_FORMAT, "2");
+      }
+
+      // inherit settings from input API, defaulting to the backup's setting.  Ex: replicationFactor
+      for (String collProp : OverseerCollectionMessageHandler.COLL_PROPS.keySet()) {
+        Object val = message.getProperties().getOrDefault(collProp, backupCollectionState.get(collProp));
+        if (val != null) {
+          propMap.put(collProp, val);
+        }
+      }
+
+      propMap.put(NAME, restoreCollectionName);
+      propMap.put(OverseerCollectionMessageHandler.CREATE_NODE_SET, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY); //no cores
+      propMap.put(OverseerCollectionMessageHandler.COLL_CONF, restoreConfigName);
+
+      // router.*
+      @SuppressWarnings("unchecked")
+      Map<String, Object> routerProps = (Map<String, Object>) backupCollectionState.getProperties().get(DocCollection.DOC_ROUTER);
+      for (Map.Entry<String, Object> pair : routerProps.entrySet()) {
+        propMap.put(DocCollection.DOC_ROUTER + "." + pair.getKey(), pair.getValue());
+      }
+
+      Set<String> sliceNames = backupCollectionState.getActiveSlicesMap().keySet();
+      if (backupCollectionState.getRouter() instanceof ImplicitDocRouter) {
+        propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, StrUtils.join(sliceNames, ','));
+      } else {
+        propMap.put(OverseerCollectionMessageHandler.NUM_SLICES, sliceNames.size());
+        // ClusterStateMutator.createCollection detects that "slices" is in fact a slice structure instead of a
+        //   list of names, and if so uses this instead of building it.  We clear the replica list.
+        Collection<Slice> backupSlices = backupCollectionState.getActiveSlices();
+        Map<String, Slice> newSlices = new LinkedHashMap<>(backupSlices.size());
+        for (Slice backupSlice : backupSlices) {
+          newSlices.put(backupSlice.getName(),
+              new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties()));
+        }
+        propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, newSlices);
+      }
+
+      ocmh.commandMap.get(CREATE).call(zkStateReader.getClusterState(), new ZkNodeProps(propMap), new NamedList());
+      // note: when createCollection() returns, the collection exists (no race)
+    }
+
+    DocCollection restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
+
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+
+    //Mark all shards in CONSTRUCTION STATE while we restore the data
+    {
+      //TODO might instead createCollection accept an initial state?  Is there a race?
+      Map<String, Object> propMap = new HashMap<>();
+      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+      for (Slice shard : restoreCollection.getSlices()) {
+        propMap.put(shard.getName(), Slice.State.CONSTRUCTION.toString());
+      }
+      propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
+      inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+    }
+
+    // TODO how do we leverage the RULE / SNITCH logic in createCollection?
+
+    ClusterState clusterState = zkStateReader.getClusterState();
+
+    List<String> sliceNames = new ArrayList<>();
+    restoreCollection.getSlices().forEach(x -> sliceNames.add(x.getName()));
+    PolicyHelper.SessionWrapper sessionWrapper = null;
+
+    try {
+      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(
+          ocmh.cloudManager, clusterState,
+          nodeList, restoreCollectionName,
+          message, sliceNames,
+          numNrtReplicas, numTlogReplicas, numPullReplicas);
+      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
+      //Create one replica per shard and copy backed up data to it
+      for (Slice slice : restoreCollection.getSlices()) {
+        log.debug("Adding replica for shard={} collection={} ", slice.getName(), restoreCollection);
+        HashMap<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD);
+        propMap.put(COLLECTION_PROP, restoreCollectionName);
+        propMap.put(SHARD_ID_PROP, slice.getName());
+
+        if (numNrtReplicas >= 1) {
+          propMap.put(REPLICA_TYPE, Replica.Type.NRT.name());
+        } else if (numTlogReplicas >= 1) {
+          propMap.put(REPLICA_TYPE, Replica.Type.TLOG.name());
+        } else {
+          throw new SolrException(ErrorCode.BAD_REQUEST, "Unexpected number of replicas, replicationFactor, " +
+              Replica.Type.NRT + " or " + Replica.Type.TLOG + " must be greater than 0");
+        }
+
+        // Pick the first replica position matching this shard and restore into that node
+        for (ReplicaPosition replicaPosition : replicaPositions) {
+          if (Objects.equals(replicaPosition.shard, slice.getName())) {
+            propMap.put(CoreAdminParams.NODE, replicaPosition.node);
+            replicaPositions.remove(replicaPosition);
+            break;
+          }
+        }
+
+        // add async param
+        if (asyncId != null) {
+          propMap.put(ASYNC, asyncId);
+        }
+        ocmh.addPropertyParams(message, propMap);
+
+        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), new NamedList(), null);
+      }
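+      // At this point each shard has exactly one replica; the backed-up data is copied into it next.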
+
+      // refresh the local copy of the collection state
+      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
+
+      //Copy data from backed up index to each replica
+      for (Slice slice : restoreCollection.getSlices()) {
+        ModifiableSolrParams params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.RESTORECORE.toString());
+        params.set(NAME, "snapshot." + slice.getName());
+        params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString());
+        params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
+
+        ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
+      }
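+      // Block until every RESTORECORE request has completed before marking the shards ACTIVE below.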
+      ocmh.processResponses(new NamedList(), shardHandler, true, "Could not restore core", asyncId, requestMap);
+
+      //Mark all shards in ACTIVE STATE
+      {
+        HashMap<String, Object> propMap = new HashMap<>();
+        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+        propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
+        for (Slice shard : restoreCollection.getSlices()) {
+          propMap.put(shard.getName(), Slice.State.ACTIVE.toString());
+        }
+        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
+      }
+
+      // refresh the local copy of the collection state
+      restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
+
+      if (totalReplicasPerShard > 1) {
+        log.info("Adding replicas to restored collection={}", restoreCollection);
+        for (Slice slice : restoreCollection.getSlices()) {
+
+          // Add the remaining replicas for each shard, taking each replica's type into account
+          int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
+
+          // We already created either an NRT or a TLOG replica as the leader
+          if (numNrtReplicas > 0) {
+            createdNrtReplicas++;
+          } else if (numTlogReplicas > 0) {
+            createdTlogReplicas++;
+          }
+
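+          // Fill the remaining replica slots in priority order: NRT first, then TLOG, then PULL.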
+          for (int i = 1; i < totalReplicasPerShard; i++) {
+            Replica.Type typeToCreate;
+            if (createdNrtReplicas < numNrtReplicas) {
+              createdNrtReplicas++;
+              typeToCreate = Replica.Type.NRT;
+            } else if (createdTlogReplicas < numTlogReplicas) {
+              createdTlogReplicas++;
+              typeToCreate = Replica.Type.TLOG;
+            } else {
+              createdPullReplicas++;
+              typeToCreate = Replica.Type.PULL;
+              assert createdPullReplicas <= numPullReplicas: "Unexpected number of replicas";
+            }
+
+            log.debug("Adding replica for shard={} collection={} of type {} ", slice.getName(), restoreCollection, typeToCreate);
+            HashMap<String, Object> propMap = new HashMap<>();
+            propMap.put(COLLECTION_PROP, restoreCollectionName);
+            propMap.put(SHARD_ID_PROP, slice.getName());
+            propMap.put(REPLICA_TYPE, typeToCreate.name());
+
+            // Get the first node matching the shard to restore in
+            String node;
+            for (ReplicaPosition replicaPosition : replicaPositions) {
+              if (Objects.equals(replicaPosition.shard, slice.getName())) {
+                node = replicaPosition.node;
+                propMap.put(CoreAdminParams.NODE, node);
+                replicaPositions.remove(replicaPosition);
+                break;
+              }
+            }
+
+            // add async param
+            if (asyncId != null) {
+              propMap.put(ASYNC, asyncId);
+            }
+            ocmh.addPropertyParams(message, propMap);
+
+            ocmh.addReplica(zkStateReader.getClusterState(), new ZkNodeProps(propMap), results, null);
+          }
+        }
+      }
+
+      log.info("Completed restoring collection={} backupName={}", restoreCollection, backupName);
+    } finally {
+      if (sessionWrapper != null) sessionWrapper.release();
+    }
+  }
+
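+  /** Returns the message's {@code propertyName} value if present, else {@code count}, else {@code defaultValue}. */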
+  private int getInt(ZkNodeProps message, String propertyName, Integer count, int defaultValue) {
+    Integer value = message.getInt(propertyName, count);
+    return value != null ? value : defaultValue;
+  }
+}


[15/15] lucene-solr:master: SOLR-11817: Move Collections API classes to its own package

Posted by va...@apache.org.
SOLR-11817: Move Collections API classes to its own package


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a3c4f738
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a3c4f738
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a3c4f738

Branch: refs/heads/master
Commit: a3c4f7388c13cfdeb66d83b434b991e5e159d4cc
Parents: e2bba98
Author: Varun Thacker <va...@apache.org>
Authored: Mon Jan 15 18:07:34 2018 -0800
Committer: Varun Thacker <va...@apache.org>
Committed: Tue Jan 16 11:03:40 2018 -0800

----------------------------------------------------------------------
 solr/CHANGES.txt                                |    2 +
 .../org/apache/solr/cloud/AddReplicaCmd.java    |  279 -----
 .../src/java/org/apache/solr/cloud/Assign.java  |  483 ---------
 .../java/org/apache/solr/cloud/BackupCmd.java   |  225 ----
 .../solr/cloud/CloudConfigSetService.java       |    1 +
 .../java/org/apache/solr/cloud/CloudUtil.java   |    2 +-
 .../org/apache/solr/cloud/CreateAliasCmd.java   |  101 --
 .../apache/solr/cloud/CreateCollectionCmd.java  |  533 ---------
 .../org/apache/solr/cloud/CreateShardCmd.java   |  191 ----
 .../apache/solr/cloud/CreateSnapshotCmd.java    |  179 ---
 .../org/apache/solr/cloud/DeleteAliasCmd.java   |   43 -
 .../apache/solr/cloud/DeleteCollectionCmd.java  |  141 ---
 .../org/apache/solr/cloud/DeleteNodeCmd.java    |  137 ---
 .../org/apache/solr/cloud/DeleteReplicaCmd.java |  281 -----
 .../org/apache/solr/cloud/DeleteShardCmd.java   |  178 ---
 .../apache/solr/cloud/DeleteSnapshotCmd.java    |  160 ---
 .../solr/cloud/ExclusiveSliceProperty.java      |    5 +-
 .../solr/cloud/LeaderRecoveryWatcher.java       |   88 --
 .../java/org/apache/solr/cloud/MigrateCmd.java  |  337 ------
 .../org/apache/solr/cloud/MoveReplicaCmd.java   |  302 ------
 .../java/org/apache/solr/cloud/Overseer.java    |    1 +
 .../OverseerCollectionConfigSetProcessor.java   |    1 +
 .../cloud/OverseerCollectionMessageHandler.java | 1003 -----------------
 .../org/apache/solr/cloud/OverseerRoleCmd.java  |  102 --
 .../apache/solr/cloud/OverseerStatusCmd.java    |  112 --
 .../org/apache/solr/cloud/ReplaceNodeCmd.java   |  226 ----
 .../java/org/apache/solr/cloud/RestoreCmd.java  |  363 -------
 .../cloud/RoutedAliasCreateCollectionCmd.java   |  182 ----
 .../org/apache/solr/cloud/SplitShardCmd.java    |  542 ----------
 .../org/apache/solr/cloud/UtilizeNodeCmd.java   |  120 ---
 .../cloud/api/collections/AddReplicaCmd.java    |  282 +++++
 .../solr/cloud/api/collections/Assign.java      |  483 +++++++++
 .../solr/cloud/api/collections/BackupCmd.java   |  224 ++++
 .../cloud/api/collections/CreateAliasCmd.java   |  100 ++
 .../api/collections/CreateCollectionCmd.java    |  531 +++++++++
 .../cloud/api/collections/CreateShardCmd.java   |  190 ++++
 .../api/collections/CreateSnapshotCmd.java      |  179 +++
 .../cloud/api/collections/DeleteAliasCmd.java   |   43 +
 .../api/collections/DeleteCollectionCmd.java    |  142 +++
 .../cloud/api/collections/DeleteNodeCmd.java    |  137 +++
 .../cloud/api/collections/DeleteReplicaCmd.java |  280 +++++
 .../cloud/api/collections/DeleteShardCmd.java   |  178 +++
 .../api/collections/DeleteSnapshotCmd.java      |  160 +++
 .../api/collections/LeaderRecoveryWatcher.java  |   88 ++
 .../solr/cloud/api/collections/MigrateCmd.java  |  334 ++++++
 .../cloud/api/collections/MoveReplicaCmd.java   |  303 ++++++
 .../OverseerCollectionMessageHandler.java       | 1011 +++++++++++++++++
 .../cloud/api/collections/OverseerRoleCmd.java  |  102 ++
 .../api/collections/OverseerStatusCmd.java      |  113 ++
 .../cloud/api/collections/ReplaceNodeCmd.java   |  227 ++++
 .../solr/cloud/api/collections/RestoreCmd.java  |  357 ++++++
 .../RoutedAliasCreateCollectionCmd.java         |  184 ++++
 .../cloud/api/collections/SplitShardCmd.java    |  540 ++++++++++
 .../cloud/api/collections/UtilizeNodeCmd.java   |  120 +++
 .../cloud/api/collections/package-info.java     |   23 +
 .../cloud/overseer/ClusterStateMutator.java     |    2 +-
 .../solr/cloud/overseer/ReplicaMutator.java     |   12 +-
 .../solr/cloud/overseer/SliceMutator.java       |   13 +-
 .../solr/handler/admin/CollectionsHandler.java  |   24 +-
 .../TimeRoutedAliasUpdateProcessor.java         |    2 +-
 .../AbstractCloudBackupRestoreTestCase.java     |  346 ------
 .../test/org/apache/solr/cloud/AssignTest.java  |  155 ---
 .../solr/cloud/BasicDistributedZkTest.java      |    1 +
 .../solr/cloud/ChaosMonkeyShardSplitTest.java   |    5 +
 .../apache/solr/cloud/CollectionReloadTest.java |   84 --
 .../cloud/CollectionTooManyReplicasTest.java    |  221 ----
 .../CollectionsAPIAsyncDistributedZkTest.java   |  177 ---
 .../cloud/CollectionsAPIDistributedZkTest.java  |  684 ------------
 ...ConcurrentDeleteAndCreateCollectionTest.java |  226 ----
 .../apache/solr/cloud/CustomCollectionTest.java |  198 ----
 ...verseerCollectionConfigSetProcessorTest.java |   28 +-
 .../solr/cloud/OverseerTaskQueueTest.java       |    1 +
 .../solr/cloud/ReplicaPropertiesBase.java       |  177 ---
 .../org/apache/solr/cloud/ShardSplitTest.java   | 1015 -----------------
 .../cloud/SimpleCollectionCreateDeleteTest.java |   64 --
 .../apache/solr/cloud/TestCollectionAPI.java    |  797 --------------
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  295 -----
 .../solr/cloud/TestHdfsCloudBackupRestore.java  |  203 ----
 .../cloud/TestLocalFSCloudBackupRestore.java    |   57 -
 .../solr/cloud/TestReplicaProperties.java       |  236 ----
 .../cloud/TestRequestStatusCollectionAPI.java   |  197 ----
 .../AbstractCloudBackupRestoreTestCase.java     |  348 ++++++
 .../solr/cloud/api/collections/AssignTest.java  |  156 +++
 .../api/collections/CollectionReloadTest.java   |   85 ++
 .../CollectionTooManyReplicasTest.java          |  222 ++++
 .../CollectionsAPIAsyncDistributedZkTest.java   |  178 +++
 .../CollectionsAPIDistributedZkTest.java        |  686 ++++++++++++
 ...ConcurrentDeleteAndCreateCollectionTest.java |  227 ++++
 .../api/collections/CustomCollectionTest.java   |  199 ++++
 .../HdfsCollectionsAPIDistributedZkTest.java    |  176 +++
 .../api/collections/ReplicaPropertiesBase.java  |  178 +++
 .../cloud/api/collections/ShardSplitTest.java   | 1017 ++++++++++++++++++
 .../SimpleCollectionCreateDeleteTest.java       |   66 ++
 .../api/collections/TestCollectionAPI.java      |  795 ++++++++++++++
 .../TestCollectionsAPIViaSolrCloudCluster.java  |  297 +++++
 .../collections/TestHdfsCloudBackupRestore.java |  207 ++++
 .../TestLocalFSCloudBackupRestore.java          |   57 +
 .../api/collections/TestReplicaProperties.java  |  236 ++++
 .../TestRequestStatusCollectionAPI.java         |  198 ++++
 .../cloud/autoscaling/sim/SimCloudManager.java  |    2 +-
 .../sim/SimClusterStateProvider.java            |   14 +-
 .../cloud/cdcr/BaseCdcrDistributedZkTest.java   |   11 +-
 .../HdfsCollectionsAPIDistributedZkTest.java    |  176 ---
 .../cloud/AbstractFullDistribZkTestBase.java    |   40 +-
 .../apache/solr/cloud/MiniSolrCloudCluster.java |    2 +-
 .../solr/cloud/MiniSolrCloudClusterTest.java    |    2 +-
 106 files changed, 11761 insertions(+), 11685 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 45a9a59..187976d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -150,6 +150,8 @@ Other Changes
 
 * SOLR-11218: Fail and return an error when attempting to delete a collection that's part of an alias (Erick Erickson)
 
+* SOLR-11817: Move Collections API classes to its own package (Varun Thacker)
+
 ==================  7.2.1 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
deleted file mode 100644
index 71a54c14..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/AddReplicaCmd.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-
-public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public AddReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    addReplica(state, message, results, null);
-  }
-
-  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-      throws IOException, InterruptedException {
-    log.debug("addReplica() : {}", Utils.toJSONString(message));
-    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-    final String asyncId = message.getStr(ASYNC);
-
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    message = assignReplicaDetails(ocmh.cloudManager, clusterState, message, sessionWrapper);
-
-    String collection = message.getStr(COLLECTION_PROP);
-    DocCollection coll = clusterState.getCollection(collection);
-
-    String node = message.getStr(CoreAdminParams.NODE);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String coreName = message.getStr(CoreAdminParams.NAME);
-    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
-    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
-    boolean parallel = message.getBool("parallel", false);
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    if (!Overseer.isLegacy(zkStateReader)) {
-      if (!skipCreateReplicaInClusterState) {
-        ZkNodeProps props = new ZkNodeProps(
-            Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-            ZkStateReader.COLLECTION_PROP, collection,
-            ZkStateReader.SHARD_ID_PROP, shard,
-            ZkStateReader.CORE_NAME_PROP, coreName,
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(node),
-            ZkStateReader.NODE_NAME_PROP, node,
-            ZkStateReader.REPLICA_TYPE, replicaType.name());
-        if (coreNodeName != null) {
-          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
-        }
-        try {
-          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-        } catch (Exception e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception updating Overseer state queue", e);
-        }
-      }
-      params.set(CoreAdminParams.CORE_NODE_NAME,
-          ocmh.waitToSeeReplicasInState(collection, Collections.singletonList(coreName)).get(coreName).getName());
-    }
-
-    String configName = zkStateReader.readConfigName(collection);
-    String routeKey = message.getStr(ShardParams._ROUTE_);
-    String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
-    String ulogDir = message.getStr(CoreAdminParams.ULOG_DIR);
-    String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
-
-    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-    params.set(CoreAdminParams.NAME, coreName);
-    params.set(COLL_CONF, configName);
-    params.set(CoreAdminParams.COLLECTION, collection);
-    params.set(CoreAdminParams.REPLICA_TYPE, replicaType.name());
-    if (shard != null) {
-      params.set(CoreAdminParams.SHARD, shard);
-    } else if (routeKey != null) {
-      Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
-      if (slices.isEmpty()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No active shard serving _route_=" + routeKey + " found");
-      } else {
-        params.set(CoreAdminParams.SHARD, slices.iterator().next().getName());
-      }
-    } else {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify either 'shard' or _route_ param");
-    }
-    if (dataDir != null) {
-      params.set(CoreAdminParams.DATA_DIR, dataDir);
-    }
-    if (ulogDir != null) {
-      params.set(CoreAdminParams.ULOG_DIR, ulogDir);
-    }
-    if (instanceDir != null) {
-      params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
-    }
-    if (coreNodeName != null) {
-      params.set(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
-    }
-    ocmh.addPropertyParams(message, params);
-
-    // For tracking async calls.
-    Map<String,String> requestMap = new HashMap<>();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-    ocmh.sendShardRequest(node, params, shardHandler, asyncId, requestMap);
-
-    final String fnode = node;
-    final String fcoreName = coreName;
-
-    Runnable runnable = () -> {
-      ocmh.processResponses(results, shardHandler, true, "ADDREPLICA failed to create replica", asyncId, requestMap);
-      ocmh.waitForCoreNodeName(collection, fnode, fcoreName);
-      if (sessionWrapper.get() != null) {
-        sessionWrapper.get().release();
-      }
-      if (onComplete != null) onComplete.run();
-    };
-
-    if (!parallel || waitForFinalState) {
-      if (waitForFinalState) {
-        SolrCloseableLatch latch = new SolrCloseableLatch(1, ocmh);
-        ActiveReplicaWatcher watcher = new ActiveReplicaWatcher(collection, null, Collections.singletonList(coreName), latch);
-        try {
-          zkStateReader.registerCollectionStateWatcher(collection, watcher);
-          runnable.run();
-          if (!latch.await(timeout, TimeUnit.SECONDS)) {
-            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Timeout waiting " + timeout + " seconds for replica to become active.");
-          }
-        } finally {
-          zkStateReader.removeCollectionStateWatcher(collection, watcher);
-        }
-      } else {
-        runnable.run();
-      }
-    } else {
-      ocmh.tpe.submit(runnable);
-    }
-
-
-    return new ZkNodeProps(
-        ZkStateReader.COLLECTION_PROP, collection,
-        ZkStateReader.SHARD_ID_PROP, shard,
-        ZkStateReader.CORE_NAME_PROP, coreName,
-        ZkStateReader.NODE_NAME_PROP, node
-    );
-  }
-
-  public static ZkNodeProps assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                 ZkNodeProps message, AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
-
-    String collection = message.getStr(COLLECTION_PROP);
-    String node = message.getStr(CoreAdminParams.NODE);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String coreName = message.getStr(CoreAdminParams.NAME);
-    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
-    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
-    if (StringUtils.isBlank(coreName)) {
-      coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
-    }
-
-    DocCollection coll = clusterState.getCollection(collection);
-    if (coll == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
-    }
-    if (coll.getSlice(shard) == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Collection: " + collection + " shard: " + shard + " does not exist");
-    }
-
-    // Kind of unnecessary, but it does put the logic of whether to override maxShardsPerNode in one place.
-    if (!skipCreateReplicaInClusterState) {
-      if (CloudUtil.usePolicyFramework(coll, cloudManager)) {
-        if (node == null) {
-          if(coll.getPolicyName() != null) message.getProperties().put(Policy.POLICY, coll.getPolicyName());
-          node = Assign.identifyNodes(cloudManager,
-              clusterState,
-              Collections.emptyList(),
-              collection,
-              message,
-              Collections.singletonList(shard),
-              replicaType == Replica.Type.NRT ? 0 : 1,
-              replicaType == Replica.Type.TLOG ? 0 : 1,
-              replicaType == Replica.Type.PULL ? 0 : 1
-          ).get(0).node;
-          sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-        }
-      } else {
-        node = Assign.getNodesForNewReplicas(clusterState, collection, shard, 1, node,
-            cloudManager).get(0).nodeName;// TODO: use replica type in this logic too
-      }
-    }
-    log.info("Node Identified {} for creating new replica", node);
-
-    if (!clusterState.liveNodesContain(node)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
-    }
-    if (coreName == null) {
-      coreName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), coll, shard, replicaType);
-    } else if (!skipCreateReplicaInClusterState) {
-      //Validate that the core name is unique in that collection
-      for (Slice slice : coll.getSlices()) {
-        for (Replica replica : slice.getReplicas()) {
-          String replicaCoreName = replica.getStr(CORE_NAME_PROP);
-          if (coreName.equals(replicaCoreName)) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Another replica with the same core name already exists" +
-                " for this collection");
-          }
-        }
-      }
-    }
-    if (coreNodeName != null) {
-      message = message.plus(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
-    }
-    message = message.plus(CoreAdminParams.NAME, coreName);
-    message = message.plus(CoreAdminParams.NODE, node);
-    return message;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Assign.java b/solr/core/src/java/org/apache/solr/cloud/Assign.java
deleted file mode 100644
index c746c94..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/Assign.java
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
-import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.rule.ReplicaAssigner;
-import org.apache.solr.cloud.rule.Rule;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.NumberUtils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE_DEFAULT;
-import static org.apache.solr.common.cloud.DocCollection.SNITCH;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-
-public class Assign {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static int incAndGetId(DistribStateManager stateManager, String collection, int defaultValue) {
-    String path = "/collections/"+collection;
-    try {
-      if (!stateManager.hasData(path)) {
-        try {
-          stateManager.makePath(path);
-        } catch (AlreadyExistsException e) {
-          // it's okay if another beats us creating the node
-        }
-      }
-      path += "/counter";
-      if (!stateManager.hasData(path)) {
-        try {
-          stateManager.createData(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT);
-        } catch (AlreadyExistsException e) {
-          // it's okay if another beats us creating the node
-        }
-      }
-    } catch (InterruptedException e) {
-      Thread.interrupted();
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    } catch (IOException | KeeperException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
-    }
-
-    while (true) {
-      try {
-        int version = 0;
-        int currentId = 0;
-        VersionedData data = stateManager.getData(path, null);
-        if (data != null) {
-          currentId = NumberUtils.bytesToInt(data.getData());
-          version = data.getVersion();
-        }
-        byte[] bytes = NumberUtils.intToBytes(++currentId);
-        stateManager.setData(path, bytes, version);
-        return currentId;
-      } catch (BadVersionException e) {
-        continue;
-      } catch (IOException | KeeperException e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:"+collection, e);
-      } catch (InterruptedException e) {
-        Thread.interrupted();
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:" + collection, e);
-      }
-    }
-  }
-
-  public static String assignCoreNodeName(DistribStateManager stateManager, DocCollection collection) {
-    // for backward compatibility
-    int defaultValue = defaultCounterValue(collection, false);
-    String coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
-    while (collection.getReplica(coreNodeName) != null) {
-      // there is a wee chance that the new coreNodeName is not totally unique,
-      // but it is guaranteed to be unique for new collections
-      coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
-    }
-    return coreNodeName;
-  }
-
-  /**
-   * Assign a new unique id up to slices count - then add replicas evenly.
-   *
-   * @return the assigned shard id
-   */
-  public static String assignShard(DocCollection collection, Integer numShards) {
-    if (numShards == null) {
-      numShards = 1;
-    }
-    String returnShardId = null;
-    Map<String, Slice> sliceMap = collection != null ? collection.getActiveSlicesMap() : null;
-
-
-    // TODO: now that we create shards ahead of time, is this code needed?  Esp since hash ranges aren't assigned when creating via this method?
-
-    if (sliceMap == null) {
-      return "shard1";
-    }
-
-    List<String> shardIdNames = new ArrayList<>(sliceMap.keySet());
-
-    if (shardIdNames.size() < numShards) {
-      return "shard" + (shardIdNames.size() + 1);
-    }
-
-    // TODO: don't need to sort to find shard with fewest replicas!
-
-    // else figure out which shard needs more replicas
-    final Map<String, Integer> map = new HashMap<>();
-    for (String shardId : shardIdNames) {
-      int cnt = sliceMap.get(shardId).getReplicasMap().size();
-      map.put(shardId, cnt);
-    }
-
-    Collections.sort(shardIdNames, (String o1, String o2) -> {
-      Integer one = map.get(o1);
-      Integer two = map.get(o2);
-      return one.compareTo(two);
-    });
-
-    returnShardId = shardIdNames.get(0);
-    return returnShardId;
-  }
-
-  private static String buildSolrCoreName(String collectionName, String shard, Replica.Type type, int replicaNum) {
-    // TODO: Adding the suffix is great for debugging, but may be an issue if at some point we want to support a way to change replica type
-    return String.format(Locale.ROOT, "%s_%s_replica_%s%s", collectionName, shard, type.name().substring(0,1).toLowerCase(Locale.ROOT), replicaNum);
-  }
-
-  private static int defaultCounterValue(DocCollection collection, boolean newCollection) {
-    if (newCollection) return 0;
-    int defaultValue = collection.getReplicas().size();
-    if (collection.getReplicationFactor() != null) {
-      // numReplicas and replicationFactor * numSlices may not be equal
-      // when many addReplica or deleteReplica operations have been executed
-      defaultValue = Math.max(defaultValue,
-          collection.getReplicationFactor() * collection.getSlices().size());
-    }
-    return defaultValue * 20;
-  }
-
-  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type, boolean newCollection) {
-    Slice slice = collection.getSlice(shard);
-    int defaultValue = defaultCounterValue(collection, newCollection);
-    int replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
-    String coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
-    while (existCoreName(coreName, slice)) {
-      replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
-      coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
-    }
-    return coreName;
-  }
-
-  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type) {
-    return buildSolrCoreName(stateManager, collection, shard, type, false);
-  }
-
-  private static boolean existCoreName(String coreName, Slice slice) {
-    if (slice == null) return false;
-    for (Replica replica : slice.getReplicas()) {
-      if (coreName.equals(replica.getStr(CORE_NAME_PROP))) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  public static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
-    // TODO: add smarter options that look at the current number of cores per
-    // node?
-    // for now we just go random (except when createNodeSet and createNodeSet.shuffle=false are passed in)
-
-    List<String> nodeList;
-
-    final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
-    final List<String> createNodeList = (createNodeSetStr == null) ? null : StrUtils.splitSmart((CREATE_NODE_SET_EMPTY.equals(createNodeSetStr) ? "" : createNodeSetStr), ",", true);
-
-    if (createNodeList != null) {
-      nodeList = new ArrayList<>(createNodeList);
-      nodeList.retainAll(liveNodes);
-      if (message.getBool(CREATE_NODE_SET_SHUFFLE, CREATE_NODE_SET_SHUFFLE_DEFAULT)) {
-        Collections.shuffle(nodeList, random);
-      }
-    } else {
-      nodeList = new ArrayList<>(liveNodes);
-      Collections.shuffle(nodeList, random);
-    }
-
-    return nodeList;
-  }
-
-  public static List<ReplicaPosition> identifyNodes(SolrCloudManager cloudManager,
-                                                    ClusterState clusterState,
-                                                    List<String> nodeList,
-                                                    String collectionName,
-                                                    ZkNodeProps message,
-                                                    List<String> shardNames,
-                                                    int numNrtReplicas,
-                                                    int numTlogReplicas,
-                                                    int numPullReplicas) throws IOException, InterruptedException {
-    List<Map> rulesMap = (List) message.get("rule");
-    String policyName = message.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-
-    if (rulesMap == null && policyName == null && autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      log.debug("Identify nodes using default");
-      int i = 0;
-      List<ReplicaPosition> result = new ArrayList<>();
-      for (String aShard : shardNames)
-        for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
-            Replica.Type.TLOG, numTlogReplicas,
-            Replica.Type.PULL, numPullReplicas
-        ).entrySet()) {
-          for (int j = 0; j < e.getValue(); j++){
-            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
-            i++;
-          }
-        }
-      return result;
-    } else {
-      if (numTlogReplicas + numPullReplicas != 0 && rulesMap != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules or cluster policies");
-      }
-    }
-
-    if (rulesMap != null && !rulesMap.isEmpty()) {
-      List<Rule> rules = new ArrayList<>();
-      for (Object map : rulesMap) rules.add(new Rule((Map) map));
-      Map<String, Integer> shardVsReplicaCount = new HashMap<>();
-
-      for (String shard : shardNames) shardVsReplicaCount.put(shard, numNrtReplicas);
-      ReplicaAssigner replicaAssigner = new ReplicaAssigner(rules,
-          shardVsReplicaCount,
-          (List<Map>) message.get(SNITCH),
-          new HashMap<>(),//this is a new collection. So, there are no nodes in any shard
-          nodeList,
-          cloudManager,
-          clusterState);
-
-      Map<ReplicaPosition, String> nodeMappings = replicaAssigner.getNodeMappings();
-      return nodeMappings.entrySet().stream()
-          .map(e -> new ReplicaPosition(e.getKey().shard, e.getKey().index, e.getKey().type, e.getValue()))
-          .collect(Collectors.toList());
-    } else  {
-      if (message.getStr(CREATE_NODE_SET) == null)
-        nodeList = Collections.emptyList();// unless explicitly specified do not pass node list to Policy
-      return getPositionsUsingPolicy(collectionName,
-          shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas, policyName, cloudManager, nodeList);
-    }
-  }
-
-  static class ReplicaCount {
-    public final String nodeName;
-    public int thisCollectionNodes = 0;
-    public int totalNodes = 0;
-
-    ReplicaCount(String nodeName) {
-      this.nodeName = nodeName;
-    }
-
-    public int weight() {
-      return (thisCollectionNodes * 100) + totalNodes;
-    }
-  }
-
-  // Only called from createShard and addReplica (so far).
-  //
-  // Gets a list of candidate nodes to put the required replica(s) on. Throws errors if not enough replicas
-  // could be created on live nodes given maxShardsPerNode, Replication factor (if from createShard) etc.
-  public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
-                                                          String shard, int nrtReplicas,
-                                                          Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException {
-    log.debug("getNodesForNewReplicas() shard: {} , replicas : {} , createNodeSet {}", shard, nrtReplicas, createNodeSet );
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
-    List<String> createNodeList = null;
-
-    if (createNodeSet instanceof List) {
-      createNodeList = (List) createNodeSet;
-    } else {
-      createNodeList = createNodeSet == null ? null : StrUtils.splitSmart((String) createNodeSet, ",", true);
-    }
-
-    HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
-
-    if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
-      int availableSlots = 0;
-      for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
-        //ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
-        if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
-          availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
-        }
-      }
-      if (availableSlots < nrtReplicas) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of live nodes and a maxShardsPerNode of %d",
-                nrtReplicas, collectionName, maxShardsPerNode));
-      }
-    }
-
-    List l = (List) coll.get(DocCollection.RULE);
-    List<ReplicaPosition> replicaPositions = null;
-    if (l != null) {
-      // TODO: make it so that this method doesn't require access to CC
-      replicaPositions = getNodesViaRules(clusterState, shard, nrtReplicas, cloudManager, coll, createNodeList, l);
-    }
-    String policyName = coll.getStr(POLICY);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    if (policyName != null || !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
-      replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, 0, 0,
-          policyName, cloudManager, createNodeList);
-    }
-
-    if(replicaPositions != null){
-      List<ReplicaCount> repCounts = new ArrayList<>();
-      for (ReplicaPosition p : replicaPositions) {
-        repCounts.add(new ReplicaCount(p.node));
-      }
-      return repCounts;
-    }
-
-    ArrayList<ReplicaCount> sortedNodeList = new ArrayList<>(nodeNameVsShardCount.values());
-    Collections.sort(sortedNodeList, (x, y) -> (x.weight() < y.weight()) ? -1 : ((x.weight() == y.weight()) ? 0 : 1));
-    return sortedNodeList;
-
-  }
-
-  public static List<ReplicaPosition> getPositionsUsingPolicy(String collName, List<String> shardNames,
-                                                              int nrtReplicas,
-                                                              int tlogReplicas,
-                                                              int pullReplicas,
-                                                              String policyName, SolrCloudManager cloudManager,
-                                                              List<String> nodesList) throws IOException, InterruptedException {
-    log.debug("shardnames {} NRT {} TLOG {} PULL {} , policy {}, nodeList {}", shardNames, nrtReplicas, tlogReplicas, pullReplicas, policyName, nodesList);
-    List<ReplicaPosition> replicaPositions = null;
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    try {
-      Map<String, String> kvMap = Collections.singletonMap(collName, policyName);
-      replicaPositions = PolicyHelper.getReplicaLocations(
-          collName,
-          autoScalingConfig,
-          cloudManager,
-          kvMap,
-          shardNames,
-          nrtReplicas,
-          tlogReplicas,
-          pullReplicas,
-          nodesList);
-      return replicaPositions;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error getting replica locations", e);
-    } finally {
-      if (log.isTraceEnabled()) {
-        if (replicaPositions != null)
-          log.trace("REPLICA_POSITIONS: " + Utils.toJSONString(Utils.getDeepCopy(replicaPositions, 7, true)));
-        log.trace("AUTOSCALING_CONF: " + Utils.toJSONString(autoScalingConfig));
-      }
-    }
-  }
-
-  private static List<ReplicaPosition> getNodesViaRules(ClusterState clusterState, String shard, int numberOfNodes,
-                                                        SolrCloudManager cloudManager, DocCollection coll, List<String> createNodeList, List l) {
-    ArrayList<Rule> rules = new ArrayList<>();
-    for (Object o : l) rules.add(new Rule((Map) o));
-    Map<String, Map<String, Integer>> shardVsNodes = new LinkedHashMap<>();
-    for (Slice slice : coll.getSlices()) {
-      LinkedHashMap<String, Integer> n = new LinkedHashMap<>();
-      shardVsNodes.put(slice.getName(), n);
-      for (Replica replica : slice.getReplicas()) {
-        Integer count = n.get(replica.getNodeName());
-        if (count == null) count = 0;
-        n.put(replica.getNodeName(), ++count);
-      }
-    }
-    List snitches = (List) coll.get(SNITCH);
-    List<String> nodesList = createNodeList == null ?
-        new ArrayList<>(clusterState.getLiveNodes()) :
-        createNodeList;
-    Map<ReplicaPosition, String> positions = new ReplicaAssigner(
-        rules,
-        Collections.singletonMap(shard, numberOfNodes),
-        snitches,
-        shardVsNodes,
-        nodesList, cloudManager, clusterState).getNodeMappings();
-
-    return positions.entrySet().stream().map(e -> e.getKey().setNode(e.getValue())).collect(Collectors.toList());// getReplicaCounts(positions);
-  }
-
-  private static HashMap<String, ReplicaCount> getNodeNameVsShardCount(String collectionName,
-                                                                       ClusterState clusterState, List<String> createNodeList) {
-    Set<String> nodes = clusterState.getLiveNodes();
-
-    List<String> nodeList = new ArrayList<>(nodes.size());
-    nodeList.addAll(nodes);
-    if (createNodeList != null) nodeList.retainAll(createNodeList);
-
-    HashMap<String, ReplicaCount> nodeNameVsShardCount = new HashMap<>();
-    for (String s : nodeList) {
-      nodeNameVsShardCount.put(s, new ReplicaCount(s));
-    }
-    if (createNodeList != null) { // Overrides petty considerations about maxShardsPerNode
-      if (createNodeList.size() != nodeNameVsShardCount.size()) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "At least one of the node(s) specified " + createNodeList + " are not currently active in "
-                + nodeNameVsShardCount.keySet() + ", no action taken.");
-      }
-      return nodeNameVsShardCount;
-    }
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
-    Map<String, DocCollection> collections = clusterState.getCollectionsMap();
-    for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
-      DocCollection c = entry.getValue();
-      // identify suitable nodes by checking the number of cores on each of them
-      for (Slice slice : c.getSlices()) {
-        Collection<Replica> replicas = slice.getReplicas();
-        for (Replica replica : replicas) {
-          ReplicaCount count = nodeNameVsShardCount.get(replica.getNodeName());
-          if (count != null) {
-            count.totalNodes++; // Used to "weigh" whether this node should be used later.
-            if (entry.getKey().equals(collectionName)) {
-              count.thisCollectionNodes++;
-              if (count.thisCollectionNodes >= maxShardsPerNode) nodeNameVsShardCount.remove(replica.getNodeName());
-            }
-          }
-        }
-      }
-    }
-
-    return nodeNameVsShardCount;
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
deleted file mode 100644
index a4012f0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/BackupCmd.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.time.Instant;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-
-import org.apache.lucene.util.Version;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.BackupRepository;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public BackupCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String backupName = message.getStr(NAME);
-    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-
-    Instant startTime = Instant.now();
-
-    CoreContainer cc = ocmh.overseer.getZkController().getCoreContainer();
-    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
-    BackupManager backupMgr = new BackupManager(repository, ocmh.zkStateReader);
-
-    // Backup location
-    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
-    URI backupPath = repository.resolve(location, backupName);
-
-    // Verify that the backup directory does not already exist.
-    if (repository.exists(backupPath)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The backup directory already exists: " + backupPath);
-    }
-
-    // Create a directory to store backup details.
-    repository.createDirectory(backupPath);
-
-    String strategy = message.getStr(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-    switch (strategy) {
-      case CollectionAdminParams.COPY_FILES_STRATEGY: {
-        copyIndexFiles(backupPath, message, results);
-        break;
-      }
-      case CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY: {
-        break;
-      }
-    }
-
-    log.info("Starting to backup ZK data for backupName={}", backupName);
-
-    //Download the configs
-    String configName = ocmh.zkStateReader.readConfigName(collectionName);
-    backupMgr.downloadConfigDir(location, backupName, configName);
-
-    // Save the collection's state. It can be part of the monolithic clusterstate.json or an individual state.json.
-    // Since we don't want to distinguish between the two, we extract the state and back it up as a separate JSON file.
-    DocCollection collectionState = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
-    backupMgr.writeCollectionState(location, backupName, collectionName, collectionState);
-
-    Properties properties = new Properties();
-
-    properties.put(BackupManager.BACKUP_NAME_PROP, backupName);
-    properties.put(BackupManager.COLLECTION_NAME_PROP, collectionName);
-    properties.put(COLL_CONF, configName);
-    properties.put(BackupManager.START_TIME_PROP, startTime.toString());
-    properties.put(BackupManager.INDEX_VERSION_PROP, Version.LATEST.toString());
-    //TODO: Add MD5 of the configset. If during restore the same name configset exists then we can compare checksums to see if they are the same.
-    //if they are not the same then we can throw an error or have an 'overwriteConfig' flag
-    //TODO save numDocs for the shardLeader. We can use it to sanity check the restore.
-
-    backupMgr.writeBackupProperties(location, backupName, properties);
-
-    log.info("Completed backing up ZK data for backupName={}", backupName);
-  }
-
-  private Replica selectReplicaWithSnapshot(CollectionSnapshotMetaData snapshotMeta, Slice slice) {
-    // The goal here is to choose the snapshot of the replica which was the leader at the time the snapshot was created.
-    // If that is not possible, we choose any other replica for the given shard.
-    Collection<CoreSnapshotMetaData> snapshots = snapshotMeta.getReplicaSnapshotsForShard(slice.getName());
-
-    Optional<CoreSnapshotMetaData> leaderCore = snapshots.stream().filter(x -> x.isLeader()).findFirst();
-    if (leaderCore.isPresent()) {
-      log.info("Replica {} was the leader when snapshot {} was created.", leaderCore.get().getCoreName(), snapshotMeta.getName());
-      Replica r = slice.getReplica(leaderCore.get().getCoreName());
-      if ((r != null) && !r.getState().equals(State.DOWN)) {
-        return r;
-      }
-    }
-
-    Optional<Replica> r = slice.getReplicas().stream()
-                               .filter(x -> x.getState() != State.DOWN && snapshotMeta.isSnapshotExists(slice.getName(), x))
-                               .findFirst();
-
-    if (!r.isPresent()) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Unable to find any live replica with a snapshot named " + snapshotMeta.getName() + " for shard " + slice.getName());
-    }
-
-    return r.get();
-  }
-
-  private void copyIndexFiles(URI backupPath, ZkNodeProps request, NamedList results) throws Exception {
-    String collectionName = request.getStr(COLLECTION_PROP);
-    String backupName = request.getStr(NAME);
-    String asyncId = request.getStr(ASYNC);
-    String repoName = request.getStr(CoreAdminParams.BACKUP_REPOSITORY);
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    Map<String, String> requestMap = new HashMap<>();
-
-    String commitName = request.getStr(CoreAdminParams.COMMIT_NAME);
-    Optional<CollectionSnapshotMetaData> snapshotMeta = Optional.empty();
-    if (commitName != null) {
-      SolrZkClient zkClient = ocmh.overseer.getZkController().getZkClient();
-      snapshotMeta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
-      if (!snapshotMeta.isPresent()) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
-            + " does not exist for collection " + collectionName);
-      }
-      if (snapshotMeta.get().getStatus() != SnapshotStatus.Successful) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName + " for collection " + collectionName
-            + " has not completed successfully. The status is " + snapshotMeta.get().getStatus());
-      }
-    }
-
-    log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName,
-        backupPath);
-
-    Collection<String> shardsToConsider = Collections.emptySet();
-    if (snapshotMeta.isPresent()) {
-      shardsToConsider = snapshotMeta.get().getShards();
-    }
-
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getActiveSlices()) {
-      Replica replica = null;
-
-      if (snapshotMeta.isPresent()) {
-        if (!shardsToConsider.contains(slice.getName())) {
-          log.warn("Skipping the backup for shard {} since it wasn't part of the collection {} when snapshot {} was created.",
-              slice.getName(), collectionName, snapshotMeta.get().getName());
-          continue;
-        }
-        replica = selectReplicaWithSnapshot(snapshotMeta.get(), slice);
-      } else {
-        // Note: getLeader() can return null when there is no leader for this shard.
-        replica = slice.getLeader();
-        if (replica == null) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "No 'leader' replica available for shard " + slice.getName() + " of collection " + collectionName);
-        }
-      }
-
-      String coreName = replica.getStr(CORE_NAME_PROP);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString());
-      params.set(NAME, slice.getName());
-      params.set(CoreAdminParams.BACKUP_REPOSITORY, repoName);
-      params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString()); // note: the index files land under this path, in a "snapshot." + <slice name> subdirectory
-      params.set(CORE_NAME_PROP, coreName);
-      if (snapshotMeta.isPresent()) {
-        params.set(CoreAdminParams.COMMIT_NAME, snapshotMeta.get().getName());
-      }
-
-      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-      log.debug("Sent backup request to core={} for backupName={}", coreName, backupName);
-    }
-    log.debug("Sent backup requests to all shard leaders for backupName={}", backupName);
-
-    ocmh.processResponses(results, shardHandler, true, "Could not backup all replicas", asyncId, requestMap);
-  }
-}
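
For context on the command removed above: the whole backup flow is normally driven from a client
through the Collections API. A minimal SolrJ sketch (collection, backup and path names are
hypothetical; it assumes a backup location that every node can write to):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class BackupCollectionExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // Lands in BackupCmd on the Overseer: it refuses a pre-existing backup
      // directory, copies one replica's index per shard, then writes the
      // configset, the collection state and backup.properties.
      CollectionAdminRequest.backupCollection("myCollection", "myBackup")
          .setLocation("/shared/backups") // hypothetical shared path
          .process(client);
    }
  }
}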

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java b/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
index 3cdc903..9b16d23 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudConfigSetService.java
@@ -18,6 +18,7 @@ package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
 
+import org.apache.solr.cloud.api.collections.CreateCollectionCmd;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.ConfigSetService;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
index 30de3d4..0d45129 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudUtil.java
@@ -132,7 +132,7 @@ public class CloudUtil {
 
   }
 
-  static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
+  public static boolean usePolicyFramework(DocCollection collection, SolrCloudManager cloudManager)
       throws IOException, InterruptedException {
     AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
     return !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || collection.getPolicyName() != null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
deleted file mode 100644
index e10d53e..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateAliasCmd.java
+++ /dev/null
@@ -1,101 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-public class CreateAliasCmd implements Cmd {
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateAliasCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results)
-      throws Exception {
-    final String aliasName = message.getStr(NAME);
-    final List<String> canonicalCollectionList = parseCollectionsParameter(message.get("collections"));
-    final String canonicalCollectionsString = StrUtils.join(canonicalCollectionList, ',');
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    validateAllCollectionsExistAndNoDups(canonicalCollectionList, zkStateReader);
-
-    zkStateReader.aliasesHolder.applyModificationAndExportToZk(aliases -> aliases.cloneWithCollectionAlias(aliasName, canonicalCollectionsString));
-
-    // Sleep a bit to allow ZooKeeper state propagation.
-    //
-    // THIS IS A KLUDGE.
-    //
-    // Solr's view of the cluster is eventually consistent. *Eventually* all nodes and CloudSolrClients will be aware of
-    // alias changes, but not immediately. If a newly created alias is queried, things should work right away since Solr
-    // will attempt to refresh its aliases when it can't otherwise resolve the name.  However,
-    // modifications to an existing alias will take some time to become visible everywhere.
-    //
-    // We could levy this requirement on the client but they would probably always add an obligatory sleep, which is
-    // just kicking the can down the road.  Perhaps ideally at this juncture we could somehow wait until all
-    // Solr nodes in the cluster have the latest aliases?
-    Thread.sleep(100);
-  }
-
-  private void validateAllCollectionsExistAndNoDups(List<String> collectionList, ZkStateReader zkStateReader) {
-    final String collectionStr = StrUtils.join(collectionList, ',');
-
-    if (new HashSet<>(collectionList).size() != collectionList.size()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', since it contains duplicates", collectionStr));
-    }
-    ClusterState clusterState = zkStateReader.getClusterState();
-    Set<String> aliasNames = zkStateReader.getAliases().getCollectionAliasListMap().keySet();
-    for (String collection : collectionList) {
-      if (clusterState.getCollectionOrNull(collection) == null && !aliasNames.contains(collection)) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', '%s' is not an existing collection or alias", collectionStr, collection));
-      }
-    }
-  }
-  
-  /**
-   * The v2 API directs that the 'collections' parameter be provided as a JSON array (e.g. ["a", "b"]).  We also
-   * maintain support for the legacy format, a comma-separated list (e.g. a,b).
-   */
-  @SuppressWarnings("unchecked")
-  private List<String> parseCollectionsParameter(Object colls) {
-    if (colls == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "missing collections param");
-    if (colls instanceof List) return (List<String>) colls;
-    return StrUtils.splitSmart(colls.toString(), ",", true).stream()
-        .map(String::trim)
-        .collect(Collectors.toList());
-  }
-
-}
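
Client-side, the deleted CreateAliasCmd is reached through the same Collections API. A short
SolrJ sketch with hypothetical alias and collection names (the comma-separated form is the
legacy format; the v2 API accepts a JSON array, as parseCollectionsParameter notes above):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class CreateAliasExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // Each target must be an existing collection or alias, with no duplicates,
      // or validateAllCollectionsExistAndNoDups rejects the request.
      CollectionAdminRequest.createAlias("logs", "logs_2018_01,logs_2018_02")
          .process(client);
    }
  }
}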


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
deleted file mode 100644
index 2171c60..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateCollectionCmd.java
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.cloud.overseer.ClusterStateMutator;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.cloud.ZooKeeperException;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.handler.component.ShardRequest;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.RANDOM;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.common.util.StrUtils.formatString;
-
-public class CreateCollectionCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-  private final DistribStateManager stateManager;
-
-  public CreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.stateManager = ocmh.cloudManager.getDistribStateManager();
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    final String collectionName = message.getStr(NAME);
-    final boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
-    log.info("Create collection {}", collectionName);
-    if (clusterState.hasCollection(collectionName)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
-    }
-
-    String configName = getConfigName(collectionName, message);
-    if (configName == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No config set found to associate with the collection.");
-    }
-
-    ocmh.validateConfigOrThrowSolrException(configName);
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-
-    try {
-
-      final String async = message.getStr(ASYNC);
-
-      List<String> nodeList = new ArrayList<>();
-      List<String> shardNames = new ArrayList<>();
-      List<ReplicaPosition> replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, message,
-          nodeList, shardNames, sessionWrapper);
-      ZkStateReader zkStateReader = ocmh.zkStateReader;
-      boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
-
-      ocmh.createConfNode(stateManager, configName, collectionName, isLegacyCloud);
-
-      Map<String,String> collectionParams = new HashMap<>();
-      Map<String,Object> collectionProps = message.getProperties();
-      for (String propName : collectionProps.keySet()) {
-        if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
-          collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), (String) collectionProps.get(propName));
-        }
-      }
-      
-      createCollectionZkNode(stateManager, collectionName, collectionParams);
-      
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-
-      // wait for a while until we see the collection appear
-      TimeOut waitUntil = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean created = false;
-      while (! waitUntil.hasTimedOut()) {
-        waitUntil.sleep(100);
-        created = ocmh.cloudManager.getClusterStateProvider().getClusterState().hasCollection(collectionName);
-        if(created) break;
-      }
-      if (!created)
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
-
-      if (nodeList.isEmpty()) {
-        log.debug("Finished create command for collection: {}", collectionName);
-        return;
-      }
-
-      // For tracking async calls.
-      Map<String, String> requestMap = new HashMap<>();
-
-
-      log.debug(formatString("Creating SolrCores for new collection {0}, shardNames {1} , message : {2}",
-          collectionName, shardNames, message));
-      Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
-      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-      for (ReplicaPosition replicaPosition : replicaPositions) {
-        String nodeName = replicaPosition.node;
-        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(),
-            ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName),
-            replicaPosition.shard, replicaPosition.type, true);
-        log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
-            , coreName, replicaPosition.shard, collectionName, nodeName));
-
-
-        String baseUrl = zkStateReader.getBaseUrlForNodeName(nodeName);
-        // In the new mode, create the replica in the cluster state prior to creating the core;
-        // otherwise the core creation fails.
-        if (!isLegacyCloud) {
-          ZkNodeProps props = new ZkNodeProps(
-              Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
-              ZkStateReader.COLLECTION_PROP, collectionName,
-              ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
-              ZkStateReader.CORE_NAME_PROP, coreName,
-              ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-              ZkStateReader.BASE_URL_PROP, baseUrl,
-              ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
-              CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-        }
-
-        // Need to create new params for each request
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
-
-        params.set(CoreAdminParams.NAME, coreName);
-        params.set(COLL_CONF, configName);
-        params.set(CoreAdminParams.COLLECTION, collectionName);
-        params.set(CoreAdminParams.SHARD, replicaPosition.shard);
-        params.set(ZkStateReader.NUM_SHARDS_PROP, shardNames.size());
-        params.set(CoreAdminParams.NEW_COLLECTION, "true");
-        params.set(CoreAdminParams.REPLICA_TYPE, replicaPosition.type.name());
-
-        if (async != null) {
-          String coreAdminAsyncId = async + Math.abs(System.nanoTime());
-          params.add(ASYNC, coreAdminAsyncId);
-          requestMap.put(nodeName, coreAdminAsyncId);
-        }
-        ocmh.addPropertyParams(message, params);
-
-        ShardRequest sreq = new ShardRequest();
-        sreq.nodeName = nodeName;
-        params.set("qt", ocmh.adminPath);
-        sreq.purpose = 1;
-        sreq.shards = new String[]{baseUrl};
-        sreq.actualShards = sreq.shards;
-        sreq.params = params;
-
-        if (isLegacyCloud) {
-          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
-        } else {
-          coresToCreate.put(coreName, sreq);
-        }
-      }
-
-      if(!isLegacyCloud) {
-        // wait for all replica entries to be created
-        Map<String, Replica> replicas = ocmh.waitToSeeReplicasInState(collectionName, coresToCreate.keySet());
-        for (Map.Entry<String, ShardRequest> e : coresToCreate.entrySet()) {
-          ShardRequest sreq = e.getValue();
-          sreq.params.set(CoreAdminParams.CORE_NODE_NAME, replicas.get(e.getKey()).getName());
-          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
-        }
-      }
-
-      ocmh.processResponses(results, shardHandler, false, null, async, requestMap, Collections.emptySet());
-      if(results.get("failure") != null && ((SimpleOrderedMap)results.get("failure")).size() > 0) {
-        // Let's cleanup as we hit an exception
-        // We shouldn't be passing 'results' here for the cleanup as the response would then contain 'success'
-        // element, which may be interpreted by the user as a positive ack
-        ocmh.cleanupCollection(collectionName, new NamedList());
-        log.info("Cleaned up artifacts for failed create collection for [{}]", collectionName);
-      } else {
-        log.debug("Finished create command on all shards for collection: {}", collectionName);
-
-        // Emit a warning about production use of data driven functionality
-        boolean defaultConfigSetUsed = message.getStr(COLL_CONF) == null ||
-            message.getStr(COLL_CONF).equals(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
-        if (defaultConfigSetUsed) {
-          results.add("warning", "Using _default configset. Data driven schema functionality"
-              + " is enabled by default, which is NOT RECOMMENDED for production use. To turn it off:"
-              + " curl http://{host:port}/solr/" + collectionName + "/config -d '{\"set-user-property\": {\"update.autoCreateFields\":\"false\"}}'");
-        }
-      }
-    } catch (SolrException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, ex);
-    } finally {
-      if (sessionWrapper.get() != null) sessionWrapper.get().release();
-    }
-  }
-
-  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
-                                                            ZkNodeProps message,
-                                                            List<String> nodeList, List<String> shardNames,
-                                                            AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    final String collectionName = message.getStr(NAME);
-    // look at the replication factor and see if it matches reality
-    // if it does not, find best nodes to create more cores
-    int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
-    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
-    int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    String policy = message.getStr(Policy.POLICY);
-    boolean usePolicyFramework = !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || policy != null;
-
-    Integer numSlices = message.getInt(NUM_SLICES, null);
-    String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
-    if(ImplicitDocRouter.NAME.equals(router)){
-      ClusterStateMutator.getShardNames(shardNames, message.getStr("shards", null));
-      numSlices = shardNames.size();
-    } else {
-      if (numSlices == null ) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NUM_SLICES + " is a required param (when using CompositeId router).");
-      }
-      if (numSlices <= 0) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NUM_SLICES + " must be > 0");
-      }
-      ClusterStateMutator.getShardNames(numSlices, shardNames);
-    }
-
-    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
-    if (usePolicyFramework && message.getStr(MAX_SHARDS_PER_NODE) != null && maxShardsPerNode > 0) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "'maxShardsPerNode>0' is not supported when autoScaling policies are used");
-    }
-    if (maxShardsPerNode == -1 || usePolicyFramework) maxShardsPerNode = Integer.MAX_VALUE;
-    if (numNrtReplicas + numTlogReplicas <= 0) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
-    }
-
-    // we need to look at every node and see how many cores it serves
-    // add our new cores to existing nodes serving the least number of cores
-    // but (for now) require that each core goes on a distinct node.
-
-    List<ReplicaPosition> replicaPositions;
-    nodeList.addAll(Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, RANDOM));
-    if (nodeList.isEmpty()) {
-      log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
-
-      replicaPositions = new ArrayList<>();
-    } else {
-      int totalNumReplicas = numNrtReplicas + numTlogReplicas + numPullReplicas;
-      if (totalNumReplicas > nodeList.size()) {
-        log.warn("Specified number of replicas of "
-            + totalNumReplicas
-            + " on collection "
-            + collectionName
-            + " is higher than the number of Solr instances currently live or live and part of your " + CREATE_NODE_SET + "("
-            + nodeList.size()
-            + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
-      }
-
-      int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
-          Integer.MAX_VALUE :
-          maxShardsPerNode * nodeList.size();
-      int requestedShardsToCreate = numSlices * totalNumReplicas;
-      if (maxShardsAllowedToCreate < requestedShardsToCreate) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
-            + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
-            + ", and the number of nodes currently live or live and part of your "+CREATE_NODE_SET+" is " + nodeList.size()
-            + ". This allows a maximum of " + maxShardsAllowedToCreate
-            + " to be created. Value of " + NUM_SLICES + " is " + numSlices
-            + ", value of " + NRT_REPLICAS + " is " + numNrtReplicas
-            + ", value of " + TLOG_REPLICAS + " is " + numTlogReplicas
-            + " and value of " + PULL_REPLICAS + " is " + numPullReplicas
-            + ". This requires " + requestedShardsToCreate
-            + " shards to be created (higher than the allowed number)");
-      }
-      replicaPositions = Assign.identifyNodes(cloudManager
-          , clusterState, nodeList, collectionName, message, shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas);
-      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-    }
-    return replicaPositions;
-  }
-
-  String getConfigName(String coll, ZkNodeProps message) throws KeeperException, InterruptedException {
-    String configName = message.getStr(COLL_CONF);
-
-    if (configName == null) {
-      // if there is only one conf, use that
-      List<String> configNames = null;
-      try {
-        configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
-        if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
-          if (!CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
-            copyDefaultConfigSetTo(configNames, coll);
-          }
-          return coll;
-        } else if (configNames != null && configNames.size() == 1) {
-          configName = configNames.get(0);
-          // no config set named, but there is only 1 - use it
-          log.info("Only one config set found in zk - using it:" + configName);
-        }
-      } catch (KeeperException.NoNodeException e) {
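-        // no configs node in ZK yet; configName stays null and call() rejects the request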
-
-      }
-    }
-    return "".equals(configName)? null: configName;
-  }
-  
-  /**
-   * Copies the _default configset to the specified configset name (overwrites if pre-existing)
-   */
-  private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
-    ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
-
-    // if a configset named coll exists, delete the configset so that _default can be copied over
-    if (configNames.contains(targetConfig)) {
-      log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
-          ", deleting it so that we can copy the _default configs over and create the collection.");
-      try {
-        configManager.deleteConfigDir(targetConfig);
-      } catch (Exception e) {
-        throw new SolrException(ErrorCode.INVALID_STATE, "Error while deleting configset: " + targetConfig, e);
-      }
-    } else {
-      log.info("Only _default config set found, using it.");
-    }
-    // Copy _default into targetConfig
-    try {
-      configManager.copyConfigDir(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME, targetConfig, new HashSet<>());
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.INVALID_STATE, "Error while copying _default to " + targetConfig, e);
-    }
-  }
-
-  public static void createCollectionZkNode(DistribStateManager stateManager, String collection, Map<String,String> params) {
-    log.debug("Check for collection zkNode:" + collection);
-    String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
-
-    try {
-      if (!stateManager.hasData(collectionPath)) {
-        log.debug("Creating collection in ZooKeeper:" + collection);
-
-        try {
-          Map<String,Object> collectionProps = new HashMap<>();
-
-          // TODO: if collection.configName isn't set, and there isn't already a conf in zk, just use that?
-          String defaultConfigName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX + ZkController.CONFIGNAME_PROP, collection);
-
-          if (params.size() > 0) {
-            collectionProps.putAll(params);
-            // if the config name wasn't passed in, use the default
-            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP)) {
-              // users can create the collection node and conf link ahead of time, or this may return another option
-              getConfName(stateManager, collection, collectionPath, collectionProps);
-            }
-
-          } else if (System.getProperty("bootstrap_confdir") != null) {
-            // if we are bootstrapping a collection, default the config for
-            // a new collection to the collection we are bootstrapping
-            log.info("Setting config for collection:" + collection + " to " + defaultConfigName);
-
-            Properties sysProps = System.getProperties();
-            for (String sprop : System.getProperties().stringPropertyNames()) {
-              if (sprop.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
-                collectionProps.put(sprop.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), sysProps.getProperty(sprop));
-              }
-            }
-
-            // if the config name wasn't passed in, use the default
-            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP))
-              collectionProps.put(ZkController.CONFIGNAME_PROP, defaultConfigName);
-
-          } else if (Boolean.getBoolean("bootstrap_conf")) {
-            // the conf name should be the collection name of this core
-            collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
-          } else {
-            getConfName(stateManager, collection, collectionPath, collectionProps);
-          }
-
-          collectionProps.remove(ZkStateReader.NUM_SHARDS_PROP);  // we don't put numShards in the collections properties
-
-          ZkNodeProps zkProps = new ZkNodeProps(collectionProps);
-          stateManager.makePath(collectionPath, Utils.toJSON(zkProps), CreateMode.PERSISTENT, false);
-
-        } catch (KeeperException e) {
-          // it's okay if the node already exists
-          if (e.code() != KeeperException.Code.NODEEXISTS) {
-            throw e;
-          }
-        } catch (AlreadyExistsException e) {
-          // it's okay if the node already exists
-        }
-      } else {
-        log.debug("Collection zkNode exists");
-      }
-
-    } catch (KeeperException e) {
-      // it's okay if another beats us creating the node
-      if (e.code() == KeeperException.Code.NODEEXISTS) {
-        return;
-      }
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    } catch (IOException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt(); // restore the interrupt flag instead of clearing it
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
-    }
-
-  }
-  
-  private static void getConfName(DistribStateManager stateManager, String collection, String collectionPath, Map<String,Object> collectionProps) throws IOException,
-      KeeperException, InterruptedException {
-    // check for configName
-    log.debug("Looking for collection configName");
-    if (collectionProps.containsKey("configName")) {
-      log.info("configName was passed as a param {}", collectionProps.get("configName"));
-      return;
-    }
-
-    List<String> configNames = null;
-    int retry = 1;
-    int retryLimit = 6;
-    for (; retry < retryLimit; retry++) {
-      if (stateManager.hasData(collectionPath)) {
-        VersionedData data = stateManager.getData(collectionPath);
-        ZkNodeProps cProps = ZkNodeProps.load(data.getData());
-        if (cProps.containsKey(ZkController.CONFIGNAME_PROP)) {
-          break;
-        }
-      }
-
-      try {
-        configNames = stateManager.listData(ZkConfigManager.CONFIGS_ZKNODE);
-      } catch (NoSuchElementException | NoNodeException e) {
-        // just keep trying
-      }
-
-      // check if there's a config set with the same name as the collection
-      if (configNames != null && configNames.contains(collection)) {
-        log.info(
-            "Could not find explicit collection configName, but found config name matching collection name - using that set.");
-        collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
-        break;
-      }
-      // if _default exists, use that
-      if (configNames != null && configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
-        log.info(
-            "Could not find explicit collection configName, but found _default config set - using that set.");
-        collectionProps.put(ZkController.CONFIGNAME_PROP, ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
-        break;
-      }
-      // if there is only one conf, use that
-      if (configNames != null && configNames.size() == 1) {
-        // no config set named, but there is only 1 - use it
-        log.info("Only one config set found in zk - using it:" + configNames.get(0));
-        collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
-        break;
-      }
-
-      log.info("Could not find collection configName - pausing for 3 seconds and trying again - try: " + retry);
-      Thread.sleep(3000);
-    }
-    if (retry == retryLimit) {
-      log.error("Could not find configName for collection " + collection);
-      throw new ZooKeeperException(
-          SolrException.ErrorCode.SERVER_ERROR,
-          "Could not find configName for collection " + collection + " found:" + configNames);
-    }
-  }
-}
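
The buildReplicaPositions check above enforces numSlices * totalReplicas <= maxShardsPerNode *
liveNodes. A minimal SolrJ sketch that satisfies the bound on a single live node (names and
sizes are hypothetical): 2 shards x 1 NRT replica = 2 cores, within maxShardsPerNode(2) x 1 node.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class CreateCollectionExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // 2 shards x 1 replica = 2 cores <= maxShardsPerNode(2) x 1 live node
      CollectionAdminRequest.createCollection("myCollection", "_default", 2, 1)
          .setMaxShardsPerNode(2)
          .process(client);
    }
  }
}

Note that using the _default configset also triggers the data-driven-schema warning emitted at
the end of call() above.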

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
deleted file mode 100644
index c6afdcc..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateShardCmd.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.SolrCloseableLatch;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.Assign.getNodesForNewReplicas;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.RANDOM;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class CreateShardCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String sliceName = message.getStr(SHARD_ID_PROP);
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-
-    log.info("Create shard invoked: {}", message);
-    if (collectionName == null || sliceName == null)
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
-
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
-    SolrCloseableLatch countDownLatch;
-    try {
-      List<ReplicaPosition> positions = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, sessionWrapper);
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
-      // wait for a while until we see the shard
-      ocmh.waitForNewShard(collectionName, sliceName);
-
-      String async = message.getStr(ASYNC);
-      countDownLatch = new SolrCloseableLatch(positions.size(), ocmh);
-      for (ReplicaPosition position : positions) {
-        String nodeName = position.node;
-        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(), collection, sliceName, position.type);
-        log.info("Creating replica " + coreName + " as part of slice " + sliceName + " of collection " + collectionName
-            + " on " + nodeName);
-
-        // Need to create new params for each request
-        ZkNodeProps addReplicasProps = new ZkNodeProps(
-            COLLECTION_PROP, collectionName,
-            SHARD_ID_PROP, sliceName,
-            ZkStateReader.REPLICA_TYPE, position.type.name(),
-            CoreAdminParams.NODE, nodeName,
-            CoreAdminParams.NAME, coreName,
-            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        Map<String, Object> propertyParams = new HashMap<>();
-        ocmh.addPropertyParams(message, propertyParams);
-        addReplicasProps = addReplicasProps.plus(propertyParams);
-        if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
-        final NamedList addResult = new NamedList();
-        ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, () -> {
-          countDownLatch.countDown();
-          Object addResultFailure = addResult.get("failure");
-          if (addResultFailure != null) {
-            SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
-            if (failure == null) {
-              failure = new SimpleOrderedMap();
-              results.add("failure", failure);
-            }
-            failure.addAll((NamedList) addResultFailure);
-          } else {
-            SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
-            if (success == null) {
-              success = new SimpleOrderedMap();
-              results.add("success", success);
-            }
-            success.addAll((NamedList) addResult.get("success"));
-          }
-        });
-      }
-    } finally {
-      if (sessionWrapper.get() != null) sessionWrapper.get().release();
-    }
-
-    log.debug("Waiting for create shard action to complete");
-    countDownLatch.await(5, TimeUnit.MINUTES);
-    log.debug("Finished waiting for create shard action to complete");
-
-    log.info("Finished create command on all shards for collection: " + collectionName);
-
-  }
-
-  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
-         String collectionName, ZkNodeProps message, AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
-    String sliceName = message.getStr(SHARD_ID_PROP);
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
-    int numPullReplicas = message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0));
-    int numTlogReplicas = message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0));
-    int totalReplicas = numNrtReplicas + numPullReplicas + numTlogReplicas;
-
-    if (numNrtReplicas + numTlogReplicas <= 0) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
-    }
-
-    Object createNodeSetStr = message.get(OverseerCollectionMessageHandler.CREATE_NODE_SET);
-
-    boolean usePolicyFramework = CloudUtil.usePolicyFramework(collection, cloudManager);
-    List<ReplicaPosition> positions;
-    if (usePolicyFramework) {
-      if (collection.getPolicyName() != null) message.getProperties().put(Policy.POLICY, collection.getPolicyName());
-      positions = Assign.identifyNodes(cloudManager,
-          clusterState,
-          Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, RANDOM),
-          collection.getName(),
-          message,
-          Collections.singletonList(sliceName),
-          numNrtReplicas,
-          numTlogReplicas,
-          numPullReplicas);
-      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
-    } else {
-      List<Assign.ReplicaCount> sortedNodeList = getNodesForNewReplicas(clusterState, collection.getName(), sliceName, totalReplicas,
-          createNodeSetStr, cloudManager);
-      int i = 0;
-      positions = new ArrayList<>();
-      for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
-          Replica.Type.TLOG, numTlogReplicas,
-          Replica.Type.PULL, numPullReplicas
-      ).entrySet()) {
-        for (int j = 0; j < e.getValue(); j++) {
-          positions.add(new ReplicaPosition(sliceName, j + 1, e.getKey(), sortedNodeList.get(i % sortedNodeList.size()).nodeName));
-          i++;
-        }
-      }
-    }
-    return positions;
-  }
-
-}
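
CreateShardCmd requires both 'collection' and 'shard'; in practice the Collections API only
permits CREATESHARD on collections that use the implicit router (compositeId collections grow
via SPLITSHARD instead). A hedged SolrJ sketch with hypothetical names:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class CreateShardExample {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // Shards can only be added explicitly to implicit-router collections.
      CollectionAdminRequest
          .createCollectionWithImplicitRouter("events", "_default", "shard1,shard2", 1)
          .process(client);
      CollectionAdminRequest.createShard("events", "shard3")
          .process(client);
    }
  }
}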

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/CreateSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/CreateSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/CreateSnapshotCmd.java
deleted file mode 100644
index 5de65a4..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/CreateSnapshotCmd.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.Replica.State;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
-import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.handler.component.ShardHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the functionality of creating a collection level snapshot.
- */
-public class CreateSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public CreateSnapshotCmd (OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName =  message.getStr(COLLECTION_PROP);
-    String commitName =  message.getStr(CoreAdminParams.COMMIT_NAME);
-    String asyncId = message.getStr(ASYNC);
-    SolrZkClient zkClient = this.ocmh.overseer.getZkController().getZkClient();
-    Date creationDate = new Date();
-
-    if(SolrSnapshotManager.snapshotExists(zkClient, collectionName, commitName)) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
-          + " already exists for collection " + collectionName);
-    }
-
-    log.info("Creating a snapshot for collection={} with commitName={}", collectionName, commitName);
-
-    // Create a node in ZK to store the collection level snapshot meta-data.
-    SolrSnapshotManager.createCollectionLevelSnapshot(zkClient, collectionName, new CollectionSnapshotMetaData(commitName));
-    log.info("Created a ZK path to store snapshot information for collection={} with commitName={}", collectionName, commitName);
-
-    Map<String, String> requestMap = new HashMap<>();
-    NamedList shardRequestResults = new NamedList();
-    Map<String, Slice> shardByCoreName = new HashMap<>();
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        if (replica.getState() != State.ACTIVE) {
-          log.info("Replica {} is not active. Hence not sending the createsnapshot request", replica.getCoreName());
-          continue; // Since replica is not active - no point sending a request.
-        }
-
-        String coreName = replica.getStr(CORE_NAME_PROP);
-
-        ModifiableSolrParams params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATESNAPSHOT.toString());
-        params.set(NAME, slice.getName());
-        params.set(CORE_NAME_PROP, coreName);
-        params.set(CoreAdminParams.COMMIT_NAME, commitName);
-
-        ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
-        log.debug("Sent createsnapshot request to core={} with commitName={}", coreName, commitName);
-
-        shardByCoreName.put(coreName, slice);
-      }
-    }
-
-    // At this point we want to make sure that at least one replica for every shard
-    // was able to create the snapshot. If that is not the case, we fail the request.
-    // This handles the situation where e.g. an entire shard is unavailable.
-    Set<String> failedShards = new HashSet<>();
-
-    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
-    NamedList success = (NamedList) shardRequestResults.get("success");
-    List<CoreSnapshotMetaData> replicas = new ArrayList<>();
-    if (success != null) {
-      for ( int i = 0 ; i < success.size() ; i++) {
-        NamedList resp = (NamedList)success.getVal(i);
-
-        // Check if this core is the leader for the shard. The idea here is that during the backup
-        // operation we prefer the snapshot of the "leader" replica since it is most likely
-        // to have the latest state.
-        String coreName = (String)resp.get(CoreAdminParams.CORE);
-        Slice slice = shardByCoreName.remove(coreName);
-        boolean leader = (slice.getLeader() != null && slice.getLeader().getCoreName().equals(coreName));
-        resp.add(SolrSnapshotManager.SHARD_ID, slice.getName());
-        resp.add(SolrSnapshotManager.LEADER, leader);
-
-        CoreSnapshotMetaData c = new CoreSnapshotMetaData(resp);
-        replicas.add(c);
-        log.info("Snapshot with commitName {} is created successfully for core {}", commitName, c.getCoreName());
-      }
-    }
-
-    if (!shardByCoreName.isEmpty()) { // One or more failures.
-      log.warn("Unable to create a snapshot with name {} for following cores {}", commitName, shardByCoreName.keySet());
-
-      // Count number of failures per shard.
-      Map<String, Integer> failuresByShardId = new HashMap<>();
-      for (Map.Entry<String,Slice> entry : shardByCoreName.entrySet()) {
-        int f = 0;
-        if (failuresByShardId.get(entry.getValue().getName()) != null) {
-          f = failuresByShardId.get(entry.getValue().getName());
-        }
-        failuresByShardId.put(entry.getValue().getName(), f + 1);
-      }
-
-      // Now that we know the number of failures per shard, we can figure out
-      // whether at least one replica per shard was able to create a snapshot.
-      DocCollection collectionStatus = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
-      for (Map.Entry<String,Integer> entry : failuresByShardId.entrySet()) {
-        int replicaCount = collectionStatus.getSlice(entry.getKey()).getReplicas().size();
-        if (replicaCount <= entry.getValue()) {
-          failedShards.add(entry.getKey());
-        }
-      }
-    }
-
-    if (failedShards.isEmpty()) { // No failures.
-      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Successful, creationDate, replicas);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
-      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
-          commitName, meta.toNamedList());
-    } else {
-      log.warn("Failed to create a snapshot for collection {} with commitName = {}. Snapshot could not be captured for following shards {}",
-          collectionName, commitName, failedShards);
-      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
-      // which cores have the named snapshot.
-      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Failed, creationDate, replicas);
-      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
-      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
-          commitName, meta.toNamedList());
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to create snapshot on shards " + failedShards);
-    }
-  }
-}
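
The per-shard success check above boils down to a small decision rule: the snapshot succeeds as long as no shard lost all of its replicas. A minimal standalone sketch of that rule follows; the two maps are hypothetical stand-ins for what the command derives from the ClusterState and the per-core snapshot responses.

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    final class SnapshotShardCheck {
      // Returns the shards for which every replica failed to snapshot.
      static Set<String> failedShards(Map<String, Integer> replicasByShard,
                                      Map<String, Integer> failuresByShard) {
        Set<String> failed = new HashSet<>();
        for (Map.Entry<String, Integer> e : failuresByShard.entrySet()) {
          int replicaCount = replicasByShard.getOrDefault(e.getKey(), 0);
          if (replicaCount <= e.getValue()) { // every replica of this shard failed
            failed.add(e.getKey());
          }
        }
        return failed; // an empty set means the snapshot is considered successful
      }
    }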

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteAliasCmd.java
deleted file mode 100644
index 9c9f1c6..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteAliasCmd.java
+++ /dev/null
@@ -1,43 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class DeleteAliasCmd implements OverseerCollectionMessageHandler.Cmd {
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteAliasCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    String aliasName = message.getStr(NAME);
-
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    zkStateReader.aliasesHolder.applyModificationAndExportToZk(a -> a.cloneWithCollectionAlias(aliasName, null));
-  }
-
-}
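
Client-side, this command is reached through the Collections API DELETEALIAS action. A minimal SolrJ usage sketch; the ZK address and alias name are placeholders:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteAliasExample {
      public static void main(String[] args) throws Exception {
        // Placeholders: point at a real ZK ensemble and an existing alias.
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:9983").build()) {
          CollectionAdminRequest.deleteAlias("myAlias").process(client);
        }
      }
    }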

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
deleted file mode 100644
index 8ee0168..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteCollectionCmd.java
+++ /dev/null
@@ -1,141 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.NonExistentCoreException;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.snapshots.SolrSnapshotManager;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public DeleteCollectionCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    Aliases aliases = zkStateReader.getAliases();
-    final String collection = message.getStr(NAME);
-    for (Map.Entry<String, List<String>> ent :  aliases.getCollectionAliasListMap().entrySet()) {
-      if (ent.getValue().contains(collection)) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Collection : " + collection + " is part of alias " + ent.getKey() + ". Remove or modify the alias before removing this collection.");
-      }
-    }
-
-    try {
-      // Remove the snapshots meta-data for this collection in ZK. Deleting actual index files
-      // should be taken care of as part of collection delete operation.
-      SolrZkClient zkClient = zkStateReader.getZkClient();
-      SolrSnapshotManager.cleanupCollectionLevelSnapshots(zkClient, collection);
-
-      if (zkStateReader.getClusterState().getCollectionOrNull(collection) == null) {
-        if (zkStateReader.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
-          // If the collection is not in the cluster state but is listed in ZK, do nothing;
-          // it will be removed in the finally block. We cannot continue, because the code
-          // below will error out if the collection is not in the cluster state.
-          return;
-        }
-      }
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
-      params.set(CoreAdminParams.DELETE_INSTANCE_DIR, true);
-      params.set(CoreAdminParams.DELETE_DATA_DIR, true);
-
-      String asyncId = message.getStr(ASYNC);
-      Map<String, String> requestMap = null;
-      if (asyncId != null) {
-        requestMap = new HashMap<>();
-      }
-
-      Set<String> okayExceptions = new HashSet<>(1);
-      okayExceptions.add(NonExistentCoreException.class.getName());
-
-      ocmh.collectionCmd(message, params, results, null, asyncId, requestMap, okayExceptions);
-
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETE.toLower(), NAME, collection);
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-      // wait for a while until we don't see the collection
-      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean removed = false;
-      while (! timeout.hasTimedOut()) {
-        timeout.sleep(100);
-        removed = !zkStateReader.getClusterState().hasCollection(collection);
-        if (removed) {
-          timeout.sleep(500); // a bit of extra time so it's more likely other readers see the removal on return
-          break;
-        }
-      }
-      if (!removed) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Could not fully remove collection: " + collection);
-      }
-
-    } finally {
-
-      try {
-        if (zkStateReader.getZkClient().exists(
-            ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
-          zkStateReader.getZkClient().clean(
-              ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection);
-        }
-      } catch (InterruptedException e) {
-        SolrException.log(log, "Cleaning up collection in zk was interrupted:"
-            + collection, e);
-        Thread.currentThread().interrupt();
-      } catch (KeeperException e) {
-        SolrException.log(log, "Problem cleaning up collection in zk:"
-            + collection, e);
-      }
-    }
-  }
-}
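
The delete path above ends with a poll loop rather than a ZK watch. The same pattern in isolation, as a sketch: a BooleanSupplier stands in for "collection no longer visible in the cluster state", and Thread.sleep stands in for the TimeSource-backed timeout.sleep used by the command.

    import java.util.function.BooleanSupplier;

    final class WaitForRemoval {
      // Poll until the condition holds or the deadline passes.
      static boolean waitUntil(BooleanSupplier removed, long timeoutMs) throws InterruptedException {
        long deadline = System.nanoTime() + timeoutMs * 1_000_000L;
        while (System.nanoTime() < deadline) {
          Thread.sleep(100); // same 100ms poll interval as the command
          if (removed.getAsBoolean()) {
            Thread.sleep(500); // extra time so other readers observe the removal
            return true;
          }
        }
        return false;
      }
    }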

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
deleted file mode 100644
index 51b0956..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteNodeCmd.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Locale;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ocmh.checkRequired(message, "node");
-    String node = message.getStr("node");
-    if (!state.liveNodesContain(node)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + node + " is not live");
-    }
-    List<ZkNodeProps> sourceReplicas = ReplaceNodeCmd.getReplicasOfNode(node, state);
-    List<String> singleReplicas = verifyReplicaAvailability(sourceReplicas, state);
-    if (!singleReplicas.isEmpty()) {
-      results.add("failure", "Can't delete the only existing non-PULL replica(s) on node " + node + ": " + singleReplicas.toString());
-    } else {
-      cleanupReplicas(results, state, sourceReplicas, ocmh, node, message.getStr(ASYNC));
-    }
-  }
-
-  // collect names of replicas that cannot be deleted
-  static List<String> verifyReplicaAvailability(List<ZkNodeProps> sourceReplicas, ClusterState state) {
-    List<String> res = new ArrayList<>();
-    for (ZkNodeProps sourceReplica : sourceReplicas) {
-      String coll = sourceReplica.getStr(COLLECTION_PROP);
-      String shard = sourceReplica.getStr(SHARD_ID_PROP);
-      String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
-      DocCollection collection = state.getCollection(coll);
-      Slice slice = collection.getSlice(shard);
-      if (slice.getReplicas().size() < 2) {
-        // can't delete the only replica in existence
-        res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
-      } else { // check replica types
-        int otherNonPullReplicas = 0;
-        for (Replica r : slice.getReplicas()) {
-          if (!r.getName().equals(replicaName) && !r.getType().equals(Replica.Type.PULL)) {
-            otherNonPullReplicas++;
-          }
-        }
-        // can't delete - there are no other non-pull replicas
-        if (otherNonPullReplicas == 0) {
-          res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
-        }
-      }
-    }
-    return res;
-  }
-
-  static void cleanupReplicas(NamedList results,
-                              ClusterState clusterState,
-                              List<ZkNodeProps> sourceReplicas,
-                              OverseerCollectionMessageHandler ocmh,
-                              String node,
-                              String async) throws InterruptedException {
-    CountDownLatch cleanupLatch = new CountDownLatch(sourceReplicas.size());
-    for (ZkNodeProps sourceReplica : sourceReplicas) {
-      String coll = sourceReplica.getStr(COLLECTION_PROP);
-      String shard = sourceReplica.getStr(SHARD_ID_PROP);
-      String type = sourceReplica.getStr(ZkStateReader.REPLICA_TYPE);
-      log.info("Deleting replica type={} for collection={} shard={} on node={}", type, coll, shard, node);
-      NamedList deleteResult = new NamedList();
-      try {
-        if (async != null) sourceReplica = sourceReplica.plus(ASYNC, async);
-        ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, sourceReplica.plus("parallel", "true"), deleteResult, () -> {
-          cleanupLatch.countDown();
-          if (deleteResult.get("failure") != null) {
-            synchronized (results) {
-
-              results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
-                  " on node=%s", coll, shard, node));
-            }
-          }
-        });
-      } catch (KeeperException e) {
-        log.warn("Error deleting ", e);
-        cleanupLatch.countDown();
-      } catch (Exception e) {
-        log.warn("Error deleting ", e);
-        cleanupLatch.countDown();
-        throw e;
-      }
-    }
-    log.debug("Waiting for delete node action to complete");
-    cleanupLatch.await(5, TimeUnit.MINUTES);
-  }
-
-
-}
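
The rule enforced by verifyReplicaAvailability() is easy to state on its own: a replica may only be removed if its shard keeps at least one other non-PULL replica. A sketch with simplified types; ReplicaInfo is a hypothetical stand-in for org.apache.solr.common.cloud.Replica.

    import java.util.Arrays;
    import java.util.List;

    final class DeleteNodeCheck {
      static final class ReplicaInfo {
        final String name;
        final boolean isPull;
        ReplicaInfo(String name, boolean isPull) { this.name = name; this.isPull = isPull; }
      }

      // A replica is deletable only if its shard keeps another non-PULL replica.
      static boolean canDelete(String replicaName, List<ReplicaInfo> shardReplicas) {
        if (shardReplicas.size() < 2) return false; // the only replica in existence
        int otherNonPull = 0;
        for (ReplicaInfo r : shardReplicas) {
          if (!r.name.equals(replicaName) && !r.isPull) otherNonPull++;
        }
        return otherNonPull > 0;
      }

      public static void main(String[] args) {
        List<ReplicaInfo> shard = Arrays.asList(
            new ReplicaInfo("core_node1", false), new ReplicaInfo("core_node2", true));
        System.out.println(canDelete("core_node1", shard)); // false: the only other replica is PULL
      }
    }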

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
deleted file mode 100644
index e71d7e8..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteReplicaCmd.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.ONLY_IF_DOWN;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-
-public class DeleteReplicaCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public DeleteReplicaCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    deleteReplica(clusterState, message, results, null);
-  }
-
-
-  @SuppressWarnings("unchecked")
-  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
-          throws KeeperException, InterruptedException {
-    log.debug("deleteReplica() : {}", Utils.toJSONString(message));
-    boolean parallel = message.getBool("parallel", false);
-
-    // If a count is specified, the strategy needs to be different.
-    if (message.getStr(COUNT_PROP) != null) {
-      deleteReplicaBasedOnCount(clusterState, message, results, onComplete, parallel);
-      return;
-    }
-
-
-    ocmh.checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP);
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String shard = message.getStr(SHARD_ID_PROP);
-    String replicaName = message.getStr(REPLICA_PROP);
-
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Slice slice = coll.getSlice(shard);
-    if (slice == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Invalid shard name : " +  shard + " in collection : " +  collectionName);
-    }
-
-    deleteCore(slice, collectionName, replicaName, message, shard, results, onComplete,  parallel);
-
-  }
-
-
-  /**
-   * Delete replicas based on count for a given collection. If a shard is specified, replicas
-   * are deleted only from that shard; otherwise the given number of replicas is deleted from
-   * every shard of the collection.
-   */
-  void deleteReplicaBasedOnCount(ClusterState clusterState,
-                                 ZkNodeProps message,
-                                 NamedList results,
-                                 Runnable onComplete,
-                                 boolean parallel)
-          throws KeeperException, InterruptedException {
-    ocmh.checkRequired(message, COLLECTION_PROP, COUNT_PROP);
-    int count = Integer.parseInt(message.getStr(COUNT_PROP));
-    String collectionName = message.getStr(COLLECTION_PROP);
-    String shard = message.getStr(SHARD_ID_PROP);
-    DocCollection coll = clusterState.getCollection(collectionName);
-    Slice slice = null;
-    // Validate the shard, if one is passed.
-    if (shard != null) {
-      slice = coll.getSlice(shard);
-      if (slice == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Invalid shard name : " +  shard +  " in collection : " + collectionName);
-      }
-    }
-
-    Map<Slice, Set<String>> shardToReplicasMapping = new HashMap<>();
-    if (slice != null) {
-      Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(slice, shard, collectionName, count);
-      shardToReplicasMapping.put(slice,replicasToBeDeleted);
-    } else {
-      // No shard specified: pick `count` replicas to delete from every shard of the collection.
-      Collection<Slice> allSlices = coll.getSlices();
-      for (Slice individualSlice : allSlices) {
-        Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(individualSlice, individualSlice.getName(), collectionName, count);
-        shardToReplicasMapping.put(individualSlice, replicasToBeDeleted);
-      }
-    }
-
-    for (Map.Entry<Slice, Set<String>> entry : shardToReplicasMapping.entrySet()) {
-      Slice shardSlice = entry.getKey();
-      String shardId = shardSlice.getName();
-      Set<String> replicas = entry.getValue();
-      // delete each of the chosen replicas
-      for (String replica : replicas) {
-        log.debug("Deleting replica {} for shard {} based on count {}", replica, shardId, count);
-        deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
-      }
-      results.add("shard_id", shardId);
-      results.add("replicas_deleted", replicas);
-    }
-
-  }
-
-
-  /**
-   * Pick replicas to be deleted. Avoid picking the leader.
-   */
-  private Set<String> pickReplicasTobeDeleted(Slice slice, String shard, String collectionName, int count) {
-    validateReplicaAvailability(slice, shard, collectionName, count);
-    Collection<Replica> allReplicas = slice.getReplicas();
-    Set<String> replicasToBeRemoved = new HashSet<>();
-    Replica leader = slice.getLeader();
-    for (Replica replica: allReplicas) {
-      if (count == 0) {
-        break;
-      }
-      // Avoid picking the leader, to minimize activity on the cluster.
-      if (leader.getCoreName().equals(replica.getCoreName())) {
-        continue;
-      }
-      replicasToBeRemoved.add(replica.getName());
-      count--;
-    }
-    return replicasToBeRemoved;
-  }
-
-  /**
-   * Validate that the shard has more replicas available than the number requested for
-   * deletion. Also error out if there is only one replica available.
-   */
-  private void validateReplicaAvailability(Slice slice, String shard, String collectionName, int count) {
-    // If a specific shard is passed, validate that it still has replicas, and more than just one.
-    if (slice != null) {
-      Collection<Replica> allReplicasForShard = slice.getReplicas();
-      if (allReplicasForShard == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No replicas found  in shard/collection: " +
-                shard + "/"  + collectionName);
-      }
-
-      if (allReplicasForShard.size() == 1) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There is only one replica available in shard/collection: " +
-                shard + "/" + collectionName + ". Cannot delete that.");
-      }
-
-      if (allReplicasForShard.size() <= count) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There are lesser num replicas requested to be deleted than are available in shard/collection : " +
-                shard + "/"  + collectionName  + " Requested: "  + count + " Available: " + allReplicasForShard.size() + ".");
-      }
-    }
-  }
-
-  void deleteCore(Slice slice, String collectionName, String replicaName, ZkNodeProps message, String shard, NamedList results, Runnable onComplete, boolean parallel) throws KeeperException, InterruptedException {
-
-    Replica replica = slice.getReplica(replicaName);
-    if (replica == null) {
-      ArrayList<String> l = new ArrayList<>();
-      for (Replica r : slice.getReplicas())
-        l.add(r.getName());
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " +  replicaName + " in shard/collection : " +
-              shard  + "/" + collectionName + " available replicas are " +  StrUtils.join(l, ','));
-    }
-
-    // If users are being safe and only want to remove a replica if it is down, they can specify onlyIfDown=true
-    // on the command.
-    if (Boolean.parseBoolean(message.getStr(ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Attempted to remove replica : " + collectionName + "/"  + shard + "/" + replicaName +
-              " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
-    }
-
-    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-    String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
-    String asyncId = message.getStr(ASYNC);
-    AtomicReference<Map<String, String>> requestMap = new AtomicReference<>(null);
-    if (asyncId != null) {
-      requestMap.set(new HashMap<>(1, 1.0f));
-    }
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
-    params.add(CoreAdminParams.CORE, core);
-
-    params.set(CoreAdminParams.DELETE_INDEX, message.getBool(CoreAdminParams.DELETE_INDEX, true));
-    params.set(CoreAdminParams.DELETE_INSTANCE_DIR, message.getBool(CoreAdminParams.DELETE_INSTANCE_DIR, true));
-    params.set(CoreAdminParams.DELETE_DATA_DIR, message.getBool(CoreAdminParams.DELETE_DATA_DIR, true));
-
-    boolean isLive = ocmh.zkStateReader.getClusterState().getLiveNodes().contains(replica.getNodeName());
-    if (isLive) {
-      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap.get());
-    }
-
-    Callable<Boolean> callable = () -> {
-      try {
-        if (isLive) {
-          ocmh.processResponses(results, shardHandler, false, null, asyncId, requestMap.get());
-
-          // check whether the core unload removed the core node's ZK entry
-          if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 5000)) return Boolean.TRUE;
-        }
-
-        // try and ensure core info is removed from cluster state
-        ocmh.deleteCoreNode(collectionName, replicaName, replica, core);
-        if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return Boolean.TRUE;
-        return Boolean.FALSE;
-      } catch (Exception e) {
-        results.add("failure", "Could not complete delete " + e.getMessage());
-        throw e;
-      } finally {
-        if (onComplete != null) onComplete.run();
-      }
-    };
-
-    if (!parallel) {
-      try {
-        if (!callable.call())
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                  "Could not remove replica : " + collectionName + "/" + shard + "/" + replicaName);
-      } catch (InterruptedException | KeeperException e) {
-        throw e;
-      } catch (Exception ex) {
-        throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Error waiting for corenode gone", ex);
-      }
-
-    } else {
-      ocmh.tpe.submit(callable);
-    }
-
-  }
-
-}
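
The selection logic in pickReplicasTobeDeleted() reduces to: take up to `count` replicas, never the leader. A standalone sketch under that reading, with plain names in place of the Slice/Replica types:

    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    final class ReplicaPicker {
      // Pick up to `count` replica names, skipping the leader so that
      // leadership does not have to move as part of the delete.
      static Set<String> pick(List<String> replicaNames, String leaderName, int count) {
        Set<String> victims = new LinkedHashSet<>();
        for (String name : replicaNames) {
          if (victims.size() == count) break;
          if (name.equals(leaderName)) continue; // avoid the leader
          victims.add(name);
        }
        return victims;
      }

      public static void main(String[] args) {
        System.out.println(pick(Arrays.asList("r1", "r2", "r3"), "r2", 2)); // [r1, r3]
      }
    }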

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
deleted file mode 100644
index 58c4e63..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/DeleteShardCmd.java
+++ /dev/null
@@ -1,178 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class DeleteShardCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-  private final TimeSource timeSource;
-
-  public DeleteShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-    this.timeSource = ocmh.cloudManager.getTimeSource();
-  }
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
-    String sliceId = message.getStr(ZkStateReader.SHARD_ID_PROP);
-
-    log.info("Delete shard invoked");
-    Slice slice = clusterState.getCollection(collectionName).getSlice(sliceId);
-    if (slice == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-        "No shard with name " + sliceId + " exists for collection " + collectionName);
-
-    // For now, only allow for deletions of Inactive slices or custom hashes (range==null).
-    // TODO: Add check for range gaps on Slice deletion
-    final Slice.State state = slice.getState();
-    if (!(slice.getRange() == null || state == Slice.State.INACTIVE || state == Slice.State.RECOVERY
-        || state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY_FAILED)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The slice: " + slice.getName() + " is currently " + state
-          + ". Only non-active (or custom-hashed) slices can be deleted.");
-    }
-
-    if (state == Slice.State.RECOVERY)  {
-      // mark the slice as 'construction' and only then try to delete the cores
-      // see SOLR-9455
-      DistributedQueue inQueue = Overseer.getStateUpdateQueue(ocmh.zkStateReader.getZkClient());
-      Map<String, Object> propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-      propMap.put(sliceId, Slice.State.CONSTRUCTION.toString());
-      propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-      ZkNodeProps m = new ZkNodeProps(propMap);
-      inQueue.offer(Utils.toJSON(m));
-    }
-
-    String asyncId = message.getStr(ASYNC);
-
-    try {
-      List<ZkNodeProps> replicas = getReplicasForSlice(collectionName, slice);
-      CountDownLatch cleanupLatch = new CountDownLatch(replicas.size());
-      for (ZkNodeProps r : replicas) {
-        final ZkNodeProps replica = r.plus(message.getProperties()).plus("parallel", "true").plus(ASYNC, asyncId);
-        log.info("Deleting replica for collection={} shard={} on node={}", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(CoreAdminParams.NODE));
-        NamedList deleteResult = new NamedList();
-        try {
-          ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, replica, deleteResult, () -> {
-            cleanupLatch.countDown();
-            if (deleteResult.get("failure") != null) {
-              synchronized (results) {
-                results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
-                    " on node=%s", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(NODE_NAME_PROP)));
-              }
-            }
-            SimpleOrderedMap success = (SimpleOrderedMap) deleteResult.get("success");
-            if (success != null) {
-              synchronized (results)  {
-                results.add("success", success);
-              }
-            }
-          });
-        } catch (KeeperException e) {
-          log.warn("Error deleting replica: " + r, e);
-          cleanupLatch.countDown();
-        } catch (Exception e) {
-          log.warn("Error deleting replica: " + r, e);
-          cleanupLatch.countDown();
-          throw e;
-        }
-      }
-      log.debug("Waiting for delete shard action to complete");
-      cleanupLatch.await(5, TimeUnit.MINUTES);
-
-      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETESHARD.toLower(), ZkStateReader.COLLECTION_PROP,
-          collectionName, ZkStateReader.SHARD_ID_PROP, sliceId);
-      ZkStateReader zkStateReader = ocmh.zkStateReader;
-      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
-
-      // wait for a while until we don't see the shard
-      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
-      boolean removed = false;
-      while (!timeout.hasTimedOut()) {
-        timeout.sleep(100);
-        DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
-        removed = collection.getSlice(sliceId) == null;
-        if (removed) {
-          timeout.sleep(100); // just a bit of time so it's more likely other readers see on return
-          break;
-        }
-      }
-      if (!removed) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Could not fully remove collection: " + collectionName + " shard: " + sliceId);
-      }
-
-      log.info("Successfully deleted collection: " + collectionName + ", shard: " + sliceId);
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Error executing delete operation for collection: " + collectionName + " shard: " + sliceId, e);
-    }
-  }
-
-  private List<ZkNodeProps> getReplicasForSlice(String collectionName, Slice slice) {
-    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
-    for (Replica replica : slice.getReplicas()) {
-      ZkNodeProps props = new ZkNodeProps(
-          COLLECTION_PROP, collectionName,
-          SHARD_ID_PROP, slice.getName(),
-          ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
-          ZkStateReader.REPLICA_PROP, replica.getName(),
-          CoreAdminParams.NODE, replica.getNodeName());
-      sourceReplicas.add(props);
-    }
-    return sourceReplicas;
-  }
-}
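
From a client, the shard-delete path above is driven through the Collections API DELETESHARD action. A minimal SolrJ sketch; the collection name, shard name, and ZK address are placeholders:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteShardExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("localhost:9983").build()) {
          // Only non-active (inactive/construction/recovery) or custom-hashed
          // slices are accepted; deleting an active range-owning slice fails.
          CollectionAdminRequest.deleteShard("myCollection", "shard1_0").process(client);
        }
      }
    }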


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/RoutedAliasCreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/RoutedAliasCreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/RoutedAliasCreateCollectionCmd.java
deleted file mode 100644
index 607588c..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/RoutedAliasCreateCollectionCmd.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Aliases;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.handler.admin.CollectionsHandler;
-import org.apache.solr.request.LocalSolrQueryRequest;
-import org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor;
-import org.apache.solr.util.TimeZoneUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_FIELD_METADATA;
-import static org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessor.ROUTER_INTERVAL_METADATA;
-
-/**
- * For "routed aliases", creates another collection and adds it to the alias when a new one
- * is needed. If a collection is created, then the collection creation info is returned.
- *
- * Note: this logic is within an Overseer because we want to leverage the mutual exclusion
- * property afforded by the lock it obtains on the alias name.
- * @since 7.3
- */
-public class RoutedAliasCreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String IF_MOST_RECENT_COLL_NAME = "ifMostRecentCollName";
-
-  public static final String COLL_METAPREFIX = "collection-create.";
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public RoutedAliasCreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  /* TODO:
-  There are a few classes related to time routed alias processing.  We need to share some logic better.
-   */
-
-
-  @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    //---- PARSE PRIMARY MESSAGE PARAMS
-    // important that we use NAME for the alias as that is what the Overseer will get a lock on before calling us
-    final String aliasName = message.getStr(NAME);
-    // the client believes this is the mostRecent collection name.  We assert this if provided.
-    final String ifMostRecentCollName = message.getStr(IF_MOST_RECENT_COLL_NAME); // optional
-
-    // TODO collection param (or intervalDateMath override?), useful for data capped collections
-
-    //---- PARSE ALIAS INFO FROM ZK
-    final ZkStateReader.AliasesManager aliasesHolder = ocmh.zkStateReader.aliasesHolder;
-    final Aliases aliases = aliasesHolder.getAliases();
-    final Map<String, String> aliasMetadata = aliases.getCollectionAliasMetadata(aliasName);
-    if (aliasMetadata == null) {
-      throw newAliasMustExistException(aliasName); // if it did exist, we'd have a non-null map
-    }
-
-    String routeField = aliasMetadata.get(ROUTER_FIELD_METADATA);
-    if (routeField == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "This command only works on time routed aliases.  Expected alias metadata not found.");
-    }
-    String intervalDateMath = aliasMetadata.getOrDefault(ROUTER_INTERVAL_METADATA, "+1DAY");
-    TimeZone intervalTimeZone = TimeZoneUtils.parseTimezone(aliasMetadata.get(CommonParams.TZ));
-
-    //TODO this is ugly; how can we organize the code related to this feature better?
-    final List<Map.Entry<Instant, String>> parsedCollections =
-        TimeRoutedAliasUpdateProcessor.parseCollections(aliasName, aliases, () -> newAliasMustExistException(aliasName));
-
-    //---- GET MOST RECENT COLL
-    final Map.Entry<Instant, String> mostRecentEntry = parsedCollections.get(0);
-    final Instant mostRecentCollTimestamp = mostRecentEntry.getKey();
-    final String mostRecentCollName = mostRecentEntry.getValue();
-    if (ifMostRecentCollName != null) {
-      if (!mostRecentCollName.equals(ifMostRecentCollName)) {
-        // Possibly due to race conditions in URPs on multiple leaders calling us at the same time
-        String msg = IF_MOST_RECENT_COLL_NAME + " expected " + ifMostRecentCollName + " but it's " + mostRecentCollName;
-        if (parsedCollections.stream().map(Map.Entry::getValue).noneMatch(ifMostRecentCollName::equals)) {
-          msg += ". Furthermore this collection isn't in the list of collections referenced by the alias.";
-        }
-        log.info(msg);
-        results.add("message", msg);
-        return;
-      }
-    } else if (mostRecentCollTimestamp.isAfter(Instant.now())) {
-      final String msg = "Most recent collection is in the future, so we won't create another.";
-      log.info(msg);
-      results.add("message", msg);
-      return;
-    }
-
-    //---- COMPUTE NEXT COLLECTION NAME
-    final Instant nextCollTimestamp = TimeRoutedAliasUpdateProcessor.computeNextCollTimestamp(mostRecentCollTimestamp, intervalDateMath, intervalTimeZone);
-    assert nextCollTimestamp.isAfter(mostRecentCollTimestamp);
-    final String createCollName = TimeRoutedAliasUpdateProcessor.formatCollectionNameFromInstant(aliasName, nextCollTimestamp);
-
-    //---- CREATE THE COLLECTION
-    // Map alias metadata starting with a prefix to a create-collection API request
-    final ModifiableSolrParams createReqParams = new ModifiableSolrParams();
-    for (Map.Entry<String, String> e : aliasMetadata.entrySet()) {
-      if (e.getKey().startsWith(COLL_METAPREFIX)) {
-        createReqParams.set(e.getKey().substring(COLL_METAPREFIX.length()), e.getValue());
-      }
-    }
-    if (createReqParams.get(COLL_CONF) == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "We require an explicit " + COLL_CONF );
-    }
-    createReqParams.set(NAME, createCollName);
-    createReqParams.set("property." + TimeRoutedAliasUpdateProcessor.TIME_PARTITION_ALIAS_NAME_CORE_PROP, aliasName);
-    // a CollectionOperation reads params and produces a message (Map) that is supposed to be sent to the Overseer.
-    //   Although we could create the Map without it, there are a fair amount of rules we don't want to reproduce.
-    final Map<String, Object> createMsgMap = CollectionsHandler.CollectionOperation.CREATE_OP.execute(
-        new LocalSolrQueryRequest(null, createReqParams),
-        null,
-        ocmh.overseer.getCoreContainer().getCollectionsHandler());
-    createMsgMap.put(Overseer.QUEUE_OPERATION, "create");
-    // Since we are running in the Overseer here, send the message directly to the Overseer CreateCollectionCmd
-    ocmh.commandMap.get(CollectionParams.CollectionAction.CREATE).call(clusterState, new ZkNodeProps(createMsgMap), results);
-
-    CollectionsHandler.waitForActiveCollection(createCollName, null, ocmh.overseer.getCoreContainer(), new OverseerSolrResponse(results));
-
-    //TODO delete some of the oldest collection(s) ?
-
-    //---- UPDATE THE ALIAS
-    aliasesHolder.applyModificationAndExportToZk(curAliases -> {
-      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
-      if (curTargetCollections.contains(createCollName)) {
-        return curAliases;
-      } else {
-        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
-        // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the first collection in a list
-        newTargetCollections.add(createCollName);
-        newTargetCollections.addAll(curTargetCollections);
-        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
-      }
-    });
-
-  }
-
-  private SolrException newAliasMustExistException(String aliasName) {
-    return new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-        "Alias " + aliasName + " does not exist.");
-  }
-
-}
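
The name computation above (advance the most recent collection's timestamp by the router interval, then format "<alias>_<time>") can be sketched without the TimeRoutedAliasUpdateProcessor helpers. A fixed day-granularity pattern and a plain Duration are assumed here purely for illustration; the real code parses date math such as "+1DAY" and owns the exact name format.

    import java.time.Duration;
    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    final class NextCollName {
      // Assumed naming scheme "<alias>_<yyyy-MM-dd>"; the actual pattern and
      // interval handling live in TimeRoutedAliasUpdateProcessor.
      private static final DateTimeFormatter FMT =
          DateTimeFormatter.ofPattern("yyyy-MM-dd").withZone(ZoneOffset.UTC);

      static String next(String alias, Instant mostRecentCollTimestamp, Duration interval) {
        Instant next = mostRecentCollTimestamp.plus(interval);
        assert next.isAfter(mostRecentCollTimestamp);
        return alias + "_" + FMT.format(next);
      }

      public static void main(String[] args) {
        System.out.println(next("logs", Instant.parse("2018-01-16T00:00:00Z"), Duration.ofDays(1)));
        // -> logs_2018-01-17
      }
    }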

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
deleted file mode 100644
index 9732616..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/SplitShardCmd.java
+++ /dev/null
@@ -1,542 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.solr.client.solrj.cloud.DistributedQueue;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
-import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CompositeIdRouter;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.DocRouter;
-import org.apache.solr.common.cloud.PlainIdRouter;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ReplicaPosition;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.handler.component.ShardHandler;
-import org.apache.solr.util.TestInjection;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-
-public class SplitShardCmd implements Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public SplitShardCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    split(state, message, results);
-  }
-
-  public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
-    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
-    String collectionName = message.getStr(CoreAdminParams.COLLECTION);
-
-    log.info("Split shard invoked");
-    ZkStateReader zkStateReader = ocmh.zkStateReader;
-    zkStateReader.forceUpdateCollection(collectionName);
-    AtomicReference<String> slice = new AtomicReference<>();
-    slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
-
-    String splitKey = message.getStr("split.key");
-    DocCollection collection = clusterState.getCollection(collectionName);
-
-    PolicyHelper.SessionWrapper sessionWrapper = null;
-
-    Slice parentSlice = getParentSlice(clusterState, collectionName, slice, splitKey);
-
-    // find the leader for the shard
-    Replica parentShardLeader = null;
-    try {
-      parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice.get(), 10000);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-    }
-
-    // let's record the ephemeralOwner of the parent leader node
-    Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
-    if (leaderZnodeStat == null)  {
-      // we just got to know the leader but its live node is gone already!
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
-    }
-
-    List<DocRouter.Range> subRanges = new ArrayList<>();
-    List<String> subSlices = new ArrayList<>();
-    List<String> subShardNames = new ArrayList<>();
-
-    String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames);
-
-    try {
-
-      boolean oldShardsDeleted = false;
-      for (String subSlice : subSlices) {
-        Slice oSlice = collection.getSlice(subSlice);
-        if (oSlice != null) {
-          final Slice.State state = oSlice.getState();
-          if (state == Slice.State.ACTIVE) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
-          } else if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
-            // delete the shards
-            log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
-            Map<String, Object> propMap = new HashMap<>();
-            propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
-            propMap.put(COLLECTION_PROP, collectionName);
-            propMap.put(SHARD_ID_PROP, subSlice);
-            ZkNodeProps m = new ZkNodeProps(propMap);
-            try {
-              ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
-            } catch (Exception e) {
-              throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
-                  e);
-            }
-
-            oldShardsDeleted = true;
-          }
-        }
-      }
-
-      if (oldShardsDeleted) {
-        // refresh the locally cached cluster state
-        // we know we have the latest because otherwise deleteshard would have failed
-        clusterState = zkStateReader.getClusterState();
-        collection = clusterState.getCollection(collectionName);
-      }
-
-      final String asyncId = message.getStr(ASYNC);
-      Map<String, String> requestMap = new HashMap<>();
-      String nodeName = parentShardLeader.getNodeName();
-
-      for (int i = 0; i < subRanges.size(); i++) {
-        String subSlice = subSlices.get(i);
-        String subShardName = subShardNames.get(i);
-        DocRouter.Range subRange = subRanges.get(i);
-
-        log.info("Creating slice " + subSlice + " of collection " + collectionName + " on " + nodeName);
-
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
-        propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
-        propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
-        propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
-        propMap.put("shard_parent_node", parentShardLeader.getNodeName());
-        propMap.put("shard_parent_zk_session", leaderZnodeStat.getEphemeralOwner());
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
-
-        // wait until we are able to see the new shard in cluster state
-        ocmh.waitForNewShard(collectionName, subSlice);
-
-        // refresh cluster state
-        clusterState = zkStateReader.getClusterState();
-
-        log.info("Adding replica " + subShardName + " as part of slice " + subSlice + " of collection " + collectionName
-            + " on " + nodeName);
-        propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-        propMap.put(COLLECTION_PROP, collectionName);
-        propMap.put(SHARD_ID_PROP, subSlice);
-        propMap.put("node", nodeName);
-        propMap.put(CoreAdminParams.NAME, subShardName);
-        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        // copy over property params:
-        for (String key : message.keySet()) {
-          if (key.startsWith(COLL_PROP_PREFIX)) {
-            propMap.put(key, message.getStr(key));
-          }
-        }
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), results, null);
-      }
-
-      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard leaders", asyncId, requestMap);
-
-      for (String subShardName : subShardNames) {
-        // wait for parent leader to acknowledge the sub-shard core
-        log.info("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
-        String coreNodeName = ocmh.waitForCoreNodeName(collectionName, nodeName, subShardName);
-        CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
-        cmd.setCoreName(subShardName);
-        cmd.setNodeName(nodeName);
-        cmd.setCoreNodeName(coreNodeName);
-        cmd.setState(Replica.State.ACTIVE);
-        cmd.setCheckLive(true);
-        cmd.setOnlyIfLeader(true);
-
-        ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
-        ocmh.sendShardRequest(nodeName, p, shardHandler, asyncId, requestMap);
-      }
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD timed out waiting for subshard leaders to come up",
-          asyncId, requestMap);
-
-      log.info("Successfully created all sub-shards for collection " + collectionName + " parent shard: " + slice
-          + " on: " + parentShardLeader);
-
-      log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice " + slice + " of collection "
-          + collectionName + " on " + parentShardLeader);
-
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
-      params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
-      for (int i = 0; i < subShardNames.size(); i++) {
-        String subShardName = subShardNames.get(i);
-        params.add(CoreAdminParams.TARGET_CORE, subShardName);
-      }
-      params.set(CoreAdminParams.RANGES, rangesStr);
-
-      ocmh.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to invoke SPLIT core admin command", asyncId,
-          requestMap);
-
-      log.info("Index on shard: " + nodeName + " split into two successfully");
-
-      // apply buffered updates on sub-shards
-      for (int i = 0; i < subShardNames.size(); i++) {
-        String subShardName = subShardNames.get(i);
-
-        log.info("Applying buffered updates on : " + subShardName);
-
-        params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-        params.set(CoreAdminParams.NAME, subShardName);
-
-        ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
-      }
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking sub shard leaders" +
-          " to apply buffered updates", asyncId, requestMap);
-
-      log.info("Successfully applied buffered updates on : " + subShardNames);
-
-      // Replica creation for the new Slices
-
-      // look at the replication factor and see if it matches reality
-      // if it does not, find best nodes to create more cores
-
-      // TODO: Have replication factor decided in some other way instead of numShards for the parent
-
-      int repFactor = parentSlice.getReplicas().size();
-
-      // we need to look at every node and see how many cores it serves
-      // add our new cores to existing nodes serving the least number of cores
-      // but (for now) require that each core goes on a distinct node.
-
-      // TODO: add smarter options that look at the current number of cores per
-      // node?
-      // for now we just go random
-      Set<String> nodes = clusterState.getLiveNodes();
-      List<String> nodeList = new ArrayList<>(nodes.size());
-      nodeList.addAll(nodes);
-
-      // TODO: Have maxShardsPerNode param for this operation?
-
-      // Remove the node that hosts the parent shard for replica creation.
-      nodeList.remove(nodeName);
-
-      // TODO: change this to handle sharding a slice into > 2 sub-shards.
-
-      List<ReplicaPosition> replicaPositions = Assign.identifyNodes(ocmh.cloudManager,
-          clusterState,
-          new ArrayList<>(clusterState.getLiveNodes()),
-          collectionName,
-          new ZkNodeProps(collection.getProperties()),
-          subSlices, repFactor - 1, 0, 0);
-      sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
-
-      List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
-
-      for (ReplicaPosition replicaPosition : replicaPositions) {
-        String sliceName = replicaPosition.shard;
-        String subShardNodeName = replicaPosition.node;
-        String solrCoreName = collectionName + "_" + sliceName + "_replica" + (replicaPosition.index);
-
-        log.info("Creating replica shard " + solrCoreName + " as part of slice " + sliceName + " of collection "
-            + collectionName + " on " + subShardNodeName);
-
-        ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
-            ZkStateReader.COLLECTION_PROP, collectionName,
-            ZkStateReader.SHARD_ID_PROP, sliceName,
-            ZkStateReader.CORE_NAME_PROP, solrCoreName,
-            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
-            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(subShardNodeName),
-            ZkStateReader.NODE_NAME_PROP, subShardNodeName,
-            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-        Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
-
-        HashMap<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
-        propMap.put(COLLECTION_PROP, collectionName);
-        propMap.put(SHARD_ID_PROP, sliceName);
-        propMap.put("node", subShardNodeName);
-        propMap.put(CoreAdminParams.NAME, solrCoreName);
-        // copy over property params:
-        for (String key : message.keySet()) {
-          if (key.startsWith(COLL_PROP_PREFIX)) {
-            propMap.put(key, message.getStr(key));
-          }
-        }
-        // add async param
-        if (asyncId != null) {
-          propMap.put(ASYNC, asyncId);
-        }
-        // special flag param to instruct addReplica not to create the replica in cluster state again
-        propMap.put(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, "true");
-
-        propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
-
-        replicas.add(propMap);
-      }
-
-      assert TestInjection.injectSplitFailureBeforeReplicaCreation();
-
-      long ephemeralOwner = leaderZnodeStat.getEphemeralOwner();
-      // compare against the ephemeralOwner of the parent leader node
-      leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
-      if (leaderZnodeStat == null || ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
-        // put sub-shards in recovery_failed state
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.RECOVERY_FAILED.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-
-        if (leaderZnodeStat == null)  {
-          // the leader is not live anymore, fail the split!
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
-        } else if (ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
-          // there's a new leader, fail the split!
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "The zk session id for the shard leader node: " + parentShardLeader.getNodeName() + " has changed from "
-                  + ephemeralOwner + " to " + leaderZnodeStat.getEphemeralOwner() + ". This can cause data loss so we must abort the split");
-        }
-      }
-
-      // we must set the slice state into recovery before actually creating the replica cores
-      // this ensures that the logic inside Overseer to update sub-shard state to 'active'
-      // always gets a chance to execute. See SOLR-7673
-
-      if (repFactor == 1) {
-        // switch sub shard states to 'active'
-        log.info("Replication factor is 1 so switching shard states");
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        propMap.put(slice.get(), Slice.State.INACTIVE.toString());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.ACTIVE.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-      } else {
-        log.info("Requesting shard state be set to 'recovery'");
-        DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-        Map<String, Object> propMap = new HashMap<>();
-        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
-        for (String subSlice : subSlices) {
-          propMap.put(subSlice, Slice.State.RECOVERY.toString());
-        }
-        propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
-        ZkNodeProps m = new ZkNodeProps(propMap);
-        inQueue.offer(Utils.toJSON(m));
-      }
-
-      // now actually create replica cores on sub shard nodes
-      for (Map<String, Object> replica : replicas) {
-        ocmh.addReplica(clusterState, new ZkNodeProps(replica), results, null);
-      }
-
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard replicas", asyncId, requestMap);
-
-      log.info("Successfully created all replica shards for all sub-slices " + subSlices);
-
-      ocmh.commit(results, slice.get(), parentShardLeader);
-
-      return true;
-    } catch (SolrException e) {
-      throw e;
-    } catch (Exception e) {
-      log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
-    } finally {
-      if (sessionWrapper != null) sessionWrapper.release();
-    }
-  }
-
-  public static Slice getParentSlice(ClusterState clusterState, String collectionName, AtomicReference<String> slice, String splitKey) {
-    DocCollection collection = clusterState.getCollection(collectionName);
-    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-
-    Slice parentSlice;
-
-    if (slice.get() == null) {
-      if (router instanceof CompositeIdRouter) {
-        Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
-        if (searchSlices.isEmpty()) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
-        }
-        if (searchSlices.size() > 1) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
-        }
-        parentSlice = searchSlices.iterator().next();
-        slice.set(parentSlice.getName());
-        log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
-      } else {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
-                + router.getClass().getName());
-      }
-    } else {
-      parentSlice = collection.getSlice(slice.get());
-    }
-
-    if (parentSlice == null) {
-      // no chance of the collection being null because ClusterState#getCollection(String) would have thrown
-      // an exception already
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
-    }
-    return parentSlice;
-  }
-
-  public static String fillRanges(SolrCloudManager cloudManager, ZkNodeProps message, DocCollection collection, Slice parentSlice,
-                                List<DocRouter.Range> subRanges, List<String> subSlices, List<String> subShardNames) {
-    String splitKey = message.getStr("split.key");
-    DocRouter.Range range = parentSlice.getRange();
-    if (range == null) {
-      range = new PlainIdRouter().fullRange();
-    }
-    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-
-    String rangesStr = message.getStr(CoreAdminParams.RANGES);
-    if (rangesStr != null) {
-      String[] ranges = rangesStr.split(",");
-      if (ranges.length == 0 || ranges.length == 1) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
-      } else {
-        for (int i = 0; i < ranges.length; i++) {
-          String r = ranges[i];
-          try {
-            subRanges.add(DocRouter.DEFAULT.fromString(r));
-          } catch (Exception e) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
-          }
-          if (!subRanges.get(i).isSubsetOf(range)) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-                "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
-          }
-        }
-        List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
-        Collections.sort(temp);
-        if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
-        }
-        for (int i = 1; i < temp.size(); i++) {
-          if (temp.get(i - 1).max + 1 != temp.get(i).min) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specified hash ranges: " + rangesStr
-                + " either overlap with each other or " + "do not cover the entire range of parent shard: " + range);
-          }
-        }
-      }
-    } else if (splitKey != null) {
-      if (router instanceof CompositeIdRouter) {
-        CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
-        List<DocRouter.Range> tmpSubRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
-        if (tmpSubRanges.size() == 1) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey
-              + " has a hash range that is exactly equal to hash range of shard: " + parentSlice.getName());
-        }
-        for (DocRouter.Range subRange : tmpSubRanges) {
-          if (subRange.min == subRange.max) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
-          }
-        }
-        subRanges.addAll(tmpSubRanges);
-        log.info("Partitioning parent shard " + parentSlice.getName() + " range: " + parentSlice.getRange() + " yields: " + subRanges);
-        rangesStr = "";
-        for (int i = 0; i < subRanges.size(); i++) {
-          DocRouter.Range subRange = subRanges.get(i);
-          rangesStr += subRange.toString();
-          if (i < subRanges.size() - 1) rangesStr += ',';
-        }
-      }
-    } else {
-      // todo: fixed to two partitions?
-      subRanges.addAll(router.partitionRange(2, range));
-    }
-
-    for (int i = 0; i < subRanges.size(); i++) {
-      String subSlice = parentSlice.getName() + "_" + i;
-      subSlices.add(subSlice);
-      String subShardName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), collection, subSlice, Replica.Type.NRT);
-      subShardNames.add(subShardName);
-    }
-    return rangesStr;
-  }
-}
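
For context: the default path through fillRanges above, router.partitionRange(2, range), bisects the parent shard's hash range into two contiguous sub-ranges. A minimal standalone sketch of that arithmetic, assuming only the inclusive min/max semantics DocRouter.Range uses on the 32-bit hash ring (class and method names here are illustrative, not Solr code):

public class RangeBisectSketch {
  static final class Range {
    final int min, max; // inclusive bounds on the 32-bit hash ring
    Range(int min, int max) { this.min = min; this.max = max; }
    @Override public String toString() {
      return Integer.toHexString(min) + "-" + Integer.toHexString(max);
    }
  }

  // Split [min, max] into [min, mid] and [mid+1, max]; the +1 matches the
  // contiguity check in fillRanges (prev.max + 1 == next.min).
  static Range[] bisect(Range r) {
    long mid = ((long) r.min + (long) r.max) / 2; // long math avoids int overflow
    return new Range[] { new Range(r.min, (int) mid), new Range((int) mid + 1, r.max) };
  }

  public static void main(String[] args) {
    Range full = new Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
    for (Range half : bisect(full)) {
      System.out.println(half); // prints 80000000-0 then 1-7fffffff
    }
  }
}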

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/UtilizeNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/UtilizeNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/UtilizeNodeCmd.java
deleted file mode 100644
index 6a55cfd..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/UtilizeNodeCmd.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.request.V2Request;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
-import static org.apache.solr.common.params.AutoScalingParams.NODE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
-import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
-
-public class UtilizeNodeCmd implements OverseerCollectionMessageHandler.Cmd {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private final OverseerCollectionMessageHandler ocmh;
-
-  public UtilizeNodeCmd(OverseerCollectionMessageHandler ocmh) {
-    this.ocmh = ocmh;
-  }
-
-  @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
-    ocmh.checkRequired(message, NODE);
-    String nodeName = message.getStr(NODE);
-    String async = message.getStr(ASYNC);
-    AutoScalingConfig autoScalingConfig = ocmh.overseer.getSolrCloudManager().getDistribStateManager().getAutoScalingConfig();
-
-    //first look for any violation that may use this replica
-    List<ZkNodeProps> requests = new ArrayList<>();
-    //first look for suggestions if any
-    List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(autoScalingConfig, ocmh.overseer.getSolrCloudManager());
-    for (Suggester.SuggestionInfo suggestionInfo : suggestions) {
-      log.info("op: " + suggestionInfo.getOperation());
-      String coll = null;
-      List<String> pieces = StrUtils.splitSmart(suggestionInfo.getOperation().getPath(), '/');
-      if (pieces.size() > 1) {
-        coll = pieces.get(2);
-      } else {
-        continue;
-      }
-      log.info("coll: " + coll);
-      if (suggestionInfo.getOperation() instanceof V2Request) {
-        String targetNode = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/targetNode");
-        if (Objects.equals(targetNode, nodeName)) {
-          String replica = (String) Utils.getObjectByPath(suggestionInfo.getOperation(), true, "command/move-replica/replica");
-          requests.add(new ZkNodeProps(COLLECTION_PROP, coll,
-              CollectionParams.TARGET_NODE, targetNode,
-              ASYNC, async,
-              REPLICA_PROP, replica));
-        }
-      }
-    }
-    executeAll(requests);
-    PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getSession(ocmh.overseer.getSolrCloudManager());
-    Policy.Session session =  sessionWrapper.get();
-    for (; ; ) {
-      Suggester suggester = session.getSuggester(MOVEREPLICA)
-          .hint(Suggester.Hint.TARGET_NODE, nodeName);
-      session = suggester.getSession();
-      SolrRequest request = suggester.getSuggestion();
-      if (request == null) break;
-      requests.add(new ZkNodeProps(COLLECTION_PROP, request.getParams().get(COLLECTION_PROP),
-          CollectionParams.TARGET_NODE, request.getParams().get(CollectionParams.TARGET_NODE),
-          REPLICA_PROP, request.getParams().get(REPLICA_PROP),
-          ASYNC, request.getParams().get(ASYNC)));
-    }
-    sessionWrapper.returnSession(session);
-    try {
-      executeAll(requests);
-    } finally {
-      sessionWrapper.release();
-    }
-  }
-
-  private void executeAll(List<ZkNodeProps> requests) throws Exception {
-    if (requests.isEmpty()) return;
-    for (ZkNodeProps props : requests) {
-      NamedList result = new NamedList();
-      ocmh.commandMap.get(MOVEREPLICA)
-          .call(ocmh.overseer.getSolrCloudManager().getClusterStateProvider().getClusterState(),
-              props,
-              result);
-    }
-    requests.clear();
-  }
-
-}
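
As the checkRequired(message, NODE) call above indicates, UTILIZENODE takes a single required node parameter. A minimal SolrJ sketch of invoking it through the Collections API endpoint; the base URL and node name below are hypothetical placeholders:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class UtilizeNodeSketch {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "UTILIZENODE");
      params.set("node", "127.0.0.1:7574_solr"); // hypothetical node name
      QueryRequest request = new QueryRequest(params);
      request.setPath("/admin/collections"); // Collections API handler
      System.out.println(client.request(request)); // NamedList with responseHeader etc.
    }
  }
}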

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
new file mode 100644
index 0000000..6b4e427
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.cloud.ActiveReplicaWatcher;
+import org.apache.solr.cloud.CloudUtil;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
+import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
+
+public class AddReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public AddReplicaCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    addReplica(state, message, results, null);
+  }
+
+  ZkNodeProps addReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+      throws IOException, InterruptedException {
+    log.debug("addReplica() : {}", Utils.toJSONString(message));
+    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
+    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
+    final String asyncId = message.getStr(ASYNC);
+
+    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
+    message = assignReplicaDetails(ocmh.cloudManager, clusterState, message, sessionWrapper);
+
+    String collection = message.getStr(COLLECTION_PROP);
+    DocCollection coll = clusterState.getCollection(collection);
+
+    String node = message.getStr(CoreAdminParams.NODE);
+    String shard = message.getStr(SHARD_ID_PROP);
+    String coreName = message.getStr(CoreAdminParams.NAME);
+    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
+    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
+    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
+    boolean parallel = message.getBool("parallel", false);
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    if (!Overseer.isLegacy(zkStateReader)) {
+      if (!skipCreateReplicaInClusterState) {
+        ZkNodeProps props = new ZkNodeProps(
+            Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
+            ZkStateReader.COLLECTION_PROP, collection,
+            ZkStateReader.SHARD_ID_PROP, shard,
+            ZkStateReader.CORE_NAME_PROP, coreName,
+            ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
+            ZkStateReader.BASE_URL_PROP, zkStateReader.getBaseUrlForNodeName(node),
+            ZkStateReader.NODE_NAME_PROP, node,
+            ZkStateReader.REPLICA_TYPE, replicaType.name());
+        if (coreNodeName != null) {
+          props = props.plus(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
+        }
+        try {
+          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+        } catch (Exception e) {
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Exception updating Overseer state queue", e);
+        }
+      }
+      params.set(CoreAdminParams.CORE_NODE_NAME,
+          ocmh.waitToSeeReplicasInState(collection, Collections.singletonList(coreName)).get(coreName).getName());
+    }
+
+    String configName = zkStateReader.readConfigName(collection);
+    String routeKey = message.getStr(ShardParams._ROUTE_);
+    String dataDir = message.getStr(CoreAdminParams.DATA_DIR);
+    String ulogDir = message.getStr(CoreAdminParams.ULOG_DIR);
+    String instanceDir = message.getStr(CoreAdminParams.INSTANCE_DIR);
+
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
+    params.set(CoreAdminParams.NAME, coreName);
+    params.set(COLL_CONF, configName);
+    params.set(CoreAdminParams.COLLECTION, collection);
+    params.set(CoreAdminParams.REPLICA_TYPE, replicaType.name());
+    if (shard != null) {
+      params.set(CoreAdminParams.SHARD, shard);
+    } else if (routeKey != null) {
+      Collection<Slice> slices = coll.getRouter().getSearchSlicesSingle(routeKey, null, coll);
+      if (slices.isEmpty()) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No active shard serving _route_=" + routeKey + " found");
+      } else {
+        params.set(CoreAdminParams.SHARD, slices.iterator().next().getName());
+      }
+    } else {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify either 'shard' or _route_ param");
+    }
+    if (dataDir != null) {
+      params.set(CoreAdminParams.DATA_DIR, dataDir);
+    }
+    if (ulogDir != null) {
+      params.set(CoreAdminParams.ULOG_DIR, ulogDir);
+    }
+    if (instanceDir != null) {
+      params.set(CoreAdminParams.INSTANCE_DIR, instanceDir);
+    }
+    if (coreNodeName != null) {
+      params.set(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
+    }
+    ocmh.addPropertyParams(message, params);
+
+    // For tracking async calls.
+    Map<String,String> requestMap = new HashMap<>();
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+
+    ocmh.sendShardRequest(node, params, shardHandler, asyncId, requestMap);
+
+    final String fnode = node;
+    final String fcoreName = coreName;
+
+    Runnable runnable = () -> {
+      ocmh.processResponses(results, shardHandler, true, "ADDREPLICA failed to create replica", asyncId, requestMap);
+      ocmh.waitForCoreNodeName(collection, fnode, fcoreName);
+      if (sessionWrapper.get() != null) {
+        sessionWrapper.get().release();
+      }
+      if (onComplete != null) onComplete.run();
+    };
+
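+    // Three execution modes follow: run inline (synchronous), run inline while
+    // an ActiveReplicaWatcher latch waits for the replica to become active
+    // (waitForFinalState), or hand the work to the OCMH thread pool (parallel).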
+    if (!parallel || waitForFinalState) {
+      if (waitForFinalState) {
+        SolrCloseableLatch latch = new SolrCloseableLatch(1, ocmh);
+        ActiveReplicaWatcher watcher = new ActiveReplicaWatcher(collection, null, Collections.singletonList(coreName), latch);
+        try {
+          zkStateReader.registerCollectionStateWatcher(collection, watcher);
+          runnable.run();
+          if (!latch.await(timeout, TimeUnit.SECONDS)) {
+            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Timeout waiting " + timeout + " seconds for replica to become active.");
+          }
+        } finally {
+          zkStateReader.removeCollectionStateWatcher(collection, watcher);
+        }
+      } else {
+        runnable.run();
+      }
+    } else {
+      ocmh.tpe.submit(runnable);
+    }
+
+
+    return new ZkNodeProps(
+        ZkStateReader.COLLECTION_PROP, collection,
+        ZkStateReader.SHARD_ID_PROP, shard,
+        ZkStateReader.CORE_NAME_PROP, coreName,
+        ZkStateReader.NODE_NAME_PROP, node
+    );
+  }
+
+  public static ZkNodeProps assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
+                                                 ZkNodeProps message, AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+    boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
+
+    String collection = message.getStr(COLLECTION_PROP);
+    String node = message.getStr(CoreAdminParams.NODE);
+    String shard = message.getStr(SHARD_ID_PROP);
+    String coreName = message.getStr(CoreAdminParams.NAME);
+    String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
+    Replica.Type replicaType = Replica.Type.valueOf(message.getStr(ZkStateReader.REPLICA_TYPE, Replica.Type.NRT.name()).toUpperCase(Locale.ROOT));
+    if (StringUtils.isBlank(coreName)) {
+      coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
+    }
+
+    DocCollection coll = clusterState.getCollection(collection);
+    if (coll == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
+    }
+    if (coll.getSlice(shard) == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "Collection: " + collection + " shard: " + shard + " does not exist");
+    }
+
+    // Kind of unnecessary, but it does put the logic of whether to override maxShardsPerNode in one place.
+    if (!skipCreateReplicaInClusterState) {
+      if (CloudUtil.usePolicyFramework(coll, cloudManager)) {
+        if (node == null) {
+          if(coll.getPolicyName() != null) message.getProperties().put(Policy.POLICY, coll.getPolicyName());
+          node = Assign.identifyNodes(cloudManager,
+              clusterState,
+              Collections.emptyList(),
+              collection,
+              message,
+              Collections.singletonList(shard),
+              replicaType == Replica.Type.NRT ? 0 : 1,
+              replicaType == Replica.Type.TLOG ? 0 : 1,
+              replicaType == Replica.Type.PULL ? 0 : 1
+          ).get(0).node;
+          sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
+        }
+      } else {
+        node = Assign.getNodesForNewReplicas(clusterState, collection, shard, 1, node,
+            cloudManager).get(0).nodeName;// TODO: use replica type in this logic too
+      }
+    }
+    log.info("Node Identified {} for creating new replica", node);
+
+    if (!clusterState.liveNodesContain(node)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
+    }
+    if (coreName == null) {
+      coreName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), coll, shard, replicaType);
+    } else if (!skipCreateReplicaInClusterState) {
+      //Validate that the core name is unique in that collection
+      for (Slice slice : coll.getSlices()) {
+        for (Replica replica : slice.getReplicas()) {
+          String replicaCoreName = replica.getStr(CORE_NAME_PROP);
+          if (coreName.equals(replicaCoreName)) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Another replica with the same core name already exists" +
+                " for this collection");
+          }
+        }
+      }
+    }
+    if (coreNodeName != null) {
+      message = message.plus(CoreAdminParams.CORE_NODE_NAME, coreNodeName);
+    }
+    message = message.plus(CoreAdminParams.NAME, coreName);
+    message = message.plus(CoreAdminParams.NODE, node);
+    return message;
+  }
+}
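
For reference, the client-side counterpart of this handler: a minimal SolrJ sketch issuing ADDREPLICA. The collection, shard, node, and base URL are hypothetical; setNode is optional, and when omitted Assign picks a node as shown in assignReplicaDetails above:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

public class AddReplicaSketch {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      CollectionAdminRequest.AddReplica addReplica =
          CollectionAdminRequest.addReplicaToShard("mycollection", "shard1");
      addReplica.setNode("127.0.0.1:8983_solr"); // optional; omit to let Assign choose
      CollectionAdminResponse rsp = addReplica.process(client);
      System.out.println(rsp.getStatus()); // 0 on success
    }
  }
}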

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
new file mode 100644
index 0000000..e7ce583
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -0,0 +1,483 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.BadVersionException;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
+import org.apache.solr.cloud.rule.ReplicaAssigner;
+import org.apache.solr.cloud.rule.Rule;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.NumberUtils;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.POLICY;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
+import static org.apache.solr.common.cloud.DocCollection.SNITCH;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+
+public class Assign {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public static int incAndGetId(DistribStateManager stateManager, String collection, int defaultValue) {
+    String path = "/collections/"+collection;
+    try {
+      if (!stateManager.hasData(path)) {
+        try {
+          stateManager.makePath(path);
+        } catch (AlreadyExistsException e) {
+          // it's okay if another beats us creating the node
+        }
+      }
+      path += "/counter";
+      if (!stateManager.hasData(path)) {
+        try {
+          stateManager.createData(path, NumberUtils.intToBytes(defaultValue), CreateMode.PERSISTENT);
+        } catch (AlreadyExistsException e) {
+          // it's okay if another beats us creating the node
+        }
+      }
+    } catch (InterruptedException e) {
+      Thread.interrupted();
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
+    } catch (IOException | KeeperException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error creating counter node in Zookeeper for collection:" + collection, e);
+    }
+
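+    // Optimistic concurrency: read the counter with its version, increment,
+    // and write back conditionally; BadVersionException means another node
+    // won the race, so loop and retry against the fresh value.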
+    while (true) {
+      try {
+        int version = 0;
+        int currentId = 0;
+        VersionedData data = stateManager.getData(path, null);
+        if (data != null) {
+          currentId = NumberUtils.bytesToInt(data.getData());
+          version = data.getVersion();
+        }
+        byte[] bytes = NumberUtils.intToBytes(++currentId);
+        stateManager.setData(path, bytes, version);
+        return currentId;
+      } catch (BadVersionException e) {
+        continue;
+      } catch (IOException | KeeperException e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:"+collection, e);
+      } catch (InterruptedException e) {
+        Thread.interrupted();
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error inc and get counter from Zookeeper for collection:" + collection, e);
+      }
+    }
+  }
+
+  public static String assignCoreNodeName(DistribStateManager stateManager, DocCollection collection) {
+    // for backward compatibility;
+    int defaultValue = defaultCounterValue(collection, false);
+    String coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
+    while (collection.getReplica(coreNodeName) != null) {
+      // there is a wee chance that the new coreNodeName is not totally unique,
+      // but it is guaranteed to be unique for new collections
+      coreNodeName = "core_node" + incAndGetId(stateManager, collection.getName(), defaultValue);
+    }
+    return coreNodeName;
+  }
+
+  /**
+   * Assign a new unique id up to slices count - then add replicas evenly.
+   *
+   * @return the assigned shard id
+   */
+  public static String assignShard(DocCollection collection, Integer numShards) {
+    if (numShards == null) {
+      numShards = 1;
+    }
+    String returnShardId = null;
+    Map<String, Slice> sliceMap = collection != null ? collection.getActiveSlicesMap() : null;
+
+
+    // TODO: now that we create shards ahead of time, is this code needed?  Esp since hash ranges aren't assigned when creating via this method?
+
+    if (sliceMap == null) {
+      return "shard1";
+    }
+
+    List<String> shardIdNames = new ArrayList<>(sliceMap.keySet());
+
+    if (shardIdNames.size() < numShards) {
+      return "shard" + (shardIdNames.size() + 1);
+    }
+
+    // TODO: don't need to sort to find shard with fewest replicas!
+
+    // else figure out which shard needs more replicas
+    final Map<String, Integer> map = new HashMap<>();
+    for (String shardId : shardIdNames) {
+      int cnt = sliceMap.get(shardId).getReplicasMap().size();
+      map.put(shardId, cnt);
+    }
+
+    Collections.sort(shardIdNames, (String o1, String o2) -> {
+      Integer one = map.get(o1);
+      Integer two = map.get(o2);
+      return one.compareTo(two);
+    });
+
+    returnShardId = shardIdNames.get(0);
+    return returnShardId;
+  }
+
+  private static String buildSolrCoreName(String collectionName, String shard, Replica.Type type, int replicaNum) {
+    // TODO: Adding the suffix is great for debugging, but may be an issue if at some point we want to support a way to change replica type
+    return String.format(Locale.ROOT, "%s_%s_replica_%s%s", collectionName, shard, type.name().substring(0,1).toLowerCase(Locale.ROOT), replicaNum);
+  }
+
+  private static int defaultCounterValue(DocCollection collection, boolean newCollection) {
+    if (newCollection) return 0;
+    int defaultValue = collection.getReplicas().size();
+    if (collection.getReplicationFactor() != null) {
+      // numReplicas and replicationFactor * numSlices may not be equal
+      // when many addReplica or deleteReplica operations have been executed
+      defaultValue = Math.max(defaultValue,
+          collection.getReplicationFactor() * collection.getSlices().size());
+    }
+    return defaultValue * 20;
+  }
+
+  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type, boolean newCollection) {
+    Slice slice = collection.getSlice(shard);
+    int defaultValue = defaultCounterValue(collection, newCollection);
+    int replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
+    String coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
+    while (existCoreName(coreName, slice)) {
+      replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue);
+      coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum);
+    }
+    return coreName;
+  }
+
+  public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type) {
+    return buildSolrCoreName(stateManager, collection, shard, type, false);
+  }
+
+  private static boolean existCoreName(String coreName, Slice slice) {
+    if (slice == null) return false;
+    for (Replica replica : slice.getReplicas()) {
+      if (coreName.equals(replica.getStr(CORE_NAME_PROP))) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  public static List<String> getLiveOrLiveAndCreateNodeSetList(final Set<String> liveNodes, final ZkNodeProps message, final Random random) {
+    // TODO: add smarter options that look at the current number of cores per
+    // node?
+    // for now we just go random (except when createNodeSet and createNodeSet.shuffle=false are passed in)
+
+    List<String> nodeList;
+
+    final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
+    final List<String> createNodeList = (createNodeSetStr == null) ? null :
+        StrUtils.splitSmart((OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY.equals(createNodeSetStr) ?
+            "" : createNodeSetStr), ",", true);
+
+    if (createNodeList != null) {
+      nodeList = new ArrayList<>(createNodeList);
+      nodeList.retainAll(liveNodes);
+      if (message.getBool(OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE,
+          OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE_DEFAULT)) {
+        Collections.shuffle(nodeList, random);
+      }
+    } else {
+      nodeList = new ArrayList<>(liveNodes);
+      Collections.shuffle(nodeList, random);
+    }
+
+    return nodeList;
+  }
+
+  public static List<ReplicaPosition> identifyNodes(SolrCloudManager cloudManager,
+                                                    ClusterState clusterState,
+                                                    List<String> nodeList,
+                                                    String collectionName,
+                                                    ZkNodeProps message,
+                                                    List<String> shardNames,
+                                                    int numNrtReplicas,
+                                                    int numTlogReplicas,
+                                                    int numPullReplicas) throws IOException, InterruptedException {
+    List<Map> rulesMap = (List) message.get("rule");
+    String policyName = message.getStr(POLICY);
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+
+    if (rulesMap == null && policyName == null && autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
+      log.debug("Identify nodes using default");
+      int i = 0;
+      List<ReplicaPosition> result = new ArrayList<>();
+      for (String aShard : shardNames)
+        for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
+            Replica.Type.TLOG, numTlogReplicas,
+            Replica.Type.PULL, numPullReplicas
+        ).entrySet()) {
+          for (int j = 0; j < e.getValue(); j++){
+            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
+            i++;
+          }
+        }
+      return result;
+    } else {
+      if (numTlogReplicas + numPullReplicas != 0 && rulesMap != null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            Replica.Type.TLOG + " or " + Replica.Type.PULL + " replica types not supported with placement rules or cluster policies");
+      }
+    }
+
+    if (rulesMap != null && !rulesMap.isEmpty()) {
+      List<Rule> rules = new ArrayList<>();
+      for (Object map : rulesMap) rules.add(new Rule((Map) map));
+      Map<String, Integer> sharVsReplicaCount = new HashMap<>();
+
+      for (String shard : shardNames) sharVsReplicaCount.put(shard, numNrtReplicas);
+      ReplicaAssigner replicaAssigner = new ReplicaAssigner(rules,
+          sharVsReplicaCount,
+          (List<Map>) message.get(SNITCH),
+          new HashMap<>(),//this is a new collection. So, there are no nodes in any shard
+          nodeList,
+          cloudManager,
+          clusterState);
+
+      Map<ReplicaPosition, String> nodeMappings = replicaAssigner.getNodeMappings();
+      return nodeMappings.entrySet().stream()
+          .map(e -> new ReplicaPosition(e.getKey().shard, e.getKey().index, e.getKey().type, e.getValue()))
+          .collect(Collectors.toList());
+    } else  {
+      if (message.getStr(CREATE_NODE_SET) == null)
+        nodeList = Collections.emptyList();// unless explicitly specified do not pass node list to Policy
+      return getPositionsUsingPolicy(collectionName,
+          shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas, policyName, cloudManager, nodeList);
+    }
+  }
+
+  static class ReplicaCount {
+    public final String nodeName;
+    public int thisCollectionNodes = 0;
+    public int totalNodes = 0;
+
+    ReplicaCount(String nodeName) {
+      this.nodeName = nodeName;
+    }
+
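+    // Lower weight sorts first in getNodesForNewReplicas: replicas of this
+    // collection dominate (x100), with totalNodes as a tiebreaker.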
+    public int weight() {
+      return (thisCollectionNodes * 100) + totalNodes;
+    }
+  }
+
+  // Only called from createShard and addReplica (so far).
+  //
+  // Gets a list of candidate nodes to put the required replica(s) on. Throws errors if not enough replicas
+  // could be created on live nodes given maxShardsPerNode, Replication factor (if from createShard) etc.
+  public static List<ReplicaCount> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
+                                                          String shard, int nrtReplicas,
+                                                          Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException {
+    log.debug("getNodesForNewReplicas() shard: {} , replicas : {} , createNodeSet {}", shard, nrtReplicas, createNodeSet );
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
+    List<String> createNodeList = null;
+
+    if (createNodeSet instanceof List) {
+      createNodeList = (List) createNodeSet;
+    } else {
+      createNodeList = createNodeSet == null ? null : StrUtils.splitSmart((String) createNodeSet, ",", true);
+    }
+
+     HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, createNodeList);
+
+    if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
+      int availableSlots = 0;
+      for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
+        //ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
+        if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
+          availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
+        }
+      }
+      if (availableSlots < nrtReplicas) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            String.format(Locale.ROOT, "Cannot create %d new replicas for collection %s given the current number of live nodes and a maxShardsPerNode of %d",
+                nrtReplicas, collectionName, maxShardsPerNode));
+      }
+    }
+
+    List l = (List) coll.get(DocCollection.RULE);
+    List<ReplicaPosition> replicaPositions = null;
+    if (l != null) {
+      // TODO: make it so that this method doesn't require access to CC
+      replicaPositions = getNodesViaRules(clusterState, shard, nrtReplicas, cloudManager, coll, createNodeList, l);
+    }
+    String policyName = coll.getStr(POLICY);
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    if (policyName != null || !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty()) {
+      replicaPositions = Assign.getPositionsUsingPolicy(collectionName, Collections.singletonList(shard), nrtReplicas, 0, 0,
+          policyName, cloudManager, createNodeList);
+    }
+
+    if(replicaPositions != null){
+      List<ReplicaCount> repCounts = new ArrayList<>();
+      for (ReplicaPosition p : replicaPositions) {
+        repCounts.add(new ReplicaCount(p.node));
+      }
+      return repCounts;
+    }
+
+    ArrayList<ReplicaCount> sortedNodeList = new ArrayList<>(nodeNameVsShardCount.values());
+    Collections.sort(sortedNodeList, (x, y) -> (x.weight() < y.weight()) ? -1 : ((x.weight() == y.weight()) ? 0 : 1));
+    return sortedNodeList;
+
+  }
+
+  public static List<ReplicaPosition> getPositionsUsingPolicy(String collName, List<String> shardNames,
+                                                              int nrtReplicas,
+                                                              int tlogReplicas,
+                                                              int pullReplicas,
+                                                              String policyName, SolrCloudManager cloudManager,
+                                                              List<String> nodesList) throws IOException, InterruptedException {
+    log.debug("shardnames {} NRT {} TLOG {} PULL {} , policy {}, nodeList {}", shardNames, nrtReplicas, tlogReplicas, pullReplicas, policyName, nodesList);
+    List<ReplicaPosition> replicaPositions = null;
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    try {
+      Map<String, String> kvMap = Collections.singletonMap(collName, policyName);
+      replicaPositions = PolicyHelper.getReplicaLocations(
+          collName,
+          autoScalingConfig,
+          cloudManager,
+          kvMap,
+          shardNames,
+          nrtReplicas,
+          tlogReplicas,
+          pullReplicas,
+          nodesList);
+      return replicaPositions;
+    } catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error getting replica locations", e);
+    } finally {
+      if (log.isTraceEnabled()) {
+        if (replicaPositions != null)
+          log.trace("REPLICA_POSITIONS: " + Utils.toJSONString(Utils.getDeepCopy(replicaPositions, 7, true)));
+        log.trace("AUTOSCALING_CONF: " + Utils.toJSONString(autoScalingConfig));
+      }
+    }
+  }
+
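+  // Rule-based placement: turns the collection's "rule" clauses into Rule objects and asks
+  // ReplicaAssigner for a shard -> node mapping. For instance, a rule such as
+  // "shard:*,replica:<2,node:*" (used by ShardSplitTest below) keeps each node under two
+  // replicas per shard.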
+  private static List<ReplicaPosition> getNodesViaRules(ClusterState clusterState, String shard, int numberOfNodes,
+                                                        SolrCloudManager cloudManager, DocCollection coll, List<String> createNodeList, List l) {
+    ArrayList<Rule> rules = new ArrayList<>();
+    for (Object o : l) rules.add(new Rule((Map) o));
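+    // Snapshot the current layout as shard -> (node -> replica count).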
+    Map<String, Map<String, Integer>> shardVsNodes = new LinkedHashMap<>();
+    for (Slice slice : coll.getSlices()) {
+      LinkedHashMap<String, Integer> n = new LinkedHashMap<>();
+      shardVsNodes.put(slice.getName(), n);
+      for (Replica replica : slice.getReplicas()) {
+        Integer count = n.get(replica.getNodeName());
+        if (count == null) count = 0;
+        n.put(replica.getNodeName(), ++count);
+      }
+    }
+    List snitches = (List) coll.get(SNITCH);
+    List<String> nodesList = createNodeList == null ?
+        new ArrayList<>(clusterState.getLiveNodes()) :
+        createNodeList;
+    Map<ReplicaPosition, String> positions = new ReplicaAssigner(
+        rules,
+        Collections.singletonMap(shard, numberOfNodes),
+        snitches,
+        shardVsNodes,
+        nodesList, cloudManager, clusterState).getNodeMappings();
+
+    return positions.entrySet().stream().map(e -> e.getKey().setNode(e.getValue())).collect(Collectors.toList());// getReplicaCounts(positions);
+  }
+
+  private static HashMap<String, ReplicaCount> getNodeNameVsShardCount(String collectionName,
+                                                                       ClusterState clusterState, List<String> createNodeList) {
+    Set<String> nodes = clusterState.getLiveNodes();
+
+    List<String> nodeList = new ArrayList<>(nodes.size());
+    nodeList.addAll(nodes);
+    if (createNodeList != null) nodeList.retainAll(createNodeList);
+
+    HashMap<String, ReplicaCount> nodeNameVsShardCount = new HashMap<>();
+    for (String s : nodeList) {
+      nodeNameVsShardCount.put(s, new ReplicaCount(s));
+    }
+    if (createNodeList != null) { // Overrides petty considerations about maxShardsPerNode
+      if (createNodeList.size() != nodeNameVsShardCount.size()) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "At least one of the node(s) specified " + createNodeList + " are not currently active in "
+                + nodeNameVsShardCount.keySet() + ", no action taken.");
+      }
+      return nodeNameVsShardCount;
+    }
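+    // Count existing replicas per candidate node across all collections: totalNodes feeds
+    // the weight() tiebreak, and a node already holding maxShardsPerNode replicas of this
+    // collection is dropped from candidacy altogether.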
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Integer maxShardsPerNode = coll.getMaxShardsPerNode();
+    Map<String, DocCollection> collections = clusterState.getCollectionsMap();
+    for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
+      DocCollection c = entry.getValue();
+      // Identify suitable nodes by checking the number of cores on each of them.
+      for (Slice slice : c.getSlices()) {
+        Collection<Replica> replicas = slice.getReplicas();
+        for (Replica replica : replicas) {
+          ReplicaCount count = nodeNameVsShardCount.get(replica.getNodeName());
+          if (count != null) {
+            count.totalNodes++; // Used to "weigh" whether this node should be used later.
+            if (entry.getKey().equals(collectionName)) {
+              count.thisCollectionNodes++;
+              if (count.thisCollectionNodes >= maxShardsPerNode) nodeNameVsShardCount.remove(replica.getNodeName());
+            }
+          }
+        }
+      }
+    }
+
+    return nodeNameVsShardCount;
+  }
+
+
+}


[02/15] lucene-solr:master: SOLR-11817: Move Collections API classes to its own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
new file mode 100644
index 0000000..6d3ce4e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -0,0 +1,1017 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.cloud.ChaosMonkey;
+import org.apache.solr.cloud.StoppableIndexingThread;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CollectionStateWatcher;
+import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.HashBasedRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.TestInjection;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+
+@Slow
+public class ShardSplitTest extends BasicDistributedZkTest {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public static final String SHARD1_0 = SHARD1 + "_0";
+  public static final String SHARD1_1 = SHARD1 + "_1";
+
+  public ShardSplitTest() {
+    schemaString = "schema15.xml";      // we need a string id
+  }
+
+  @Override
+  public void distribSetUp() throws Exception {
+    super.distribSetUp();
+    useFactory(null);
+  }
+
+  @Test
+  public void test() throws Exception {
+
+    waitForThingsToLevelOut(15);
+
+    if (usually()) {
+      log.info("Using legacyCloud=false for cluster");
+      CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+          .process(cloudClient);
+    }
+    incompleteOrOverlappingCustomRangeTest();
+    splitByUniqueKeyTest();
+    splitByRouteFieldTest();
+    splitByRouteKeyTest();
+
+    // todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
+    // and the new sub-shards don't have any.
+    waitForRecoveriesToFinish(true);
+    //waitForThingsToLevelOut(15);
+  }
+
+  /*
+  Creates a collection with replicationFactor=1, splits a shard. Restarts the sub-shard leader node.
+  Add a replica. Ensure count matches in leader and replica.
+   */
+  public void testSplitStaticIndexReplication() throws Exception {
+    waitForThingsToLevelOut(15);
+
+    DocCollection defCol = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+    Replica replica = defCol.getReplicas().get(0);
+    String nodeName = replica.getNodeName();
+
+    String collectionName = "testSplitStaticIndexReplication";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
+    create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
+    create.setCreateNodeSet(nodeName); // we want to create the leader on a fixed node so that we know which one to restart later
+    create.process(cloudClient);
+    try (CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), true, cloudClient.getLbClient().getHttpClient())) {
+      client.setDefaultCollection(collectionName);
+      StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, client, "i1", true);
+      try {
+        thread.start();
+        Thread.sleep(1000); // give the indexer some time to do its work
+        thread.safeStop();
+        thread.join();
+        client.commit();
+        controlClient.commit();
+
+        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
+        splitShard.setShardName(SHARD1);
+        String asyncId = splitShard.processAsync(client);
+        RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId).waitFor(client, 120);
+        if (state == RequestStatusState.COMPLETED)  {
+          waitForRecoveriesToFinish(collectionName, true);
+          // let's wait to see parent shard become inactive
+          CountDownLatch latch = new CountDownLatch(1);
+          client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
+            @Override
+            public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+              Slice parent = collectionState.getSlice(SHARD1);
+              Slice slice10 = collectionState.getSlice(SHARD1_0);
+              Slice slice11 = collectionState.getSlice(SHARD1_1);
+              if (slice10 != null && slice11 != null &&
+                  parent.getState() == Slice.State.INACTIVE &&
+                  slice10.getState() == Slice.State.ACTIVE &&
+                  slice11.getState() == Slice.State.ACTIVE) {
+                latch.countDown();
+                return true; // removes the watch
+              }
+              return false;
+            }
+          });
+          latch.await(1, TimeUnit.MINUTES);
+          if (latch.getCount() != 0)  {
+            // sanity check
+            fail("Sub-shards did not become active even after waiting for 1 minute");
+          }
+
+          int liveNodeCount = client.getZkStateReader().getClusterState().getLiveNodes().size();
+
+          // restart the sub-shard leader node
+          boolean restarted = false;
+          for (JettySolrRunner jetty : jettys) {
+            int port = jetty.getBaseUrl().getPort();
+            if (replica.getStr(BASE_URL_PROP).contains(":" + port))  {
+              ChaosMonkey.kill(jetty);
+              ChaosMonkey.start(jetty);
+              restarted = true;
+              break;
+            }
+          }
+          if (!restarted) {
+            // sanity check
+            fail("We could not find a jetty to kill for replica: " + replica.getCoreUrl());
+          }
+
+          // add a new replica for the sub-shard
+          CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(collectionName, SHARD1_0);
+          // use control client because less chances of it being the node being restarted
+          // this is to avoid flakiness of test because of NoHttpResponseExceptions
+          String control_collection = client.getZkStateReader().getClusterState().getCollection("control_collection").getReplicas().get(0).getStr(BASE_URL_PROP);
+          try (HttpSolrClient control = new HttpSolrClient.Builder(control_collection).withHttpClient(client.getLbClient().getHttpClient()).build())  {
+            state = addReplica.processAndWait(control, 30);
+          }
+          if (state == RequestStatusState.COMPLETED)  {
+            CountDownLatch newReplicaLatch = new CountDownLatch(1);
+            client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
+              @Override
+              public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+                if (liveNodes.size() != liveNodeCount)  {
+                  return false;
+                }
+                Slice slice = collectionState.getSlice(SHARD1_0);
+                if (slice.getReplicas().size() == 2)  {
+                  if (slice.getReplicas().stream().noneMatch(r -> r.getState() == Replica.State.RECOVERING)) {
+                    // we see replicas and none of them are recovering
+                    newReplicaLatch.countDown();
+                    return true;
+                  }
+                }
+                return false;
+              }
+            });
+            newReplicaLatch.await(30, TimeUnit.SECONDS);
+            // check consistency of sub-shard replicas explicitly because the checkShardConsistency
+            // method doesn't handle new shards/replicas so well.
+            ClusterState clusterState = client.getZkStateReader().getClusterState();
+            DocCollection collection = clusterState.getCollection(collectionName);
+            int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
+            assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
+          } else  {
+            fail("Adding a replica to sub-shard did not complete even after waiting for 30 seconds!. Saw state = " + state.getKey());
+          }
+        } else {
+          fail("We expected shard split to succeed on a static index but it didn't. Found state = " + state.getKey());
+        }
+      } finally {
+        thread.safeStop();
+        thread.join();
+      }
+    }
+  }
+
+  private int assertConsistentReplicas(Slice shard) throws SolrServerException, IOException {
+    long numFound = Long.MIN_VALUE;
+    int count = 0;
+    for (Replica replica : shard.getReplicas()) {
+      // Use try-with-resources so the per-replica client is always closed.
+      QueryResponse response;
+      try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
+          .withHttpClient(cloudClient.getLbClient().getHttpClient()).build()) {
+        response = client.query(new SolrQuery("q", "*:*", "distrib", "false"));
+      }
+      log.info("Found numFound={} on replica: {}", response.getResults().getNumFound(), replica.getCoreUrl());
+      if (numFound == Long.MIN_VALUE)  {
+        numFound = response.getResults().getNumFound();
+      } else  {
+        assertEquals("Shard " + shard.getName() + " replicas do not have same number of documents", numFound, response.getResults().getNumFound());
+      }
+      count++;
+    }
+    return count;
+  }
+
+  /**
+   * Used to test that we can split a shard when a previous split event
+   * left sub-shards in construction or recovery state.
+   *
+   * See SOLR-9439
+   */
+  @Test
+  public void testSplitAfterFailedSplit() throws Exception {
+    waitForThingsToLevelOut(15);
+
+    TestInjection.splitFailureBeforeReplicaCreation = "true:100"; // we definitely want split to fail
+    try {
+      try {
+        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        splitShard.setShardName(SHARD1);
+        splitShard.process(cloudClient);
+        fail("Shard split was not supposed to succeed after failure injection!");
+      } catch (Exception e) {
+        // expected
+      }
+
+      // assert that sub-shards cores exist and sub-shard is in construction state
+      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      zkStateReader.forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+      ClusterState state = zkStateReader.getClusterState();
+      DocCollection collection = state.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+
+      Slice shard10 = collection.getSlice(SHARD1_0);
+      assertEquals(Slice.State.CONSTRUCTION, shard10.getState());
+      assertEquals(1, shard10.getReplicas().size());
+
+      Slice shard11 = collection.getSlice(SHARD1_1);
+      assertEquals(Slice.State.CONSTRUCTION, shard11.getState());
+      assertEquals(1, shard11.getReplicas().size());
+
+      // let's retry the split
+      TestInjection.reset(); // let the split succeed
+      try {
+        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        splitShard.setShardName(SHARD1);
+        splitShard.process(cloudClient);
+        // Yay!
+      } catch (Exception e) {
+        log.error("Shard split failed", e);
+        fail("Shard split did not succeed after a previous failed split attempt left sub-shards in construction state");
+      }
+
+    } finally {
+      TestInjection.reset();
+    }
+  }
+
+  @Test
+  public void testSplitWithChaosMonkey() throws Exception {
+    waitForThingsToLevelOut(15);
+
+    List<StoppableIndexingThread> indexers = new ArrayList<>();
+    try {
+      for (int i = 0; i < 1; i++) {
+        StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, cloudClient, String.valueOf(i), true);
+        indexers.add(thread);
+        thread.start();
+      }
+      Thread.sleep(1000); // give the indexers some time to do their work
+    } catch (Exception e) {
+      log.error("Error in test", e);
+    } finally {
+      for (StoppableIndexingThread indexer : indexers) {
+        indexer.safeStop();
+        indexer.join();
+      }
+    }
+
+    cloudClient.commit();
+    controlClient.commit();
+
+    AtomicBoolean stop = new AtomicBoolean();
+    AtomicBoolean killed = new AtomicBoolean(false);
+    Runnable monkey = new Runnable() {
+      @Override
+      public void run() {
+        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+        zkStateReader.registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {
+          @Override
+          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+            if (stop.get()) {
+              return true; // abort and remove the watch
+            }
+            Slice slice = collectionState.getSlice(SHARD1_0);
+            if (slice != null && slice.getReplicas().size() > 1) {
+              // ensure that only one watcher invocation thread can kill!
+              if (killed.compareAndSet(false, true))  {
+                log.info("Monkey thread found 2 replicas for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
+                CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
+                try {
+                  Thread.sleep(1000 + random().nextInt(500));
+                  ChaosMonkey.kill(cjetty);
+                  stop.set(true);
+                  return true;
+                } catch (Exception e) {
+                  log.error("Monkey unable to kill jetty at port " + cjetty.jetty.getLocalPort(), e);
+                }
+              }
+            }
+            log.info("Monkey thread found only one replica for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
+            return false;
+          }
+        });
+      }
+    };
+
+    Thread monkeyThread = new Thread(monkey);
+    monkeyThread.start();
+    try {
+      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+      splitShard.setShardName(SHARD1);
+      String asyncId = splitShard.processAsync(cloudClient);
+      RequestStatusState splitStatus = null;
+      try {
+        splitStatus = CollectionAdminRequest.requestStatus(asyncId).waitFor(cloudClient, 120);
+      } catch (Exception e) {
+        log.warn("Failed to get request status, maybe because the overseer node was shutdown by monkey", e);
+      }
+
+      // we don't care if the split failed because we are injecting faults and it is likely
+      // that the split has failed but in any case we want to assert that all docs that got
+      // indexed are available in SolrCloud and if the split succeeded then all replicas of the sub-shard
+      // must be consistent (i.e. have same numdocs)
+
+      log.info("Shard split request state is COMPLETED");
+      stop.set(true);
+      monkeyThread.join();
+      Set<String> addFails = new HashSet<>();
+      Set<String> deleteFails = new HashSet<>();
+      for (StoppableIndexingThread indexer : indexers) {
+        addFails.addAll(indexer.getAddFails());
+        deleteFails.addAll(indexer.getDeleteFails());
+      }
+
+      CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
+      log.info("Starting shard1 leader jetty at port {}", cjetty.jetty.getLocalPort());
+      ChaosMonkey.start(cjetty.jetty);
+      cloudClient.getZkStateReader().forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+      log.info("Current collection state: {}", printClusterStateInfo(AbstractDistribZkTestBase.DEFAULT_COLLECTION));
+
+      boolean replicaCreationsFailed = false;
+      if (splitStatus == RequestStatusState.FAILED)  {
+        // either one or more replica creation failed (because it may have been created on the same parent shard leader node)
+        // or the split may have failed while trying to soft-commit *after* all replicas have been created
+        // the latter counts as a successful switch even if the API doesn't say so
+        // so we must find a way to distinguish between the two
+        // an easy way to do that is to look at the sub-shard replicas and check if the replica core actually exists
+        // instead of existing solely inside the cluster state
+        DocCollection collectionState = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        Slice slice10 = collectionState.getSlice(SHARD1_0);
+        Slice slice11 = collectionState.getSlice(SHARD1_1);
+        if (slice10 != null && slice11 != null) {
+          for (Replica replica : slice10) {
+            if (!doesReplicaCoreExist(replica)) {
+              replicaCreationsFailed = true;
+              break;
+            }
+          }
+          for (Replica replica : slice11) {
+            if (!doesReplicaCoreExist(replica)) {
+              replicaCreationsFailed = true;
+              break;
+            }
+          }
+        }
+      }
+
+      // true if sub-shard states switch to 'active' eventually
+      AtomicBoolean areSubShardsActive = new AtomicBoolean(false);
+
+      if (!replicaCreationsFailed)  {
+        // all sub-shard replicas were created successfully so all cores must recover eventually
+        waitForRecoveriesToFinish(AbstractDistribZkTestBase.DEFAULT_COLLECTION, true);
+        // let's wait for the overseer to switch shard states
+        CountDownLatch latch = new CountDownLatch(1);
+        cloudClient.getZkStateReader().registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {
+          @Override
+          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+            Slice parent = collectionState.getSlice(SHARD1);
+            Slice slice10 = collectionState.getSlice(SHARD1_0);
+            Slice slice11 = collectionState.getSlice(SHARD1_1);
+            if (slice10 != null && slice11 != null &&
+                parent.getState() == Slice.State.INACTIVE &&
+                slice10.getState() == Slice.State.ACTIVE &&
+                slice11.getState() == Slice.State.ACTIVE) {
+              areSubShardsActive.set(true);
+              latch.countDown();
+              return true; // removes the watch
+            } else if (slice10 != null && slice11 != null &&
+                parent.getState() == Slice.State.ACTIVE &&
+                slice10.getState() == Slice.State.RECOVERY_FAILED &&
+                slice11.getState() == Slice.State.RECOVERY_FAILED) {
+              areSubShardsActive.set(false);
+              latch.countDown();
+              return true;
+            }
+            return false;
+          }
+        });
+
+        latch.await(2, TimeUnit.MINUTES);
+
+        if (latch.getCount() != 0)  {
+          // sanity check
+          fail("We think that split was successful but sub-shard states were not updated even after 2 minutes.");
+        }
+      }
+
+      cloudClient.commit(); // for visibility of results on sub-shards
+
+      checkShardConsistency(true, true, addFails, deleteFails);
+      long ctrlDocs = controlClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+      // ensure we have added more than 0 docs
+      long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
+      assertTrue("Found " + ctrlDocs + " control docs", cloudClientDocs > 0);
+      assertEquals("Found " + ctrlDocs + " control docs and " + cloudClientDocs + " cloud docs", ctrlDocs, cloudClientDocs);
+
+      // check consistency of sub-shard replicas explicitly because the checkShardConsistency
+      // method doesn't handle new shards/replicas so well.
+      if (areSubShardsActive.get()) {
+        ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+        DocCollection collection = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+        int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
+        assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
+        numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_1));
+        assertEquals("We should have checked consistency for exactly 2 replicas of shard1_1", 2, numReplicasChecked);
+      }
+    } finally {
+      stop.set(true);
+      monkeyThread.join();
+    }
+  }
+
+  private boolean doesReplicaCoreExist(Replica replica) throws IOException {
+    try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getStr(BASE_URL_PROP))
+        .withHttpClient(cloudClient.getLbClient().getHttpClient()).build())  {
+      String coreName = replica.getCoreName();
+      try {
+        CoreAdminResponse status = CoreAdminRequest.getStatus(coreName, client);
+        if (status.getCoreStatus(coreName) == null || status.getCoreStatus(coreName).size() == 0) {
+          return false;
+        }
+      } catch (Exception e) {
+        log.warn("Error gettting core status of replica " + replica + ". Perhaps it does not exist!", e);
+        return false;
+      }
+    }
+    return true;
+  }
+
+  @Test
+  public void testSplitShardWithRule() throws Exception {
+    waitForThingsToLevelOut(15);
+
+    if (usually()) {
+      log.info("Using legacyCloud=false for cluster");
+      CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+          .process(cloudClient);
+    }
+
+    log.info("Starting testSplitShardWithRule");
+    String collectionName = "shardSplitWithRule";
+    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2)
+        .setRule("shard:*,replica:<2,node:*");
+    CollectionAdminResponse response = createRequest.process(cloudClient);
+    assertEquals(0, response.getStatus());
+
+    CollectionAdminRequest.SplitShard splitShardRequest = CollectionAdminRequest.splitShard(collectionName)
+        .setShardName("shard1");
+    response = splitShardRequest.process(cloudClient);
+    assertEquals(String.valueOf(response.getErrorMessages()), 0, response.getStatus());
+  }
+
+  private void incompleteOrOverlappingCustomRangeTest() throws Exception  {
+    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
+    Slice shard1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(SHARD1);
+    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
+
+    List<DocRouter.Range> subRanges = new ArrayList<>();
+    List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
+
+    // test with only one range
+    subRanges.add(ranges.get(0));
+    try {
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      fail("Shard splitting with just one custom hash range should not succeed");
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      log.info("Expected exception:", e);
+    }
+    subRanges.clear();
+
+    // test with ranges with a hole in between them
+    subRanges.add(ranges.get(3)); // order shouldn't matter
+    subRanges.add(ranges.get(0));
+    try {
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      fail("Shard splitting with missing hashes in between given ranges should not succeed");
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      log.info("Expected exception:", e);
+    }
+    subRanges.clear();
+
+    // test with overlapping ranges
+    subRanges.add(ranges.get(0));
+    subRanges.add(ranges.get(1));
+    subRanges.add(ranges.get(2));
+    subRanges.add(new DocRouter.Range(ranges.get(3).min - 15, ranges.get(3).max));
+    try {
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      fail("Shard splitting with overlapping ranges should not succeed");
+    } catch (HttpSolrClient.RemoteSolrException e) {
+      log.info("Expected exception:", e);
+    }
+    subRanges.clear();
+  }
+
+  private void splitByUniqueKeyTest() throws Exception {
+    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
+    Slice shard1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(SHARD1);
+    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
+    List<DocRouter.Range> subRanges = new ArrayList<>();
+    if (usually())  {
+      List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
+      // 75% of range goes to shard1_0 and the rest to shard1_1
+      subRanges.add(new DocRouter.Range(ranges.get(0).min, ranges.get(2).max));
+      subRanges.add(ranges.get(3));
+    } else  {
+      subRanges = router.partitionRange(2, shard1Range);
+    }
+    final List<DocRouter.Range> ranges = subRanges;
+    final int[] docCounts = new int[ranges.size()];
+    int numReplicas = shard1.getReplicas().size();
+
+    del("*:*");
+    for (int id = 0; id <= 100; id++) {
+      String shardKey = "" + (char)('a' + (id % 26)); // See comment in ShardRoutingTest for hash distribution
+      indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id), id);
+    }
+    commit();
+
+    Thread indexThread = new Thread() {
+      @Override
+      public void run() {
+        Random random = random();
+        int max = atLeast(random, 401);
+        int sleep = atLeast(random, 25);
+        log.info("SHARDSPLITTEST: Going to add " + max + " number of docs at 1 doc per " + sleep + "ms");
+        Set<String> deleted = new HashSet<>();
+        for (int id = 101; id < max; id++) {
+          try {
+            indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
+            Thread.sleep(sleep);
+            if (usually(random))  {
+              String delId = String.valueOf(random.nextInt(id - 101 + 1) + 101);
+              if (deleted.contains(delId))  continue;
+              try {
+                deleteAndUpdateCount(router, ranges, docCounts, delId);
+                deleted.add(delId);
+              } catch (Exception e) {
+                log.error("Exception while deleting docs", e);
+              }
+            }
+          } catch (Exception e) {
+            log.error("Exception while adding doc id = " + id, e);
+            // do not select this id for deletion ever
+            deleted.add(String.valueOf(id));
+          }
+        }
+      }
+    };
+    indexThread.start();
+
+    try {
+      for (int i = 0; i < 3; i++) {
+        try {
+          splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+          log.info("Layout after split: \n");
+          printLayout();
+          break;
+        } catch (HttpSolrClient.RemoteSolrException e) {
+          if (e.code() != 500)  {
+            throw e;
+          }
+          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
+          if (i == 2) {
+            fail("SPLITSHARD was not successful even after three tries");
+          }
+        }
+      }
+    } finally {
+      try {
+        indexThread.join();
+      } catch (InterruptedException e) {
+        log.error("Indexing thread interrupted", e);
+      }
+    }
+
+    waitForRecoveriesToFinish(true);
+    checkDocCountsAndShardStates(docCounts, numReplicas);
+  }
+
+
+  public void splitByRouteFieldTest() throws Exception  {
+    log.info("Starting testSplitWithRouteField");
+    String collectionName = "routeFieldColl";
+    int numShards = 4;
+    int replicationFactor = 2;
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
+        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
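+    // The +1 leaves headroom so all numShards * replicationFactor cores fit on the live nodes.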
+
+    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
+    String shard_fld = "shard_s";
+    try (CloudSolrClient client = createCloudClient(null)) {
+      Map<String, Object> props = Utils.makeMap(
+          REPLICATION_FACTOR, replicationFactor,
+          MAX_SHARDS_PER_NODE, maxShardsPerNode,
+          OverseerCollectionMessageHandler.NUM_SLICES, numShards,
+          "router.field", shard_fld);
+
+      createCollection(collectionInfos, collectionName, props, client);
+    }
+
+    List<Integer> list = collectionInfos.get(collectionName);
+    checkForCollection(collectionName, list, null);
+
+    waitForRecoveriesToFinish(false);
+
+    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+
+    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
+
+      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+      final DocRouter router = clusterState.getCollection(collectionName).getRouter();
+      Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
+      DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
+      final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
+      final int[] docCounts = new int[ranges.size()];
+
+      for (int i = 100; i <= 200; i++) {
+        String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
+
+        collectionClient.add(getDoc(id, i, "n_ti", i, shard_fld, shardKey));
+        int idx = getHashRangeIdx(router, ranges, shardKey);
+        if (idx != -1) {
+          docCounts[idx]++;
+        }
+      }
+
+      for (int i = 0; i < docCounts.length; i++) {
+        int docCount = docCounts[i];
+        log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
+      }
+
+      collectionClient.commit();
+
+      for (int i = 0; i < 3; i++) {
+        try {
+          splitShard(collectionName, SHARD1, null, null);
+          break;
+        } catch (HttpSolrClient.RemoteSolrException e) {
+          if (e.code() != 500) {
+            throw e;
+          }
+          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
+          if (i == 2) {
+            fail("SPLITSHARD was not successful even after three tries");
+          }
+        }
+      }
+
+      waitForRecoveriesToFinish(collectionName, false);
+
+      assertEquals(docCounts[0], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_0")).getResults().getNumFound());
+      assertEquals(docCounts[1], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_1")).getResults().getNumFound());
+    }
+  }
+
+  private void splitByRouteKeyTest() throws Exception {
+    log.info("Starting splitByRouteKeyTest");
+    String collectionName = "splitByRouteKeyTest";
+    int numShards = 4;
+    int replicationFactor = 2;
+    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
+        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
+
+    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      Map<String, Object> props = Utils.makeMap(
+          REPLICATION_FACTOR, replicationFactor,
+          MAX_SHARDS_PER_NODE, maxShardsPerNode,
+          OverseerCollectionMessageHandler.NUM_SLICES, numShards);
+
+      createCollection(collectionInfos, collectionName, props, client);
+    }
+
+    List<Integer> list = collectionInfos.get(collectionName);
+    checkForCollection(collectionName, list, null);
+
+    waitForRecoveriesToFinish(false);
+
+    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
+
+    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
+
+      String splitKey = "b!";
+
+      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+      final DocRouter router = clusterState.getCollection(collectionName).getRouter();
+      Slice shard1 = clusterState.getCollection(collectionName).getSlice(SHARD1);
+      DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
+      final List<DocRouter.Range> ranges = ((CompositeIdRouter) router).partitionRangeByKey(splitKey, shard1Range);
+      final int[] docCounts = new int[ranges.size()];
+
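+      // A marker value well outside the 100..200 range of ordinary docs, so documents
+      // carrying the split key can be counted separately via the n_ti field below.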
+      int uniqIdentifier = (1 << 12);
+      int splitKeyDocCount = 0;
+      for (int i = 100; i <= 200; i++) {
+        String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
+
+        String idStr = shardKey + "!" + i;
+        collectionClient.add(getDoc(id, idStr, "n_ti", (shardKey + "!").equals(splitKey) ? uniqIdentifier : i));
+        int idx = getHashRangeIdx(router, ranges, idStr);
+        if (idx != -1) {
+          docCounts[idx]++;
+        }
+        if (splitKey.equals(shardKey + "!"))
+          splitKeyDocCount++;
+      }
+
+      for (int i = 0; i < docCounts.length; i++) {
+        int docCount = docCounts[i];
+        log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
+      }
+      log.info("Route key doc count = {}", splitKeyDocCount);
+
+      collectionClient.commit();
+
+      for (int i = 0; i < 3; i++) {
+        try {
+          splitShard(collectionName, null, null, splitKey);
+          break;
+        } catch (HttpSolrClient.RemoteSolrException e) {
+          if (e.code() != 500) {
+            throw e;
+          }
+          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
+          if (i == 2) {
+            fail("SPLITSHARD was not successful even after three tries");
+          }
+        }
+      }
+
+      waitForRecoveriesToFinish(collectionName, false);
+      SolrQuery solrQuery = new SolrQuery("*:*");
+      assertEquals("DocCount on shard1_0 does not match", docCounts[0], collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
+      assertEquals("DocCount on shard1_1 does not match", docCounts[1], collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
+      assertEquals("DocCount on shard1_2 does not match", docCounts[2], collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
+
+      solrQuery = new SolrQuery("n_ti:" + uniqIdentifier);
+      assertEquals("shard1_0 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
+      assertEquals("Wrong number of docs on shard1_1 for route key: " + splitKey, splitKeyDocCount, collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
+      assertEquals("shard1_2 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
+    }
+  }
+
+  protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws Exception {
+    ClusterState clusterState = null;
+    Slice slice1_0 = null, slice1_1 = null;
+    int i = 0;
+    for (i = 0; i < 10; i++) {
+      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+      clusterState = zkStateReader.getClusterState();
+      slice1_0 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice("shard1_0");
+      slice1_1 = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice("shard1_1");
+      if (slice1_0.getState() == Slice.State.ACTIVE && slice1_1.getState() == Slice.State.ACTIVE) {
+        break;
+      }
+      Thread.sleep(500);
+    }
+
+    log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
+
+    assertNotNull("Cluster state does not contain shard1_0", slice1_0);
+    assertNotNull("Cluster state does not contain shard1_0", slice1_1);
+    assertSame("shard1_0 is not active", Slice.State.ACTIVE, slice1_0.getState());
+    assertSame("shard1_1 is not active", Slice.State.ACTIVE, slice1_1.getState());
+    assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
+    assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
+
+    commit();
+
+    // can't use checkShardConsistency because it insists on jettys and clients for each shard
+    checkSubShardConsistency(SHARD1_0);
+    checkSubShardConsistency(SHARD1_1);
+
+    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
+    query.set("distrib", false);
+
+    ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
+    QueryResponse response;
+    try (HttpSolrClient shard1_0Client = getHttpSolrClient(shard1_0.getCoreUrl())) {
+      response = shard1_0Client.query(query);
+    }
+    long shard10Count = response.getResults().getNumFound();
+
+    ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
+        AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
+    QueryResponse response2;
+    try (HttpSolrClient shard1_1Client = getHttpSolrClient(shard1_1.getCoreUrl())) {
+      response2 = shard1_1Client.query(query);
+    }
+    long shard11Count = response2.getResults().getNumFound();
+
+    logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
+
+    assertEquals("Wrong doc count on shard1_0. See SOLR-5309", docCounts[0], shard10Count);
+    assertEquals("Wrong doc count on shard1_1. See SOLR-5309", docCounts[1], shard11Count);
+  }
+
+  protected void checkSubShardConsistency(String shard) throws SolrServerException, IOException {
+    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
+    query.set("distrib", false);
+
+    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    Slice slice = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getSlice(shard);
+    long[] numFound = new long[slice.getReplicasMap().size()];
+    int c = 0;
+    for (Replica replica : slice.getReplicas()) {
+      String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
+      QueryResponse response;
+      try (HttpSolrClient client = getHttpSolrClient(coreUrl)) {
+        response = client.query(query);
+      }
+      numFound[c++] = response.getResults().getNumFound();
+      log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
+      assertTrue("Shard: " + shard + " Replica: " + coreUrl + " has 0 docs", response.getResults().getNumFound() > 0);
+    }
+    for (int i = 0; i < slice.getReplicasMap().size(); i++) {
+      assertEquals(shard + " is not consistent", numFound[0], numFound[i]);
+    }
+  }
+
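+  // Issues a raw Collections API request, roughly:
+  //   /admin/collections?action=SPLITSHARD&collection=<coll>[&shard=<id>]
+  //     [&ranges=<r1>,<r2>,...][&split.key=<key>]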
+  protected void splitShard(String collection, String shardId, List<DocRouter.Range> subRanges, String splitKey) throws SolrServerException, IOException {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
+    params.set("collection", collection);
+    if (shardId != null)  {
+      params.set("shard", shardId);
+    }
+    if (subRanges != null)  {
+      StringBuilder ranges = new StringBuilder();
+      for (int i = 0; i < subRanges.size(); i++) {
+        DocRouter.Range subRange = subRanges.get(i);
+        ranges.append(subRange.toString());
+        if (i < subRanges.size() - 1)
+          ranges.append(",");
+      }
+      params.set("ranges", ranges.toString());
+    }
+    if (splitKey != null) {
+      params.set("split.key", splitKey);
+    }
+    SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.getSolrClient()).getBaseURL();
+    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
+
+    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 30000, 60000 * 5)) {
+      baseServer.request(request);
+    }
+  }
+
+  protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
+    index("id", id, "n_ti", n);
+
+    int idx = getHashRangeIdx(router, ranges, id);
+    if (idx != -1)  {
+      docCounts[idx]++;
+    }
+  }
+
+  protected void deleteAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) throws Exception {
+    controlClient.deleteById(id);
+    cloudClient.deleteById(id);
+
+    int idx = getHashRangeIdx(router, ranges, id);
+    if (idx != -1)  {
+      docCounts[idx]--;
+    }
+  }
+
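+  // Computes the route hash for the id (0 when the router is not hash-based) and returns
+  // the index of the covering range, or -1 when none covers it.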
+  public static int getHashRangeIdx(DocRouter router, List<DocRouter.Range> ranges, String id) {
+    int hash = 0;
+    if (router instanceof HashBasedRouter) {
+      HashBasedRouter hashBasedRouter = (HashBasedRouter) router;
+      hash = hashBasedRouter.sliceHash(id, null, null, null);
+    }
+    for (int i = 0; i < ranges.size(); i++) {
+      DocRouter.Range range = ranges.get(i);
+      if (range.includes(hash))
+        return i;
+    }
+    return -1;
+  }
+
+  protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
+    for (int i = 0; i < docCounts.length; i++) {
+      int docCount = docCounts[i];
+      log.info("Expected docCount for shard1_{} = {}", i, docCount);
+    }
+
+    log.info("Actual docCount for shard1_0 = {}", shard10Count);
+    log.info("Actual docCount for shard1_1 = {}", shard11Count);
+    Map<String, String> idVsVersion = new HashMap<>();
+    Map<String, SolrDocument> shard10Docs = new HashMap<>();
+    Map<String, SolrDocument> shard11Docs = new HashMap<>();
+    for (int i = 0; i < response.getResults().size(); i++) {
+      SolrDocument document = response.getResults().get(i);
+      idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString());
+      SolrDocument old = shard10Docs.put(document.getFieldValue("id").toString(), document);
+      if (old != null) {
+        log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_0. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
+      }
+    }
+    for (int i = 0; i < response2.getResults().size(); i++) {
+      SolrDocument document = response2.getResults().get(i);
+      String value = document.getFieldValue("id").toString();
+      String version = idVsVersion.get(value);
+      if (version != null) {
+        log.error("DUPLICATE: ID: " + value + " , shard1_0Version: " + version + " shard1_1Version:" + document.getFieldValue("_version_"));
+      }
+      SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document);
+      if (old != null) {
+        log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_1. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
+      }
+    }
+  }
+
+  @Override
+  protected SolrClient createNewSolrClient(String collection, String baseUrl) {
+    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(collection, baseUrl, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
+    return client;
+  }
+
+  @Override
+  protected SolrClient createNewSolrClient(int port) {
+    HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(port, DEFAULT_CONNECTION_TIMEOUT, 5 * 60 * 1000);
+    return client;
+  }
+
+  @Override
+  protected CloudSolrClient createCloudClient(String defaultCollection) {
+    CloudSolrClient client = super.createCloudClient(defaultCollection);
+    return client;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
new file mode 100644
index 0000000..0b75bd5
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/SimpleCollectionCreateDeleteTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.cloud.OverseerCollectionConfigSetProcessor;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+import org.junit.Test;
+
+public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestBase {
+
+  public SimpleCollectionCreateDeleteTest() {
+    sliceCount = 1;
+  }
+
+  @Test
+  @ShardsFixed(num = 1)
+  public void test() throws Exception {
+    String overseerNode = OverseerCollectionConfigSetProcessor.getLeaderNode(cloudClient.getZkStateReader().getZkClient());
+    String notOverseerNode = null;
+    for (CloudJettyRunner cloudJetty : cloudJettys) {
+      if (!overseerNode.equals(cloudJetty.nodeName)) {
+        notOverseerNode = cloudJetty.nodeName;
+        break;
+      }
+    }
+    String collectionName = "SimpleCollectionCreateDeleteTest";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
+            .setCreateNodeSet(overseerNode)
+            .setStateFormat(2);
+
+    NamedList<Object> request = create.process(cloudClient).getResponse();
+
+    if (request.get("success") != null) {
+      assertTrue(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
+
+      CollectionAdminRequest delete = CollectionAdminRequest.deleteCollection(collectionName);
+      cloudClient.request(delete);
+
+      assertFalse(cloudClient.getZkStateReader().getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, false));
+
+      // create collection again on a node other than the overseer leader
+      create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
+              .setCreateNodeSet(notOverseerNode)
+              .setStateFormat(2);
+      request = create.process(cloudClient).getResponse();
+      assertTrue("Collection creation should not have failed", request.get("success") != null);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
new file mode 100644
index 0000000..5ffed50
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
@@ -0,0 +1,795 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.google.common.collect.Lists;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.V2Request;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+
+public class TestCollectionAPI extends ReplicaPropertiesBase {
+
+  public static final String COLLECTION_NAME = "testcollection";
+  public static final String COLLECTION_NAME1 = "testcollection1";
+
+  public TestCollectionAPI() {
+    schemaString = "schema15.xml";      // we need a string id
+    sliceCount = 2;
+  }
+
+  @Test
+  @ShardsFixed(num = 2)
+  public void test() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      CollectionAdminRequest.Create req;
+      if (useTlogReplicas()) {
+        req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1", 2, 0, 1, 1);
+      } else {
+        req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1", 2, 1, 0, 1);
+      }
+      req.setMaxShardsPerNode(2);
+      setV2(req);
+      client.request(req);
+      assertV2CallsCount();
+      createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
+    }
+
+    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
+    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME1, 1);
+    waitForRecoveriesToFinish(COLLECTION_NAME, false);
+    waitForRecoveriesToFinish(COLLECTION_NAME1, false);
+
+    listCollection();
+    clusterStatusNoCollection();
+    clusterStatusWithCollection();
+    clusterStatusWithCollectionAndShard();
+    clusterStatusWithRouteKey();
+    clusterStatusAliasTest();
+    clusterStatusRolesTest();
+    clusterStatusBadCollectionTest();
+    replicaPropTest();
+    clusterStatusZNodeVersion();
+    testClusterStateMigration();
+    testCollectionCreationCollectionNameValidation();
+    testCollectionCreationShardNameValidation();
+    testAliasCreationNameValidation();
+    testShardCreationNameValidation();
+  }
+
+  private void clusterStatusWithCollectionAndShard() throws IOException, SolrServerException {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", COLLECTION_NAME);
+      params.set("shard", SHARD1);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertNotNull(collections.get(COLLECTION_NAME));
+      assertEquals(1, collections.size());
+      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
+      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
+      assertEquals(1, shardStatus.size());
+      Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD1);
+      assertNotNull(selectedShardStatus);
+
+    }
+  }
+
+
+  private void listCollection() throws IOException, SolrServerException {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.LIST.toString());
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      List<String> collections = (List<String>) rsp.get("collections");
+      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
+      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
+      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
+      assertTrue(COLLECTION_NAME1 + " was not found in list", collections.contains(COLLECTION_NAME1));
+    }
+
+  }
+
+  private void clusterStatusNoCollection() throws Exception {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertNotNull(collections.get(COLLECTION_NAME1));
+      assertEquals(4, collections.size());
+
+      List<String> liveNodes = (List<String>) cluster.get("live_nodes");
+      assertNotNull("Live nodes should not be null", liveNodes);
+      assertFalse(liveNodes.isEmpty());
+    }
+
+  }
+
+  private void clusterStatusWithCollection() throws IOException, SolrServerException {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", COLLECTION_NAME);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertEquals(1, collections.size());
+      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
+      assertNotNull(collection);
+      assertEquals("conf1", collection.get("configName"));
+//      assertEquals("1", collection.get("nrtReplicas"));
+    }
+  }
+
+  private void clusterStatusZNodeVersion() throws Exception {
+    String cname = "clusterStatusZNodeVersion";
+    try (CloudSolrClient client = createCloudClient(null)) {
+      setV2(CollectionAdminRequest.createCollection(cname, "conf1", 1, 1).setMaxShardsPerNode(1)).process(client);
+      assertV2CallsCount();
+      waitForRecoveriesToFinish(cname, true);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", cname);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertEquals(1, collections.size());
+      Map<String, Object> collection = (Map<String, Object>) collections.get(cname);
+      assertNotNull(collection);
+      assertEquals("conf1", collection.get("configName"));
+      Integer znodeVersion = (Integer) collection.get("znodeVersion");
+      assertNotNull(znodeVersion);
+
+      CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(cname, "shard1");
+      setV2(addReplica);
+      addReplica.process(client);
+      assertV2CallsCount();
+      waitForRecoveriesToFinish(cname, true);
+
+      rsp = client.request(request);
+      cluster = (NamedList<Object>) rsp.get("cluster");
+      collections = (NamedList<Object>) cluster.get("collections");
+      collection = (Map<String, Object>) collections.get(cname);
+      Integer newVersion = (Integer) collection.get("znodeVersion");
+      assertNotNull(newVersion);
+      assertTrue(newVersion > znodeVersion);
+    }
+  }
+
+  private static long totalexpectedV2Calls;
+
+  public static SolrRequest setV2(SolrRequest req) {
+    if (V2Request.v2Calls.get() == null) V2Request.v2Calls.set(new AtomicLong());
+    totalexpectedV2Calls = V2Request.v2Calls.get().get();
+    if (random().nextBoolean()) {
+      req.setUseV2(true);
+      req.setUseBinaryV2(random().nextBoolean());
+      totalexpectedV2Calls++;
+    }
+    return req;
+  }
+
+  public static void assertV2CallsCount() {
+    assertEquals(totalexpectedV2Calls, V2Request.v2Calls.get().get());
+  }
+
+  private void clusterStatusWithRouteKey() throws IOException, SolrServerException {
+    try (CloudSolrClient client = createCloudClient(DEFAULT_COLLECTION)) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", "a!123"); // goes to shard2. see ShardRoutingTest for details
+      client.add(doc);
+      client.commit();
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", DEFAULT_COLLECTION);
+      params.set(ShardParams._ROUTE_, "a!");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertNotNull(collections.get(DEFAULT_COLLECTION));
+      assertEquals(1, collections.size());
+      Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
+      assertEquals("conf1", collection.get("configName"));
+      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
+      assertEquals(1, shardStatus.size());
+      Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD2);
+      assertNotNull(selectedShardStatus);
+    }
+  }
+
+  private void clusterStatusAliasTest() throws Exception  {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
+      params.set("name", "myalias");
+      params.set("collections", DEFAULT_COLLECTION + "," + COLLECTION_NAME);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      client.request(request);
+      params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", DEFAULT_COLLECTION);
+      request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+
+
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      Map<String, String> aliases = (Map<String, String>) cluster.get("aliases");
+      assertNotNull("Aliases should not be null", aliases);
+      assertEquals("Alias: myalias not found in cluster status",
+          DEFAULT_COLLECTION + "," + COLLECTION_NAME, aliases.get("myalias"));
+
+      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
+      assertNotNull("Collections should not be null in cluster state", collections);
+      assertNotNull(collections.get(DEFAULT_COLLECTION));
+      Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
+      assertEquals("conf1", collection.get("configName"));
+      List<String> collAlias = (List<String>) collection.get("aliases");
+      assertEquals("Aliases not found", Lists.newArrayList("myalias"), collAlias);
+    }
+  }
+
+  private void clusterStatusRolesTest() throws Exception  {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      Replica replica = client.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.ADDROLE.toString());
+      params.set("node", replica.getNodeName());
+      params.set("role", "overseer");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      client.request(request);
+
+      params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", DEFAULT_COLLECTION);
+      request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
+      assertNotNull("Cluster state should not be null", cluster);
+      Map<String, Object> roles = (Map<String, Object>) cluster.get("roles");
+      assertNotNull("Role information should not be null", roles);
+      List<String> overseer = (List<String>) roles.get("overseer");
+      assertNotNull(overseer);
+      assertEquals(1, overseer.size());
+      assertTrue(overseer.contains(replica.getNodeName()));
+    }
+  }
+
+  private void clusterStatusBadCollectionTest() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
+      params.set("collection", "bad_collection_name");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail("Collection does not exist. An exception should be thrown");
+      } catch (SolrException e) {
+        //expected
+        assertTrue(e.getMessage().contains("Collection: bad_collection_name not found"));
+      }
+    }
+  }
+
+  private void replicaPropTest() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
+      List<String> sliceList = new ArrayList<>(slices.keySet());
+      String c1_s1 = sliceList.get(0);
+      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
+      String c1_s1_r1 = replicasList.get(0);
+      String c1_s1_r2 = replicasList.get(1);
+
+      String c1_s2 = sliceList.get(1);
+      replicasList = new ArrayList<>(slices.get(c1_s2).getReplicasMap().keySet());
+      String c1_s2_r1 = replicasList.get(0);
+      String c1_s2_r2 = replicasList.get(1);
+
+
+      slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME1).getSlicesMap();
+      sliceList = new ArrayList<>(slices.keySet());
+      String c2_s1 = sliceList.get(0);
+      replicasList = new ArrayList<>(slices.get(c2_s1).getReplicasMap().keySet());
+      String c2_s1_r1 = replicasList.get(0);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString());
+
+      // Ensure we get errors back when required parameters are omitted
+
+      missingParamsError(client, params);
+      params.set("collection", COLLECTION_NAME);
+      missingParamsError(client, params);
+      params.set("shard", c1_s1);
+      missingParamsError(client, params);
+      params.set("replica", c1_s1_r1);
+      missingParamsError(client, params);
+      params.set("property", "preferredLeader");
+      missingParamsError(client, params);
+      params.set("property.value", "true");
+
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      client.request(request);
+
+      // The above should have set exactly one preferredleader...
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r2,
+          "property", "preferredLeader",
+          "property.value", "true");
+      // The preferred leader property for shard1 should have switched to the other replica.
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s2,
+          "replica", c1_s2_r1,
+          "property", "preferredLeader",
+          "property.value", "true");
+
+      // Now we should have a preferred leader in both shards...
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME1,
+          "shard", c2_s1,
+          "replica", c2_s1_r1,
+          "property", "preferredLeader",
+          "property.value", "true");
+
+      // Now we should have three preferred leaders.
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME1, c2_s1_r1, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME1,
+          "shard", c2_s1,
+          "replica", c2_s1_r1,
+          "property", "preferredLeader");
+
+      // Now we should have two preferred leaders.
+      // But first we have to wait for the overseer to finish the action
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      // Try adding an arbitrary property to one that has the leader property
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "testprop",
+          "property.value", "true");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r2,
+          "property", "prop",
+          "property.value", "silly");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "testprop",
+          "property.value", "nonsense",
+          OverseerCollectionMessageHandler.SHARD_UNIQUE, "true");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "nonsense");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "property.testprop",
+          "property.value", "true",
+          OverseerCollectionMessageHandler.SHARD_UNIQUE, "false");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "testprop", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "property.testprop");
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "testprop");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+            "collection", COLLECTION_NAME,
+            "shard", c1_s1,
+            "replica", c1_s1_r1,
+            "property", "preferredLeader",
+            "property.value", "true",
+            OverseerCollectionMessageHandler.SHARD_UNIQUE, "false");
+        fail("Should have thrown an exception, setting shardUnique=false is not allowed for 'preferredLeader'.");
+      } catch (SolrException se) {
+        assertTrue("Should have received a specific error message",
+            se.getMessage().contains("with the shardUnique parameter set to something other than 'true'"));
+      }
+
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "preferredleader", "true");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s2_r1, "preferredleader", "true");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "testprop");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r2, "prop", "silly");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME, "preferredLeader");
+      verifyUniquePropertyWithinCollection(client, COLLECTION_NAME1, "preferredLeader");
+
+      Map<String, String> origProps = getProps(client, COLLECTION_NAME, c1_s1_r1,
+          "state", "core", "node_name", "base_url");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "state",
+          "property.value", "state_bad");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "core",
+          "property.value", "core_bad");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "node_name",
+          "property.value", "node_name_bad");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "base_url",
+          "property.value", "base_url_bad");
+
+      // The above should have created new properties rather than overwriting the built-in values.
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "state", "state_bad");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "core", "core_bad");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "node_name", "node_name_bad");
+      verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, "base_url", "base_url_bad");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "state");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "core");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "node_name");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.DELETEREPLICAPROP.toLower(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "base_url");
+
+      // They better not have been changed!
+      for (Map.Entry<String, String> ent : origProps.entrySet()) {
+        verifyPropertyVal(client, COLLECTION_NAME, c1_s1_r1, ent.getKey(), ent.getValue());
+      }
+
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "state");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "core");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "node_name");
+      verifyPropertyNotPresent(client, COLLECTION_NAME, c1_s1_r1, "base_url");
+
+    }
+  }
+
+  private void testClusterStateMigration() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+
+      CollectionAdminRequest.createCollection("testClusterStateMigration","conf1",1,1).setStateFormat(1).process(client);
+
+      waitForRecoveriesToFinish("testClusterStateMigration", true);
+
+      assertEquals(1, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
+
+      for (int i = 0; i < 10; i++) {
+        SolrInputDocument doc = new SolrInputDocument();
+        doc.addField("id", "id_" + i);
+        client.add("testClusterStateMigration", doc);
+      }
+      client.commit("testClusterStateMigration");
+
+      CollectionAdminRequest.migrateCollectionFormat("testClusterStateMigration").process(client);
+
+      client.getZkStateReader().forceUpdateCollection("testClusterStateMigration");
+
+      assertEquals(2, client.getZkStateReader().getClusterState().getCollection("testClusterStateMigration").getStateFormat());
+
+      QueryResponse response = client.query("testClusterStateMigration", new SolrQuery("*:*"));
+      assertEquals(10, response.getResults().getNumFound());
+    }
+  }
+  
+  private void testCollectionCreationCollectionNameValidation() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
+      params.set("name", "invalid@name#with$weird%characters");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail();
+      } catch (RemoteSolrException e) {
+        final String errorMessage = e.getMessage();
+        assertTrue(errorMessage.contains("Invalid collection"));
+        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
+        assertTrue(errorMessage.contains("collection names must consist entirely of"));
+      }
+    }
+  }
+  
+  private void testCollectionCreationShardNameValidation() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
+      params.set("name", "valid_collection_name");
+      params.set("router.name", "implicit");
+      params.set("numShards", "1");
+      params.set("shards", "invalid@name#with$weird%characters");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail();
+      } catch (RemoteSolrException e) {
+        final String errorMessage = e.getMessage();
+        assertTrue(errorMessage.contains("Invalid shard"));
+        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
+        assertTrue(errorMessage.contains("shard names must consist entirely of"));
+      }
+    }
+  }
+  
+  private void testAliasCreationNameValidation() throws Exception{
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
+      params.set("name", "invalid@name#with$weird%characters");
+      params.set("collections", COLLECTION_NAME);
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail();
+      } catch (RemoteSolrException e) {
+        final String errorMessage = e.getMessage();
+        assertTrue(errorMessage.contains("Invalid alias"));
+        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
+        assertTrue(errorMessage.contains("alias names must consist entirely of"));
+      }
+    }
+  }
+
+  private void testShardCreationNameValidation() throws Exception {
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      // Create a collection w/ implicit router
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATE.toString());
+      params.set("name", "valid_collection_name");
+      params.set("shards", "a");
+      params.set("router.name", "implicit");
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      client.request(request);
+
+      params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.CREATESHARD.toString());
+      params.set("collection", "valid_collection_name");
+      params.set("shard", "invalid@name#with$weird%characters");
+
+      request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      try {
+        client.request(request);
+        fail();
+      } catch (RemoteSolrException e) {
+        final String errorMessage = e.getMessage();
+        assertTrue(errorMessage.contains("Invalid shard"));
+        assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
+        assertTrue(errorMessage.contains("shard names must consist entirely of"));
+      }
+    }
+  }
+
+  // The returned map is expected to contain every requested key, though the values may be blank.
+  private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
+      throws KeeperException, InterruptedException {
+
+    client.getZkStateReader().forceUpdateCollection(collectionName);
+    ClusterState clusterState = client.getZkStateReader().getClusterState();
+    final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
+    if (docCollection == null || docCollection.getReplica(replicaName) == null) {
+      fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
+    }
+    Replica replica = docCollection.getReplica(replicaName);
+    Map<String, String> propMap = new HashMap<>();
+    for (String prop : props) {
+      propMap.put(prop, replica.getProperty(prop));
+    }
+    return propMap;
+  }
+  private void missingParamsError(CloudSolrClient client, ModifiableSolrParams origParams)
+      throws IOException, SolrServerException {
+
+    SolrRequest request;
+    try {
+      request = new QueryRequest(origParams);
+      request.setPath("/admin/collections");
+      client.request(request);
+      fail("Should have thrown a SolrException due to lack of a required parameter.");
+    } catch (SolrException se) {
+      assertTrue("Should have gotten a specific message back mentioning 'missing required parameter'. Got: " + se.getMessage(),
+          se.getMessage().toLowerCase(Locale.ROOT).contains("missing required parameter:"));
+    }
+  }
+}
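
The CLUSTERSTATUS assertions above all share one request pattern, condensed
here as a sketch; it assumes an existing CloudSolrClient named client, and the
collection name is illustrative:

    // Query the Collections API directly and pull out the "cluster" section.
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
    params.set("collection", "testcollection");
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    NamedList<Object> rsp = client.request(request);
    NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");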


[10/15] lucene-solr:master: SOLR-11817: Move Collections API classes to it's own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
new file mode 100644
index 0000000..c411fbc
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/BackupCmd.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+
+import org.apache.lucene.util.Version;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Replica.State;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.backup.BackupManager;
+import org.apache.solr.core.backup.repository.BackupRepository;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
+import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+public class BackupCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public BackupCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String backupName = message.getStr(NAME);
+    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
+
+    Instant startTime = Instant.now();
+
+    CoreContainer cc = ocmh.overseer.getCoreContainer();
+    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
+    BackupManager backupMgr = new BackupManager(repository, ocmh.zkStateReader);
+
+    // Backup location
+    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+    URI backupPath = repository.resolve(location, backupName);
+
+    // Fail fast if the backup directory already exists.
+    if (repository.exists(backupPath)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The backup directory already exists: " + backupPath);
+    }
+
+    // Create a directory to store backup details.
+    repository.createDirectory(backupPath);
+
+    String strategy = message.getStr(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
+    switch (strategy) {
+      case CollectionAdminParams.COPY_FILES_STRATEGY: {
+        copyIndexFiles(backupPath, message, results);
+        break;
+      }
+      case CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY: {
+        break;
+      }
+    }
+
+    log.info("Starting to backup ZK data for backupName={}", backupName);
+
+    //Download the configs
+    String configName = ocmh.zkStateReader.readConfigName(collectionName);
+    backupMgr.downloadConfigDir(location, backupName, configName);
+
+    // Save the collection's state. It can live in the monolithic clusterstate.json or in an individual state.json.
+    // Since we don't want to distinguish between the two, we extract the state and back it up as a separate JSON file.
+    DocCollection collectionState = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
+    backupMgr.writeCollectionState(location, backupName, collectionName, collectionState);
+
+    Properties properties = new Properties();
+
+    properties.put(BackupManager.BACKUP_NAME_PROP, backupName);
+    properties.put(BackupManager.COLLECTION_NAME_PROP, collectionName);
+    properties.put(OverseerCollectionMessageHandler.COLL_CONF, configName);
+    properties.put(BackupManager.START_TIME_PROP, startTime.toString());
+    properties.put(BackupManager.INDEX_VERSION_PROP, Version.LATEST.toString());
+    //TODO: Add an MD5 checksum of the configset. If a configset with the same name exists during restore, we can compare checksums to see if they match;
+    //if they do not, we can throw an error or support an 'overwriteConfig' flag.
+    //TODO: Save numDocs for the shard leader. We can use it to sanity-check the restore.
+
+    backupMgr.writeBackupProperties(location, backupName, properties);
+
+    log.info("Completed backing up ZK data for backupName={}", backupName);
+  }
+
+  private Replica selectReplicaWithSnapshot(CollectionSnapshotMetaData snapshotMeta, Slice slice) {
+    // The goal here is to choose the snapshot of the replica that was the leader at the time the snapshot was created.
+    // If that is not possible, we choose any other replica for the given shard.
+    Collection<CoreSnapshotMetaData> snapshots = snapshotMeta.getReplicaSnapshotsForShard(slice.getName());
+
+    Optional<CoreSnapshotMetaData> leaderCore = snapshots.stream().filter(x -> x.isLeader()).findFirst();
+    if (leaderCore.isPresent()) {
+      log.info("Replica {} was the leader when snapshot {} was created.", leaderCore.get().getCoreName(), snapshotMeta.getName());
+      Replica r = slice.getReplica(leaderCore.get().getCoreName());
+      if ((r != null) && !r.getState().equals(State.DOWN)) {
+        return r;
+      }
+    }
+
+    Optional<Replica> r = slice.getReplicas().stream()
+                               .filter(x -> x.getState() != State.DOWN && snapshotMeta.isSnapshotExists(slice.getName(), x))
+                               .findFirst();
+
+    if (!r.isPresent()) {
+      throw new SolrException(ErrorCode.SERVER_ERROR,
+          "Unable to find any live replica with a snapshot named " + snapshotMeta.getName() + " for shard " + slice.getName());
+    }
+
+    return r.get();
+  }
+
+  private void copyIndexFiles(URI backupPath, ZkNodeProps request, NamedList results) throws Exception {
+    String collectionName = request.getStr(COLLECTION_PROP);
+    String backupName = request.getStr(NAME);
+    String asyncId = request.getStr(ASYNC);
+    String repoName = request.getStr(CoreAdminParams.BACKUP_REPOSITORY);
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    Map<String, String> requestMap = new HashMap<>();
+
+    String commitName = request.getStr(CoreAdminParams.COMMIT_NAME);
+    Optional<CollectionSnapshotMetaData> snapshotMeta = Optional.empty();
+    if (commitName != null) {
+      SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
+      snapshotMeta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
+      if (!snapshotMeta.isPresent()) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
+            + " does not exist for collection " + collectionName);
+      }
+      if (snapshotMeta.get().getStatus() != SnapshotStatus.Successful) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName + " for collection " + collectionName
+            + " has not completed successfully. The status is " + snapshotMeta.get().getStatus());
+      }
+    }
+
+    log.info("Starting backup of collection={} with backupName={} at location={}", collectionName, backupName,
+        backupPath);
+
+    Collection<String> shardsToConsider = Collections.emptySet();
+    if (snapshotMeta.isPresent()) {
+      shardsToConsider = snapshotMeta.get().getShards();
+    }
+
+    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getActiveSlices()) {
+      Replica replica = null;
+
+      if (snapshotMeta.isPresent()) {
+        if (!shardsToConsider.contains(slice.getName())) {
+          log.warn("Skipping the backup for shard {} since it wasn't part of the collection {} when snapshot {} was created.",
+              slice.getName(), collectionName, snapshotMeta.get().getName());
+          continue;
+        }
+        replica = selectReplicaWithSnapshot(snapshotMeta.get(), slice);
+      } else {
+        // Note: this can return null when the shard currently has no leader.
+        replica = slice.getLeader();
+        if (replica == null) {
+          throw new SolrException(ErrorCode.SERVER_ERROR, "No 'leader' replica available for shard " + slice.getName() + " of collection " + collectionName);
+        }
+      }
+
+      String coreName = replica.getStr(CORE_NAME_PROP);
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.BACKUPCORE.toString());
+      params.set(NAME, slice.getName());
+      params.set(CoreAdminParams.BACKUP_REPOSITORY, repoName);
+      params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString()); // note: the index dir will live under this location, named "snapshot." + the slice name
+      params.set(CORE_NAME_PROP, coreName);
+      if (snapshotMeta.isPresent()) {
+        params.set(CoreAdminParams.COMMIT_NAME, snapshotMeta.get().getName());
+      }
+
+      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
+      log.debug("Sent backup request to core={} for backupName={}", coreName, backupName);
+    }
+    log.debug("Sent backup requests to all shard leaders for backupName={}", backupName);
+
+    ocmh.processResponses(results, shardHandler, true, "Could not backup all replicas", asyncId, requestMap);
+  }
+}
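
For orientation, the client-side request that ends up in this command looks
roughly as follows. A sketch assuming an existing CloudSolrClient named
client; the collection name, backup name, and location are illustrative:

    // Back up a collection to a directory visible to all Solr nodes; the
    // overseer dispatches the request to BackupCmd.call().
    CollectionAdminRequest.Backup backup =
        CollectionAdminRequest.backupCollection("mycollection", "mybackup")
            .setLocation("/mnt/backups");
    backup.process(client);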

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
new file mode 100644
index 0000000..c54d792
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateAliasCmd.java
@@ -0,0 +1,100 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+
+public class CreateAliasCmd implements OverseerCollectionMessageHandler.Cmd {
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public CreateAliasCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results)
+      throws Exception {
+    final String aliasName = message.getStr(NAME);
+    final List<String> canonicalCollectionList = parseCollectionsParameter(message.get("collections"));
+    final String canonicalCollectionsString = StrUtils.join(canonicalCollectionList, ',');
+
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    validateAllCollectionsExistAndNoDups(canonicalCollectionList, zkStateReader);
+
+    zkStateReader.aliasesHolder.applyModificationAndExportToZk(aliases -> aliases.cloneWithCollectionAlias(aliasName, canonicalCollectionsString));
+
+    // Sleep a bit to allow ZooKeeper state propagation.
+    //
+    // THIS IS A KLUDGE.
+    //
+    // Solr's view of the cluster is eventually consistent. *Eventually* all nodes and CloudSolrClients will be aware of
+    // alias changes, but not immediately. If a newly created alias is queried, things should work right away, since
+    // Solr will check whether it needs to fetch the latest aliases when it can't otherwise resolve the name. However,
+    // modifications to an existing alias will take some time to become visible.
+    //
+    // We could levy this requirement on the client but they would probably always add an obligatory sleep, which is
+    // just kicking the can down the road.  Perhaps ideally at this juncture here we could somehow wait until all
+    // Solr nodes in the cluster have the latest aliases?
+    Thread.sleep(100);
+  }
+
+  private void validateAllCollectionsExistAndNoDups(List<String> collectionList, ZkStateReader zkStateReader) {
+    final String collectionStr = StrUtils.join(collectionList, ',');
+
+    if (new HashSet<>(collectionList).size() != collectionList.size()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', since it contains duplicates", collectionStr));
+    }
+    ClusterState clusterState = zkStateReader.getClusterState();
+    Set<String> aliasNames = zkStateReader.getAliases().getCollectionAliasListMap().keySet();
+    for (String collection : collectionList) {
+      if (clusterState.getCollectionOrNull(collection) == null && !aliasNames.contains(collection)) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', '%s' is not an existing collection or alias", collectionStr, collection));
+      }
+    }
+  }
+  
+  /**
+   * The v2 API directs that the 'collections' parameter be provided as a JSON array (e.g. ["a", "b"]).  We also
+   * maintain support for the legacy format, a comma-separated list (e.g. a,b).
+   */
+  @SuppressWarnings("unchecked")
+  private List<String> parseCollectionsParameter(Object colls) {
+    if (colls == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "missing collections param");
+    if (colls instanceof List) return (List<String>) colls;
+    return StrUtils.splitSmart(colls.toString(), ",", true).stream()
+        .map(String::trim)
+        .collect(Collectors.toList());
+  }
+
+}
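
The corresponding client-side call is a one-liner. A sketch assuming an
existing CloudSolrClient named client; the alias and collection names are
illustrative (over the v2 API the collections would instead be passed as a
JSON array, as the javadoc above notes):

    // Create (or replace) an alias that spans two collections.
    CollectionAdminRequest.createAlias("myalias", "coll1,coll2").process(client);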

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
new file mode 100644
index 0000000..4d9c971
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -0,0 +1,531 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.DistribStateManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.cloud.overseer.ClusterStateMutator;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.cloud.ZooKeeperException;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.admin.ConfigSetsHandlerApi;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.handler.component.ShardRequest;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.util.StrUtils.formatString;
+
+public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+  private final DistribStateManager stateManager;
+
+  public CreateCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.stateManager = ocmh.cloudManager.getDistribStateManager();
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    final String collectionName = message.getStr(NAME);
+    final boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
+    log.info("Create collection {}", collectionName);
+    if (clusterState.hasCollection(collectionName)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
+    }
+
+    String configName = getConfigName(collectionName, message);
+    if (configName == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No config set found to associate with the collection.");
+    }
+
+    ocmh.validateConfigOrThrowSolrException(configName);
+    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
+
+    try {
+
+      final String async = message.getStr(ASYNC);
+
+      List<String> nodeList = new ArrayList<>();
+      List<String> shardNames = new ArrayList<>();
+      List<ReplicaPosition> replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, message,
+          nodeList, shardNames, sessionWrapper);
+      ZkStateReader zkStateReader = ocmh.zkStateReader;
+      boolean isLegacyCloud = Overseer.isLegacy(zkStateReader);
+
+      ocmh.createConfNode(stateManager, configName, collectionName, isLegacyCloud);
+
+      Map<String,String> collectionParams = new HashMap<>();
+      Map<String,Object> collectionProps = message.getProperties();
+      for (String propName : collectionProps.keySet()) {
+        if (propName.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
+          collectionParams.put(propName.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), (String) collectionProps.get(propName));
+        }
+      }
+      
+      createCollectionZkNode(stateManager, collectionName, collectionParams);
+      
+      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
+
+      // wait for a while until we see the collection in the cluster state
+      TimeOut waitUntil = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+      boolean created = false;
+      while (!waitUntil.hasTimedOut()) {
+        waitUntil.sleep(100);
+        created = ocmh.cloudManager.getClusterStateProvider().getClusterState().hasCollection(collectionName);
+        if (created) break;
+      }
+      if (!created) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
+      }
+
+      if (nodeList.isEmpty()) {
+        log.debug("Finished create command for collection: {}", collectionName);
+        return;
+      }
+
+      // For tracking async calls.
+      Map<String, String> requestMap = new HashMap<>();
+
+
+      log.debug(formatString("Creating SolrCores for new collection {0}, shardNames {1} , message : {2}",
+          collectionName, shardNames, message));
+      Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
+      ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+      for (ReplicaPosition replicaPosition : replicaPositions) {
+        String nodeName = replicaPosition.node;
+        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(),
+            ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName),
+            replicaPosition.shard, replicaPosition.type, true);
+        log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
+            , coreName, replicaPosition.shard, collectionName, nodeName));
+
+
+        String baseUrl = zkStateReader.getBaseUrlForNodeName(nodeName);
+        //in the new mode, create the replica in clusterstate prior to creating the core.
+        // Otherwise the core creation fails
+        if (!isLegacyCloud) {
+          ZkNodeProps props = new ZkNodeProps(
+              Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
+              ZkStateReader.COLLECTION_PROP, collectionName,
+              ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
+              ZkStateReader.CORE_NAME_PROP, coreName,
+              ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
+              ZkStateReader.BASE_URL_PROP, baseUrl,
+              ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
+              CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+          Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(props));
+        }
+
+        // Need to create new params for each request
+        ModifiableSolrParams params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
+
+        params.set(CoreAdminParams.NAME, coreName);
+        params.set(COLL_CONF, configName);
+        params.set(CoreAdminParams.COLLECTION, collectionName);
+        params.set(CoreAdminParams.SHARD, replicaPosition.shard);
+        params.set(ZkStateReader.NUM_SHARDS_PROP, shardNames.size());
+        params.set(CoreAdminParams.NEW_COLLECTION, "true");
+        params.set(CoreAdminParams.REPLICA_TYPE, replicaPosition.type.name());
+
+        if (async != null) {
+          String coreAdminAsyncId = async + Math.abs(System.nanoTime());
+          params.add(ASYNC, coreAdminAsyncId);
+          requestMap.put(nodeName, coreAdminAsyncId);
+        }
+        ocmh.addPropertyParams(message, params);
+
+        ShardRequest sreq = new ShardRequest();
+        sreq.nodeName = nodeName;
+        params.set("qt", ocmh.adminPath);
+        sreq.purpose = 1;
+        sreq.shards = new String[]{baseUrl};
+        sreq.actualShards = sreq.shards;
+        sreq.params = params;
+
+        if (isLegacyCloud) {
+          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
+        } else {
+          coresToCreate.put(coreName, sreq);
+        }
+      }
+
+      if(!isLegacyCloud) {
+        // wait for all replica entries to be created
+        Map<String, Replica> replicas = ocmh.waitToSeeReplicasInState(collectionName, coresToCreate.keySet());
+        for (Map.Entry<String, ShardRequest> e : coresToCreate.entrySet()) {
+          ShardRequest sreq = e.getValue();
+          sreq.params.set(CoreAdminParams.CORE_NODE_NAME, replicas.get(e.getKey()).getName());
+          shardHandler.submit(sreq, sreq.shards[0], sreq.params);
+        }
+      }
+
+      ocmh.processResponses(results, shardHandler, false, null, async, requestMap, Collections.emptySet());
+      if(results.get("failure") != null && ((SimpleOrderedMap)results.get("failure")).size() > 0) {
+        // Let's cleanup as we hit an exception
+        // We shouldn't be passing 'results' here for the cleanup as the response would then contain 'success'
+        // element, which may be interpreted by the user as a positive ack
+        ocmh.cleanupCollection(collectionName, new NamedList());
+        log.info("Cleaned up artifacts for failed create collection for [{}]", collectionName);
+      } else {
+        log.debug("Finished create command on all shards for collection: {}", collectionName);
+
+        // Emit a warning about production use of data driven functionality
+        boolean defaultConfigSetUsed = message.getStr(COLL_CONF) == null ||
+            message.getStr(COLL_CONF).equals(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
+        if (defaultConfigSetUsed) {
+          results.add("warning", "Using _default configset. Data driven schema functionality"
+              + " is enabled by default, which is NOT RECOMMENDED for production use. To turn it off:"
+              + " curl http://{host:port}/solr/" + collectionName + "/config -d '{\"set-user-property\": {\"update.autoCreateFields\":\"false\"}}'");
+        }
+      }
+    } catch (SolrException ex) {
+      throw ex;
+    } catch (Exception ex) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, ex);
+    } finally {
+      if (sessionWrapper.get() != null) sessionWrapper.get().release();
+    }
+  }
+
+  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
+                                                            ZkNodeProps message,
+                                                            List<String> nodeList, List<String> shardNames,
+                                                            AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+    final String collectionName = message.getStr(NAME);
+    // look at the replication factor and see if it matches reality
+    // if it does not, find best nodes to create more cores
+    int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
+    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
+    int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    String policy = message.getStr(Policy.POLICY);
+    boolean usePolicyFramework = !autoScalingConfig.getPolicy().getClusterPolicy().isEmpty() || policy != null;
+
+    Integer numSlices = message.getInt(OverseerCollectionMessageHandler.NUM_SLICES, null);
+    String router = message.getStr("router.name", DocRouter.DEFAULT_NAME);
+    if(ImplicitDocRouter.NAME.equals(router)){
+      ClusterStateMutator.getShardNames(shardNames, message.getStr("shards", null));
+      numSlices = shardNames.size();
+    } else {
+      if (numSlices == null ) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " is a required param (when using CompositeId router).");
+      }
+      if (numSlices <= 0) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, OverseerCollectionMessageHandler.NUM_SLICES + " must be > 0");
+      }
+      ClusterStateMutator.getShardNames(numSlices, shardNames);
+    }
+
+    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, 1);
+    if (usePolicyFramework && message.getStr(MAX_SHARDS_PER_NODE) != null && maxShardsPerNode > 0) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "'maxShardsPerNode>0' is not supported when autoScaling policies are used");
+    }
+    if (maxShardsPerNode == -1 || usePolicyFramework) maxShardsPerNode = Integer.MAX_VALUE;
+    if (numNrtReplicas + numTlogReplicas <= 0) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
+    }
+
+    // we need to look at every node and see how many cores it serves
+    // add our new cores to existing nodes serving the least number of cores
+    // but (for now) require that each core goes on a distinct node.
+
+    List<ReplicaPosition> replicaPositions;
+    nodeList.addAll(Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM));
+    if (nodeList.isEmpty()) {
+      log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
+
+      replicaPositions = new ArrayList<>();
+    } else {
+      int totalNumReplicas = numNrtReplicas + numTlogReplicas + numPullReplicas;
+      if (totalNumReplicas > nodeList.size()) {
+        log.warn("Specified number of replicas of "
+            + totalNumReplicas
+            + " on collection "
+            + collectionName
+            + " is higher than the number of Solr instances currently live or live and part of your " + OverseerCollectionMessageHandler.CREATE_NODE_SET + "("
+            + nodeList.size()
+            + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
+      }
+
+      int maxShardsAllowedToCreate = maxShardsPerNode == Integer.MAX_VALUE ?
+          Integer.MAX_VALUE :
+          maxShardsPerNode * nodeList.size();
+      int requestedShardsToCreate = numSlices * totalNumReplicas;
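+      // e.g. numSlices=2 with nrt=2, tlog=0, pull=0 requests 2*2=4 cores; with
+      // maxShardsPerNode=1 on 3 live nodes only 1*3=3 are allowed, so the
+      // request is rejected below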
+      if (maxShardsAllowedToCreate < requestedShardsToCreate) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
+            + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
+            + ", and the number of nodes currently live or live and part of your "+OverseerCollectionMessageHandler.CREATE_NODE_SET+" is " + nodeList.size()
+            + ". This allows a maximum of " + maxShardsAllowedToCreate
+            + " to be created. Value of " + OverseerCollectionMessageHandler.NUM_SLICES + " is " + numSlices
+            + ", value of " + NRT_REPLICAS + " is " + numNrtReplicas
+            + ", value of " + TLOG_REPLICAS + " is " + numTlogReplicas
+            + " and value of " + PULL_REPLICAS + " is " + numPullReplicas
+            + ". This requires " + requestedShardsToCreate
+            + " shards to be created (higher than the allowed number)");
+      }
+      replicaPositions = Assign.identifyNodes(cloudManager
+          , clusterState, nodeList, collectionName, message, shardNames, numNrtReplicas, numTlogReplicas, numPullReplicas);
+      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
+    }
+    return replicaPositions;
+  }
+
+  String getConfigName(String coll, ZkNodeProps message) throws KeeperException, InterruptedException {
+    String configName = message.getStr(COLL_CONF);
+
+    if (configName == null) {
+      // if there is only one conf, use that
+      List<String> configNames = null;
+      try {
+        configNames = ocmh.zkStateReader.getZkClient().getChildren(ZkConfigManager.CONFIGS_ZKNODE, null, true);
+        if (configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
+          if (!CollectionAdminParams.SYSTEM_COLL.equals(coll)) {
+            copyDefaultConfigSetTo(configNames, coll);
+          }
+          return coll;
+        } else if (configNames != null && configNames.size() == 1) {
+          configName = configNames.get(0);
+          // no config set named, but there is only 1 - use it
+          log.info("Only one config set found in zk - using it:" + configName);
+        }
+      } catch (KeeperException.NoNodeException e) {
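+        // no configs znode in ZK yet - ignore; configName stays null and the
+        // caller reports that no config set could be found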
+
+      }
+    }
+    return "".equals(configName)? null: configName;
+  }
+  
+  /**
+   * Copies the _default configset to the specified configset name (overwrites if pre-existing)
+   */
+  private void copyDefaultConfigSetTo(List<String> configNames, String targetConfig) {
+    ZkConfigManager configManager = new ZkConfigManager(ocmh.zkStateReader.getZkClient());
+
+    // if a configset named coll exists, delete the configset so that _default can be copied over
+    if (configNames.contains(targetConfig)) {
+      log.info("There exists a configset by the same name as the collection we're trying to create: " + targetConfig +
+          ", deleting it so that we can copy the _default configs over and create the collection.");
+      try {
+        configManager.deleteConfigDir(targetConfig);
+      } catch (Exception e) {
+        throw new SolrException(ErrorCode.INVALID_STATE, "Error while deleting configset: " + targetConfig, e);
+      }
+    } else {
+      log.info("Only _default config set found, using it.");
+    }
+    // Copy _default into targetConfig
+    try {
+      configManager.copyConfigDir(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME, targetConfig, new HashSet<>());
+    } catch (Exception e) {
+      throw new SolrException(ErrorCode.INVALID_STATE, "Error while copying _default to " + targetConfig, e);
+    }
+  }
+
+  public static void createCollectionZkNode(DistribStateManager stateManager, String collection, Map<String,String> params) {
+    log.debug("Check for collection zkNode:" + collection);
+    String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
+
+    try {
+      if (!stateManager.hasData(collectionPath)) {
+        log.debug("Creating collection in ZooKeeper:" + collection);
+
+        try {
+          Map<String,Object> collectionProps = new HashMap<>();
+
+          // TODO: if collection.configName isn't set, and there isn't already a conf in zk, just use that?
+          String defaultConfigName = System.getProperty(ZkController.COLLECTION_PARAM_PREFIX + ZkController.CONFIGNAME_PROP, collection);
+
+          if (params.size() > 0) {
+            collectionProps.putAll(params);
+            // if the config name wasn't passed in, use the default
+            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP)) {
+              // users can create the collection node and conf link ahead of time, or this may return another option
+              getConfName(stateManager, collection, collectionPath, collectionProps);
+            }
+
+          } else if (System.getProperty("bootstrap_confdir") != null) {
+            // if we are bootstrapping a collection, default the config for
+            // a new collection to the collection we are bootstrapping
+            log.info("Setting config for collection:" + collection + " to " + defaultConfigName);
+
+            Properties sysProps = System.getProperties();
+            for (String sprop : System.getProperties().stringPropertyNames()) {
+              if (sprop.startsWith(ZkController.COLLECTION_PARAM_PREFIX)) {
+                collectionProps.put(sprop.substring(ZkController.COLLECTION_PARAM_PREFIX.length()), sysProps.getProperty(sprop));
+              }
+            }
+
+            // if the config name wasn't passed in, use the default
+            if (!collectionProps.containsKey(ZkController.CONFIGNAME_PROP))
+              collectionProps.put(ZkController.CONFIGNAME_PROP, defaultConfigName);
+
+          } else if (Boolean.getBoolean("bootstrap_conf")) {
+            // the conf name should be the collection name of this core
+            collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
+          } else {
+            getConfName(stateManager, collection, collectionPath, collectionProps);
+          }
+
+          collectionProps.remove(ZkStateReader.NUM_SHARDS_PROP);  // we don't put numShards in the collections properties
+
+          ZkNodeProps zkProps = new ZkNodeProps(collectionProps);
+          stateManager.makePath(collectionPath, Utils.toJSON(zkProps), CreateMode.PERSISTENT, false);
+
+        } catch (KeeperException e) {
+          // it's okay if the node already exists
+          if (e.code() != KeeperException.Code.NODEEXISTS) {
+            throw e;
+          }
+        } catch (AlreadyExistsException e) {
+          // it's okay if the node already exists
+        }
+      } else {
+        log.debug("Collection zkNode exists");
+      }
+
+    } catch (KeeperException e) {
+      // it's okay if another beats us creating the node
+      if (e.code() == KeeperException.Code.NODEEXISTS) {
+        return;
+      }
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+    } catch (IOException e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error creating collection node in Zookeeper", e);
+    }
+
+  }
+  
+  private static void getConfName(DistribStateManager stateManager, String collection, String collectionPath, Map<String,Object> collectionProps) throws IOException,
+      KeeperException, InterruptedException {
+    // check for configName
+    log.debug("Looking for collection configName");
+    if (collectionProps.containsKey("configName")) {
+      log.info("configName was passed as a param {}", collectionProps.get("configName"));
+      return;
+    }
+
+    List<String> configNames = null;
+    int retry = 1;
+    int retryLimit = 6;
+    for (; retry < retryLimit; retry++) {
+      if (stateManager.hasData(collectionPath)) {
+        VersionedData data = stateManager.getData(collectionPath);
+        ZkNodeProps cProps = ZkNodeProps.load(data.getData());
+        if (cProps.containsKey(ZkController.CONFIGNAME_PROP)) {
+          break;
+        }
+      }
+
+      try {
+        configNames = stateManager.listData(ZkConfigManager.CONFIGS_ZKNODE);
+      } catch (NoSuchElementException | NoNodeException e) {
+        // just keep trying
+      }
+
+      // check if there's a config set with the same name as the collection
+      if (configNames != null && configNames.contains(collection)) {
+        log.info(
+            "Could not find explicit collection configName, but found config name matching collection name - using that set.");
+        collectionProps.put(ZkController.CONFIGNAME_PROP, collection);
+        break;
+      }
+      // if _default exists, use that
+      if (configNames != null && configNames.contains(ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME)) {
+        log.info(
+            "Could not find explicit collection configName, but found _default config set - using that set.");
+        collectionProps.put(ZkController.CONFIGNAME_PROP, ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
+        break;
+      }
+      // if there is only one conf, use that
+      if (configNames != null && configNames.size() == 1) {
+        // no config set named, but there is only 1 - use it
+        log.info("Only one config set found in zk - using it:" + configNames.get(0));
+        collectionProps.put(ZkController.CONFIGNAME_PROP, configNames.get(0));
+        break;
+      }
+
+      log.info("Could not find collection configName - pausing for 3 seconds and trying again - try: " + retry);
+      Thread.sleep(3000);
+    }
+    if (retry == retryLimit) {
+      log.error("Could not find configName for collection " + collection);
+      throw new ZooKeeperException(
+          SolrException.ErrorCode.SERVER_ERROR,
+          "Could not find configName for collection " + collection + " found: " + configNames);
+    }
+  }
+}
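
The request that lands in this command is normally issued through SolrJ. A
client-side sketch (assumptions, not part of the patch: SolrJ 7.2+, ZooKeeper
reachable at localhost:9983, and a configset named "myConf" already uploaded;
all names are illustrative):

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateCollectionExample {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          // 2 shards x 2 NRT replicas; the Overseer routes this to CreateCollectionCmd
          CollectionAdminRequest.createCollection("myColl", "myConf", 2, 2)
              .setMaxShardsPerNode(2)
              .process(client);
        }
      }
    }

With the 2x2 layout above and maxShardsPerNode=2, buildReplicaPositions needs
at least two live nodes (2*2=4 requested cores against 2 per node), matching
the validation shown earlier.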

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
new file mode 100644
index 0000000..311d9ef
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateShardCmd.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
+import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
+import org.apache.solr.client.solrj.cloud.autoscaling.SolrCloudManager;
+import org.apache.solr.cloud.CloudUtil;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.ReplicaPosition;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class CreateShardCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public CreateShardCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String sliceName = message.getStr(SHARD_ID_PROP);
+    boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
+
+    log.info("Create shard invoked: {}", message);
+    if (collectionName == null || sliceName == null)
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
+
+    DocCollection collection = clusterState.getCollection(collectionName);
+
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    AtomicReference<PolicyHelper.SessionWrapper> sessionWrapper = new AtomicReference<>();
+    SolrCloseableLatch countDownLatch;
+    try {
+      List<ReplicaPosition> positions = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, sessionWrapper);
+      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
+      // wait for a while until we see the shard
+      ocmh.waitForNewShard(collectionName, sliceName);
+
+      String async = message.getStr(ASYNC);
+      countDownLatch = new SolrCloseableLatch(positions.size(), ocmh);
+      for (ReplicaPosition position : positions) {
+        String nodeName = position.node;
+        String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(), collection, sliceName, position.type);
+        log.info("Creating replica " + coreName + " as part of slice " + sliceName + " of collection " + collectionName
+            + " on " + nodeName);
+
+        // Need to create new params for each request
+        ZkNodeProps addReplicasProps = new ZkNodeProps(
+            COLLECTION_PROP, collectionName,
+            SHARD_ID_PROP, sliceName,
+            ZkStateReader.REPLICA_TYPE, position.type.name(),
+            CoreAdminParams.NODE, nodeName,
+            CoreAdminParams.NAME, coreName,
+            CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
+        Map<String, Object> propertyParams = new HashMap<>();
+        ocmh.addPropertyParams(message, propertyParams);
+        addReplicasProps = addReplicasProps.plus(propertyParams);
+        if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
+        final NamedList addResult = new NamedList();
+        ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, () -> {
+          countDownLatch.countDown();
+          Object addResultFailure = addResult.get("failure");
+          if (addResultFailure != null) {
+            SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
+            if (failure == null) {
+              failure = new SimpleOrderedMap();
+              results.add("failure", failure);
+            }
+            failure.addAll((NamedList) addResultFailure);
+          } else {
+            SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
+            if (success == null) {
+              success = new SimpleOrderedMap();
+              results.add("success", success);
+            }
+            success.addAll((NamedList) addResult.get("success"));
+          }
+        });
+      }
+    } finally {
+      if (sessionWrapper.get() != null) sessionWrapper.get().release();
+    }
+
+    log.debug("Waiting for create shard action to complete");
+    if (!countDownLatch.await(5, TimeUnit.MINUTES)) log.warn("Timed out waiting for create shard action to complete");
+    log.debug("Finished waiting for create shard action to complete");
+
+    log.info("Finished create command on all shards for collection: " + collectionName);
+
+  }
+
+  public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
+         String collectionName, ZkNodeProps message, AtomicReference< PolicyHelper.SessionWrapper> sessionWrapper) throws IOException, InterruptedException {
+    String sliceName = message.getStr(SHARD_ID_PROP);
+    DocCollection collection = clusterState.getCollection(collectionName);
+
+    int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
+    int numPullReplicas = message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0));
+    int numTlogReplicas = message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0));
+    int totalReplicas = numNrtReplicas + numPullReplicas + numTlogReplicas;
+
+    if (numNrtReplicas + numTlogReplicas <= 0) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, NRT_REPLICAS + " + " + TLOG_REPLICAS + " must be greater than 0");
+    }
+
+    Object createNodeSetStr = message.get(OverseerCollectionMessageHandler.CREATE_NODE_SET);
+
+    boolean usePolicyFramework = CloudUtil.usePolicyFramework(collection, cloudManager);
+    List<ReplicaPosition> positions;
+    if (usePolicyFramework) {
+      if (collection.getPolicyName() != null) message.getProperties().put(Policy.POLICY, collection.getPolicyName());
+      positions = Assign.identifyNodes(cloudManager,
+          clusterState,
+          Assign.getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, OverseerCollectionMessageHandler.RANDOM),
+          collection.getName(),
+          message,
+          Collections.singletonList(sliceName),
+          numNrtReplicas,
+          numTlogReplicas,
+          numPullReplicas);
+      sessionWrapper.set(PolicyHelper.getLastSessionWrapper(true));
+    } else {
+      List<Assign.ReplicaCount> sortedNodeList = Assign.getNodesForNewReplicas(clusterState, collection.getName(), sliceName, totalReplicas,
+          createNodeSetStr, cloudManager);
+      int i = 0;
+      positions = new ArrayList<>();
+      for (Map.Entry<Replica.Type, Integer> e : ImmutableMap.of(Replica.Type.NRT, numNrtReplicas,
+          Replica.Type.TLOG, numTlogReplicas,
+          Replica.Type.PULL, numPullReplicas
+      ).entrySet()) {
+        for (int j = 0; j < e.getValue(); j++) {
+          positions.add(new ReplicaPosition(sliceName, j + 1, e.getKey(), sortedNodeList.get(i % sortedNodeList.size()).nodeName));
+          i++;
+        }
+      }
+    }
+    return positions;
+  }
+
+}
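
Client-side, CREATESHARD is only meaningful for collections using the implicit
router (compositeId shards are fixed at creation time). A SolrJ sketch (names
are illustrative; not part of the patch):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateShardExample {
      // 'client' is any SolrClient pointed at the cluster; "myColl" must have
      // been created with router.name=implicit
      static void addShard(SolrClient client) throws Exception {
        CollectionAdminRequest.createShard("myColl", "shardX").process(client);
      }
    }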

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
new file mode 100644
index 0000000..32715d6
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateSnapshotCmd.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.Replica.State;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
+import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the functionality of creating a collection level snapshot.
+ */
+public class CreateSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public CreateSnapshotCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String commitName = message.getStr(CoreAdminParams.COMMIT_NAME);
+    String asyncId = message.getStr(ASYNC);
+    SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
+    Date creationDate = new Date();
+
+    if (SolrSnapshotManager.snapshotExists(zkClient, collectionName, commitName)) {
+      throw new SolrException(ErrorCode.BAD_REQUEST, "Snapshot with name " + commitName
+          + " already exists for collection " + collectionName);
+    }
+
+    log.info("Creating a snapshot for collection={} with commitName={}", collectionName, commitName);
+
+    // Create a node in ZK to store the collection level snapshot meta-data.
+    SolrSnapshotManager.createCollectionLevelSnapshot(zkClient, collectionName, new CollectionSnapshotMetaData(commitName));
+    log.info("Created a ZK path to store snapshot information for collection={} with commitName={}", collectionName, commitName);
+
+    Map<String, String> requestMap = new HashMap<>();
+    NamedList shardRequestResults = new NamedList();
+    Map<String, Slice> shardByCoreName = new HashMap<>();
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+
+    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
+        if (replica.getState() != State.ACTIVE) {
+          log.info("Replica {} is not active. Hence not sending the createsnapshot request", replica.getCoreName());
+          continue; // Since replica is not active - no point sending a request.
+        }
+
+        String coreName = replica.getStr(CORE_NAME_PROP);
+
+        ModifiableSolrParams params = new ModifiableSolrParams();
+        params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATESNAPSHOT.toString());
+        params.set(NAME, slice.getName());
+        params.set(CORE_NAME_PROP, coreName);
+        params.set(CoreAdminParams.COMMIT_NAME, commitName);
+
+        ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
+        log.debug("Sent createsnapshot request to core={} with commitName={}", coreName, commitName);
+
+        shardByCoreName.put(coreName, slice);
+      }
+    }
+
+    // At this point we want to make sure that at least one replica for every shard
+    // is able to create the snapshot. If that is not the case, then we fail the request.
+    // This takes care of the situation where e.g. an entire shard is unavailable.
+    Set<String> failedShards = new HashSet<>();
+
+    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
+    NamedList success = (NamedList) shardRequestResults.get("success");
+    List<CoreSnapshotMetaData> replicas = new ArrayList<>();
+    if (success != null) {
+      for (int i = 0; i < success.size(); i++) {
+        NamedList resp = (NamedList)success.getVal(i);
+
+        // Check if this core is the leader for the shard. The idea here is that during the backup
+        // operation we preferably use the snapshot of the "leader" replica since it is most likely
+        // to have the latest state.
+        String coreName = (String)resp.get(CoreAdminParams.CORE);
+        Slice slice = shardByCoreName.remove(coreName);
+        boolean leader = (slice.getLeader() != null && slice.getLeader().getCoreName().equals(coreName));
+        resp.add(SolrSnapshotManager.SHARD_ID, slice.getName());
+        resp.add(SolrSnapshotManager.LEADER, leader);
+
+        CoreSnapshotMetaData c = new CoreSnapshotMetaData(resp);
+        replicas.add(c);
+        log.info("Snapshot with commitName {} is created successfully for core {}", commitName, c.getCoreName());
+      }
+    }
+
+    if (!shardByCoreName.isEmpty()) { // One or more failures.
+      log.warn("Unable to create a snapshot with name {} for following cores {}", commitName, shardByCoreName.keySet());
+
+      // Count number of failures per shard.
+      Map<String, Integer> failuresByShardId = new HashMap<>();
+      for (Map.Entry<String,Slice> entry : shardByCoreName.entrySet()) {
+        int f = 0;
+        if (failuresByShardId.get(entry.getValue().getName()) != null) {
+          f = failuresByShardId.get(entry.getValue().getName());
+        }
+        failuresByShardId.put(entry.getValue().getName(), f + 1);
+      }
+
+      // Now that we know the number of failures per shard, we can figure out
+      // whether at least one replica per shard was able to create a snapshot.
+      DocCollection collectionStatus = ocmh.zkStateReader.getClusterState().getCollection(collectionName);
+      for (Map.Entry<String,Integer> entry : failuresByShardId.entrySet()) {
+        int replicaCount = collectionStatus.getSlice(entry.getKey()).getReplicas().size();
+        if (replicaCount <= entry.getValue()) {
+          failedShards.add(entry.getKey());
+        }
+      }
+    }
+
+    if (failedShards.isEmpty()) { // No failures.
+      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Successful, creationDate, replicas);
+      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
+      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
+          commitName, meta.toNamedList());
+    } else {
+      log.warn("Failed to create a snapshot for collection {} with commitName = {}. Snapshot could not be captured for following shards {}",
+          collectionName, commitName, failedShards);
+      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
+      // which cores have the named snapshot.
+      CollectionSnapshotMetaData meta = new CollectionSnapshotMetaData(commitName, SnapshotStatus.Failed, creationDate, replicas);
+      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, meta);
+      log.info("Saved following snapshot information for collection={} with commitName={} in Zookeeper : {}", collectionName,
+          commitName, meta.toNamedList());
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to create snapshot on shards " + failedShards);
+    }
+  }
+}
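
The matching SolrJ entry point, as a sketch (collection and snapshot names are
illustrative; not part of the patch):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateSnapshotExample {
      static void snapshot(SolrClient client) throws Exception {
        // asks every active replica to record a named commit point and stores
        // the collection-level metadata in ZooKeeper, as implemented above
        CollectionAdminRequest.createSnapshot("myColl", "snap1").process(client);
      }
    }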

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
new file mode 100644
index 0000000..e199d7d
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteAliasCmd.java
@@ -0,0 +1,43 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+public class DeleteAliasCmd implements OverseerCollectionMessageHandler.Cmd {
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public DeleteAliasCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    String aliasName = message.getStr(NAME);
+
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    zkStateReader.aliasesHolder.applyModificationAndExportToZk(a -> a.cloneWithCollectionAlias(aliasName, null));
+  }
+
+}
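
A corresponding SolrJ sketch (alias name illustrative; not part of the patch):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteAliasExample {
      static void dropAlias(SolrClient client) throws Exception {
        // removes the alias entry from the aliases metadata in ZooKeeper
        CollectionAdminRequest.deleteAlias("myAlias").process(client);
      }
    }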

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
new file mode 100644
index 0000000..bdae8b9
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
@@ -0,0 +1,142 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.common.NonExistentCoreException;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.Aliases;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+
+  public DeleteCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    Aliases aliases = zkStateReader.getAliases();
+    final String collection = message.getStr(NAME);
+    for (Map.Entry<String, List<String>> ent :  aliases.getCollectionAliasListMap().entrySet()) {
+      if (ent.getValue().contains(collection)) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+            "Collection: " + collection + " is part of alias " + ent.getKey() + ". Remove or modify the alias before removing this collection.");
+      }
+    }
+
+    try {
+      // Remove the snapshots meta-data for this collection in ZK. Deleting actual index files
+      // should be taken care of as part of collection delete operation.
+      SolrZkClient zkClient = zkStateReader.getZkClient();
+      SolrSnapshotManager.cleanupCollectionLevelSnapshots(zkClient, collection);
+
+      if (zkStateReader.getClusterState().getCollectionOrNull(collection) == null) {
+        if (zkStateReader.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
+          // if the collection is not in the clusterstate, but is listed in zk, do nothing, it will just
+          // be removed in the finally - we cannot continue, because the below code will error if the collection
+          // is not in the clusterstate
+          return;
+        }
+      }
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
+      params.set(CoreAdminParams.DELETE_INSTANCE_DIR, true);
+      params.set(CoreAdminParams.DELETE_DATA_DIR, true);
+
+      String asyncId = message.getStr(ASYNC);
+      Map<String, String> requestMap = null;
+      if (asyncId != null) {
+        requestMap = new HashMap<>();
+      }
+
+      Set<String> okayExceptions = new HashSet<>(1);
+      okayExceptions.add(NonExistentCoreException.class.getName());
+
+      ocmh.collectionCmd(message, params, results, null, asyncId, requestMap, okayExceptions);
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETE.toLower(), NAME, collection);
+      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+
+      // wait for a while until we don't see the collection
+      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+      boolean removed = false;
+      while (! timeout.hasTimedOut()) {
+        timeout.sleep(100);
+        removed = !zkStateReader.getClusterState().hasCollection(collection);
+        if (removed) {
+          timeout.sleep(500); // a bit more time so other readers are more likely to see the removal on return
+          break;
+        }
+      }
+      if (!removed) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+            "Could not fully remove collection: " + collection);
+      }
+
+    } finally {
+
+      try {
+        if (zkStateReader.getZkClient().exists(
+            ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection, true)) {
+          zkStateReader.getZkClient().clean(
+              ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection);
+        }
+      } catch (InterruptedException e) {
+        SolrException.log(log, "Cleaning up collection in zk was interrupted:"
+            + collection, e);
+        Thread.currentThread().interrupt();
+      } catch (KeeperException e) {
+        SolrException.log(log, "Problem cleaning up collection in zk:"
+            + collection, e);
+      }
+    }
+  }
+}
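
A client-side sketch (collection name illustrative; not part of the patch).
Per the check at the top of call(), the delete is refused while the collection
is still referenced by an alias:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class DeleteCollectionExample {
      static void drop(SolrClient client) throws Exception {
        // unloads all cores (removing data and instance dirs), deletes the
        // collection from the cluster state and cleans up its ZK nodes
        CollectionAdminRequest.deleteCollection("myColl").process(client);
      }
    }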

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
new file mode 100644
index 0000000..ab4dc0c
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class DeleteNodeCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public DeleteNodeCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    ocmh.checkRequired(message, "node");
+    String node = message.getStr("node");
+    if (!state.liveNodesContain(node)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + node + " is not live");
+    }
+    List<ZkNodeProps> sourceReplicas = ReplaceNodeCmd.getReplicasOfNode(node, state);
+    List<String> singleReplicas = verifyReplicaAvailability(sourceReplicas, state);
+    if (!singleReplicas.isEmpty()) {
+      results.add("failure", "Can't delete the only existing non-PULL replica(s) on node " + node + ": " + singleReplicas.toString());
+    } else {
+      cleanupReplicas(results, state, sourceReplicas, ocmh, node, message.getStr(ASYNC));
+    }
+  }
+
+  // collect names of replicas that cannot be deleted
+  static List<String> verifyReplicaAvailability(List<ZkNodeProps> sourceReplicas, ClusterState state) {
+    List<String> res = new ArrayList<>();
+    for (ZkNodeProps sourceReplica : sourceReplicas) {
+      String coll = sourceReplica.getStr(COLLECTION_PROP);
+      String shard = sourceReplica.getStr(SHARD_ID_PROP);
+      String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
+      DocCollection collection = state.getCollection(coll);
+      Slice slice = collection.getSlice(shard);
+      if (slice.getReplicas().size() < 2) {
+        // can't delete the only replica in existence
+        res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
+      } else { // check replica types
+        int otherNonPullReplicas = 0;
+        for (Replica r : slice.getReplicas()) {
+          if (!r.getName().equals(replicaName) && !r.getType().equals(Replica.Type.PULL)) {
+            otherNonPullReplicas++;
+          }
+        }
+        // can't delete - there are no other non-pull replicas
+        if (otherNonPullReplicas == 0) {
+          res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
+        }
+      }
+    }
+    return res;
+  }
+
+  static void cleanupReplicas(NamedList results,
+                              ClusterState clusterState,
+                              List<ZkNodeProps> sourceReplicas,
+                              OverseerCollectionMessageHandler ocmh,
+                              String node,
+                              String async) throws InterruptedException {
+    CountDownLatch cleanupLatch = new CountDownLatch(sourceReplicas.size());
+    for (ZkNodeProps sourceReplica : sourceReplicas) {
+      String coll = sourceReplica.getStr(COLLECTION_PROP);
+      String shard = sourceReplica.getStr(SHARD_ID_PROP);
+      String type = sourceReplica.getStr(ZkStateReader.REPLICA_TYPE);
+      log.info("Deleting replica type={} for collection={} shard={} on node={}", type, coll, shard, node);
+      NamedList deleteResult = new NamedList();
+      try {
+        if (async != null) sourceReplica = sourceReplica.plus(ASYNC, async);
+        ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, sourceReplica.plus("parallel", "true"), deleteResult, () -> {
+          cleanupLatch.countDown();
+          if (deleteResult.get("failure") != null) {
+            synchronized (results) {
+
+              results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
+                  " on node=%s", coll, shard, node));
+            }
+          }
+        });
+      } catch (KeeperException e) {
+        log.warn("Error deleting ", e);
+        cleanupLatch.countDown();
+      } catch (Exception e) {
+        log.warn("Error deleting ", e);
+        cleanupLatch.countDown();
+        throw e;
+      }
+    }
+    log.debug("Waiting for delete node action to complete");
+    cleanupLatch.await(5, TimeUnit.MINUTES);
+  }
+
+
+}
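
As a usage sketch (not part of this commit, and with an illustrative node name), the DELETENODE action implemented above might be driven from SolrJ roughly as follows, assuming the CollectionAdminRequest.deleteNode() helper is available in this branch:

    // Sketch: ask the Overseer to delete every replica hosted on one node.
    CloudSolrClient client = cluster.getSolrClient(); // e.g. from a MiniSolrCloudCluster test
    CollectionAdminRequest.DeleteNode deleteNode =
        CollectionAdminRequest.deleteNode("127.0.0.1:8983_solr"); // example node name
    deleteNode.process(client); // synchronous
    // or asynchronously, which exercises the ASYNC handling in cleanupReplicas() above:
    // String requestId = deleteNode.processAsync(client);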


[04/15] lucene-solr:master: SOLR-11817: Move Collections API classes to its own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
deleted file mode 100644
index 154673a..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test of the Collections API with the MiniSolrCloudCluster.
- */
-@LuceneTestCase.Slow
-public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final int numShards = 2;
-  private static final int numReplicas = 2;
-  private static final int maxShardsPerNode = 1;
-  private static final int nodeCount = 5;
-  private static final String configName = "solrCloudCollectionConfig";
-  private static final Map<String,String> collectionProperties  // ensure indexes survive core shutdown
-      = Collections.singletonMap("solr.directoryFactory", "solr.StandardDirectoryFactory");
-
-  @Override
-  public void setUp() throws Exception {
-    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
-    super.setUp();
-  }
-  
-  @Override
-  public void tearDown() throws Exception {
-    cluster.shutdown();
-    super.tearDown();
-  }
-
-  private void createCollection(String collectionName, String createNodeSet) throws Exception {
-    if (random().nextBoolean()) { // process asynchronously
-      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
-          .setMaxShardsPerNode(maxShardsPerNode)
-          .setCreateNodeSet(createNodeSet)
-          .setProperties(collectionProperties)
-          .processAndWait(cluster.getSolrClient(), 30);
-    }
-    else {
-      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
-          .setMaxShardsPerNode(maxShardsPerNode)
-          .setCreateNodeSet(createNodeSet)
-          .setProperties(collectionProperties)
-          .process(cluster.getSolrClient());
-    }
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish
-        (collectionName, cluster.getSolrClient().getZkStateReader(), true, true, 330);
-  }
-
-  @Test
-  public void testCollectionCreateSearchDelete() throws Exception {
-    final CloudSolrClient client = cluster.getSolrClient();
-    final String collectionName = "testcollection";
-
-    assertNotNull(cluster.getZkServer());
-    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
-    assertEquals(nodeCount, jettys.size());
-    for (JettySolrRunner jetty : jettys) {
-      assertTrue(jetty.isRunning());
-    }
-
-    // shut down a server
-    JettySolrRunner stoppedServer = cluster.stopJettySolrRunner(0);
-    assertTrue(stoppedServer.isStopped());
-    assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
-
-    // create a server
-    JettySolrRunner startedServer = cluster.startJettySolrRunner();
-    assertTrue(startedServer.isRunning());
-    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
-
-    // create collection
-    createCollection(collectionName, null);
-
-    // modify/query collection
-    new UpdateRequest().add("id", "1").commit(client, collectionName);
-    QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
-    assertEquals(1, rsp.getResults().getNumFound());
-
-    // remove a server not hosting any replicas
-    ZkStateReader zkStateReader = client.getZkStateReader();
-    zkStateReader.forceUpdateCollection(collectionName);
-    ClusterState clusterState = zkStateReader.getClusterState();
-    Map<String,JettySolrRunner> jettyMap = new HashMap<>();
-    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
-      jettyMap.put(key, jetty);
-    }
-    Collection<Slice> slices = clusterState.getCollection(collectionName).getSlices();
-    // track the servers not hosting replicas
-    for (Slice slice : slices) {
-      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
-      for (Replica replica : slice.getReplicas()) {
-        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
-      }
-    }
-    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
-    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
-    jettys = cluster.getJettySolrRunners();
-    for (int i = 0; i < jettys.size(); ++i) {
-      if (jettys.get(i).equals(jettyToStop)) {
-        cluster.stopJettySolrRunner(i);
-        assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
-      }
-    }
-
-    // re-create a server (to restore original nodeCount count)
-    startedServer = cluster.startJettySolrRunner(jettyToStop);
-    assertTrue(startedServer.isRunning());
-    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
-
-    CollectionAdminRequest.deleteCollection(collectionName).process(client);
-    AbstractDistribZkTestBase.waitForCollectionToDisappear
-        (collectionName, client.getZkStateReader(), true, true, 330);
-
-    // create it again
-    createCollection(collectionName, null);
-
-    // check that there's no left-over state
-    assertEquals(0, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-
-    // modify/query collection
-    new UpdateRequest().add("id", "1").commit(client, collectionName);
-    assertEquals(1, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-  }
-
-  @Test
-  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
-
-    final String collectionName = "testSolrCloudCollectionWithoutCores";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    assertNotNull(cluster.getZkServer());
-    assertFalse(cluster.getJettySolrRunners().isEmpty());
-
-    // create collection
-    createCollection(collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY);
-
-    // check the collection's corelessness
-    int coreCount = 0;
-    DocCollection docCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
-    for (Map.Entry<String,Slice> entry : docCollection.getSlicesMap().entrySet()) {
-      coreCount += entry.getValue().getReplicasMap().entrySet().size();
-    }
-    assertEquals(0, coreCount);
-
-    // delete the collection
-    CollectionAdminRequest.deleteCollection(collectionName).process(client);
-    AbstractDistribZkTestBase.waitForCollectionToDisappear
-        (collectionName, client.getZkStateReader(), true, true, 330);
-  }
-
-  @Test
-  public void testStopAllStartAll() throws Exception {
-
-    final String collectionName = "testStopAllStartAllCollection";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    assertNotNull(cluster.getZkServer());
-    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
-    assertEquals(nodeCount, jettys.size());
-    for (JettySolrRunner jetty : jettys) {
-      assertTrue(jetty.isRunning());
-    }
-
-    final SolrQuery query = new SolrQuery("*:*");
-    final SolrInputDocument doc = new SolrInputDocument();
-
-    // create collection
-    createCollection(collectionName, null);
-
-    ZkStateReader zkStateReader = client.getZkStateReader();
-
-    // modify collection
-    final int numDocs = 1 + random().nextInt(10);
-    for (int ii = 1; ii <= numDocs; ++ii) {
-      doc.setField("id", ""+ii);
-      client.add(collectionName, doc);
-      if (ii*2 == numDocs) client.commit(collectionName);
-    }
-    client.commit(collectionName);
-
-    // query collection
-    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
-
-    // the test itself
-    zkStateReader.forceUpdateCollection(collectionName);
-    final ClusterState clusterState = zkStateReader.getClusterState();
-
-    final Set<Integer> leaderIndices = new HashSet<>();
-    final Set<Integer> followerIndices = new HashSet<>();
-    {
-      final Map<String,Boolean> shardLeaderMap = new HashMap<>();
-      for (final Slice slice : clusterState.getCollection(collectionName).getSlices()) {
-        for (final Replica replica : slice.getReplicas()) {
-          shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
-        }
-        shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
-      }
-      for (int ii = 0; ii < jettys.size(); ++ii) {
-        final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
-        final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
-        final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
-        if (Boolean.TRUE.equals(isLeader)) {
-          leaderIndices.add(ii);
-        } else if (Boolean.FALSE.equals(isLeader)) {
-          followerIndices.add(ii);
-        } // else neither leader nor follower i.e. node without a replica (for our collection)
-      }
-    }
-    final List<Integer> leaderIndicesList = new ArrayList<>(leaderIndices);
-    final List<Integer> followerIndicesList = new ArrayList<>(followerIndices);
-
-    // first stop the followers (in no particular order)
-    Collections.shuffle(followerIndicesList, random());
-    for (Integer ii : followerIndicesList) {
-      if (!leaderIndices.contains(ii)) {
-        cluster.stopJettySolrRunner(jettys.get(ii));
-      }
-    }
-
-    // then stop the leaders (again in no particular order)
-    Collections.shuffle(leaderIndicesList, random());
-    for (Integer ii : leaderIndicesList) {
-      cluster.stopJettySolrRunner(jettys.get(ii));
-    }
-
-    // calculate restart order
-    final List<Integer> restartIndicesList = new ArrayList<>();
-    Collections.shuffle(leaderIndicesList, random());
-    restartIndicesList.addAll(leaderIndicesList);
-    Collections.shuffle(followerIndicesList, random());
-    restartIndicesList.addAll(followerIndicesList);
-    if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());
-
-    // and then restart jettys in that order
-    for (Integer ii : restartIndicesList) {
-      final JettySolrRunner jetty = jettys.get(ii);
-      if (!jetty.isRunning()) {
-        cluster.startJettySolrRunner(jetty);
-        assertTrue(jetty.isRunning());
-      }
-    }
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-    zkStateReader.forceUpdateCollection(collectionName);
-
-    // re-query collection
-    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
deleted file mode 100644
index 40a6e30..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.core.backup.BackupManager.*;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.cloud.hdfs.HdfsTestUtil;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.HdfsBackupRepository;
-import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the tests for HDFS integration for Solr backup/restore capability.
- */
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
-})
-public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
-  public static final String SOLR_XML = "<solr>\n" +
-      "\n" +
-      "  <str name=\"shareSchema\">${shareSchema:false}</str>\n" +
-      "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n" +
-      "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n" +
-      "\n" +
-      "  <shardHandlerFactory name=\"shardHandlerFactory\" class=\"HttpShardHandlerFactory\">\n" +
-      "    <str name=\"urlScheme\">${urlScheme:}</str>\n" +
-      "    <int name=\"socketTimeout\">${socketTimeout:90000}</int>\n" +
-      "    <int name=\"connTimeout\">${connTimeout:15000}</int>\n" +
-      "  </shardHandlerFactory>\n" +
-      "\n" +
-      "  <solrcloud>\n" +
-      "    <str name=\"host\">127.0.0.1</str>\n" +
-      "    <int name=\"hostPort\">${hostPort:8983}</int>\n" +
-      "    <str name=\"hostContext\">${hostContext:solr}</str>\n" +
-      "    <int name=\"zkClientTimeout\">${solr.zkclienttimeout:30000}</int>\n" +
-      "    <bool name=\"genericCoreNodeNames\">${genericCoreNodeNames:true}</bool>\n" +
-      "    <int name=\"leaderVoteWait\">10000</int>\n" +
-      "    <int name=\"distribUpdateConnTimeout\">${distribUpdateConnTimeout:45000}</int>\n" +
-      "    <int name=\"distribUpdateSoTimeout\">${distribUpdateSoTimeout:340000}</int>\n" +
-      "  </solrcloud>\n" +
-      "  \n" +
-      "  <backup>\n" +
-      "    <repository  name=\"hdfs\" class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\"> \n" +
-      "      <str name=\"location\">${solr.hdfs.default.backup.path}</str>\n" +
-      "      <str name=\"solr.hdfs.home\">${solr.hdfs.home:}</str>\n" +
-      "      <str name=\"solr.hdfs.confdir\">${solr.hdfs.confdir:}</str>\n" +
-      "    </repository>\n" +
-      "  </backup>\n" +
-      "  \n" +
-      "</solr>\n";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static MiniDFSCluster dfsCluster;
-  private static String hdfsUri;
-  private static FileSystem fs;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
-    hdfsUri = HdfsTestUtil.getURI(dfsCluster);
-    try {
-      URI uri = new URI(hdfsUri);
-      Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
-      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
-      fs = FileSystem.get(uri, conf);
-
-      if (fs instanceof DistributedFileSystem) {
-        // Make sure dfs is not in safe mode
-        while (((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
-          log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
-          try {
-            Thread.sleep(5000);
-          } catch (InterruptedException e) {
-            Thread.interrupted();
-            // continue
-          }
-        }
-      }
-
-      fs.mkdirs(new org.apache.hadoop.fs.Path("/backup"));
-    } catch (IOException | URISyntaxException e) {
-      throw new RuntimeException(e);
-    }
-
-    System.setProperty("solr.hdfs.default.backup.path", "/backup");
-    System.setProperty("solr.hdfs.home", hdfsUri + "/solr");
-    useFactory("solr.StandardDirectoryFactory");
-
-    configureCluster(NUM_SHARDS)// nodes
-    .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-    .withSolrXml(SOLR_XML)
-    .configure();
-  }
-
-  @AfterClass
-  public static void teardownClass() throws Exception {
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty("solr.hdfs.default.backup.path");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
-    IOUtils.closeQuietly(fs);
-    fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
-  }
-
-  @Override
-  public String getCollectionName() {
-    return "hdfsbackuprestore";
-  }
-
-  @Override
-  public String getBackupRepoName() {
-    return "hdfs";
-  }
-
-  @Override
-  public String getBackupLocation() {
-    return null;
-  }
-
-  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
-    String backupName = "configonlybackup";
-    CloudSolrClient solrClient = cluster.getSolrClient();
-
-    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
-        .setRepositoryName(getBackupRepoName())
-        .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
-    backup.process(solrClient);
-
-    Map<String,String> params = new HashMap<>();
-    params.put("location", "/backup");
-    params.put("solr.hdfs.home", hdfsUri + "/solr");
-
-    HdfsBackupRepository repo = new HdfsBackupRepository();
-    repo.init(new NamedList<>(params));
-    BackupManager mgr = new BackupManager(repo, solrClient.getZkStateReader());
-
-    URI baseLoc = repo.createURI("/backup");
-
-    Properties props = mgr.readBackupProperties(baseLoc, backupName);
-    assertNotNull(props);
-    assertEquals(collectionName, props.getProperty(COLLECTION_NAME_PROP));
-    assertEquals(backupName, props.getProperty(BACKUP_NAME_PROP));
-    assertEquals(configName, props.getProperty(COLL_CONF));
-
-    DocCollection collectionState = mgr.readCollectionState(baseLoc, backupName, collectionName);
-    assertNotNull(collectionState);
-    assertEquals(collectionName, collectionState.getName());
-
-    URI configDirLoc = repo.resolve(baseLoc, backupName, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
-    assertTrue(repo.exists(configDirLoc));
-
-    Collection<String> expected = Arrays.asList(BACKUP_PROPS_FILE, ZK_STATE_DIR);
-    URI backupLoc = repo.resolve(baseLoc, backupName);
-    String[] dirs = repo.listAll(backupLoc);
-    for (String d : dirs) {
-      assertTrue(expected.contains(d));
-    }
-  }
-
-}
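
For reference, a minimal sketch (collection and backup names illustrative) of how a client would target the "hdfs" repository declared in the SOLR_XML above:

    // Back up into the HDFS repository; "/backup" matches the path created in setupClass().
    CollectionAdminRequest.backupCollection("hdfsbackuprestore", "nightlyBackup")
        .setRepositoryName("hdfs") // repository name from the <backup> section of solr.xml
        .setLocation("/backup")    // an HDFS path; the repository default comes from solr.hdfs.default.backup.path
        .process(cluster.getSolrClient());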

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
deleted file mode 100644
index c0db46e..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.junit.BeforeClass;
-
-/**
- * This class implements the tests for local file-system integration for Solr backup/restore capability.
- * Note that Solr backup/restore still requires a "shared" file-system; it's just that in this case
- * such a file-system is exposed via the local file-system API.
- */
-public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
-  private static String backupLocation;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    configureCluster(NUM_SHARDS)// nodes
-        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-        .configure();
-
-    boolean whitespacesInPath = random().nextBoolean();
-    if (whitespacesInPath) {
-      backupLocation = createTempDir("my backup").toAbsolutePath().toString();
-    } else {
-      backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
-    }
-  }
-
-  @Override
-  public String getCollectionName() {
-    return "backuprestore";
-  }
-
-  @Override
-  public String getBackupRepoName() {
-    return null;
-  }
-
-  @Override
-  public String getBackupLocation() {
-    return backupLocation;
-  }
-}
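
A related sketch (path illustrative): rather than passing a location on every request, a shared mount can be registered once as the cluster-wide default, which is the same mechanism testInvalidPath() in the base class exercises with an invalid value:

    // Register a default backup location so BACKUP/RESTORE requests may omit "location".
    CollectionAdminRequest.setClusterProperty(CoreAdminParams.BACKUP_LOCATION, "/mnt/shared/solr-backups")
        .process(cluster.getSolrClient());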

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java b/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
deleted file mode 100644
index f654e8f..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-@Slow
-public class TestReplicaProperties extends ReplicaPropertiesBase {
-
-  public static final String COLLECTION_NAME = "testcollection";
-
-  public TestReplicaProperties() {
-    schemaString = "schema15.xml";      // we need a string id
-    sliceCount = 2;
-  }
-
-  @Test
-  @ShardsFixed(num = 4)
-  public void test() throws Exception {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
-      // shards, replicationfactor, maxreplicaspernode
-      int shards = random().nextInt(7);
-      if (shards < 2) shards = 2;
-      int rFactor = random().nextInt(4);
-      if (rFactor < 2) rFactor = 2;
-      createCollection(null, COLLECTION_NAME, shards, rFactor, shards * rFactor + 1, client, null, "conf1");
-    }
-
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
-    waitForRecoveriesToFinish(COLLECTION_NAME, false);
-
-    listCollection();
-
-    clusterAssignPropertyTest();
-  }
-
-  private void listCollection() throws IOException, SolrServerException {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.LIST.toString());
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      List<String> collections = (List<String>) rsp.get("collections");
-      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
-      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
-      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
-    }
-  }
-
-
-  private void clusterAssignPropertyTest() throws Exception {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "property", "preferredLeader");
-      } catch (SolrException se) {
-        assertTrue("Should have seen missing required parameter 'collection' error",
-            se.getMessage().contains("Missing required parameter: collection"));
-      }
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "preferredLeader");
-
-      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "preferredleader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "property.newunique",
-          "shardUnique", "true");
-      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "property.newunique");
-
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "collection", COLLECTION_NAME,
-            "property", "whatever",
-            "shardUnique", "false");
-        fail("Should have thrown an exception here.");
-      } catch (SolrException se) {
-        assertTrue("Should have gotten a specific error message here",
-            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the " +
-                "property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
-      }
-      // Should be able to set non-unique-per-slice values in several places.
-      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
-      List<String> sliceList = new ArrayList<>(slices.keySet());
-      String c1_s1 = sliceList.get(0);
-      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
-      String c1_s1_r1 = replicasList.get(0);
-      String c1_s1_r2 = replicasList.get(1);
-
-      addProperty(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "bogus1",
-          "property.value", "true");
-
-      addProperty(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r2,
-          "property", "property.bogus1",
-          "property.value", "whatever");
-
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "collection", COLLECTION_NAME,
-            "property", "bogus1",
-            "shardUnique", "false");
-        fail("Should have thrown parameter error here");
-      } catch (SolrException se) {
-        assertTrue("Should have caught specific exception ",
-            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the property be " +
-                "pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
-      }
-
-      // Should have no effect despite the "shardUnique" param being set.
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "property.bogus1",
-          "shardUnique", "true");
-
-      verifyPropertyVal(client, COLLECTION_NAME,
-          c1_s1_r1, "bogus1", "true");
-      verifyPropertyVal(client, COLLECTION_NAME,
-          c1_s1_r2, "property.bogus1", "whatever");
-
-      // At this point we've assigned a preferred leader. Make it happen and check that all the nodes that are
-      // leaders _also_ have the preferredLeader property set.
-
-
-      NamedList<Object> res = doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.REBALANCELEADERS.toString(),
-          "collection", COLLECTION_NAME);
-
-      verifyLeaderAssignment(client, COLLECTION_NAME);
-
-    }
-  }
-
-  private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
-      throws InterruptedException, KeeperException {
-    String lastFailMsg = "";
-    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
-      lastFailMsg = "";
-      ClusterState clusterState = client.getZkStateReader().getClusterState();
-      for (Slice slice : clusterState.getCollection(collectionName).getSlices()) {
-        Boolean foundLeader = false;
-        Boolean foundPreferred = false;
-        for (Replica replica : slice.getReplicas()) {
-          Boolean isLeader = replica.getBool("leader", false);
-          Boolean isPreferred = replica.getBool("property.preferredleader", false);
-          if (isLeader != isPreferred) {
-            lastFailMsg = "Replica should NOT have preferredLeader != leader. Preferred: " + isPreferred.toString() +
-                " leader is " + isLeader.toString();
-          }
-          if (foundLeader && isLeader) {
-            lastFailMsg = "There should only be a single leader in _any_ shard! Replica " + replica.getName() +
-                " is the second leader in slice " + slice.getName();
-          }
-          if (foundPreferred && isPreferred) {
-            lastFailMsg = "There should only be a single preferredLeader in _any_ shard! Replica " + replica.getName() +
-                " is the second preferredLeader in slice " + slice.getName();
-          }
-          foundLeader = foundLeader ? foundLeader : isLeader;
-          foundPreferred = foundPreferred ? foundPreferred : isPreferred;
-        }
-      }
-      if (lastFailMsg.length() == 0) return;
-      Thread.sleep(100);
-    }
-    fail(lastFailMsg);
-  }
-
-  private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
-    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    for (int idx = 0; idx < paramsIn.length; idx += 2) {
-      params.set(paramsIn[idx], paramsIn[idx + 1]);
-    }
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    client.request(request);
-
-  }
-}
-
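
For readers following clusterAssignPropertyTest() above, a sketch of the raw Collections API request it builds through doPropertyAction() (collection name illustrative):

    // Balance the preferredLeader property so exactly one replica per slice carries it.
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString());
    params.set("collection", "testcollection");
    params.set("property", "preferredLeader");
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    client.request(request); // client is a CloudSolrClient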

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
deleted file mode 100644
index a560e75..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.junit.Test;
-
-import java.io.IOException;
-
-public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
-
-  public static final int MAX_WAIT_TIMEOUT_SECONDS = 90;
-
-  public TestRequestStatusCollectionAPI() {
-    schemaString = "schema15.xml";      // we need a string id
-  }
-
-  @Test
-  public void test() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection2");
-    params.set("numShards", 2);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1000");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    // Check for the request to be completed.
-
-    NamedList r = null;
-    NamedList status = null;
-    String message = null;
-
-    params = new ModifiableSolrParams();
-
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1000");
-
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("found [1000] in completed tasks", message);
-
-    // Check for a random (hopefully non-existent) request id
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "9999999");
-    try {
-      r = sendRequest(params);
-      status = (NamedList) r.get("status");
-      message = (String) status.get("msg");
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("Did not find [9999999] in any tasks queue", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.SPLITSHARD.toString());
-    params.set("collection", "collection2");
-    params.set("shard", "shard1");
-    params.set(CommonAdminParams.ASYNC, "1001");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    // Check for the request to be completed.
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1001");
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("found [1001] in completed tasks", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection2");
-    params.set("numShards", 2);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1002");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    params = new ModifiableSolrParams();
-
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1002");
-
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-
-    assertEquals("found [1002] in failed tasks", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection3");
-    params.set("numShards", 1);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1002");
-    try {
-      r = sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("Task with the same requestid already exists.", r.get("error"));
-  }
-
-  /**
-   * Helper method to send a status request with a specific retry limit and return
-   * the message (or null) from the status response.
-   */
-  private String sendStatusRequestWithRetry(ModifiableSolrParams params, int maxCounter)
-      throws SolrServerException, IOException{
-    String message = null;
-    while (maxCounter-- > 0) {
-      final NamedList r = sendRequest(params);
-      final NamedList status = (NamedList) r.get("status");
-      final RequestStatusState state = RequestStatusState.fromKey((String) status.get("state"));
-      message = (String) status.get("msg");
-
-      if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
-        return message;
-      }
-
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        // ignore and poll again
-      }
-
-    }
-    // Retries exhausted; return the most recent message we saw.
-    return message;
-  }
-
-  protected NamedList sendRequest(ModifiableSolrParams params) throws SolrServerException, IOException {
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient).getBaseURL();
-    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
-
-    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 15000)) {
-      return baseServer.request(request);
-    }
-
-  }
-}
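
As a sketch (request id illustrative), the raw-params polling above can also be written against the SolrJ helpers, assuming CollectionAdminRequest.requestStatus() is available in this branch:

    // Poll an async request id and check whether it has reached a terminal state.
    RequestStatusState state = CollectionAdminRequest.requestStatus("1000")
        .process(client)
        .getRequestStatus();
    if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
      // terminal; inspect status/msg in the response, as sendStatusRequestWithRetry() does above
    }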

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
new file mode 100644
index 0000000..058814c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
@@ -0,0 +1,348 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the logic required to test Solr cloud backup/restore capability.
+ */
+public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  protected static final int NUM_SHARDS = 2; // granted, we sometimes shard-split to get more
+
+  int replFactor;
+  int numTlogReplicas;
+  int numPullReplicas;
+
+  private static long docsSeed; // see indexDocs()
+
+  @BeforeClass
+  public static void createCluster() throws Exception {
+    docsSeed = random().nextLong();
+  }
+
+  /**
+   * @return The name of the collection to use.
+   */
+  public abstract String getCollectionName();
+
+  /**
+   * @return The name of the backup repository to use.
+   */
+  public abstract String getBackupRepoName();
+
+  /**
+   * @return The absolute path for the backup location.
+   *         Could return null.
+   */
+  public abstract String getBackupLocation();
+
+  @Test
+  public void test() throws Exception {
+    boolean isImplicit = random().nextBoolean();
+    boolean doSplitShardOperation = !isImplicit && random().nextBoolean();
+    replFactor = TestUtil.nextInt(random(), 1, 2);
+    numTlogReplicas = TestUtil.nextInt(random(), 0, 1);
+    numPullReplicas = TestUtil.nextInt(random(), 0, 1);
+    
+    CollectionAdminRequest.Create create = isImplicit ?
+      // NOTE: use shard list with same # of shards as NUM_SHARDS; we assume this later
+      CollectionAdminRequest.createCollectionWithImplicitRouter(getCollectionName(), "conf1", "shard1,shard2", replFactor, numTlogReplicas, numPullReplicas) :
+      CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
+    
+    if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
+      create.setMaxShardsPerNode((int) Math.ceil(NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) / (double) cluster.getJettySolrRunners().size())); // just to assert it survives the restoration; cast to double so ceil() can round up
+      if (doSplitShardOperation) {
+        create.setMaxShardsPerNode(create.getMaxShardsPerNode() * 2);
+      }
+    }
+    if (random().nextBoolean()) {
+      create.setAutoAddReplicas(true);//just to assert it survives the restoration
+    }
+    Properties coreProps = new Properties();
+    coreProps.put("customKey", "customValue");//just to assert it survives the restoration
+    create.setProperties(coreProps);
+    if (isImplicit) { //implicit router
+      create.setRouterField("shard_s");
+    } else {//composite id router
+      if (random().nextBoolean()) {
+        create.setRouterField("shard_s");
+      }
+    }
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    create.process(solrClient);
+
+    indexDocs(getCollectionName());
+
+    if (doSplitShardOperation) {
+      // shard split the first shard
+      int prevActiveSliceCount = getActiveSliceCount(getCollectionName());
+      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(getCollectionName());
+      splitShard.setShardName("shard1");
+      splitShard.process(solrClient);
+      // wait until we see one more active slice...
+      for (int i = 0; getActiveSliceCount(getCollectionName()) != prevActiveSliceCount + 1; i++) {
+        assertTrue(i < 30);
+        Thread.sleep(500);
+      }
+      // issue a hard commit.  Split shard does a soft commit which isn't good enough for the backup/snapshooter to see
+      solrClient.commit(getCollectionName());
+    }
+
+    testBackupAndRestore(getCollectionName());
+    testConfigBackupOnly("conf1", getCollectionName());
+    testInvalidPath(getCollectionName());
+  }
+
+  /**
+   * This test validates the backup of collection configuration using
+   *  {@linkplain CollectionAdminParams#NO_INDEX_BACKUP_STRATEGY}.
+   *
+   * @param configName The config name for the collection to be backed up.
+   * @param collectionName The name of the collection to be backed up.
+   * @throws Exception in case of errors.
+   */
+  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
+    // This is deliberately a no-op, since we want to run this test for only one of the backup
+    // repository implementations (mainly to avoid redundant test execution). Currently the HDFS
+    // backup repository test implements this.
+  }
+
+  // This test verifies the system behavior when the backup location cluster property is configured with an invalid
+  // value for the specified repository (and the default backup location is not configured in solr.xml).
+  private void testInvalidPath(String collectionName) throws Exception {
+    // Execute this test only if the default backup location is NOT configured in solr.xml
+    if (getBackupLocation() == null) {
+      return;
+    }
+
+    String backupName = "invalidbackuprequest";
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    ClusterProp req = CollectionAdminRequest.setClusterProperty(CoreAdminParams.BACKUP_LOCATION, "/location/does/not/exist");
+    assertEquals(0, req.process(solrClient).getStatus());
+
+    // Do not specify the backup location.
+    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+        .setRepositoryName(getBackupRepoName());
+    try {
+      backup.process(solrClient);
+      fail("This request should have failed since the cluster property value for backup location property is invalid.");
+    } catch (SolrException ex) {
+      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
+    }
+
+    String restoreCollectionName = collectionName + "_invalidrequest";
+    CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
+        .setRepositoryName(getBackupRepoName());
+    try {
+      restore.process(solrClient);
+      fail("This request should have failed since the cluster property value for backup location property is invalid.");
+    } catch (SolrException ex) {
+      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
+    }
+  }
+
+  private int getActiveSliceCount(String collectionName) {
+    return cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName).getActiveSlices().size();
+  }
+
+  private void indexDocs(String collectionName) throws Exception {
+    Random random = new Random(docsSeed);// use a constant seed for the whole test run so that we can easily re-index.
+    int numDocs = random.nextInt(100);
+    if (numDocs == 0) {
+      log.info("Indexing ZERO test docs");
+      return;
+    }
+    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
+    for (int i=0; i<numDocs; i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", i);
+      doc.addField("shard_s", "shard" + (1 + random.nextInt(NUM_SHARDS))); // for implicit router
+      docs.add(doc);
+    }
+    CloudSolrClient client = cluster.getSolrClient();
+    client.add(collectionName, docs);// batch
+    client.commit(collectionName);
+  }
+
+  private void testBackupAndRestore(String collectionName) throws Exception {
+    String backupLocation = getBackupLocation();
+    String backupName = "mytestbackup";
+
+    CloudSolrClient client = cluster.getSolrClient();
+    DocCollection backupCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
+
+    Map<String, Integer> origShardToDocCount = getShardToDocCountMap(client, backupCollection);
+    assertFalse(origShardToDocCount.isEmpty());
+
+    log.info("Triggering Backup command");
+
+    {
+      CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
+      if (random().nextBoolean()) {
+        assertEquals(0, backup.process(client).getStatus());
+      } else {
+        assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(client, 30));//async
+      }
+    }
+
+    log.info("Triggering Restore command");
+
+    String restoreCollectionName = collectionName + "_restored";
+    boolean sameConfig = random().nextBoolean();
+
+    {
+      CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
+          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
+
+      // Explicitly specify replicationFactor/pullReplicas/nrtReplicas/tlogReplicas.
+      // The values stay the same as the original; maybe test with values different from the original for better coverage.
+      if (random().nextBoolean())  {
+        restore.setReplicationFactor(replFactor);
+      }
+      if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
+        // may need to increase maxShardsPerNode (e.g. if it was a shard split, then now we need more)
+        restore.setMaxShardsPerNode((int) Math.ceil((double) backupCollection.getReplicas().size() / cluster.getJettySolrRunners().size()));
+      }
+
+      if (rarely()) { // Try with createNodeSet configuration
+        int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
+        List<String> nodeStrs = new ArrayList<>(nodeSetSize);
+        Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
+        for (int i = 0; i < nodeSetSize ; i++) {
+          nodeStrs.add(iter.next().getNodeName());
+        }
+        restore.setCreateNodeSet(String.join(",", nodeStrs));
+        restore.setCreateNodeSetShuffle(usually());
+        // we need to double the maxShardsPerNode value since we reduced the number of available nodes by half.
+        if (restore.getMaxShardsPerNode() != null) {
+          restore.setMaxShardsPerNode(restore.getMaxShardsPerNode() * 2);
+        } else {
+          restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
+        }
+      }
+
+      Properties props = new Properties();
+      props.setProperty("customKey", "customVal");
+      restore.setProperties(props);
+
+      if (!sameConfig) {
+        restore.setConfigName("customConfigName");
+      }
+      if (random().nextBoolean()) {
+        assertEquals(0, restore.process(client).getStatus());
+      } else {
+        assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(client, 30));//async
+      }
+      AbstractDistribZkTestBase.waitForRecoveriesToFinish(
+          restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
+    }
+
+    //Check the number of results are the same
+    DocCollection restoreCollection = client.getZkStateReader().getClusterState().getCollection(restoreCollectionName);
+    assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
+    //Re-index same docs (should be identical docs given same random seed) and test we have the same result.  Helps
+    //  test we reconstituted the hash ranges / doc router.
+    if (!(restoreCollection.getRouter() instanceof ImplicitDocRouter) && random().nextBoolean()) {
+      indexDocs(restoreCollectionName);
+      assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
+    }
+
+    assertEquals(backupCollection.getReplicationFactor(), restoreCollection.getReplicationFactor());
+    assertEquals(backupCollection.getAutoAddReplicas(), restoreCollection.getAutoAddReplicas());
+    assertEquals(backupCollection.getActiveSlices().iterator().next().getReplicas().size(),
+        restoreCollection.getActiveSlices().iterator().next().getReplicas().size());
+    assertEquals(sameConfig ? "conf1" : "customConfigName",
+        cluster.getSolrClient().getZkStateReader().readConfigName(restoreCollectionName));
+
+    Map<String, Integer> numReplicasByNodeName = new HashMap<>();
+    restoreCollection.getReplicas().forEach(x -> {
+      numReplicasByNodeName.put(x.getNodeName(), numReplicasByNodeName.getOrDefault(x.getNodeName(), 0) + 1);
+    });
+    numReplicasByNodeName.forEach((k, v) -> {
+      assertTrue("Node " + k + " has " + v + " replicas. Expected num replicas : " + restoreCollection.getMaxShardsPerNode() ,
+          v <= restoreCollection.getMaxShardsPerNode());
+    });
+
+    assertEquals("Different count of nrtReplicas. Backup collection state=" + backupCollection + "\nRestore " +
+        "collection state=" + restoreCollection, replFactor, restoreCollection.getNumNrtReplicas().intValue());
+    assertEquals("Different count of pullReplicas. Backup collection state=" + backupCollection + "\nRestore" +
+        " collection state=" + restoreCollection, numPullReplicas, restoreCollection.getNumPullReplicas().intValue());
+    assertEquals("Different count of TlogReplica. Backup collection state=" + backupCollection + "\nRestore" +
+        " collection state=" + restoreCollection, numTlogReplicas, restoreCollection.getNumTlogReplicas().intValue());
+
+    assertEquals("Restore collection should use stateFormat=2", 2, restoreCollection.getStateFormat());
+
+    // assert added core properties:
+    // DWS: did via manual inspection.
+    // TODO Find the applicable core.properties on the file system but how?
+  }
+
+  private Map<String, Integer> getShardToDocCountMap(CloudSolrClient client, DocCollection docCollection) throws SolrServerException, IOException {
+    Map<String,Integer> shardToDocCount = new TreeMap<>();
+    for (Slice slice : docCollection.getActiveSlices()) {
+      String shardName = slice.getName();
+      try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(slice.getLeader().getCoreUrl()).withHttpClient(client.getHttpClient()).build()) {
+        long docsInShard = leaderClient.query(new SolrQuery("*:*").setParam("distrib", "false"))
+            .getResults().getNumFound();
+        shardToDocCount.put(shardName, (int) docsInShard);
+      }
+    }
+    return shardToDocCount;
+  }
+}
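
For reference, the round trip driven by the test above reduces to a short SolrJ
sequence. A minimal sketch, assuming a CloudSolrClient named "client" (e.g. from
cluster.getSolrClient()) and hypothetical collection, backup, and location names:

    // Back up a collection to a location visible to every node, then restore it under a new name.
    CollectionAdminRequest.backupCollection("myColl", "myBackup")
        .setLocation("/shared/backups")
        .process(client);
    CollectionAdminRequest.restoreCollection("myColl_restored", "myBackup")
        .setLocation("/shared/backups")
        .process(client);

Subclasses that want to exercise the NO_INDEX_BACKUP_STRATEGY path override the
deliberately empty testConfigBackupOnly hook above.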

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
new file mode 100644
index 0000000..d2b35e4
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
+import org.apache.solr.cloud.ZkTestServer;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class AssignTest extends SolrTestCaseJ4 {
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  @Test
+  public void testAssignNode() throws Exception {
+    assumeWorkingMockito();
+    
+    SolrZkClient zkClient = mock(SolrZkClient.class);
+    Map<String, byte[]> zkClientData = new HashMap<>();
+    when(zkClient.setData(anyString(), any(), anyInt(), anyBoolean())).then(invocation -> {
+        zkClientData.put(invocation.getArgument(0), invocation.getArgument(1));
+        return null;
+      }
+    );
+    when(zkClient.getData(anyString(), any(), any(), anyBoolean())).then(invocation ->
+        zkClientData.get(invocation.getArgument(0)));
+    // TODO: fix this to be independent of ZK
+    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
+    String nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node1", nodeName);
+    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node1", nodeName);
+    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node2", nodeName);
+  }
+
+  @Test
+  public void testIdIsUnique() throws Exception {
+    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
+    ZkTestServer server = new ZkTestServer(zkDir);
+    Object fixedValue = new Object();
+    String[] collections = new String[]{"c1","c2","c3","c4","c5","c6","c7","c8","c9"};
+    Map<String, ConcurrentHashMap<Integer, Object>> collectionUniqueIds = new HashMap<>();
+    for (String c : collections) {
+      collectionUniqueIds.put(c, new ConcurrentHashMap<>());
+    }
+
+    ExecutorService executor = ExecutorUtil.newMDCAwareCachedThreadPool("threadpool");
+    try {
+      server.run();
+
+      try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
+        assertTrue(zkClient.isConnected());
+        zkClient.makePath("/", true);
+        for (String c : collections) {
+          zkClient.makePath("/collections/"+c, true);
+        }
+        // TODO: fix this to be independent of ZK
+        ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
+        List<Future<?>> futures = new ArrayList<>();
+        for (int i = 0; i < 1000; i++) {
+          futures.add(executor.submit(() -> {
+            String collection = collections[random().nextInt(collections.length)];
+            int id = Assign.incAndGetId(stateManager, collection, 0);
+            Object val = collectionUniqueIds.get(collection).put(id, fixedValue);
+            if (val != null) {
+              fail("ZkController do not generate unique id for " + collection);
+            }
+          }));
+        }
+        for (Future<?> future : futures) {
+          future.get();
+        }
+      }
+      assertEquals(1000, collectionUniqueIds.values().stream()
+          .mapToInt(ConcurrentHashMap::size)
+          .sum());
+    } finally {
+      server.shutdown();
+      ExecutorUtil.shutdownAndAwaitTermination(executor);
+    }
+  }
+
+
+  @Test
+  public void testBuildCoreName() throws IOException, InterruptedException, KeeperException {
+    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
+    ZkTestServer server = new ZkTestServer(zkDir);
+    server.run();
+    try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
+      zkClient.makePath("/", true);
+      // TODO: fix this to be independent of ZK
+      ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
+      Map<String, Slice> slices = new HashMap<>();
+      slices.put("shard1", new Slice("shard1", new HashMap<>(), null));
+      slices.put("shard2", new Slice("shard2", new HashMap<>(), null));
+
+      DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
+      assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));
+      assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildSolrCoreName(stateManager, docCollection, "shard2", Replica.Type.PULL));
+    } finally {
+      server.shutdown();
+    }
+  }
+  
+}
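
The assertions above rely on Assign keeping a single counter per collection, shared by
all replica types, so successive core names get increasing numeric suffixes ("_n1",
"_p2", ...). A minimal sketch, assuming a ZkDistribStateManager named "stateManager" and
a DocCollection named "coll" (both hypothetical):

    // Each call bumps the same per-collection counter, regardless of replica type.
    String first  = Assign.buildSolrCoreName(stateManager, coll, "shard1", Replica.Type.NRT);  // ..._replica_n1
    String second = Assign.buildSolrCoreName(stateManager, coll, "shard1", Replica.Type.PULL); // ..._replica_p2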

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
new file mode 100644
index 0000000..c084412
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.util.RetryUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Verifies cluster state remains consistent after collection reload.
+ */
+@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
+public class CollectionReloadTest extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(1)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
+  
+  @Test
+  public void testReloadedLeaderStateAfterZkSessionLoss() throws Exception {
+
+    log.info("testReloadedLeaderStateAfterZkSessionLoss initialized OK ... running test logic");
+
+    final String testCollectionName = "c8n_1x1";
+    CollectionAdminRequest.createCollection(testCollectionName, "conf", 1, 1)
+        .process(cluster.getSolrClient());
+
+    Replica leader
+        = cluster.getSolrClient().getZkStateReader().getLeaderRetry(testCollectionName, "shard1", DEFAULT_TIMEOUT);
+
+    long coreStartTime = getCoreStatus(leader).getCoreStartTime().getTime();
+    CollectionAdminRequest.reloadCollection(testCollectionName).process(cluster.getSolrClient());
+
+    RetryUtil.retryUntil("Timed out waiting for core to reload", 30, 1000, TimeUnit.MILLISECONDS, () -> {
+      long restartTime = 0;
+      try {
+        restartTime = getCoreStatus(leader).getCoreStartTime().getTime();
+      } catch (Exception e) {
+        log.warn("Exception getting core start time: {}", e.getMessage());
+        return false;
+      }
+      return restartTime > coreStartTime;
+    });
+
+    final int initialStateVersion = getCollectionState(testCollectionName).getZNodeVersion();
+
+    cluster.expireZkSession(cluster.getReplicaJetty(leader));
+
+    waitForState("Timed out waiting for core to re-register as ACTIVE after session expiry", testCollectionName, (n, c) -> {
+      log.info("Collection state: {}", c.toString());
+      Replica expiredReplica = c.getReplica(leader.getName());
+      return expiredReplica.getState() == Replica.State.ACTIVE && c.getZNodeVersion() > initialStateVersion;
+    });
+
+    log.info("testReloadedLeaderStateAfterZkSessionLoss succeeded ... shutting down now!");
+  }
+}
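
Outside of tests, a client that needs the same "reload and wait" behavior can lean on
the async request API instead of polling core status. A minimal sketch with a
hypothetical collection name and a CloudSolrClient named "client":

    // Submit the reload as an async request and block until the Overseer reports completion.
    CollectionAdminRequest.reloadCollection("myColl").processAndWait(client, 60);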

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
new file mode 100644
index 0000000..f68fa9e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(3)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
+
+  @Before
+  public void deleteCollections() throws Exception {
+    cluster.deleteAllCollections();
+  }
+
+  @Test
+  public void testAddTooManyReplicas() throws Exception {
+    final String collectionName = "TooManyReplicasInSeveralFlavors";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setMaxShardsPerNode(1)
+        .process(cluster.getSolrClient());
+
+    // I have two replicas, one for each shard
+
+    // Curiously, I should be able to add a bunch of replicas if I specify the node, even more than maxShardsPerNode.
+    // Just grab the first node to use for the "node" parameter.
+    String nodeName = getAllNodeNames(collectionName).get(0);
+
+    // Add a replica using the "node" parameter (no "too many replicas check")
+    // this node should have 2 replicas on it
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(nodeName)
+        .process(cluster.getSolrClient());
+
+    // Three replicas so far, should be able to create another one "normally"
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .process(cluster.getSolrClient());
+
+    // This one should fail though, no "node" parameter specified
+    Exception e = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+          .process(cluster.getSolrClient());
+    });
+
+    assertTrue("Should have gotten the right error message back",
+          e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // Oddly, we should succeed next just because setting property.name will not check for nodes being "full up"
+    // TODO: Isn't this a bug?
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty("name", "bogus2")
+        .setNode(nodeName)
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    Slice slice = collectionState.getSlice("shard1");
+    Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
+    assertNotNull("Should have found a replica named 'bogus2'", replica);
+    assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
+
+    // Shard1 should have 4 replicas
+    assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
+
+    // And let's fail one more time, to ensure that the math doesn't do weird stuff when we have more replicas
+    // than simple calcs would indicate.
+    Exception e2 = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+          .process(cluster.getSolrClient());
+    });
+
+    assertTrue("Should have gotten the right error message back",
+        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
+    waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
+      for (Replica r : c.getReplicas()) {
+        if (r.getState() != Replica.State.ACTIVE)
+          return false;
+      }
+      return true;
+    });
+  }
+
+  @Test
+  public void testAddShard() throws Exception {
+
+    String collectionName = "TooManyReplicasWhenAddingShards";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
+        .setMaxShardsPerNode(2)
+        .process(cluster.getSolrClient());
+
+    // We have three nodes and maxShardsPerNode is set to 2, so the cluster can host at most 6 replicas.
+    // shardstart already uses 2, so we should be able to add 2 more shards each with two replicas, but fail on the third.
+    CollectionAdminRequest.createShard(collectionName, "shard1")
+        .process(cluster.getSolrClient());
+
+    // Add another shard to bring the cluster up to its maxShardsPerNode capacity
+    CollectionAdminRequest.createShard(collectionName, "shard2")
+        .process(cluster.getSolrClient());
+
+    // Now fail to add the third as it should exceed maxShardsPerNode
+    Exception e = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createShard(collectionName, "shard3")
+          .process(cluster.getSolrClient());
+    });
+    assertTrue("Should have gotten the right error message back",
+        e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
+    List<String> nodes = getAllNodeNames(collectionName);
+
+    CollectionAdminRequest.createShard(collectionName, "shard4")
+        .setNodeSet(StringUtils.join(nodes, ","))
+        .process(cluster.getSolrClient());
+
+    // And just for yucks, ensure we fail the "regular" one again.
+    Exception e2 = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createShard(collectionName, "shard5")
+          .process(cluster.getSolrClient());
+    });
+    assertTrue("Should have gotten the right error message back",
+        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // And finally, ensure that all the replicas we expect are present. We should have shards shardstart, 1, 2 and 4,
+    // each with exactly two replicas
+    waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
+      return DocCollection.isFullyActive(n, c, 4, 2);
+    });
+    Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
+    assertEquals("There should be exaclty four slices", slices.size(), 4);
+    assertNotNull("shardstart should exist", slices.get("shardstart"));
+    assertNotNull("shard1 should exist", slices.get("shard1"));
+    assertNotNull("shard2 should exist", slices.get("shard2"));
+    assertNotNull("shard4 should exist", slices.get("shard4"));
+    assertEquals("Shardstart should have exactly 2 replicas", 2, slices.get("shardstart").getReplicas().size());
+    assertEquals("Shard1 should have exactly 2 replicas", 2, slices.get("shard1").getReplicas().size());
+    assertEquals("Shard2 should have exactly 2 replicas", 2, slices.get("shard2").getReplicas().size());
+    assertEquals("Shard4 should have exactly 2 replicas", 2, slices.get("shard4").getReplicas().size());
+
+  }
+
+  @Test
+  public void testDownedShards() throws Exception {
+    String collectionName = "TooManyReplicasWhenAddingDownedNode";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
+        .setMaxShardsPerNode(2)
+        .process(cluster.getSolrClient());
+
+    // Shut down a Jetty, I really don't care which
+    JettySolrRunner jetty = cluster.getRandomJetty(random());
+    String deadNode = jetty.getBaseUrl().toString();
+    cluster.stopJettySolrRunner(jetty);
+
+    try {
+
+      // Adding a replica on a dead node should fail
+      Exception e1 = expectThrows(Exception.class, () -> {
+        CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
+            .setNode(deadNode)
+            .process(cluster.getSolrClient());
+      });
+      assertTrue("Should have gotten a message about shard not currently active: " + e1.toString(),
+          e1.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
+
+      // Should also die if we just add a shard
+      Exception e2 = expectThrows(Exception.class, () -> {
+        CollectionAdminRequest.createShard(collectionName, "shard1")
+            .setNodeSet(deadNode)
+            .process(cluster.getSolrClient());
+      });
+
+      assertTrue("Should have gotten a message about shard not currently active: " + e2.toString(),
+          e2.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
+    }
+    finally {
+      cluster.startJettySolrRunner(jetty);
+    }
+  }
+
+  private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
+    DocCollection state = getCollectionState(collectionName);
+    return state.getReplicas().stream().map(Replica::getNodeName).distinct().collect(Collectors.toList());
+  }
+
+}
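
The loophole this test documents (and flags with a TODO) is directly visible from SolrJ:
pinning a node, or setting a core property, skips the maxShardsPerNode check. A minimal
sketch with hypothetical names:

    // Subject to the check; fails once the nodes are "full up".
    CollectionAdminRequest.addReplicaToShard("myColl", "shard1").process(client);

    // Bypasses the check, as the test above demonstrates.
    CollectionAdminRequest.addReplicaToShard("myColl", "shard1")
        .setNode(nodeName)
        .process(client);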


[09/15] lucene-solr:master: SOLR-11817: Move Collections API classes to it's own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
new file mode 100644
index 0000000..eefe903
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.Cmd;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionAdminParams.COUNT_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+
+public class DeleteReplicaCmd implements Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public DeleteReplicaCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    deleteReplica(clusterState, message, results, null);
+  }
+
+  @SuppressWarnings("unchecked")
+  void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
+          throws KeeperException, InterruptedException {
+    log.debug("deleteReplica() : {}", Utils.toJSONString(message));
+    boolean parallel = message.getBool("parallel", false);
+
+    //If a count is specified, the strategy needs to be different
+    if (message.getStr(COUNT_PROP) != null) {
+      deleteReplicaBasedOnCount(clusterState, message, results, onComplete, parallel);
+      return;
+    }
+
+
+    ocmh.checkRequired(message, COLLECTION_PROP, SHARD_ID_PROP, REPLICA_PROP);
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String shard = message.getStr(SHARD_ID_PROP);
+    String replicaName = message.getStr(REPLICA_PROP);
+
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Slice slice = coll.getSlice(shard);
+    if (slice == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+              "Invalid shard name : " +  shard + " in collection : " +  collectionName);
+    }
+
+    deleteCore(slice, collectionName, replicaName, message, shard, results, onComplete, parallel);
+  }
+
+  /**
+   * Delete replicas based on count for a given collection. If a shard is specified, replicas are
+   * deleted only from that shard; otherwise the given number of replicas is deleted from every shard.
+   */
+  void deleteReplicaBasedOnCount(ClusterState clusterState,
+                                 ZkNodeProps message,
+                                 NamedList results,
+                                 Runnable onComplete,
+                                 boolean parallel)
+          throws KeeperException, InterruptedException {
+    ocmh.checkRequired(message, COLLECTION_PROP, COUNT_PROP);
+    int count = Integer.parseInt(message.getStr(COUNT_PROP));
+    String collectionName = message.getStr(COLLECTION_PROP);
+    String shard = message.getStr(SHARD_ID_PROP);
+    DocCollection coll = clusterState.getCollection(collectionName);
+    Slice slice = null;
+    //Validate if shard is passed.
+    if (shard != null) {
+      slice = coll.getSlice(shard);
+      if (slice == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                "Invalid shard name : " +  shard +  " in collection : " + collectionName);
+      }
+    }
+
+    Map<Slice, Set<String>> shardToReplicasMapping = new HashMap<Slice, Set<String>>();
+    if (slice != null) {
+      Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(slice, shard, collectionName, count);
+      shardToReplicasMapping.put(slice,replicasToBeDeleted);
+    } else {
+
+      //No shard specified; pick replicas to delete from every slice based on count.
+      Collection<Slice> allSlices = coll.getSlices();
+      for (Slice individualSlice : allSlices) {
+        Set<String> replicasToBeDeleted = pickReplicasTobeDeleted(individualSlice, individualSlice.getName(), collectionName, count);
+        shardToReplicasMapping.put(individualSlice, replicasToBeDeleted);
+      }
+    }
+
+    for (Slice shardSlice: shardToReplicasMapping.keySet()) {
+      String shardId = shardSlice.getName();
+      Set<String> replicas = shardToReplicasMapping.get(shardSlice);
+      //callDeleteReplica on all replicas
+      for (String replica: replicas) {
+        log.debug("Deleting replica {}  for shard {} based on count {}", replica, shardId, count);
+        deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
+      }
+      results.add("shard_id", shardId);
+      results.add("replicas_deleted", replicas);
+    }
+  }
+
+  /**
+   * Pick replicas to be deleted. Avoid picking the leader.
+   */
+  private Set<String> pickReplicasTobeDeleted(Slice slice, String shard, String collectionName, int count) {
+    validateReplicaAvailability(slice, shard, collectionName, count);
+    Collection<Replica> allReplicas = slice.getReplicas();
+    Set<String> replicasToBeRemoved = new HashSet<String>();
+    Replica leader = slice.getLeader();
+    for (Replica replica: allReplicas) {
+      if (count == 0) {
+        break;
+      }
+      //Avoid picking the leader, to minimize activity on the cluster.
+      if (leader.getCoreName().equals(replica.getCoreName())) {
+        continue;
+      }
+      replicasToBeRemoved.add(replica.getName());
+      count--;
+    }
+    return replicasToBeRemoved;
+  }
+
+  /**
+   * Validate that the number of replicas requested for removal is less than the number available.
+   * Also error out if there is only one replica available.
+   */
+  private void validateReplicaAvailability(Slice slice, String shard, String collectionName, int count) {
+    //If a specific shard is passed, validate that it still has replicas, and more than one of them
+    if (slice != null) {
+      Collection<Replica> allReplicasForShard = slice.getReplicas();
+      if (allReplicasForShard == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No replicas found in shard/collection: " +
+                shard + "/" + collectionName);
+      }
+
+      if (allReplicasForShard.size() == 1) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There is only one replica available in shard/collection: " +
+                shard + "/" + collectionName + ". Cannot delete that.");
+      }
+
+      if (allReplicasForShard.size() <= count) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The number of replicas requested to be deleted must be less than the number available in shard/collection: " +
+                shard + "/" + collectionName + ". Requested: " + count + ", Available: " + allReplicasForShard.size() + ".");
+      }
+    }
+  }
+
+  void deleteCore(Slice slice, String collectionName, String replicaName, ZkNodeProps message, String shard, NamedList results, Runnable onComplete, boolean parallel) throws KeeperException, InterruptedException {
+
+    Replica replica = slice.getReplica(replicaName);
+    if (replica == null) {
+      ArrayList<String> l = new ArrayList<>();
+      for (Replica r : slice.getReplicas())
+        l.add(r.getName());
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " +  replicaName + " in shard/collection : " +
+              shard  + "/" + collectionName + " available replicas are " +  StrUtils.join(l, ','));
+    }
+
+    // If users are being safe and only want to remove a shard if it is down, they can specify onlyIfDown=true
+    // on the command.
+    if (Boolean.parseBoolean(message.getStr(OverseerCollectionMessageHandler.ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+              "Attempted to remove replica : " + collectionName + "/"  + shard + "/" + replicaName +
+              " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
+    }
+
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+    String asyncId = message.getStr(ASYNC);
+    AtomicReference<Map<String, String>> requestMap = new AtomicReference<>(null);
+    if (asyncId != null) {
+      requestMap.set(new HashMap<>(1, 1.0f));
+    }
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.add(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.UNLOAD.toString());
+    params.add(CoreAdminParams.CORE, core);
+
+    params.set(CoreAdminParams.DELETE_INDEX, message.getBool(CoreAdminParams.DELETE_INDEX, true));
+    params.set(CoreAdminParams.DELETE_INSTANCE_DIR, message.getBool(CoreAdminParams.DELETE_INSTANCE_DIR, true));
+    params.set(CoreAdminParams.DELETE_DATA_DIR, message.getBool(CoreAdminParams.DELETE_DATA_DIR, true));
+
+    boolean isLive = ocmh.zkStateReader.getClusterState().getLiveNodes().contains(replica.getNodeName());
+    if (isLive) {
+      ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap.get());
+    }
+
+    Callable<Boolean> callable = () -> {
+      try {
+        if (isLive) {
+          ocmh.processResponses(results, shardHandler, false, null, asyncId, requestMap.get());
+
+          //check if the core unload removed the corenode zk entry
+          if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 5000)) return Boolean.TRUE;
+        }
+
+        // try and ensure core info is removed from cluster state
+        ocmh.deleteCoreNode(collectionName, replicaName, replica, core);
+        if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return Boolean.TRUE;
+        return Boolean.FALSE;
+      } catch (Exception e) {
+        results.add("failure", "Could not complete delete " + e.getMessage());
+        throw e;
+      } finally {
+        if (onComplete != null) onComplete.run();
+      }
+    };
+
+    if (!parallel) {
+      try {
+        if (!callable.call())
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+                  "Could not remove replica : " + collectionName + "/" + shard + "/" + replicaName);
+      } catch (InterruptedException | KeeperException e) {
+        throw e;
+      } catch (Exception ex) {
+        throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Error waiting for corenode gone", ex);
+      }
+
+    } else {
+      ocmh.tpe.submit(callable);
+    }
+
+  }
+
+}
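
Both deletion paths above are reachable through SolrJ. A minimal sketch with hypothetical
collection/shard/replica names, assuming the deleteReplica and deleteReplicasFromShard
helpers on CollectionAdminRequest:

    // Delete one named replica, but only if it is already down (the ONLY_IF_DOWN path above).
    CollectionAdminRequest.deleteReplica("myColl", "shard1", "core_node5")
        .setOnlyIfDown(true)
        .process(client);

    // Delete two replicas chosen server-side; the leader is avoided when possible,
    // as pickReplicasTobeDeleted shows.
    CollectionAdminRequest.deleteReplicasFromShard("myColl", "shard1", 2).process(client);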

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
new file mode 100644
index 0000000..2ef2955
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.cloud.DistributedQueue;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+
+public class DeleteShardCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+
+  public DeleteShardCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
+    String sliceId = message.getStr(ZkStateReader.SHARD_ID_PROP);
+
+    log.info("Delete shard invoked");
+    Slice slice = clusterState.getCollection(collectionName).getSlice(sliceId);
+    if (slice == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+        "No shard with name " + sliceId + " exists for collection " + collectionName);
+
+    // For now, only allow deletion of inactive slices or custom hashes (range==null).
+    // TODO: Add check for range gaps on Slice deletion
+    final Slice.State state = slice.getState();
+    if (!(slice.getRange() == null || state == Slice.State.INACTIVE || state == Slice.State.RECOVERY
+        || state == Slice.State.CONSTRUCTION) || state == Slice.State.RECOVERY_FAILED) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The slice: " + slice.getName() + " is currently " + state
+          + ". Only non-active (or custom-hashed) slices can be deleted.");
+    }
+
+    if (state == Slice.State.RECOVERY)  {
+      // mark the slice as 'construction' and only then try to delete the cores
+      // see SOLR-9455
+      DistributedQueue inQueue = Overseer.getStateUpdateQueue(ocmh.zkStateReader.getZkClient());
+      Map<String, Object> propMap = new HashMap<>();
+      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+      propMap.put(sliceId, Slice.State.CONSTRUCTION.toString());
+      propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+      ZkNodeProps m = new ZkNodeProps(propMap);
+      inQueue.offer(Utils.toJSON(m));
+    }
+
+    String asyncId = message.getStr(ASYNC);
+
+    try {
+      List<ZkNodeProps> replicas = getReplicasForSlice(collectionName, slice);
+      CountDownLatch cleanupLatch = new CountDownLatch(replicas.size());
+      for (ZkNodeProps r : replicas) {
+        final ZkNodeProps replica = r.plus(message.getProperties()).plus("parallel", "true").plus(ASYNC, asyncId);
+        log.info("Deleting replica for collection={} shard={} on node={}", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(CoreAdminParams.NODE));
+        NamedList deleteResult = new NamedList();
+        try {
+          ((DeleteReplicaCmd)ocmh.commandMap.get(DELETEREPLICA)).deleteReplica(clusterState, replica, deleteResult, () -> {
+            cleanupLatch.countDown();
+            if (deleteResult.get("failure") != null) {
+              synchronized (results) {
+                results.add("failure", String.format(Locale.ROOT, "Failed to delete replica for collection=%s shard=%s" +
+                    " on node=%s", replica.getStr(COLLECTION_PROP), replica.getStr(SHARD_ID_PROP), replica.getStr(NODE_NAME_PROP)));
+              }
+            }
+            SimpleOrderedMap success = (SimpleOrderedMap) deleteResult.get("success");
+            if (success != null) {
+              synchronized (results)  {
+                results.add("success", success);
+              }
+            }
+          });
+        } catch (KeeperException e) {
+          log.warn("Error deleting replica: " + r, e);
+          cleanupLatch.countDown();
+        } catch (Exception e) {
+          log.warn("Error deleting replica: " + r, e);
+          cleanupLatch.countDown();
+          throw e;
+        }
+      }
+      log.debug("Waiting for delete shard action to complete");
+      cleanupLatch.await(5, TimeUnit.MINUTES);
+
+      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, DELETESHARD.toLower(), ZkStateReader.COLLECTION_PROP,
+          collectionName, ZkStateReader.SHARD_ID_PROP, sliceId);
+      ZkStateReader zkStateReader = ocmh.zkStateReader;
+      Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+
+      // wait for a while until we don't see the shard
+      TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, timeSource);
+      boolean removed = false;
+      while (!timeout.hasTimedOut()) {
+        timeout.sleep(100);
+        DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName);
+        removed = collection.getSlice(sliceId) == null;
+        if (removed) {
+          timeout.sleep(100); // just a bit of time so it's more likely other readers see on return
+          break;
+        }
+      }
+      if (!removed) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+            "Could not fully remove collection: " + collectionName + " shard: " + sliceId);
+      }
+
+      log.info("Successfully deleted collection: " + collectionName + ", shard: " + sliceId);
+    } catch (SolrException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "Error executing delete operation for collection: " + collectionName + " shard: " + sliceId, e);
+    }
+  }
+
+  private List<ZkNodeProps> getReplicasForSlice(String collectionName, Slice slice) {
+    List<ZkNodeProps> sourceReplicas = new ArrayList<>();
+    for (Replica replica : slice.getReplicas()) {
+      ZkNodeProps props = new ZkNodeProps(
+          COLLECTION_PROP, collectionName,
+          SHARD_ID_PROP, slice.getName(),
+          ZkStateReader.CORE_NAME_PROP, replica.getCoreName(),
+          ZkStateReader.REPLICA_PROP, replica.getName(),
+          CoreAdminParams.NODE, replica.getNodeName());
+      sourceReplicas.add(props);
+    }
+    return sourceReplicas;
+  }
+}
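
Because the command above rejects active hash-routed slices, a client typically deletes
only inactive slices left over from a shard split, or slices of an implicit-router
collection. A minimal sketch with hypothetical names:

    // Fails with BAD_REQUEST while shard1 is active with a hash range; succeeds for
    // inactive or custom-hashed (range == null) slices.
    CollectionAdminRequest.deleteShard("myColl", "shard1").process(client);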

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
new file mode 100644
index 0000000..cf0a234
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.Replica.State;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
+import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.SnapshotStatus;
+import org.apache.solr.core.snapshots.SolrSnapshotManager;
+import org.apache.solr.handler.component.ShardHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the functionality of deleting a collection level snapshot.
+ */
+public class DeleteSnapshotCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public DeleteSnapshotCmd (OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    String collectionName =  message.getStr(COLLECTION_PROP);
+    String commitName =  message.getStr(CoreAdminParams.COMMIT_NAME);
+    String asyncId = message.getStr(ASYNC);
+    Map<String, String> requestMap = new HashMap<>();
+    NamedList shardRequestResults = new NamedList();
+    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
+    SolrZkClient zkClient = ocmh.zkStateReader.getZkClient();
+
+    Optional<CollectionSnapshotMetaData> meta = SolrSnapshotManager.getCollectionLevelSnapshot(zkClient, collectionName, commitName);
+    if (!meta.isPresent()) { // Snapshot not found. Nothing to do.
+      return;
+    }
+
+    log.info("Deleting a snapshot for collection={} with commitName={}", collectionName, commitName);
+
+    Set<String> existingCores = new HashSet<>();
+    for (Slice s : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
+      for (Replica r : s.getReplicas()) {
+        existingCores.add(r.getCoreName());
+      }
+    }
+
+    Set<String> coresWithSnapshot = new HashSet<>();
+    for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
+      if (existingCores.contains(m.getCoreName())) {
+        coresWithSnapshot.add(m.getCoreName());
+      }
+    }
+
+    log.info("Existing cores with snapshot for collection={} are {}", collectionName, existingCores);
+    for (Slice slice : ocmh.zkStateReader.getClusterState().getCollection(collectionName).getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
+        if (replica.getState() == State.DOWN) {
+          continue; // Since replica is down - no point sending a request.
+        }
+
+        // Note - when a snapshot is found in the in_progress state, it is the result of an overseer
+        // failure while handling the snapshot creation. Since we don't know the exact set of
+        // replicas to contact at this point, we try on all replicas.
+        if (meta.get().getStatus() == SnapshotStatus.InProgress || coresWithSnapshot.contains(replica.getCoreName())) {
+          String coreName = replica.getStr(CORE_NAME_PROP);
+
+          ModifiableSolrParams params = new ModifiableSolrParams();
+          params.set(CoreAdminParams.ACTION, CoreAdminAction.DELETESNAPSHOT.toString());
+          params.set(NAME, slice.getName());
+          params.set(CORE_NAME_PROP, coreName);
+          params.set(CoreAdminParams.COMMIT_NAME, commitName);
+
+          log.info("Sending deletesnapshot request to core={} with commitName={}", coreName, commitName);
+          ocmh.sendShardRequest(replica.getNodeName(), params, shardHandler, asyncId, requestMap);
+        }
+      }
+    }
+
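+    // Wait for all DELETESNAPSHOT core admin requests to finish; every core that
+    // reports success is removed from coresWithSnapshot below, so anything left
+    // in that set represents a failed deletion.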
+    ocmh.processResponses(shardRequestResults, shardHandler, false, null, asyncId, requestMap);
+    NamedList success = (NamedList) shardRequestResults.get("success");
+    if (success != null) {
+      for ( int i = 0 ; i < success.size() ; i++) {
+        NamedList resp = (NamedList)success.getVal(i);
+        // Unfortunately async processing logic doesn't provide the "core" name automatically.
+        String coreName = (String)resp.get("core");
+        coresWithSnapshot.remove(coreName);
+      }
+    }
+
+    if (!coresWithSnapshot.isEmpty()) { // One or more failures.
+      log.warn("Failed to delete a snapshot for collection {} with commitName = {}. Snapshot could not be deleted for following cores {}",
+          collectionName, commitName, coresWithSnapshot);
+
+      List<CoreSnapshotMetaData> replicasWithSnapshot = new ArrayList<>();
+      for (CoreSnapshotMetaData m : meta.get().getReplicaSnapshots()) {
+        if (coresWithSnapshot.contains(m.getCoreName())) {
+          replicasWithSnapshot.add(m);
+        }
+      }
+
+      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
+      // which cores still contain the named snapshot.
+      CollectionSnapshotMetaData newResult = new CollectionSnapshotMetaData(meta.get().getName(), SnapshotStatus.Failed,
+          meta.get().getCreationDate(), replicasWithSnapshot);
+      SolrSnapshotManager.updateCollectionLevelSnapshot(zkClient, collectionName, newResult);
+      log.info("Saved snapshot information for collection={} with commitName={} in Zookeeper as follows", collectionName, commitName,
+          Utils.toJSON(newResult));
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to delete snapshot on cores " + coresWithSnapshot);
+
+    } else {
+      // Delete the ZK path so that we eliminate the references to this snapshot from the collection-level meta-data.
+      SolrSnapshotManager.deleteCollectionLevelSnapshot(zkClient, collectionName, commitName);
+      log.info("Deleted Zookeeper snapshot metdata for collection={} with commitName={}", collectionName, commitName);
+      log.info("Successfully deleted snapshot for collection={} with commitName={}", collectionName, commitName);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
new file mode 100644
index 0000000..a80fdc0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/LeaderRecoveryWatcher.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.Set;
+
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.cloud.CollectionStateWatcher;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+
+/**
+ * We use this watcher to wait for any eligible replica in a shard to become active so that it can become a leader.
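+ * <p>
+ * A registration sketch (names are illustrative; the latch/await pattern mirrors
+ * its use in MoveReplicaCmd):
+ * <pre>
+ *   SolrCloseableLatch latch = new SolrCloseableLatch(1, ocmh);
+ *   zkStateReader.registerCollectionStateWatcher("collection1",
+ *       new LeaderRecoveryWatcher("collection1", "shard1", "core_node3", null, latch));
+ *   latch.await(timeoutSeconds, TimeUnit.SECONDS);
+ * </pre>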
+ */
+public class LeaderRecoveryWatcher implements CollectionStateWatcher {
+  String collectionId;
+  String shardId;
+  String replicaId;
+  String targetCore;
+  SolrCloseableLatch latch;
+
+  /**
+   * Watch for recovery of a replica
+   *
+   * @param collectionId   collection name
+   * @param shardId        shard id
+   * @param replicaId      source replica name (coreNodeName)
+   * @param targetCore     specific target core name - if null then any active replica will do
+   * @param latch countdown when recovered
+   */
+  LeaderRecoveryWatcher(String collectionId, String shardId, String replicaId, String targetCore, SolrCloseableLatch latch) {
+    this.collectionId = collectionId;
+    this.shardId = shardId;
+    this.replicaId = replicaId;
+    this.targetCore = targetCore;
+    this.latch = latch;
+  }
+
+  @Override
+  public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
+    if (collectionState == null) { // collection has been deleted - don't wait
+      latch.countDown();
+      return true;
+    }
+    Slice slice = collectionState.getSlice(shardId);
+    if (slice == null) { // shard has been removed - don't wait
+      latch.countDown();
+      return true;
+    }
+    for (Replica replica : slice.getReplicas()) {
+      // check if another replica exists - doesn't have to be the one we're moving
+      // as long as it's active and can become a leader, in which case we don't have to wait
+      // for recovery of specifically the one that we've just added
+      if (!replica.getName().equals(replicaId)) {
+        if (replica.getType().equals(Replica.Type.PULL)) { // not eligible for leader election
+          continue;
+        }
+        // check its state
+        String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+        if (targetCore != null && !targetCore.equals(coreName)) {
+          continue;
+        }
+        if (replica.isActive(liveNodes)) { // recovered - stop waiting
+          latch.countDown();
+          return true;
+        }
+      }
+    }
+    // set the watch again to wait for the new replica to recover
+    return false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
new file mode 100644
index 0000000..4edc363
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.cloud.overseer.OverseerAction;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CompositeIdRouter;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.RoutingRule;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.handler.component.ShardHandlerFactory;
+import org.apache.solr.update.SolrIndexSplitter;
+import org.apache.solr.util.TimeOut;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.util.Utils.makeMap;
+
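+/**
+ * Implements the MIGRATE collection action: documents matching a given split.key
+ * are split out of the source shard into a temporary collection and merged into
+ * the target shard leader, while a routing rule forwards new updates and the
+ * target leader buffers them until the merge completes.
+ * <p>
+ * Typically invoked through the Collections API, e.g.
+ * <pre>
+ *   /admin/collections?action=MIGRATE&amp;collection=source
+ *       &amp;target.collection=target&amp;split.key=a!
+ * </pre>
+ */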
+public class MigrateCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+
+  public MigrateCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    String sourceCollectionName = message.getStr("collection");
+    String splitKey = message.getStr("split.key");
+    String targetCollectionName = message.getStr("target.collection");
+    int timeout = message.getInt("forward.timeout", 10 * 60) * 1000;
+
+    DocCollection sourceCollection = clusterState.getCollection(sourceCollectionName);
+    if (sourceCollection == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown source collection: " + sourceCollectionName);
+    }
+    DocCollection targetCollection = clusterState.getCollection(targetCollectionName);
+    if (targetCollection == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown target collection: " + sourceCollectionName);
+    }
+    if (!(sourceCollection.getRouter() instanceof CompositeIdRouter)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source collection must use a compositeId router");
+    }
+    if (!(targetCollection.getRouter() instanceof CompositeIdRouter)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target collection must use a compositeId router");
+    }
+    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
+    CompositeIdRouter targetRouter = (CompositeIdRouter) targetCollection.getRouter();
+    Collection<Slice> sourceSlices = sourceRouter.getSearchSlicesSingle(splitKey, null, sourceCollection);
+    if (sourceSlices.isEmpty()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "No active slices available in source collection: " + sourceCollection + "for given split.key: " + splitKey);
+    }
+    Collection<Slice> targetSlices = targetRouter.getSearchSlicesSingle(splitKey, null, targetCollection);
+    if (targetSlices.isEmpty()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "No active slices available in target collection: " + targetCollection + "for given split.key: " + splitKey);
+    }
+
+    String asyncId = null;
+    if (message.containsKey(ASYNC) && message.get(ASYNC) != null)
+      asyncId = message.getStr(ASYNC);
+
+    for (Slice sourceSlice : sourceSlices) {
+      for (Slice targetSlice : targetSlices) {
+        log.info("Migrating source shard: {} to target shard: {} for split.key = " + splitKey, sourceSlice, targetSlice);
+        migrateKey(clusterState, sourceCollection, sourceSlice, targetCollection, targetSlice, splitKey,
+            timeout, results, asyncId, message);
+      }
+    }
+  }
+
+  private void migrateKey(ClusterState clusterState, DocCollection sourceCollection, Slice sourceSlice,
+                          DocCollection targetCollection, Slice targetSlice,
+                          String splitKey, int timeout,
+                          NamedList results, String asyncId, ZkNodeProps message) throws Exception {
+    String tempSourceCollectionName = "split_" + sourceSlice.getName() + "_temp_" + targetSlice.getName();
+    ZkStateReader zkStateReader = ocmh.zkStateReader;
+    if (clusterState.hasCollection(tempSourceCollectionName)) {
+      log.info("Deleting temporary collection: " + tempSourceCollectionName);
+      Map<String, Object> props = makeMap(
+          Overseer.QUEUE_OPERATION, DELETE.toLower(),
+          NAME, tempSourceCollectionName);
+
+      try {
+        ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
+        clusterState = zkStateReader.getClusterState();
+      } catch (Exception e) {
+        log.warn("Unable to clean up existing temporary collection: " + tempSourceCollectionName, e);
+      }
+    }
+
+    CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
+    DocRouter.Range keyHashRange = sourceRouter.keyHashRange(splitKey);
+
+    ShardHandlerFactory shardHandlerFactory = ocmh.shardHandlerFactory;
+    ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
+
+    log.info("Hash range for split.key: {} is: {}", splitKey, keyHashRange);
+    // intersect source range, keyHashRange and target range
+    // this is the range that has to be split from source and transferred to target
+    DocRouter.Range splitRange = ocmh.intersect(targetSlice.getRange(), ocmh.intersect(sourceSlice.getRange(), keyHashRange));
+    if (splitRange == null) {
+      log.info("No common hashes between source shard: {} and target shard: {}", sourceSlice.getName(), targetSlice.getName());
+      return;
+    }
+    log.info("Common hash range between source shard: {} and target shard: {} = " + splitRange, sourceSlice.getName(), targetSlice.getName());
+
+    Replica targetLeader = zkStateReader.getLeaderRetry(targetCollection.getName(), targetSlice.getName(), 10000);
+    // For tracking async calls.
+    Map<String, String> requestMap = new HashMap<>();
+
+    log.info("Asking target leader node: " + targetLeader.getNodeName() + " core: "
+        + targetLeader.getStr("core") + " to buffer updates");
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTBUFFERUPDATES.toString());
+    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
+
+    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to buffer updates", asyncId, requestMap);
+
+    ZkNodeProps m = new ZkNodeProps(
+        Overseer.QUEUE_OPERATION, OverseerAction.ADDROUTINGRULE.toLower(),
+        COLLECTION_PROP, sourceCollection.getName(),
+        SHARD_ID_PROP, sourceSlice.getName(),
+        "routeKey", SolrIndexSplitter.getRouteKey(splitKey) + "!",
+        "range", splitRange.toString(),
+        "targetCollection", targetCollection.getName(),
+        "expireAt", RoutingRule.makeExpiryAt(timeout));
+    log.info("Adding routing rule: " + m);
+    Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(m));
+
+    // wait for a while until we see the new rule
+    log.info("Waiting to see routing rule updated in clusterstate");
+    TimeOut waitUntil = new TimeOut(60, TimeUnit.SECONDS, timeSource);
+    boolean added = false;
+    while (!waitUntil.hasTimedOut()) {
+      waitUntil.sleep(100);
+      sourceCollection = zkStateReader.getClusterState().getCollection(sourceCollection.getName());
+      sourceSlice = sourceCollection.getSlice(sourceSlice.getName());
+      Map<String, RoutingRule> rules = sourceSlice.getRoutingRules();
+      if (rules != null) {
+        RoutingRule rule = rules.get(SolrIndexSplitter.getRouteKey(splitKey) + "!");
+        if (rule != null && rule.getRouteRanges().contains(splitRange)) {
+          added = true;
+          break;
+        }
+      }
+    }
+    if (!added) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not add routing rule: " + m);
+    }
+
+    log.info("Routing rule added successfully");
+
+    // Create temp core on source shard
+    Replica sourceLeader = zkStateReader.getLeaderRetry(sourceCollection.getName(), sourceSlice.getName(), 10000);
+
+    // create a temporary collection with just one node on the shard leader
+    String configName = zkStateReader.readConfigName(sourceCollection.getName());
+    Map<String, Object> props = makeMap(
+        Overseer.QUEUE_OPERATION, CREATE.toLower(),
+        NAME, tempSourceCollectionName,
+        NRT_REPLICAS, 1,
+        OverseerCollectionMessageHandler.NUM_SLICES, 1,
+        OverseerCollectionMessageHandler.COLL_CONF, configName,
+        OverseerCollectionMessageHandler.CREATE_NODE_SET, sourceLeader.getNodeName());
+    if (asyncId != null) {
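+      // Derive a unique child async id so that creation of the temporary
+      // collection can be tracked independently of the parent MIGRATE request.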
+      String internalAsyncId = asyncId + Math.abs(System.nanoTime());
+      props.put(ASYNC, internalAsyncId);
+    }
+
+    log.info("Creating temporary collection: " + props);
+    ocmh.commandMap.get(CREATE).call(clusterState, new ZkNodeProps(props), results);
+    // refresh cluster state
+    clusterState = zkStateReader.getClusterState();
+    Slice tempSourceSlice = clusterState.getCollection(tempSourceCollectionName).getSlices().iterator().next();
+    Replica tempSourceLeader = zkStateReader.getLeaderRetry(tempSourceCollectionName, tempSourceSlice.getName(), 120000);
+
+    String tempCollectionReplica1 = tempSourceLeader.getCoreName();
+    String coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
+        sourceLeader.getNodeName(), tempCollectionReplica1);
+    // wait for the replicas to be seen as active on temp source leader
+    log.info("Asking source leader to wait for: " + tempCollectionReplica1 + " to be alive on: " + sourceLeader.getNodeName());
+    CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
+    cmd.setCoreName(tempCollectionReplica1);
+    cmd.setNodeName(sourceLeader.getNodeName());
+    cmd.setCoreNodeName(coreNodeName);
+    cmd.setState(Replica.State.ACTIVE);
+    cmd.setCheckLive(true);
+    cmd.setOnlyIfLeader(true);
+    // we don't want this to happen asynchronously
+    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), new ModifiableSolrParams(cmd.getParams()), shardHandler, null, null);
+
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection leader" +
+        " or timed out waiting for it to come up", asyncId, requestMap);
+
+    log.info("Asking source leader to split index");
+    params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
+    params.set(CoreAdminParams.CORE, sourceLeader.getStr("core"));
+    params.add(CoreAdminParams.TARGET_CORE, tempSourceLeader.getStr("core"));
+    params.set(CoreAdminParams.RANGES, splitRange.toString());
+    params.set("split.key", splitKey);
+
+    String tempNodeName = sourceLeader.getNodeName();
+
+    ocmh.sendShardRequest(tempNodeName, params, shardHandler, asyncId, requestMap);
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to invoke SPLIT core admin command", asyncId, requestMap);
+
+    log.info("Creating a replica of temporary collection: {} on the target leader node: {}",
+        tempSourceCollectionName, targetLeader.getNodeName());
+    String tempCollectionReplica2 = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(),
+        zkStateReader.getClusterState().getCollection(tempSourceCollectionName), tempSourceSlice.getName(), Replica.Type.NRT);
+    props = new HashMap<>();
+    props.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
+    props.put(COLLECTION_PROP, tempSourceCollectionName);
+    props.put(SHARD_ID_PROP, tempSourceSlice.getName());
+    props.put("node", targetLeader.getNodeName());
+    props.put(CoreAdminParams.NAME, tempCollectionReplica2);
+    // copy over property params:
+    for (String key : message.keySet()) {
+      if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+        props.put(key, message.getStr(key));
+      }
+    }
+    // add async param
+    if (asyncId != null) {
+      props.put(ASYNC, asyncId);
+    }
+    ((AddReplicaCmd)ocmh.commandMap.get(ADDREPLICA)).addReplica(clusterState, new ZkNodeProps(props), results, null);
+
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create replica of " +
+        "temporary collection in target leader node.", asyncId, requestMap);
+
+    coreNodeName = ocmh.waitForCoreNodeName(tempSourceCollectionName,
+        targetLeader.getNodeName(), tempCollectionReplica2);
+    // wait for the replicas to be seen as active on temp source leader
+    log.info("Asking temp source leader to wait for: " + tempCollectionReplica2 + " to be alive on: " + targetLeader.getNodeName());
+    cmd = new CoreAdminRequest.WaitForState();
+    cmd.setCoreName(tempSourceLeader.getStr("core"));
+    cmd.setNodeName(targetLeader.getNodeName());
+    cmd.setCoreNodeName(coreNodeName);
+    cmd.setState(Replica.State.ACTIVE);
+    cmd.setCheckLive(true);
+    cmd.setOnlyIfLeader(true);
+    params = new ModifiableSolrParams(cmd.getParams());
+
+    ocmh.sendShardRequest(tempSourceLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to create temp collection" +
+        " replica or timed out waiting for them to come up", asyncId, requestMap);
+
+    log.info("Successfully created replica of temp source collection on target leader node");
+
+    log.info("Requesting merge of temp source collection replica to target leader");
+    params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.MERGEINDEXES.toString());
+    params.set(CoreAdminParams.CORE, targetLeader.getStr("core"));
+    params.set(CoreAdminParams.SRC_CORE, tempCollectionReplica2);
+
+    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+    String msg = "MIGRATE failed to merge " + tempCollectionReplica2 + " to "
+        + targetLeader.getStr("core") + " on node: " + targetLeader.getNodeName();
+    ocmh.processResponses(results, shardHandler, true, msg, asyncId, requestMap);
+
+    log.info("Asking target leader to apply buffered updates");
+    params = new ModifiableSolrParams();
+    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
+    params.set(CoreAdminParams.NAME, targetLeader.getStr("core"));
+
+    ocmh.sendShardRequest(targetLeader.getNodeName(), params, shardHandler, asyncId, requestMap);
+    ocmh.processResponses(results, shardHandler, true, "MIGRATE failed to request node to apply buffered updates",
+        asyncId, requestMap);
+
+    try {
+      log.info("Deleting temporary collection: " + tempSourceCollectionName);
+      props = makeMap(
+          Overseer.QUEUE_OPERATION, DELETE.toLower(),
+          NAME, tempSourceCollectionName);
+      ocmh.commandMap.get(DELETE).call(zkStateReader.getClusterState(), new ZkNodeProps(props), results);
+    } catch (Exception e) {
+      log.error("Unable to delete temporary collection: " + tempSourceCollectionName
+          + ". Please remove it manually", e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
new file mode 100644
index 0000000..f9392b5
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/MoveReplicaCmd.java
@@ -0,0 +1,303 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.cloud.ActiveReplicaWatcher;
+import org.apache.solr.common.SolrCloseableLatch;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.update.UpdateLog;
+import org.apache.solr.util.TimeOut;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE;
+import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
+import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
+import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE;
+import static org.apache.solr.common.params.CommonAdminParams.TIMEOUT;
+import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
+
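+/**
+ * Implements the MOVEREPLICA collection action: a replica is moved to a target
+ * node either in place, by reattaching its data directory on shared storage
+ * (HDFS), or by adding a new replica on the target node and deleting the source
+ * replica once the new one is active.
+ * <p>
+ * A SolrJ usage sketch (mirrors the test usage elsewhere in this patch):
+ * <pre>
+ *   new CollectionAdminRequest.MoveReplica(collection, replicaName, targetNode)
+ *       .processAndWait(cloudSolrClient, timeoutSeconds);
+ * </pre>
+ */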
+public class MoveReplicaCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final OverseerCollectionMessageHandler ocmh;
+  private final TimeSource timeSource;
+
+  public MoveReplicaCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+    this.timeSource = ocmh.cloudManager.getTimeSource();
+  }
+
+  @Override
+  public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
+    moveReplica(ocmh.zkStateReader.getClusterState(), message, results);
+  }
+
+  private void moveReplica(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+    log.debug("moveReplica() : {}", Utils.toJSONString(message));
+    ocmh.checkRequired(message, COLLECTION_PROP, CollectionParams.TARGET_NODE);
+    String collection = message.getStr(COLLECTION_PROP);
+    String targetNode = message.getStr(CollectionParams.TARGET_NODE);
+    boolean waitForFinalState = message.getBool(WAIT_FOR_FINAL_STATE, false);
+    boolean inPlaceMove = message.getBool(IN_PLACE_MOVE, true);
+    int timeout = message.getInt(TIMEOUT, 10 * 60); // 10 minutes
+
+    String async = message.getStr(ASYNC);
+
+    DocCollection coll = clusterState.getCollection(collection);
+    if (coll == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + collection + " does not exist");
+    }
+    if (!clusterState.getLiveNodes().contains(targetNode)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target node: " + targetNode + " not in live nodes: " + clusterState.getLiveNodes());
+    }
+    Replica replica = null;
+    if (message.containsKey(REPLICA_PROP)) {
+      String replicaName = message.getStr(REPLICA_PROP);
+      replica = coll.getReplica(replicaName);
+      if (replica == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "Collection: " + collection + " replica: " + replicaName + " does not exist");
+      }
+    } else {
+      String sourceNode = message.getStr(CollectionParams.SOURCE_NODE, message.getStr(CollectionParams.FROM_NODE));
+      if (sourceNode == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + CollectionParams.SOURCE_NODE +
+            " or '" + CollectionParams.FROM_NODE + "' is a required param");
+      }
+      String shardId = message.getStr(SHARD_ID_PROP);
+      if (shardId == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'" + SHARD_ID_PROP + "' is a required param");
+      }
+      Slice slice = clusterState.getCollection(collection).getSlice(shardId);
+      List<Replica> sliceReplicas = new ArrayList<>(slice.getReplicas());
+      Collections.shuffle(sliceReplicas, OverseerCollectionMessageHandler.RANDOM);
+      // this picks up a single random replica from the sourceNode
+      for (Replica r : sliceReplicas) {
+        if (r.getNodeName().equals(sourceNode)) {
+          replica = r;
+          break;
+        }
+      }
+      if (replica == null) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+            "Collection: " + collection + " node: " + sourceNode + " does not have any replica belonging to shard: " + shardId);
+      }
+    }
+
+    log.info("Replica will be moved to node {}: {}", targetNode, replica);
+    Slice slice = null;
+    for (Slice s : coll.getSlices()) {
+      if (s.getReplicas().contains(replica)) {
+        slice = s;
+      }
+    }
+    assert slice != null;
+    Object dataDir = replica.get("dataDir");
+    boolean isSharedFS = replica.getBool(ZkStateReader.SHARED_STORAGE_PROP, false) && dataDir != null;
+
+    if (isSharedFS && inPlaceMove) {
+      log.debug("-- moveHdfsReplica");
+      moveHdfsReplica(clusterState, results, dataDir.toString(), targetNode, async, coll, replica, slice, timeout, waitForFinalState);
+    } else {
+      log.debug("-- moveNormalReplica (inPlaceMove=" + inPlaceMove + ", isSharedFS=" + isSharedFS);
+      moveNormalReplica(clusterState, results, targetNode, async, coll, replica, slice, timeout, waitForFinalState);
+    }
+  }
+
+  private void moveHdfsReplica(ClusterState clusterState, NamedList results, String dataDir, String targetNode, String async,
+                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
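+    // If the source node is down, the replica's entry is still present in the
+    // cluster state, so the new core is attached to the existing entry and we
+    // skip re-creating it; if the node is live, delete the old replica first.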
+    String skipCreateReplicaInClusterState = "true";
+    if (clusterState.getLiveNodes().contains(replica.getNodeName())) {
+      skipCreateReplicaInClusterState = "false";
+      ZkNodeProps removeReplicasProps = new ZkNodeProps(
+          COLLECTION_PROP, coll.getName(),
+          SHARD_ID_PROP, slice.getName(),
+          REPLICA_PROP, replica.getName()
+      );
+      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_DATA_DIR, false);
+      removeReplicasProps.getProperties().put(CoreAdminParams.DELETE_INDEX, false);
+      if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
+      NamedList deleteResult = new NamedList();
+      ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
+      if (deleteResult.get("failure") != null) {
+        String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
+            coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
+        log.warn(errorString);
+        results.add("failure", errorString);
+        return;
+      }
+
+      TimeOut timeOut = new TimeOut(20L, TimeUnit.SECONDS, timeSource);
+      while (!timeOut.hasTimedOut()) {
+        coll = ocmh.zkStateReader.getClusterState().getCollection(coll.getName());
+        if (coll.getReplica(replica.getName()) != null) {
+          timeOut.sleep(100);
+        } else {
+          break;
+        }
+      }
+      if (timeOut.hasTimedOut()) {
+        results.add("failure", "Still see deleted replica in clusterstate!");
+        return;
+      }
+
+    }
+
+    String ulogDir = replica.getStr(CoreAdminParams.ULOG_DIR);
+    ZkNodeProps addReplicasProps = new ZkNodeProps(
+        COLLECTION_PROP, coll.getName(),
+        SHARD_ID_PROP, slice.getName(),
+        CoreAdminParams.NODE, targetNode,
+        CoreAdminParams.CORE_NODE_NAME, replica.getName(),
+        CoreAdminParams.NAME, replica.getCoreName(),
+        WAIT_FOR_FINAL_STATE, String.valueOf(waitForFinalState),
+        SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, skipCreateReplicaInClusterState,
+        CoreAdminParams.ULOG_DIR, ulogDir.substring(0, ulogDir.lastIndexOf(UpdateLog.TLOG_NAME)),
+        CoreAdminParams.DATA_DIR, dataDir);
+    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
+    NamedList addResult = new NamedList();
+    try {
+      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, addResult, null);
+    } catch (Exception e) {
+      // fatal error - try rolling back
+      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
+          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
+      results.add("failure", errorString);
+      log.warn("Error adding replica " + addReplicasProps + " - trying to roll back...", e);
+      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
+      NamedList rollback = new NamedList();
+      ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
+      if (rollback.get("failure") != null) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
+            + ", collection may be inconsistent: " + rollback.get("failure"));
+      }
+      return;
+    }
+    if (addResult.get("failure") != null) {
+      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
+          " on node=%s, failure=%s", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
+      log.warn(errorString);
+      results.add("failure", errorString);
+      log.debug("--- trying to roll back...");
+      // try to roll back
+      addReplicasProps = addReplicasProps.plus(CoreAdminParams.NODE, replica.getNodeName());
+      NamedList rollback = new NamedList();
+      try {
+        ocmh.addReplica(ocmh.zkStateReader.getClusterState(), addReplicasProps, rollback, null);
+      } catch (Exception e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
+            + ", collection may be inconsistent!", e);
+      }
+      if (rollback.get("failure") != null) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Fatal error during MOVEREPLICA of " + replica
+            + ", collection may be inconsistent! Failure: " + rollback.get("failure"));
+      }
+      return;
+    } else {
+      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
+          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), replica.getCoreName(), targetNode);
+      results.add("success", successString);
+    }
+  }
+
+  private void moveNormalReplica(ClusterState clusterState, NamedList results, String targetNode, String async,
+                                 DocCollection coll, Replica replica, Slice slice, int timeout, boolean waitForFinalState) throws Exception {
+    String newCoreName = Assign.buildSolrCoreName(ocmh.overseer.getSolrCloudManager().getDistribStateManager(), coll, slice.getName(), replica.getType());
+    ZkNodeProps addReplicasProps = new ZkNodeProps(
+        COLLECTION_PROP, coll.getName(),
+        SHARD_ID_PROP, slice.getName(),
+        CoreAdminParams.NODE, targetNode,
+        CoreAdminParams.NAME, newCoreName);
+    if (async != null) addReplicasProps.getProperties().put(ASYNC, async);
+    NamedList addResult = new NamedList();
+    SolrCloseableLatch countDownLatch = new SolrCloseableLatch(1, ocmh);
+    ActiveReplicaWatcher watcher = null;
+    ZkNodeProps props = ocmh.addReplica(clusterState, addReplicasProps, addResult, null);
+    log.debug("props " + props);
+    if (replica.equals(slice.getLeader()) || waitForFinalState) {
+      watcher = new ActiveReplicaWatcher(coll.getName(), null, Collections.singletonList(newCoreName), countDownLatch);
+      log.debug("-- registered watcher " + watcher);
+      ocmh.zkStateReader.registerCollectionStateWatcher(coll.getName(), watcher);
+    }
+    if (addResult.get("failure") != null) {
+      String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" +
+          " on node=%s, failure=", coll.getName(), slice.getName(), targetNode, addResult.get("failure"));
+      log.warn(errorString);
+      results.add("failure", errorString);
+      if (watcher != null) { // unregister
+        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
+      }
+      return;
+    }
+    // wait for the other replica to be active if the source replica was a leader
+    if (watcher != null) {
+      try {
+        log.debug("Waiting for leader's replica to recover.");
+        if (!countDownLatch.await(timeout, TimeUnit.SECONDS)) {
+          String errorString = String.format(Locale.ROOT, "Timed out waiting for leader's replica to recover, collection=%s shard=%s" +
+              " on node=%s", coll.getName(), slice.getName(), targetNode);
+          log.warn(errorString);
+          results.add("failure", errorString);
+          return;
+        } else {
+          log.debug("Replica " + watcher.getActiveReplicas() + " is active - deleting the source...");
+        }
+      } finally {
+        ocmh.zkStateReader.removeCollectionStateWatcher(coll.getName(), watcher);
+      }
+    }
+
+    ZkNodeProps removeReplicasProps = new ZkNodeProps(
+        COLLECTION_PROP, coll.getName(),
+        SHARD_ID_PROP, slice.getName(),
+        REPLICA_PROP, replica.getName());
+    if (async != null) removeReplicasProps.getProperties().put(ASYNC, async);
+    NamedList deleteResult = new NamedList();
+    ocmh.deleteReplica(clusterState, removeReplicasProps, deleteResult, null);
+    if (deleteResult.get("failure") != null) {
+      String errorString = String.format(Locale.ROOT, "Failed to cleanup replica collection=%s shard=%s name=%s, failure=%s",
+          coll.getName(), slice.getName(), replica.getName(), deleteResult.get("failure"));
+      log.warn(errorString);
+      results.add("failure", errorString);
+    } else {
+      String successString = String.format(Locale.ROOT, "MOVEREPLICA action completed successfully, moved replica=%s at node=%s " +
+          "to replica=%s at node=%s", replica.getCoreName(), replica.getNodeName(), newCoreName, targetNode);
+      results.add("success", successString);
+    }
+  }
+}


[06/15] lucene-solr:master: SOLR-11817: Move Collections API classes to it's own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
deleted file mode 100644
index daa267d..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-@Slow
-public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(3)
-        .addConfig("conf", configset("cloud-minimal"))
-        .configure();
-  }
-
-  @Before
-  public void deleteCollections() throws Exception {
-    cluster.deleteAllCollections();
-  }
-
-  @Test
-  public void testAddTooManyReplicas() throws Exception {
-    final String collectionName = "TooManyReplicasInSeveralFlavors";
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .setMaxShardsPerNode(1)
-        .process(cluster.getSolrClient());
-
-    // I have two replicas, one for each shard
-
-    // Curiously, I should be able to add a bunch of replicas if I specify the node, even more than maxShardsPerNode
-    // Just get the first node any way we can.
-    // Get a node to use for the "node" parameter.
-    String nodeName = getAllNodeNames(collectionName).get(0);
-
-    // Add a replica using the "node" parameter (no "too many replicas check")
-    // this node should have 2 replicas on it
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .setNode(nodeName)
-        .process(cluster.getSolrClient());
-
-    // Three replicas so far, should be able to create another one "normally"
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .process(cluster.getSolrClient());
-
-    // This one should fail though, no "node" parameter specified
-    Exception e = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-          .process(cluster.getSolrClient());
-    });
-
-    assertTrue("Should have gotten the right error message back",
-          e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-
-    // Oddly, we should succeed next just because setting property.name will not check for nodes being "full up"
-    // TODO: Isn't this a bug?
-    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty("name", "bogus2")
-        .setNode(nodeName)
-        .process(cluster.getSolrClient());
-
-    DocCollection collectionState = getCollectionState(collectionName);
-    Slice slice = collectionState.getSlice("shard1");
-    Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
-    assertNotNull("Should have found a replica named 'bogus2'", replica);
-    assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
-
-    // Shard1 should have 4 replicas
-    assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
-
-    // And let's fail one more time to ensure that the math doesn't do weird stuff if we have more replicas
-    // than simple calcs would indicate.
-    Exception e2 = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-          .process(cluster.getSolrClient());
-    });
-
-    assertTrue("Should have gotten the right error message back",
-        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
-    waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
-      for (Replica r : c.getReplicas()) {
-        if (r.getState() != Replica.State.ACTIVE)
-          return false;
-      }
-      return true;
-    });
-  }
-
-  @Test
-  public void testAddShard() throws Exception {
-
-    String collectionName = "TooManyReplicasWhenAddingShards";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
-        .setMaxShardsPerNode(2)
-        .process(cluster.getSolrClient());
-
-    // We have two nodes, maxShardsPerNode is set to 2. Therefore, we should be able to add 2 shards each with
-    // two replicas, but fail on the third.
-    CollectionAdminRequest.createShard(collectionName, "shard1")
-        .process(cluster.getSolrClient());
-
-    // Now we should have one replica on each Jetty, add another to reach maxShardsPerNode
-    CollectionAdminRequest.createShard(collectionName, "shard2")
-        .process(cluster.getSolrClient());
-
-    // Now fail to add the third as it should exceed maxShardsPerNode
-    Exception e = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createShard(collectionName, "shard3")
-          .process(cluster.getSolrClient());
-    });
-    assertTrue("Should have gotten the right error message back",
-        e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
-    List<String> nodes = getAllNodeNames(collectionName);
-
-    CollectionAdminRequest.createShard(collectionName, "shard4")
-        .setNodeSet(StringUtils.join(nodes, ","))
-        .process(cluster.getSolrClient());
-
-    // And just for yucks, ensure we fail the "regular" one again.
-    Exception e2 = expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createShard(collectionName, "shard5")
-          .process(cluster.getSolrClient());
-    });
-    assertTrue("Should have gotten the right error message back",
-        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-
-    // And finally, ensure that all the replicas we expect are present. We should have shards 1, 2 and 4 and each
-    // should have exactly two replicas
-    waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
-      return DocCollection.isFullyActive(n, c, 4, 2);
-    });
-    Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
-    assertEquals("There should be exaclty four slices", slices.size(), 4);
-    assertNotNull("shardstart should exist", slices.get("shardstart"));
-    assertNotNull("shard1 should exist", slices.get("shard1"));
-    assertNotNull("shard2 should exist", slices.get("shard2"));
-    assertNotNull("shard4 should exist", slices.get("shard4"));
-    assertEquals("Shardstart should have exactly 2 replicas", 2, slices.get("shardstart").getReplicas().size());
-    assertEquals("Shard1 should have exactly 2 replicas", 2, slices.get("shard1").getReplicas().size());
-    assertEquals("Shard2 should have exactly 2 replicas", 2, slices.get("shard2").getReplicas().size());
-    assertEquals("Shard4 should have exactly 2 replicas", 2, slices.get("shard4").getReplicas().size());
-
-  }
-
-  @Test
-  public void testDownedShards() throws Exception {
-    String collectionName = "TooManyReplicasWhenAddingDownedNode";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
-        .setMaxShardsPerNode(2)
-        .process(cluster.getSolrClient());
-
-    // Shut down a Jetty, I really don't care which
-    JettySolrRunner jetty = cluster.getRandomJetty(random());
-    String deadNode = jetty.getBaseUrl().toString();
-    cluster.stopJettySolrRunner(jetty);
-
-    try {
-
-      // Adding a replica on a dead node should fail
-      Exception e1 = expectThrows(Exception.class, () -> {
-        CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
-            .setNode(deadNode)
-            .process(cluster.getSolrClient());
-      });
-      assertTrue("Should have gotten a message about shard not currently active: " + e1.toString(),
-          e1.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
-
-      // Should also die if we just add a shard
-      Exception e2 = expectThrows(Exception.class, () -> {
-        CollectionAdminRequest.createShard(collectionName, "shard1")
-            .setNodeSet(deadNode)
-            .process(cluster.getSolrClient());
-      });
-
-      assertTrue("Should have gotten a message about shard not currently active: " + e2.toString(),
-          e2.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
-    }
-    finally {
-      cluster.startJettySolrRunner(jetty);
-    }
-  }
-
-  private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
-    DocCollection state = getCollectionState(collectionName);
-    return state.getReplicas().stream().map(Replica::getNodeName).distinct().collect(Collectors.toList());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
deleted file mode 100644
index c3dc44b..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIAsyncDistributedZkTest.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests the Cloud Collections API.
- */
-@Slow
-public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
-
-  private static final int MAX_TIMEOUT_SECONDS = 60;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(2)
-        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-        .configure();
-  }
-
-  @Test
-  public void testSolrJAPICalls() throws Exception {
-
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    RequestStatusState state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("Recreating a collection with the same should have failed.", RequestStatusState.FAILED, state);
-
-    state = CollectionAdminRequest.addReplicaToShard("testasynccollectioncreation", "shard1")
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("Add replica did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.splitShard("testasynccollectioncreation")
-        .setShardName("shard1")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS * 2);
-    assertEquals("Shard split did not complete. Last recorded state: " + state, RequestStatusState.COMPLETED, state);
-
-  }
-
-  @Test
-  public void testAsyncRequests() throws Exception {
-
-    final String collection = "testAsyncOperations";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    RequestStatusState state = CollectionAdminRequest.createCollection(collection,"conf1",1,1)
-        .setRouterName("implicit")
-        .setShards("shard1")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
-
-    //Add a few documents to shard1
-    int numDocs = TestUtil.nextInt(random(), 10, 100);
-    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
-    for (int i=0; i<numDocs; i++) {
-      SolrInputDocument doc = new SolrInputDocument();
-      doc.addField("id", i);
-      doc.addField("_route_", "shard1");
-      docs.add(doc);
-    }
-    client.add(collection, docs);
-    client.commit(collection);
-
-    SolrQuery query = new SolrQuery("*:*");
-    query.set("shards", "shard1");
-    assertEquals(numDocs, client.query(collection, query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.reloadCollection(collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("ReloadCollection did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.createShard(collection,"shard2")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
-
-    //Add a doc to shard2 to make sure shard2 was created properly
-    SolrInputDocument doc = new SolrInputDocument();
-    doc.addField("id", numDocs + 1);
-    doc.addField("_route_", "shard2");
-    client.add(collection, doc);
-    client.commit(collection);
-    query = new SolrQuery("*:*");
-    query.set("shards", "shard2");
-    assertEquals(1, client.query(collection, query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.deleteShard(collection,"shard2").processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteShard did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("AddReplica did not complete", RequestStatusState.COMPLETED, state);
-
-    // the cloud client's cluster-state watch may take a couple of seconds to reflect the new replica
-    Slice shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
-    int count = 0;
-    while (shard1.getReplicas().size() != 2) {
-      if (count++ > 1000) {
-        fail("2nd Replica not reflecting in the cluster state");
-      }
-      Thread.sleep(100);
-    }
-
-    state = CollectionAdminRequest.createAlias("myalias",collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("CreateAlias did not complete", RequestStatusState.COMPLETED, state);
-
-    query = new SolrQuery("*:*");
-    query.set("shards", "shard1");
-    assertEquals(numDocs, client.query("myalias", query).getResults().getNumFound());
-
-    state = CollectionAdminRequest.deleteAlias("myalias")
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteAlias did not complete", RequestStatusState.COMPLETED, state);
-
-    try {
-      client.query("myalias", query);
-      fail("Alias should not exist");
-    } catch (SolrException e) {
-      //expected
-    }
-
-    Replica replica = shard1.getReplicas().iterator().next();
-    for (String liveNode : client.getZkStateReader().getClusterState().getLiveNodes()) {
-      if (!replica.getNodeName().equals(liveNode)) {
-        state = new CollectionAdminRequest.MoveReplica(collection, replica.getName(), liveNode)
-            .processAndWait(client, MAX_TIMEOUT_SECONDS);
-        assertSame("MoveReplica did not complete", RequestStatusState.COMPLETED, state);
-        break;
-      }
-    }
-
-    shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
-    String replicaName = shard1.getReplicas().iterator().next().getName();
-    state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
-      .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteReplica did not complete", RequestStatusState.COMPLETED, state);
-
-    state = CollectionAdminRequest.deleteCollection(collection)
-        .processAndWait(client, MAX_TIMEOUT_SECONDS);
-    assertSame("DeleteCollection did not complete", RequestStatusState.COMPLETED, state);
-  }
-
-}

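For reference, the asynchronous pattern exercised by the test deleted above boils down to a small SolrJ idiom: submit the admin request with an async id and poll its status until a terminal state. A minimal sketch, assuming SolrJ 7.2+ and a cluster whose ZooKeeper is reachable at the illustrative address localhost:9983 (the class name, collection name, and config name are likewise placeholders):

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;

    public class AsyncCreateSketch {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          // processAndWait submits the request asynchronously and then polls
          // the request status until COMPLETED/FAILED or the timeout (seconds)
          RequestStatusState state = CollectionAdminRequest
              .createCollection("example", "conf1", 1, 1)
              .processAndWait(client, 60);
          if (state != RequestStatusState.COMPLETED) {
            throw new IllegalStateException("create did not complete: " + state);
          }
        }
      }
    }

The same processAndWait call applies to the other admin requests seen above (addReplicaToShard, splitShard, reloadCollection, and so on), which is what lets one test drive every operation through a single code path.
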
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
deleted file mode 100644
index 5615918..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ /dev/null
@@ -1,684 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import javax.management.MBeanServer;
-import javax.management.MBeanServerFactory;
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.lang.management.ManagementFactory;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.collect.ImmutableList;
-import org.apache.commons.io.IOUtils;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.CoreStatus;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkCoreNodeProps;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.CoreAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.SolrInfoBean.Category;
-import org.apache.solr.util.LogLevel;
-import org.apache.solr.util.TestInjection;
-import org.apache.solr.util.TimeOut;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-
-/**
- * Tests the Cloud Collections API.
- */
-@Slow
-public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @BeforeClass
-  public static void beforeCollectionsAPIDistributedZkTest() {
-    // we don't want this test to have zk timeouts
-    System.setProperty("zkClientTimeout", "240000");
-    TestInjection.randomDelayInCoreCreation = "true:20";
-    System.setProperty("validateAfterInactivity", "200");
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    String solrXml = IOUtils.toString(CollectionsAPIDistributedZkTest.class.getResourceAsStream("/solr/solr-jmxreporter.xml"), "UTF-8");
-    configureCluster(4)
-        .addConfig("conf", configset("cloud-minimal"))
-        .addConfig("conf2", configset("cloud-minimal-jmx"))
-        .withSolrXml(solrXml)
-        .configure();
-  }
-
-  @Before
-  public void clearCluster() throws Exception {
-    try {
-      cluster.deleteAllCollections();
-    } finally {
-      System.clearProperty("zkClientTimeout");
-    }
-  }
-
-  @Test
-  public void testCreationAndDeletion() throws Exception {
-
-    String collectionName = "created_and_deleted";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-                  .contains(collectionName));
-
-    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-        .contains(collectionName));
-
-    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
-
-
-  }
-
-  @Test
-  public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
-    
-    String collectionName = "out_of_sync_collection";
-
-    // manually create a collections zknode
-    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-
-    CollectionAdminRequest.deleteCollection(collectionName)
-        .process(cluster.getSolrClient());
-
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
-                  .contains(collectionName));
-    
-    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
-
-  }
-
-  @Test
-  public void deletePartiallyCreatedCollection() throws Exception {
-
-    final String collectionName = "halfdeletedcollection";
-
-    assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    String dataDir = createTempDir().toFile().getAbsolutePath();
-    // create a core that simulates something left over from a partially-deleted collection
-    assertTrue(CollectionAdminRequest
-        .addReplicaToShard(collectionName, "shard1")
-        .setDataDir(dataDir)
-        .process(cluster.getSolrClient()).isSuccess());
-
-    CollectionAdminRequest.deleteCollection(collectionName)
-        .process(cluster.getSolrClient());
-
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .process(cluster.getSolrClient());
-
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-  }
-
-  @Test
-  public void deleteCollectionOnlyInZk() throws Exception {
-
-    final String collectionName = "onlyinzk";
-
-    // create the collections node, but nothing else
-    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
-
-    // delete via API - should remove collections node
-    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-    
-    // now creating that collection should work
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
-        .process(cluster.getSolrClient());
-    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
-
-  }
-
-  @Test
-  public void testBadActionNames() throws Exception {
-
-    // try a bad action
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", "BADACTION");
-    String collectionName = "badactioncollection";
-    params.set("name", collectionName);
-    params.set("numShards", 2);
-    final QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testMissingRequiredParameters() {
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("numShards", 2);
-    // missing required collection parameter
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-  }
-
-  @Test
-  public void testTooManyReplicas() {
-
-    CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(req);
-    });
-
-  }
-
-  @Test
-  public void testMissingNumShards() {
-
-    // creating a collection without numShards should fail
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", "acollection");
-    params.set(REPLICATION_FACTOR, 10);
-    params.set("collection.configName", "conf");
-
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testZeroNumShards() {
-
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionAction.CREATE.toString());
-    params.set("name", "acollection");
-    params.set(REPLICATION_FACTOR, 10);
-    params.set("numShards", 0);
-    params.set("collection.configName", "conf");
-
-    final SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    expectThrows(Exception.class, () -> {
-      cluster.getSolrClient().request(request);
-    });
-
-  }
-
-  @Test
-  public void testCreateShouldFailOnExistingCore() throws Exception {
-    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
-        .setNode(cluster.getJettySolrRunner(0).getNodeName())
-        .setCoreName("halfcollection_shard1_replica_n1")
-        .process(cluster.getSolrClient()).isSuccess());
-
-    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
-        .setCreateNodeSet("")
-        .process(cluster.getSolrClient()).getStatus());
-    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
-        .setNode(cluster.getJettySolrRunner(1).getNodeName())
-        .setCoreName("halfcollection_shard1_replica_n1")
-        .process(cluster.getSolrClient()).isSuccess());
-
-    String nn1 = cluster.getJettySolrRunner(0).getNodeName();
-    String nn2 = cluster.getJettySolrRunner(1).getNodeName();
-
-    CollectionAdminResponse resp = CollectionAdminRequest.createCollection("halfcollection", "conf", 2, 1)
-        .setCreateNodeSet(nn1 + "," + nn2)
-        .process(cluster.getSolrClient());
-    
-    SimpleOrderedMap success = (SimpleOrderedMap) resp.getResponse().get("success");
-    SimpleOrderedMap failure = (SimpleOrderedMap) resp.getResponse().get("failure");
-
-    assertNotNull(resp.toString(), success);
-    assertNotNull(resp.toString(), failure);
-    
-    String val1 = success.getVal(0).toString();
-    String val2 = failure.getVal(0).toString();
-    assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
-  }
-
-  @Test
-  public void testNoConfigSetExist() throws Exception {
-
-    expectThrows(Exception.class, () -> {
-      CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
-          .process(cluster.getSolrClient());
-    });
-
-    TimeUnit.MILLISECONDS.sleep(1000);
-    // the create request failed, so the collection should not have been created
-    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
-    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
-  }
-
-  @Test
-  public void testCoresAreDistributedAcrossNodes() throws Exception {
-
-    CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
-        .process(cluster.getSolrClient());
-
-    Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
-
-    List<String> createNodeList = new ArrayList<>();
-    createNodeList.addAll(liveNodes);
-
-    DocCollection collection = getCollectionState("nodes_used_collection");
-    for (Slice slice : collection.getSlices()) {
-      for (Replica replica : slice.getReplicas()) {
-        createNodeList.remove(replica.getNodeName());
-      }
-    }
-
-    assertEquals(createNodeList.toString(), 0, createNodeList.size());
-
-  }
-
-  @Test
-  public void testDeleteNonExistentCollection() throws Exception {
-
-    SolrException e = expectThrows(SolrException.class, () -> {
-      CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
-    });
-
-    // create another collection should still work
-    CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
-        .process(cluster.getSolrClient());
-    waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
-        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
-  }
-
-  @Test
-  public void testSpecificConfigsets() throws Exception {
-    CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
-    byte[] data = zkClient().getData(ZkStateReader.COLLECTIONS_ZKNODE + "/" + "withconfigset2", null, null, true);
-    assertNotNull(data);
-    ZkNodeProps props = ZkNodeProps.load(data);
-    String configName = props.getStr(ZkController.CONFIGNAME_PROP);
-    assertEquals("conf2", configName);
-  }
-
-  @Test
-  public void testMaxNodesPerShard() throws Exception {
-
-    // test maxShardsPerNode
-    int numLiveNodes = cluster.getJettySolrRunners().size();
-    int numShards = (numLiveNodes/2) + 1;
-    int replicationFactor = 2;
-    int maxShardsPerNode = 1;
-
-    SolrException e = expectThrows(SolrException.class, () -> {
-      CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
-          .process(cluster.getSolrClient());
-    });
-
-  }
-
-  @Test
-  public void testCreateNodeSet() throws Exception {
-
-    JettySolrRunner jetty1 = cluster.getRandomJetty(random());
-    JettySolrRunner jetty2 = cluster.getRandomJetty(random());
-
-    List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
-
-    CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
-        .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
-        .process(cluster.getSolrClient());
-
-    DocCollection collectionState = getCollectionState("nodeset_collection");
-    for (Replica replica : collectionState.getReplicas()) {
-      String replicaUrl = replica.getCoreUrl();
-      boolean matchingJetty = false;
-      for (String jettyUrl : baseUrls) {
-        if (replicaUrl.startsWith(jettyUrl))
-          matchingJetty = true;
-      }
-      if (matchingJetty == false)
-        fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
-    }
-
-  }
-
-  @Test
-  public void testCollectionsAPI() throws Exception {
-
-    // create new collections rapid fire
-    int cnt = random().nextInt(TEST_NIGHTLY ? 3 : 1) + 1;
-    CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
-
-    for (int i = 0; i < cnt; i++) {
-
-      int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
-      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
-      int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
-
-      createRequests[i]
-          = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
-          .setMaxShardsPerNode(maxShardsPerNode);
-      createRequests[i].processAsync(cluster.getSolrClient());
-    }
-
-    for (int i = 0; i < cnt; i++) {
-      String collectionName = "awhollynewcollection_" + i;
-      final int j = i;
-      waitForState("Expected to see collection " + collectionName, collectionName,
-          (n, c) -> {
-            CollectionAdminRequest.Create req = createRequests[j];
-            return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
-          });
-    }
-
-    cluster.injectChaos(random());
-
-    for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
-      checkInstanceDirs(cluster.getJettySolrRunner(i));
-    }
-
-    String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
-
-    new UpdateRequest()
-        .add("id", "6")
-        .add("id", "7")
-        .add("id", "8")
-        .commit(cluster.getSolrClient(), collectionName);
-    TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-    while (!timeOut.hasTimedOut()) {
-      try {
-        long numFound = cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound();
-        assertEquals(3, numFound);
-        break;
-      } catch (Exception e) {
-        // Query node can have stale clusterstate
-        log.info("Error when query " + collectionName, e);
-        Thread.sleep(500);
-      }
-    }
-    if (timeOut.hasTimedOut()) {
-      fail("Timeout on query " + collectionName);
-    }
-
-    checkNoTwoShardsUseTheSameIndexDir();
-  }
-
-  @Test
-  public void testCollectionReload() throws Exception {
-
-    final String collectionName = "reloaded_collection";
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
-
-    // get core open times
-    Map<String, Long> urlToTimeBefore = new HashMap<>();
-    collectStartTimes(collectionName, urlToTimeBefore);
-    assertTrue(urlToTimeBefore.size() > 0);
-
-    CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
-
-    // reloads may take a short while
-    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
-    assertTrue("some core start times did not change on reload", allTimesAreCorrect);
-  }
-
-  private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
-    CoreContainer cores = jetty.getCoreContainer();
-    Collection<SolrCore> theCores = cores.getCores();
-    for (SolrCore core : theCores) {
-
-      // look for core props file
-      Path instancedir = (Path) core.getResourceLoader().getInstancePath();
-      assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
-
-      Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
-
-      assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
-
-    }
-  }
-
-  private boolean waitForReloads(String collectionName, Map<String,Long> urlToTimeBefore) throws SolrServerException, IOException {
-
-
-    TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-
-    boolean allTimesAreCorrect = false;
-    while (! timeout.hasTimedOut()) {
-      Map<String,Long> urlToTimeAfter = new HashMap<>();
-      collectStartTimes(collectionName, urlToTimeAfter);
-      
-      boolean retry = false;
-      Set<Entry<String,Long>> entries = urlToTimeBefore.entrySet();
-      for (Entry<String,Long> entry : entries) {
-        Long beforeTime = entry.getValue();
-        Long afterTime = urlToTimeAfter.get(entry.getKey());
-        assertNotNull(afterTime);
-        if (afterTime <= beforeTime) {
-          retry = true;
-          break;
-        }
-
-      }
-      if (!retry) {
-        allTimesAreCorrect = true;
-        break;
-      }
-    }
-    return allTimesAreCorrect;
-  }
-
-  private void collectStartTimes(String collectionName, Map<String,Long> urlToTime)
-      throws SolrServerException, IOException {
-
-    DocCollection collectionState = getCollectionState(collectionName);
-    if (collectionState != null) {
-      for (Slice shard : collectionState) {
-        for (Replica replica : shard) {
-          ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
-          CoreStatus coreStatus;
-          try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
-            coreStatus = CoreAdminRequest.getCoreStatus(coreProps.getCoreName(), false, server);
-          }
-          long before = coreStatus.getCoreStartTime().getTime();
-          urlToTime.put(coreProps.getCoreUrl(), before);
-        }
-      }
-    } else {
-      throw new IllegalArgumentException("Could not find collection " + collectionName);
-    }
-  }
-  
-  private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
-    Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
-    
-    List<MBeanServer> servers = new LinkedList<>();
-    servers.add(ManagementFactory.getPlatformMBeanServer());
-    servers.addAll(MBeanServerFactory.findMBeanServer(null));
-    for (final MBeanServer server : servers) {
-      Set<ObjectName> mbeans = new HashSet<>();
-      mbeans.addAll(server.queryNames(null, null));
-      for (final ObjectName mbean : mbeans) {
-
-        try {
-          Map<String, String> props = mbean.getKeyPropertyList();
-          String category = props.get("category");
-          String name = props.get("name");
-          if ((category != null && category.toString().equals(Category.CORE.toString())) &&
-              (name != null && name.equals("indexDir"))) {
-            String indexDir = server.getAttribute(mbean, "Value").toString();
-            String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
-            if (!indexDirToShardNamesMap.containsKey(indexDir)) {
-              indexDirToShardNamesMap.put(indexDir.toString(), new HashSet<>());
-            }
-            indexDirToShardNamesMap.get(indexDir.toString()).add(key);
-          }
-        } catch (Exception e) {
-          // ignore, just continue - probably a "Value" attribute
-          // not found
-        }
-      }
-    }
-    
-    assertTrue(
-        "Something is broken in the assert for no shards using the same indexDir - probably something was changed in the attributes published in the MBean of "
-            + SolrCore.class.getSimpleName() + " : " + indexDirToShardNamesMap,
-        indexDirToShardNamesMap.size() > 0);
-    for (Entry<String,Set<String>> entry : indexDirToShardNamesMap.entrySet()) {
-      if (entry.getValue().size() > 1) {
-        fail("We have shards using the same indexDir. E.g. shards "
-            + entry.getValue().toString() + " all use indexDir "
-            + entry.getKey());
-      }
-    }
-
-  }
-
-  @Test
-  @LogLevel("org.apache.solr.cloud=DEBUG")
-  public void addReplicaTest() throws Exception {
-    String collectionName = "addReplicaColl";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
-        .setMaxShardsPerNode(4)
-        .process(cluster.getSolrClient());
-
-    ArrayList<String> nodeList
-        = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
-    Collections.shuffle(nodeList, random());
-
-    CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .setNode(nodeList.get(0))
-        .process(cluster.getSolrClient());
-    Replica newReplica = grabNewReplica(response, getCollectionState(collectionName));
-
-    assertEquals("Replica should be created on the right node",
-        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
-        newReplica.getStr(ZkStateReader.BASE_URL_PROP));
-
-    Path instancePath = createTempDir();
-    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
-        .process(cluster.getSolrClient());
-    newReplica = grabNewReplica(response, getCollectionState(collectionName));
-    assertNotNull(newReplica);
-
-    try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
-      CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
-      NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
-      String instanceDirStr = (String) coreStatus.get("instanceDir");
-      assertEquals(instanceDirStr, instancePath.toString());
-    }
-
-    //Test to make sure we can't create another replica with an existing core_name of that collection
-    String coreName = newReplica.getStr(CORE_NAME_PROP);
-    SolrException e = expectThrows(SolrException.class, () -> {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", "addreplica");
-      params.set("collection", collectionName);
-      params.set("shard", "shard1");
-      params.set("name", coreName);
-      QueryRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-      cluster.getSolrClient().request(request);
-    });
-
-    assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
-
-    // Check that specifying property.name works. DO NOT remove this when the "name" property is
-    // deprecated for ADDREPLICA; "property.name" is distinct from "name". See SOLR-7132
-    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
-        .withProperty(CoreAdminParams.NAME, "propertyDotName")
-        .process(cluster.getSolrClient());
-
-    newReplica = grabNewReplica(response, getCollectionState(collectionName));
-    assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
-
-  }
-
-  private Replica grabNewReplica(CollectionAdminResponse response, DocCollection docCollection) {
-    String replicaName = response.getCollectionCoresStatus().keySet().iterator().next();
-    Optional<Replica> optional = docCollection.getReplicas().stream()
-        .filter(replica -> replicaName.equals(replica.getCoreName()))
-        .findAny();
-    if (optional.isPresent()) {
-      return optional.get();
-    }
-    throw new AssertionError("Can not find " + replicaName + " from " + docCollection);
-  }
-
-}

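The synchronous flavor used throughout the test deleted above is simpler still: process() blocks until the Overseer has handled the request, and listCollections() reports what the cluster currently knows. A sketch under the same placeholder assumptions as before (ZooKeeper address, collection and config names are illustrative):

    import java.util.Collections;
    import java.util.List;
    import java.util.Optional;

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateListDeleteSketch {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          // create 2 shards x 1 replica from the uploaded "conf" configset
          CollectionAdminRequest.createCollection("demo", "conf", 2, 1).process(client);

          // LIST names every collection the cluster currently knows about
          List<String> collections = CollectionAdminRequest.listCollections(client);
          System.out.println("collections: " + collections);

          CollectionAdminRequest.deleteCollection("demo").process(client);
        }
      }
    }
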
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
deleted file mode 100644
index 57d38cd..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ConcurrentDeleteAndCreateCollectionTest.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.nio.file.Path;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.lucene.util.LuceneTestCase.Nightly;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Nightly
-public class ConcurrentDeleteAndCreateCollectionTest extends SolrTestCaseJ4 {
-  
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
-  private MiniSolrCloudCluster solrCluster;
-  
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    solrCluster = new MiniSolrCloudCluster(1, createTempDir(), buildJettyConfig("/solr"));
-  }
-  
-  @Override
-  @After
-  public void tearDown() throws Exception {
-    solrCluster.shutdown();
-    super.tearDown();
-  }
-  
-  public void testConcurrentCreateAndDeleteDoesNotFail() {
-    final AtomicReference<Exception> failure = new AtomicReference<>();
-    final int timeToRunSec = 30;
-    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[10];
-    for (int i = 0; i < threads.length; i++) {
-      final String collectionName = "collection" + i;
-      uploadConfig(configset("configset-2"), collectionName);
-      final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
-      final SolrClient solrClient = getHttpSolrClient(baseUrl);
-      threads[i] = new CreateDeleteSearchCollectionThread("create-delete-search-" + i, collectionName, collectionName, 
-          timeToRunSec, solrClient, failure);
-    }
-    
-    startAll(threads);
-    joinAll(threads);
-    
-    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
-  }
-  
-  public void testConcurrentCreateAndDeleteOverTheSameConfig() {
-    final String configName = "testconfig";
-    uploadConfig(configset("configset-2"), configName); // upload config once, to be used by all collections
-    final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
-    final AtomicReference<Exception> failure = new AtomicReference<>();
-    final int timeToRunSec = 30;
-    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[2];
-    for (int i = 0; i < threads.length; i++) {
-      final String collectionName = "collection" + i;
-      final SolrClient solrClient = getHttpSolrClient(baseUrl);
-      threads[i] = new CreateDeleteCollectionThread("create-delete-" + i, collectionName, configName,
-                                                    timeToRunSec, solrClient, failure);
-    }
-
-    startAll(threads);
-    joinAll(threads);
-
-    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
-  }
-  
-  private void uploadConfig(Path configDir, String configName) {
-    try {
-      solrCluster.uploadConfigSet(configDir, configName);
-    } catch (IOException | KeeperException | InterruptedException e) {
-      throw new RuntimeException(e);
-    }
-  }
-  
-  private void joinAll(final CreateDeleteCollectionThread[] threads) {
-    for (CreateDeleteCollectionThread t : threads) {
-      try {
-        t.joinAndClose();
-      } catch (InterruptedException e) {
-        Thread.interrupted();
-        throw new RuntimeException(e);
-      }
-    }
-  }
-  
-  private void startAll(final Thread[] threads) {
-    for (Thread t : threads) {
-      t.start();
-    }
-  }
-  
-  private static class CreateDeleteCollectionThread extends Thread {
-    protected final String collectionName;
-    protected final String configName;
-    protected final long timeToRunSec;
-    protected final SolrClient solrClient;
-    protected final AtomicReference<Exception> failure;
-    
-    public CreateDeleteCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
-        SolrClient solrClient, AtomicReference<Exception> failure) {
-      super(name);
-      this.collectionName = collectionName;
-      this.timeToRunSec = timeToRunSec;
-      this.solrClient = solrClient;
-      this.failure = failure;
-      this.configName = configName;
-    }
-    
-    @Override
-    public void run() {
-      final TimeOut timeout = new TimeOut(timeToRunSec, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-      while (! timeout.hasTimedOut() && failure.get() == null) {
-        doWork();
-      }
-    }
-    
-    protected void doWork() {
-      createCollection();
-      deleteCollection();
-    }
-    
-    protected void addFailure(Exception e) {
-      log.error("Add Failure", e);
-      synchronized (failure) {
-        if (failure.get() != null) {
-          failure.get().addSuppressed(e);
-        } else {
-          failure.set(e);
-        }
-      }
-    }
-    
-    private void createCollection() {
-      try {
-        final CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName,configName,1,1)
-                .process(solrClient);
-        if (response.getStatus() != 0) {
-          addFailure(new RuntimeException("failed to create collection " + collectionName));
-        }
-      } catch (Exception e) {
-        addFailure(e);
-      }
-      
-    }
-    
-    private void deleteCollection() {
-      try {
-        final CollectionAdminRequest.Delete deleteCollectionRequest
-          = CollectionAdminRequest.deleteCollection(collectionName);
-        final CollectionAdminResponse response = deleteCollectionRequest.process(solrClient);
-        if (response.getStatus() != 0) {
-          addFailure(new RuntimeException("failed to delete collection " + collectionName));
-        }
-      } catch (Exception e) {
-        addFailure(e);
-      }
-    }
-    
-    public void joinAndClose() throws InterruptedException {
-      try {
-        super.join(60000);
-      } finally {
-        IOUtils.closeQuietly(solrClient);
-      }
-    }
-  }
-  
-  private static class CreateDeleteSearchCollectionThread extends CreateDeleteCollectionThread {
-
-    public CreateDeleteSearchCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
-        SolrClient solrClient, AtomicReference<Exception> failure) {
-      super(name, collectionName, configName, timeToRunSec, solrClient, failure);
-    }
-    
-    @Override
-    protected void doWork() {
-      super.doWork();
-      searchNonExistingCollection();
-    }
-    
-    private void searchNonExistingCollection() {
-      try {
-        solrClient.query(collectionName, new SolrQuery("*"));
-      } catch (Exception e) {
-        if (!e.getMessage().contains("not found") && !e.getMessage().contains("Can not find")) {
-          addFailure(e);
-        }
-      }
-    }
-    
-  }
-  
-}

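All the worker threads in the test deleted above share one skeleton: a loop bounded by Solr's TimeOut utility rather than by an iteration count, so the stress run lasts a fixed wall-clock interval no matter how fast each create/delete round completes. Stripped to its core (a sketch; the 30-second budget is arbitrary):

    import java.util.concurrent.TimeUnit;

    import org.apache.solr.common.util.TimeSource;
    import org.apache.solr.util.TimeOut;

    public class TimedLoopSketch {
      public static void main(String[] args) {
        // hasTimedOut() flips to true once 30 seconds of NANO_TIME have elapsed
        TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
        int rounds = 0;
        while (!timeout.hasTimedOut()) {
          rounds++; // the real test creates, deletes, and queries a collection here
        }
        System.out.println("completed " + rounds + " rounds");
      }
    }
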
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
deleted file mode 100644
index 63a3272..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.util.Map;
-
-import org.apache.lucene.util.TestUtil;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
-import static org.apache.solr.common.params.ShardParams._ROUTE_;
-
-/**
- * Tests the Custom Sharding API.
- */
-public class CustomCollectionTest extends SolrCloudTestCase {
-
-  private static final int NODE_COUNT = 4;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(NODE_COUNT)
-        .addConfig("conf", configset("cloud-dynamic"))
-        .configure();
-  }
-
-  @Before
-  public void ensureClusterEmpty() throws Exception {
-    cluster.deleteAllCollections();
-  }
-
-  @Test
-  public void testCustomCollectionsAPI() throws Exception {
-
-    final String collection = "implicitcoll";
-    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
-    int numShards = 3;
-    int maxShardsPerNode = (((numShards + 1) * replicationFactor) / NODE_COUNT) + 1;
-
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .process(cluster.getSolrClient());
-
-    DocCollection coll = getCollectionState(collection);
-    assertEquals("implicit", ((Map) coll.get(DOC_ROUTER)).get("name"));
-    assertNotNull(coll.getStr(REPLICATION_FACTOR));
-    assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
-    assertNull("A shard of a Collection configured with implicit router must have null range",
-        coll.getSlice("a").getRange());
-
-    new UpdateRequest()
-        .add("id", "6")
-        .add("id", "7")
-        .add("id", "8")
-        .withRoute("a")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-
-    cluster.getSolrClient().deleteByQuery(collection, "*:*");
-    cluster.getSolrClient().commit(collection, true, true);
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-
-    new UpdateRequest()
-        .add("id", "9")
-        .add("id", "10")
-        .add("id", "11")
-        .withRoute("c")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
-
-    //Testing CREATESHARD
-    CollectionAdminRequest.createShard(collection, "x")
-        .process(cluster.getSolrClient());
-    waitForState("Expected shard 'x' to be active", collection, (n, c) -> {
-      if (c.getSlice("x") == null)
-        return false;
-      for (Replica r : c.getSlice("x")) {
-        if (r.getState() != Replica.State.ACTIVE)
-          return false;
-      }
-      return true;
-    });
-
-    new UpdateRequest()
-        .add("id", "66", _ROUTE_, "x")
-        .commit(cluster.getSolrClient(), collection);
-    // TODO - the local state is cached and causes the request to fail with 'unknown shard'
-    // assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "x")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testRouteFieldForImplicitRouter() throws Exception {
-
-    int numShards = 4;
-    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
-    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
-    String shard_fld = "shard_s";
-
-    final String collection = "withShardField";
-
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .setRouterField(shard_fld)
-        .process(cluster.getSolrClient());
-
-    new UpdateRequest()
-        .add("id", "6", shard_fld, "a")
-        .add("id", "7", shard_fld, "a")
-        .add("id", "8", shard_fld, "b")
-        .commit(cluster.getSolrClient(), collection);
-
-    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(2, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testRouteFieldForHashRouter()throws Exception{
-    String collectionName = "routeFieldColl";
-    int numShards = 4;
-    int replicationFactor = 2;
-    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
-    String shard_fld = "shard_s";
-
-    CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
-        .setMaxShardsPerNode(maxShardsPerNode)
-        .setRouterField(shard_fld)
-        .process(cluster.getSolrClient());
-
-    new UpdateRequest()
-        .add("id", "6", shard_fld, "a")
-        .add("id", "7", shard_fld, "a")
-        .add("id", "8", shard_fld, "b")
-        .commit(cluster.getSolrClient(), collectionName);
-
-    assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-    assertEquals(2, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
-    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
-    assertEquals(0, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
-
-
-    cluster.getSolrClient().deleteByQuery(collectionName, "*:*");
-    cluster.getSolrClient().commit(collectionName);
-
-    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "100", shard_fld, "c!doc1"));
-    cluster.getSolrClient().commit(collectionName);
-    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c!")).getResults().getNumFound());
-
-  }
-
-  @Test
-  public void testCreateShardRepFactor() throws Exception  {
-    final String collectionName = "testCreateShardRepFactor";
-    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
-        .process(cluster.getSolrClient());
-
-    CollectionAdminRequest.createShard(collectionName, "x")
-        .process(cluster.getSolrClient());
-
-    waitForState("Not enough active replicas in shard 'x'", collectionName, (n, c) -> {
-      return c.getSlice("x").getReplicas().size() == 1;
-    });
-
-  }
-
-}

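The implicit router exercised above routes by an explicit shard label instead of a hash of the id: shards are named at creation time, each document carries its target in _route_ (or a configured router field), and a query can be pinned to one shard with the _route_ parameter. A sketch under the usual placeholder assumptions (ZooKeeper at localhost:9983, an uploaded configset named "conf"):

    import java.util.Collections;
    import java.util.Optional;

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.common.SolrInputDocument;

    public class ImplicitRouterSketch {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
          // shards are named up front; there are no hash ranges to split over
          CollectionAdminRequest
              .createCollectionWithImplicitRouter("routed", "conf", "a,b,c", 1)
              .process(client);

          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "1");
          doc.addField("_route_", "a"); // this document lands on shard "a"
          client.add("routed", doc);
          client.commit("routed");

          // the _route_ parameter restricts the query to the named shard
          long hits = client.query("routed",
              new SolrQuery("*:*").setParam("_route_", "a")).getResults().getNumFound();
          System.out.println("docs on shard a: " + hits);
        }
      }
    }
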
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 8a46808..2775a0c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -17,8 +17,18 @@
 package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.Set;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
@@ -32,6 +42,7 @@ import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.cloud.Overseer.LeaderStatus;
 import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.DocRouter;
@@ -65,7 +76,18 @@ import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.isNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
 
@@ -572,7 +594,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
       }
     }
     
-    if (random().nextBoolean()) Collections.shuffle(createNodeList, OverseerCollectionMessageHandler.RANDOM);
+    if (random().nextBoolean()) Collections.shuffle(createNodeList, random());
 
     underTest = new OverseerCollectionConfigSetProcessorToBeTested(zkStateReaderMock,
         "1234", shardHandlerFactoryMock, ADMIN_PATH, workQueueMock, runningMapMock,

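The one-line change above swaps the handler's shared RANDOM for the test framework's seeded random(); besides decoupling the test from the relocated class, it keeps the shuffle reproducible from the test seed, so a failing node order can be replayed exactly. The underlying idea in isolation (a standalone sketch; the seed 42 and node names are arbitrary):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    public class SeededShuffleSketch {
      public static void main(String[] args) {
        List<String> nodes = new ArrayList<>(Arrays.asList("node1", "node2", "node3"));
        // a fixed seed makes the shuffle order identical on every run,
        // which is exactly what a reproducible test needs
        Collections.shuffle(nodes, new Random(42L));
        System.out.println(nodes);
      }
    }
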
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
index 54b66a0..9a86912 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTaskQueueTest.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.client.solrj.response.SolrResponseBase;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CommonAdminParams;
 import org.apache.solr.common.params.CommonParams;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java b/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
deleted file mode 100644
index a3fbb32..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ReplicaPropertiesBase.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-// Collect useful operations for testing assigning properties to individual replicas
-// Could probably expand this to do something creative with getting random slices
-// and shards, but for now this will do.
-public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
-
-  public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
-    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    for (int idx = 0; idx < paramsIn.length; idx += 2) {
-      params.set(paramsIn[idx], paramsIn[idx + 1]);
-    }
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    return client.request(request);
-  }
-
-  public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
-                                String property)
-      throws KeeperException, InterruptedException {
-    ClusterState clusterState = null;
-    Replica replica = null;
-    for (int idx = 0; idx < 300; ++idx) {
-      clusterState = client.getZkStateReader().getClusterState();
-      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
-      if (replica == null) {
-        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
-      }
-      if (StringUtils.isBlank(replica.getProperty(property))) return;
-      Thread.sleep(100);
-    }
-    fail("Property " + property + " not set correctly for collection/replica pair: " +
-        collectionName + "/" + replicaName + ". Replica props: " + replica.getProperties().toString() +
-        ". Cluster state is " + clusterState.toString());
-
-  }
-
-  // Verify that the named replica in the collection eventually reports the expected property value.
-  public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
-                         String replicaName, String property, String val)
-      throws InterruptedException, KeeperException {
-    Replica replica = null;
-    ClusterState clusterState = null;
-
-    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
-      clusterState = client.getZkStateReader().getClusterState();
-      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
-      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
-      if (replica == null) {
-        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
-      }
-      if (StringUtils.equals(val, replica.getProperty(property))) return;
-      Thread.sleep(100);
-    }
-
-    fail("Property '" + property + "' with value " + replica.getProperty(property) +
-        " not set correctly for collection/replica pair: " + collectionName + "/" + replicaName + " property map is " +
-        replica.getProperties().toString() + ".");
-
-  }
-
-  // Verify that
-  // 1> the property is set on exactly one replica in each slice, and
-  // 2> the property is balanced evenly across all the nodes hosting the collection
-  public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
-                                    String property) throws KeeperException, InterruptedException {
-    verifyUnique(client, collectionName, property, true);
-  }
-
-  public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
-                            String property) throws KeeperException, InterruptedException {
-    verifyUnique(client, collectionName, property, false);
-  }
-
-  public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
-      throws KeeperException, InterruptedException {
-
-    DocCollection col = null;
-    for (int idx = 0; idx < 300; ++idx) {
-      ClusterState clusterState = client.getZkStateReader().getClusterState();
-
-      col = clusterState.getCollection(collectionName);
-      if (col == null) {
-        fail("Could not find collection " + collectionName);
-      }
-      Map<String, Integer> counts = new HashMap<>();
-      Set<String> uniqueNodes = new HashSet<>();
-      boolean allSlicesHaveProp = true;
-      boolean badSlice = false;
-      for (Slice slice : col.getSlices()) {
-        boolean thisSliceHasProp = false;
-        int propCount = 0;
-        for (Replica replica : slice.getReplicas()) {
-          uniqueNodes.add(replica.getNodeName());
-          String propVal = replica.getProperty(property);
-          if (StringUtils.isNotBlank(propVal)) {
-            ++propCount;
-            if (counts.containsKey(replica.getNodeName()) == false) {
-              counts.put(replica.getNodeName(), 0);
-            }
-            int count = counts.get(replica.getNodeName());
-            thisSliceHasProp = true;
-            counts.put(replica.getNodeName(), count + 1);
-          }
-        }
-        badSlice = (propCount > 1) ? true : badSlice;
-        allSlicesHaveProp = allSlicesHaveProp ? thisSliceHasProp : allSlicesHaveProp;
-      }
-      if (balanced == false && badSlice == false) {
-        return;
-      }
-      if (allSlicesHaveProp && balanced) {
-        // Check that the properties are evenly distributed.
-        int minProps = col.getSlices().size() / uniqueNodes.size();
-        int maxProps = minProps;
-
-        if (col.getSlices().size() % uniqueNodes.size() > 0) {
-          ++maxProps;
-        }
-        boolean doSleep = false;
-        for (Map.Entry<String, Integer> ent : counts.entrySet()) {
-          if (ent.getValue() != minProps && ent.getValue() != maxProps) {
-            doSleep = true;
-          }
-        }
-
-        if (doSleep == false) {
-          assertTrue("We really shouldn't be calling this if there is no node with the property " + property,
-              counts.size() > 0);
-          return;
-        }
-      }
-      Thread.sleep(100);
-    }
-    fail("Collection " + collectionName + " does not have roles evenly distributed. Collection is: " + col.toString());
-  }
-
-}

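For context, the helper being removed here drove the Collections API directly from flat key/value pairs. A minimal sketch of the kind of ADDREPLICAPROP call doPropertyAction() would build (the collection, shard, and replica names are illustrative):

    NamedList<Object> result = ReplicaPropertiesBase.doPropertyAction(client,
        "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
        "collection", "collection1",
        "shard", "shard1",
        "replica", "core_node1",
        "property", "preferredLeader",
        "property.value", "true");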

[03/15] lucene-solr:master: SOLR-11817: Move Collections API classes to its own package

Posted by va...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
new file mode 100644
index 0000000..eff0d8e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIAsyncDistributedZkTest.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests the Cloud Collections API.
+ */
+@Slow
+public class CollectionsAPIAsyncDistributedZkTest extends SolrCloudTestCase {
+
+  private static final int MAX_TIMEOUT_SECONDS = 60;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+        .configure();
+  }
+
+  @Test
+  public void testSolrJAPICalls() throws Exception {
+
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    RequestStatusState state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("Recreating a collection with the same should have failed.", RequestStatusState.FAILED, state);
+
+    state = CollectionAdminRequest.addReplicaToShard("testasynccollectioncreation", "shard1")
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("Add replica did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.splitShard("testasynccollectioncreation")
+        .setShardName("shard1")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS * 2);
+    assertEquals("Shard split did not complete. Last recorded state: " + state, RequestStatusState.COMPLETED, state);
+
+  }
+
+  @Test
+  public void testAsyncRequests() throws Exception {
+
+    final String collection = "testAsyncOperations";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    RequestStatusState state = CollectionAdminRequest.createCollection(collection,"conf1",1,1)
+        .setRouterName("implicit")
+        .setShards("shard1")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);
+
+    //Add a few documents to shard1
+    int numDocs = TestUtil.nextInt(random(), 10, 100);
+    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
+    for (int i=0; i<numDocs; i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", i);
+      doc.addField("_route_", "shard1");
+      docs.add(doc);
+    }
+    client.add(collection, docs);
+    client.commit(collection);
+
+    SolrQuery query = new SolrQuery("*:*");
+    query.set("shards", "shard1");
+    assertEquals(numDocs, client.query(collection, query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.reloadCollection(collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("ReloadCollection did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.createShard(collection,"shard2")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateShard did not complete", RequestStatusState.COMPLETED, state);
+
+    //Add a doc to shard2 to make sure shard2 was created properly
+    SolrInputDocument doc = new SolrInputDocument();
+    doc.addField("id", numDocs + 1);
+    doc.addField("_route_", "shard2");
+    client.add(collection, doc);
+    client.commit(collection);
+    query = new SolrQuery("*:*");
+    query.set("shards", "shard2");
+    assertEquals(1, client.query(collection, query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.deleteShard(collection,"shard2").processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteShard did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("AddReplica did not complete", RequestStatusState.COMPLETED, state);
+
+    // the cloudClient watch might take a couple of seconds to reflect the new replica
+    Slice shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
+    int count = 0;
+    while (shard1.getReplicas().size() != 2) {
+      if (count++ > 1000) {
+        fail("2nd Replica not reflecting in the cluster state");
+      }
+      Thread.sleep(100);
+    }
+
+    state = CollectionAdminRequest.createAlias("myalias",collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("CreateAlias did not complete", RequestStatusState.COMPLETED, state);
+
+    query = new SolrQuery("*:*");
+    query.set("shards", "shard1");
+    assertEquals(numDocs, client.query("myalias", query).getResults().getNumFound());
+
+    state = CollectionAdminRequest.deleteAlias("myalias")
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteAlias did not complete", RequestStatusState.COMPLETED, state);
+
+    try {
+      client.query("myalias", query);
+      fail("Alias should not exist");
+    } catch (SolrException e) {
+      //expected
+    }
+
+    Replica replica = shard1.getReplicas().iterator().next();
+    for (String liveNode : client.getZkStateReader().getClusterState().getLiveNodes()) {
+      if (!replica.getNodeName().equals(liveNode)) {
+        state = new CollectionAdminRequest.MoveReplica(collection, replica.getName(), liveNode)
+            .processAndWait(client, MAX_TIMEOUT_SECONDS);
+        assertSame("MoveReplica did not complete", RequestStatusState.COMPLETED, state);
+        break;
+      }
+    }
+
+    shard1 = client.getZkStateReader().getClusterState().getCollection(collection).getSlice("shard1");
+    String replicaName = shard1.getReplicas().iterator().next().getName();
+    state = CollectionAdminRequest.deleteReplica(collection, "shard1", replicaName)
+      .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteReplica did not complete", RequestStatusState.COMPLETED, state);
+
+    state = CollectionAdminRequest.deleteCollection(collection)
+        .processAndWait(client, MAX_TIMEOUT_SECONDS);
+    assertSame("DeleteCollection did not complete", RequestStatusState.COMPLETED, state);
+  }
+
+}

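The processAndWait() calls above wrap Solr's lower-level async protocol: submit the operation with an async id, then poll REQUESTSTATUS until a terminal state is reached. A rough sketch of that underlying flow (the async id is illustrative):

    String requestId = CollectionAdminRequest.createCollection("testasynccollectioncreation", "conf1", 1, 1)
        .processAsync("my-async-id", client);
    RequestStatusState state;
    do {
      Thread.sleep(500); // poll until the Overseer reports a terminal state
      state = CollectionAdminRequest.requestStatus(requestId)
          .process(client).getRequestStatus();
    } while (state == RequestStatusState.SUBMITTED || state == RequestStatusState.RUNNING);
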
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
new file mode 100644
index 0000000..213e554
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionsAPIDistributedZkTest.java
@@ -0,0 +1,686 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import javax.management.MBeanServer;
+import javax.management.MBeanServerFactory;
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.lang.management.ManagementFactory;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.commons.io.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkCoreNodeProps;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrInfoBean.Category;
+import org.apache.solr.util.LogLevel;
+import org.apache.solr.util.TestInjection;
+import org.apache.solr.util.TimeOut;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+
+/**
+ * Tests the Cloud Collections API.
+ */
+@Slow
+public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void beforeCollectionsAPIDistributedZkTest() {
+    // we don't want this test to have zk timeouts
+    System.setProperty("zkClientTimeout", "240000");
+    TestInjection.randomDelayInCoreCreation = "true:20";
+    System.setProperty("validateAfterInactivity", "200");
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    String solrXml = IOUtils.toString(CollectionsAPIDistributedZkTest.class.getResourceAsStream("/solr/solr-jmxreporter.xml"), "UTF-8");
+    configureCluster(4)
+        .addConfig("conf", configset("cloud-minimal"))
+        .addConfig("conf2", configset("cloud-minimal-jmx"))
+        .withSolrXml(solrXml)
+        .configure();
+  }
+
+  @Before
+  public void clearCluster() throws Exception {
+    try {
+      cluster.deleteAllCollections();
+    } finally {
+      System.clearProperty("zkClientTimeout");
+    }
+  }
+
+  @Test
+  public void testCreationAndDeletion() throws Exception {
+
+    String collectionName = "created_and_deleted";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+        .contains(collectionName));
+
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+
+  }
+
+  @Test
+  public void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
+    
+    String collectionName = "out_of_sync_collection";
+
+    // manually create a collections zknode
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient())
+                  .contains(collectionName));
+    
+    assertFalse(cluster.getZkClient().exists(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true));
+
+  }
+
+  @Test
+  public void deletePartiallyCreatedCollection() throws Exception {
+
+    final String collectionName = "halfdeletedcollection";
+
+    assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    String dataDir = createTempDir().toFile().getAbsolutePath();
+    // create a core that simulates something left over from a partially-deleted collection
+    assertTrue(CollectionAdminRequest
+        .addReplicaToShard(collectionName, "shard1")
+        .setDataDir(dataDir)
+        .process(cluster.getSolrClient()).isSuccess());
+
+    CollectionAdminRequest.deleteCollection(collectionName)
+        .process(cluster.getSolrClient());
+
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+  }
+
+  @Test
+  public void deleteCollectionOnlyInZk() throws Exception {
+
+    final String collectionName = "onlyinzk";
+
+    // create the collections node, but nothing else
+    cluster.getZkClient().makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + collectionName, true);
+
+    // delete via API - should remove collections node
+    CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+    
+    // now creating that collection should work
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+    assertTrue(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains(collectionName));
+
+  }
+
+  @Test
+  public void testBadActionNames() throws Exception {
+
+    // try a bad action
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", "BADACTION");
+    String collectionName = "badactioncollection";
+    params.set("name", collectionName);
+    params.set("numShards", 2);
+    final QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testMissingRequiredParameters() {
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("numShards", 2);
+    // missing required collection parameter
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+  }
+
+  @Test
+  public void testTooManyReplicas() {
+
+    CollectionAdminRequest req = CollectionAdminRequest.createCollection("collection", "conf", 2, 10);
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(req);
+    });
+
+  }
+
+  @Test
+  public void testMissingNumShards() {
+
+    // No numShards should fail
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("name", "acollection");
+    params.set(REPLICATION_FACTOR, 10);
+    params.set("collection.configName", "conf");
+
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testZeroNumShards() {
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("name", "acollection");
+    params.set(REPLICATION_FACTOR, 10);
+    params.set("numShards", 0);
+    params.set("collection.configName", "conf");
+
+    final SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    expectThrows(Exception.class, () -> {
+      cluster.getSolrClient().request(request);
+    });
+
+  }
+
+  @Test
+  public void testCreateShouldFailOnExistingCore() throws Exception {
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
+        .setNode(cluster.getJettySolrRunner(0).getNodeName())
+        .setCoreName("halfcollection_shard1_replica_n1")
+        .process(cluster.getSolrClient()).isSuccess());
+
+    assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
+        .setCreateNodeSet("")
+        .process(cluster.getSolrClient()).getStatus());
+    assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
+        .setNode(cluster.getJettySolrRunner(1).getNodeName())
+        .setCoreName("halfcollection_shard1_replica_n1")
+        .process(cluster.getSolrClient()).isSuccess());
+
+    String nn1 = cluster.getJettySolrRunner(0).getNodeName();
+    String nn2 = cluster.getJettySolrRunner(1).getNodeName();
+
+    CollectionAdminResponse resp = CollectionAdminRequest.createCollection("halfcollection", "conf", 2, 1)
+        .setCreateNodeSet(nn1 + "," + nn2)
+        .process(cluster.getSolrClient());
+    
+    SimpleOrderedMap success = (SimpleOrderedMap) resp.getResponse().get("success");
+    SimpleOrderedMap failure = (SimpleOrderedMap) resp.getResponse().get("failure");
+
+    assertNotNull(resp.toString(), success);
+    assertNotNull(resp.toString(), failure);
+    
+    String val1 = success.getVal(0).toString();
+    String val2 = failure.getVal(0).toString();
+    assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
+  }
+
+  @Test
+  public void testNoConfigSetExist() throws Exception {
+
+    expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
+          .process(cluster.getSolrClient());
+    });
+
+    TimeUnit.MILLISECONDS.sleep(1000);
+    // the collection should not have been created, since the configset does not exist
+    cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
+    assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
+  }
+
+  @Test
+  public void testCoresAreDistributedAcrossNodes() throws Exception {
+
+    CollectionAdminRequest.createCollection("nodes_used_collection", "conf", 2, 2)
+        .process(cluster.getSolrClient());
+
+    Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
+
+    List<String> createNodeList = new ArrayList<>();
+    createNodeList.addAll(liveNodes);
+
+    DocCollection collection = getCollectionState("nodes_used_collection");
+    for (Slice slice : collection.getSlices()) {
+      for (Replica replica : slice.getReplicas()) {
+        createNodeList.remove(replica.getNodeName());
+      }
+    }
+
+    assertEquals(createNodeList.toString(), 0, createNodeList.size());
+
+  }
+
+  @Test
+  public void testDeleteNonExistentCollection() throws Exception {
+
+    SolrException e = expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.deleteCollection("unknown_collection").process(cluster.getSolrClient());
+    });
+
+    // create another collection should still work
+    CollectionAdminRequest.createCollection("acollectionafterbaddelete", "conf", 1, 2)
+        .process(cluster.getSolrClient());
+    waitForState("Collection creation after a bad delete failed", "acollectionafterbaddelete",
+        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
+  }
+
+  @Test
+  public void testSpecificConfigsets() throws Exception {
+    CollectionAdminRequest.createCollection("withconfigset2", "conf2", 1, 1).process(cluster.getSolrClient());
+    byte[] data = zkClient().getData(ZkStateReader.COLLECTIONS_ZKNODE + "/" + "withconfigset2", null, null, true);
+    assertNotNull(data);
+    ZkNodeProps props = ZkNodeProps.load(data);
+    String configName = props.getStr(ZkController.CONFIGNAME_PROP);
+    assertEquals("conf2", configName);
+  }
+
+  @Test
+  public void testMaxNodesPerShard() throws Exception {
+
+    // test maxShardsPerNode
+    int numLiveNodes = cluster.getJettySolrRunners().size();
+    int numShards = (numLiveNodes/2) + 1;
+    int replicationFactor = 2;
+    int maxShardsPerNode = 1;
+
+    SolrException e = expectThrows(SolrException.class, () -> {
+      CollectionAdminRequest.createCollection("oversharded", "conf", numShards, replicationFactor)
+          .process(cluster.getSolrClient());
+    });
+
+  }
+
+  @Test
+  public void testCreateNodeSet() throws Exception {
+
+    JettySolrRunner jetty1 = cluster.getRandomJetty(random());
+    JettySolrRunner jetty2 = cluster.getRandomJetty(random());
+
+    List<String> baseUrls = ImmutableList.of(jetty1.getBaseUrl().toString(), jetty2.getBaseUrl().toString());
+
+    CollectionAdminRequest.createCollection("nodeset_collection", "conf", 2, 1)
+        .setCreateNodeSet(baseUrls.get(0) + "," + baseUrls.get(1))
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState("nodeset_collection");
+    for (Replica replica : collectionState.getReplicas()) {
+      String replicaUrl = replica.getCoreUrl();
+      boolean matchingJetty = false;
+      for (String jettyUrl : baseUrls) {
+        if (replicaUrl.startsWith(jettyUrl))
+          matchingJetty = true;
+      }
+      if (matchingJetty == false)
+        fail("Expected replica to be on " + baseUrls + " but was on " + replicaUrl);
+    }
+
+  }
+
+  @Test
+  public void testCollectionsAPI() throws Exception {
+
+    // create new collections rapid-fire
+    int cnt = random().nextInt(TEST_NIGHTLY ? 3 : 1) + 1;
+    CollectionAdminRequest.Create[] createRequests = new CollectionAdminRequest.Create[cnt];
+
+    for (int i = 0; i < cnt; i++) {
+
+      int numShards = TestUtil.nextInt(random(), 0, cluster.getJettySolrRunners().size()) + 1;
+      int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
+      int maxShardsPerNode = (((numShards * replicationFactor) / cluster.getJettySolrRunners().size())) + 1;
+
+      createRequests[i]
+          = CollectionAdminRequest.createCollection("awhollynewcollection_" + i, "conf2", numShards, replicationFactor)
+          .setMaxShardsPerNode(maxShardsPerNode);
+      createRequests[i].processAsync(cluster.getSolrClient());
+    }
+
+    for (int i = 0; i < cnt; i++) {
+      String collectionName = "awhollynewcollection_" + i;
+      final int j = i;
+      waitForState("Expected to see collection " + collectionName, collectionName,
+          (n, c) -> {
+            CollectionAdminRequest.Create req = createRequests[j];
+            return DocCollection.isFullyActive(n, c, req.getNumShards(), req.getReplicationFactor());
+          });
+    }
+
+    cluster.injectChaos(random());
+
+    for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
+      checkInstanceDirs(cluster.getJettySolrRunner(i));
+    }
+
+    String collectionName = createRequests[random().nextInt(createRequests.length)].getCollectionName();
+
+    new UpdateRequest()
+        .add("id", "6")
+        .add("id", "7")
+        .add("id", "8")
+        .commit(cluster.getSolrClient(), collectionName);
+    TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+    while (!timeOut.hasTimedOut()) {
+      try {
+        long numFound = cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound();
+        assertEquals(3, numFound);
+        break;
+      } catch (Exception e) {
+        // Query node can have stale clusterstate
+        log.info("Error when query " + collectionName, e);
+        Thread.sleep(500);
+      }
+    }
+    if (timeOut.hasTimedOut()) {
+      fail("Timeout on query " + collectionName);
+    }
+
+    checkNoTwoShardsUseTheSameIndexDir();
+  }
+
+  @Test
+  public void testCollectionReload() throws Exception {
+
+    final String collectionName = "reloaded_collection";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2).process(cluster.getSolrClient());
+
+    // get core open times
+    Map<String, Long> urlToTimeBefore = new HashMap<>();
+    collectStartTimes(collectionName, urlToTimeBefore);
+    assertTrue(urlToTimeBefore.size() > 0);
+
+    CollectionAdminRequest.reloadCollection(collectionName).processAsync(cluster.getSolrClient());
+
+    // reloads may take a short while
+    boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
+    assertTrue("some core start times did not change on reload", allTimesAreCorrect);
+  }
+
+  private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
+    CoreContainer cores = jetty.getCoreContainer();
+    Collection<SolrCore> theCores = cores.getCores();
+    for (SolrCore core : theCores) {
+
+      // look for core props file
+      Path instancedir = (Path) core.getResourceLoader().getInstancePath();
+      assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));
+
+      Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());
+
+      assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
+
+    }
+  }
+
+  private boolean waitForReloads(String collectionName, Map<String,Long> urlToTimeBefore) throws SolrServerException, IOException {
+
+    TimeOut timeout = new TimeOut(45, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+
+    boolean allTimesAreCorrect = false;
+    while (! timeout.hasTimedOut()) {
+      Map<String,Long> urlToTimeAfter = new HashMap<>();
+      collectStartTimes(collectionName, urlToTimeAfter);
+      
+      boolean retry = false;
+      Set<Entry<String,Long>> entries = urlToTimeBefore.entrySet();
+      for (Entry<String,Long> entry : entries) {
+        Long beforeTime = entry.getValue();
+        Long afterTime = urlToTimeAfter.get(entry.getKey());
+        assertNotNull(afterTime);
+        if (afterTime <= beforeTime) {
+          retry = true;
+          break;
+        }
+
+      }
+      if (!retry) {
+        allTimesAreCorrect = true;
+        break;
+      }
+    }
+    return allTimesAreCorrect;
+  }
+
+  private void collectStartTimes(String collectionName, Map<String,Long> urlToTime)
+      throws SolrServerException, IOException {
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    if (collectionState != null) {
+      for (Slice shard : collectionState) {
+        for (Replica replica : shard) {
+          ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
+          CoreStatus coreStatus;
+          try (HttpSolrClient server = getHttpSolrClient(coreProps.getBaseUrl())) {
+            coreStatus = CoreAdminRequest.getCoreStatus(coreProps.getCoreName(), false, server);
+          }
+          long before = coreStatus.getCoreStartTime().getTime();
+          urlToTime.put(coreProps.getCoreUrl(), before);
+        }
+      }
+    } else {
+      throw new IllegalArgumentException("Could not find collection " + collectionName);
+    }
+  }
+  
+  private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
+    Map<String, Set<String>> indexDirToShardNamesMap = new HashMap<>();
+    
+    List<MBeanServer> servers = new LinkedList<>();
+    servers.add(ManagementFactory.getPlatformMBeanServer());
+    servers.addAll(MBeanServerFactory.findMBeanServer(null));
+    for (final MBeanServer server : servers) {
+      Set<ObjectName> mbeans = new HashSet<>();
+      mbeans.addAll(server.queryNames(null, null));
+      for (final ObjectName mbean : mbeans) {
+
+        try {
+          Map<String, String> props = mbean.getKeyPropertyList();
+          String category = props.get("category");
+          String name = props.get("name");
+          if ((category != null && category.equals(Category.CORE.toString())) &&
+              (name != null && name.equals("indexDir"))) {
+            String indexDir = server.getAttribute(mbean, "Value").toString();
+            String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
+            if (!indexDirToShardNamesMap.containsKey(indexDir)) {
+              indexDirToShardNamesMap.put(indexDir, new HashSet<>());
+            }
+            indexDirToShardNamesMap.get(indexDir).add(key);
+          }
+        } catch (Exception e) {
+          // ignore, just continue - probably a "Value" attribute
+          // not found
+        }
+      }
+    }
+    
+    assertTrue(
+        "Something is broken in the assert for no shards using the same indexDir - probably something was changed in the attributes published in the MBean of "
+            + SolrCore.class.getSimpleName() + " : " + indexDirToShardNamesMap,
+        indexDirToShardNamesMap.size() > 0);
+    for (Entry<String,Set<String>> entry : indexDirToShardNamesMap.entrySet()) {
+      if (entry.getValue().size() > 1) {
+        fail("We have shards using the same indexDir. E.g. shards "
+            + entry.getValue().toString() + " all use indexDir "
+            + entry.getKey());
+      }
+    }
+
+  }
+
+  @Test
+  @LogLevel("org.apache.solr.cloud=DEBUG")
+  public void addReplicaTest() throws Exception {
+    String collectionName = "addReplicaColl";
+
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+        .setMaxShardsPerNode(4)
+        .process(cluster.getSolrClient());
+
+    ArrayList<String> nodeList
+        = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
+    Collections.shuffle(nodeList, random());
+
+    CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(nodeList.get(0))
+        .process(cluster.getSolrClient());
+    Replica newReplica = grabNewReplica(response, getCollectionState(collectionName));
+
+    assertEquals("Replica should be created on the right node",
+        cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
+        newReplica.getStr(ZkStateReader.BASE_URL_PROP));
+
+    Path instancePath = createTempDir();
+    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty(CoreAdminParams.INSTANCE_DIR, instancePath.toString())
+        .process(cluster.getSolrClient());
+    newReplica = grabNewReplica(response, getCollectionState(collectionName));
+    assertNotNull(newReplica);
+
+    try (HttpSolrClient coreclient = getHttpSolrClient(newReplica.getStr(ZkStateReader.BASE_URL_PROP))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(newReplica.getStr("core"), coreclient);
+      NamedList<Object> coreStatus = status.getCoreStatus(newReplica.getStr("core"));
+      String instanceDirStr = (String) coreStatus.get("instanceDir");
+      assertEquals(instancePath.toString(), instanceDirStr);
+    }
+
+    // Test that we can't create another replica using a core name that already exists in the collection
+    String coreName = newReplica.getStr(CORE_NAME_PROP);
+    SolrException e = expectThrows(SolrException.class, () -> {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", "addreplica");
+      params.set("collection", collectionName);
+      params.set("shard", "shard1");
+      params.set("name", coreName);
+      QueryRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+      cluster.getSolrClient().request(request);
+    });
+
+    assertTrue(e.getMessage().contains("Another replica with the same core name already exists for this collection"));
+
+    // Check that specifying property.name works. DO NOT remove this even if the "name" property is
+    // deprecated for ADDREPLICA; this exercises "property.name", not "name". See SOLR-7132
+    response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty(CoreAdminParams.NAME, "propertyDotName")
+        .process(cluster.getSolrClient());
+
+    newReplica = grabNewReplica(response, getCollectionState(collectionName));
+    assertEquals("'core' should be 'propertyDotName' ", "propertyDotName", newReplica.getStr("core"));
+
+  }
+
+  private Replica grabNewReplica(CollectionAdminResponse response, DocCollection docCollection) {
+    String replicaName = response.getCollectionCoresStatus().keySet().iterator().next();
+    Optional<Replica> optional = docCollection.getReplicas().stream()
+        .filter(replica -> replicaName.equals(replica.getCoreName()))
+        .findAny();
+    if (optional.isPresent()) {
+      return optional.get();
+    }
+    throw new AssertionError("Can not find " + replicaName + " from " + docCollection);
+  }
+
+}

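The oversharding failure asserted in testMaxNodesPerShard above comes down to capacity arithmetic; with the 4-node cluster configured in setupCluster(), a sketch of the numbers:

    int numLiveNodes = 4;                   // nodes started by configureCluster(4)
    int numShards = (numLiveNodes / 2) + 1; // = 3
    int replicationFactor = 2;              // 3 shards * 2 replicas = 6 cores needed
    int maxShardsPerNode = 1;               // default: 4 nodes * 1 slot = 4 cores available
    // 6 cores needed > 4 available, so the CREATE request fails with a SolrException
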
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
new file mode 100644
index 0000000..1d0036e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentDeleteAndCreateCollectionTest.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.nio.file.Path;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.lucene.util.LuceneTestCase.Nightly;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.cloud.MiniSolrCloudCluster;
+import org.apache.solr.common.util.IOUtils;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.util.TimeOut;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Nightly
+public class ConcurrentDeleteAndCreateCollectionTest extends SolrTestCaseJ4 {
+  
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  
+  private MiniSolrCloudCluster solrCluster;
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    solrCluster = new MiniSolrCloudCluster(1, createTempDir(), buildJettyConfig("/solr"));
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    solrCluster.shutdown();
+    super.tearDown();
+  }
+  
+  public void testConcurrentCreateAndDeleteDoesNotFail() {
+    final AtomicReference<Exception> failure = new AtomicReference<>();
+    final int timeToRunSec = 30;
+    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[10];
+    for (int i = 0; i < threads.length; i++) {
+      final String collectionName = "collection" + i;
+      uploadConfig(configset("configset-2"), collectionName);
+      final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
+      final SolrClient solrClient = getHttpSolrClient(baseUrl);
+      threads[i] = new CreateDeleteSearchCollectionThread("create-delete-search-" + i, collectionName, collectionName, 
+          timeToRunSec, solrClient, failure);
+    }
+    
+    startAll(threads);
+    joinAll(threads);
+    
+    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
+  }
+  
+  public void testConcurrentCreateAndDeleteOverTheSameConfig() {
+    final String configName = "testconfig";
+    uploadConfig(configset("configset-2"), configName); // upload config once, to be used by all collections
+    final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
+    final AtomicReference<Exception> failure = new AtomicReference<>();
+    final int timeToRunSec = 30;
+    final CreateDeleteCollectionThread[] threads = new CreateDeleteCollectionThread[2];
+    for (int i = 0; i < threads.length; i++) {
+      final String collectionName = "collection" + i;
+      final SolrClient solrClient = getHttpSolrClient(baseUrl);
+      threads[i] = new CreateDeleteCollectionThread("create-delete-" + i, collectionName, configName,
+                                                    timeToRunSec, solrClient, failure);
+    }
+
+    startAll(threads);
+    joinAll(threads);
+
+    assertNull("concurrent create and delete collection failed: " + failure.get(), failure.get());
+  }
+  
+  private void uploadConfig(Path configDir, String configName) {
+    try {
+      solrCluster.uploadConfigSet(configDir, configName);
+    } catch (IOException | KeeperException | InterruptedException e) {
+      throw new RuntimeException(e);
+    }
+  }
+  
+  private void joinAll(final CreateDeleteCollectionThread[] threads) {
+    for (CreateDeleteCollectionThread t : threads) {
+      try {
+        t.joinAndClose();
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt(); // preserve the interrupt status instead of clearing it
+        throw new RuntimeException(e);
+      }
+    }
+  }
+  
+  private void startAll(final Thread[] threads) {
+    for (Thread t : threads) {
+      t.start();
+    }
+  }
+  
+  private static class CreateDeleteCollectionThread extends Thread {
+    protected final String collectionName;
+    protected final String configName;
+    protected final long timeToRunSec;
+    protected final SolrClient solrClient;
+    protected final AtomicReference<Exception> failure;
+    
+    public CreateDeleteCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
+        SolrClient solrClient, AtomicReference<Exception> failure) {
+      super(name);
+      this.collectionName = collectionName;
+      this.timeToRunSec = timeToRunSec;
+      this.solrClient = solrClient;
+      this.failure = failure;
+      this.configName = configName;
+    }
+    
+    @Override
+    public void run() {
+      final TimeOut timeout = new TimeOut(timeToRunSec, TimeUnit.SECONDS, TimeSource.NANO_TIME);
+      while (! timeout.hasTimedOut() && failure.get() == null) {
+        doWork();
+      }
+    }
+    
+    protected void doWork() {
+      createCollection();
+      deleteCollection();
+    }
+    
+    protected void addFailure(Exception e) {
+      log.error("Add Failure", e);
+      synchronized (failure) {
+        if (failure.get() != null) {
+          failure.get().addSuppressed(e);
+        } else {
+          failure.set(e);
+        }
+      }
+    }
+    
+    private void createCollection() {
+      try {
+        final CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName,configName,1,1)
+                .process(solrClient);
+        if (response.getStatus() != 0) {
+          addFailure(new RuntimeException("failed to create collection " + collectionName));
+        }
+      } catch (Exception e) {
+        addFailure(e);
+      }
+      
+    }
+    
+    private void deleteCollection() {
+      try {
+        final CollectionAdminRequest.Delete deleteCollectionRequest
+          = CollectionAdminRequest.deleteCollection(collectionName);
+        final CollectionAdminResponse response = deleteCollectionRequest.process(solrClient);
+        if (response.getStatus() != 0) {
+          addFailure(new RuntimeException("failed to delete collection " + collectionName));
+        }
+      } catch (Exception e) {
+        addFailure(e);
+      }
+    }
+    
+    public void joinAndClose() throws InterruptedException {
+      try {
+        super.join(60000);
+      } finally {
+        IOUtils.closeQuietly(solrClient);
+      }
+    }
+  }
+  
+  private static class CreateDeleteSearchCollectionThread extends CreateDeleteCollectionThread {
+
+    public CreateDeleteSearchCollectionThread(String name, String collectionName, String configName, long timeToRunSec,
+        SolrClient solrClient, AtomicReference<Exception> failure) {
+      super(name, collectionName, configName, timeToRunSec, solrClient, failure);
+    }
+    
+    @Override
+    protected void doWork() {
+      super.doWork();
+      searchNonExistingCollection();
+    }
+    
+    private void searchNonExistingCollection() {
+      try {
+        solrClient.query(collectionName, new SolrQuery("*"));
+      } catch (Exception e) {
+        if (!e.getMessage().contains("not found") && !e.getMessage().contains("Can not find")) {
+          addFailure(e);
+        }
+      }
+    }
+    
+  }
+  
+}

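The worker threads above bound their run time with TimeOut, the same utility the other tests use for polling. A minimal sketch of that polling idiom (conditionMet() is a hypothetical predicate standing in for any check):

    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    while (!timeout.hasTimedOut() && !conditionMet()) {
      Thread.sleep(100); // back off between checks
    }
    if (timeout.hasTimedOut()) {
      fail("condition was not met within 30 seconds");
    }
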
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
new file mode 100644
index 0000000..654c7e9
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CustomCollectionTest.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.Map;
+
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.apache.solr.common.cloud.DocCollection.DOC_ROUTER;
+import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
+import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
+import static org.apache.solr.common.params.ShardParams._ROUTE_;
+
+/**
+ * Tests the Custom Sharding API.
+ */
+public class CustomCollectionTest extends SolrCloudTestCase {
+
+  private static final int NODE_COUNT = 4;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(NODE_COUNT)
+        .addConfig("conf", configset("cloud-dynamic"))
+        .configure();
+  }
+
+  @Before
+  public void ensureClusterEmpty() throws Exception {
+    cluster.deleteAllCollections();
+  }
+
+  @Test
+  public void testCustomCollectionsAPI() throws Exception {
+
+    final String collection = "implicitcoll";
+    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
+    int numShards = 3;
+    int maxShardsPerNode = (((numShards + 1) * replicationFactor) / NODE_COUNT) + 1;
+
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .process(cluster.getSolrClient());
+
+    DocCollection coll = getCollectionState(collection);
+    assertEquals("implicit", ((Map) coll.get(DOC_ROUTER)).get("name"));
+    assertNotNull(coll.getStr(REPLICATION_FACTOR));
+    assertNotNull(coll.getStr(MAX_SHARDS_PER_NODE));
+    assertNull("A shard of a Collection configured with implicit router must have null range",
+        coll.getSlice("a").getRange());
+
+    new UpdateRequest()
+        .add("id", "6")
+        .add("id", "7")
+        .add("id", "8")
+        .withRoute("a")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+
+    cluster.getSolrClient().deleteByQuery(collection, "*:*");
+    cluster.getSolrClient().commit(collection, true, true);
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+
+    new UpdateRequest()
+        .add("id", "9")
+        .add("id", "10")
+        .add("id", "11")
+        .withRoute("c")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
+
+    // Test CREATESHARD
+    CollectionAdminRequest.createShard(collection, "x")
+        .process(cluster.getSolrClient());
+    waitForState("Expected shard 'x' to be active", collection, (n, c) -> {
+      if (c.getSlice("x") == null)
+        return false;
+      for (Replica r : c.getSlice("x")) {
+        if (r.getState() != Replica.State.ACTIVE)
+          return false;
+      }
+      return true;
+    });
+
+    new UpdateRequest()
+        .add("id", "66", _ROUTE_, "x")
+        .commit(cluster.getSolrClient(), collection);
+    // TODO - the local state is cached and causes the request to fail with 'unknown shard'
+    // assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "x")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testRouteFieldForImplicitRouter() throws Exception {
+
+    int numShards = 4;
+    int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
+    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
+    String shard_fld = "shard_s";
+
+    final String collection = "withShardField";
+
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c,d", replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .setRouterField(shard_fld)
+        .process(cluster.getSolrClient());
+
+    new UpdateRequest()
+        .add("id", "6", shard_fld, "a")
+        .add("id", "7", shard_fld, "a")
+        .add("id", "8", shard_fld, "b")
+        .commit(cluster.getSolrClient(), collection);
+
+    assertEquals(3, cluster.getSolrClient().query(collection, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(1, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(2, cluster.getSolrClient().query(collection, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testRouteFieldForHashRouter() throws Exception {
+    String collectionName = "routeFieldColl";
+    int numShards = 4;
+    int replicationFactor = 2;
+    int maxShardsPerNode = ((numShards * replicationFactor) / NODE_COUNT) + 1;
+    String shard_fld = "shard_s";
+
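+    // With the default compositeId router, the hash of the shard_s value (rather than
+    // of the uniqueKey) picks the shard, so docs sharing a value land on the same shard.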
+    CollectionAdminRequest.createCollection(collectionName, "conf", numShards, replicationFactor)
+        .setMaxShardsPerNode(maxShardsPerNode)
+        .setRouterField(shard_fld)
+        .process(cluster.getSolrClient());
+
+    new UpdateRequest()
+        .add("id", "6", shard_fld, "a")
+        .add("id", "7", shard_fld, "a")
+        .add("id", "8", shard_fld, "b")
+        .commit(cluster.getSolrClient(), collectionName);
+
+    assertEquals(3, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+    assertEquals(2, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "a")).getResults().getNumFound());
+    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "b")).getResults().getNumFound());
+    assertEquals(0, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c")).getResults().getNumFound());
+
+    cluster.getSolrClient().deleteByQuery(collectionName, "*:*");
+    cluster.getSolrClient().commit(collectionName);
+
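+    // Composite-id syntax: the prefix before '!' ("c") drives the hash, so the doc
+    // can be found with _route_=c!.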
+    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "100", shard_fld, "c!doc1"));
+    cluster.getSolrClient().commit(collectionName);
+    assertEquals(1, cluster.getSolrClient().query(collectionName, new SolrQuery("*:*").setParam(_ROUTE_, "c!")).getResults().getNumFound());
+
+  }
+
+  @Test
+  public void testCreateShardRepFactor() throws Exception  {
+    final String collectionName = "testCreateShardRepFactor";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
+        .process(cluster.getSolrClient());
+
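+    // The new shard should inherit the collection's replicationFactor of 1.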
+    CollectionAdminRequest.createShard(collectionName, "x")
+        .process(cluster.getSolrClient());
+
+    waitForState("Not enough active replicas in shard 'x'", collectionName, (n, c) -> {
+      return c.getSlice("x").getReplicas().size() == 1;
+    });
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
new file mode 100644
index 0000000..ae83ebf
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCollectionsAPIDistributedZkTest.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Metric;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.util.BadHdfsThreadsFilter;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+@Nightly
+@ThreadLeakFilters(defaultFilters = true, filters = {
+    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
+})
+public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
+
+  private static MiniDFSCluster dfsCluster;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
+    System.setProperty("tests.hdfs.numdatanodes", "1");
+
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
+
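+    // Make the HDFS-aware configset available under both config names that the
+    // inherited tests can reference.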
+    ZkConfigManager configManager = new ZkConfigManager(zkClient());
+    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
+    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf2");
+
+    System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
+  }
+
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    cluster.shutdown(); // need to close before the MiniDFSCluster
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+    System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    System.clearProperty("tests.hdfs.numdatanodes");
+    System.clearProperty("solr.hdfs.home");
+  }
+
+  @Test
+  public void moveReplicaTest() throws Exception {
+    cluster.waitForAllNodes(5000);
+    String coll = "movereplicatest_coll";
+
+    CloudSolrClient cloudClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
+    create.setMaxShardsPerNode(2);
+    cloudClient.request(create);
+
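+    // Index a few docs, committing after each add, so the shard has data whose
+    // reuse can be verified after the move.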
+    for (int i = 0; i < 10; i++) {
+      cloudClient.add(coll, sdoc("id",String.valueOf(i)));
+      cloudClient.commit(coll);
+    }
+
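+    // Walk the shuffled slices and remember a non-leader replica as the move candidate.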
+    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
+    Collections.shuffle(slices, random());
+    Slice slice = null;
+    Replica replica = null;
+    for (Slice s : slices) {
+      slice = s;
+      for (Replica r : s.getReplicas()) {
+        if (s.getLeader() != r) {
+          replica = r;
+        }
+      }
+    }
+    String dataDir = getDataDir(replica);
+
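+    // Choose any live node other than the replica's current host as the move target.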
+    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
+    ArrayList<String> l = new ArrayList<>(liveNodes);
+    Collections.shuffle(l, random());
+    String targetNode = null;
+    for (String node : liveNodes) {
+      if (!replica.getNodeName().equals(node)) {
+        targetNode = node;
+        break;
+      }
+    }
+    assertNotNull(targetNode);
+
+    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
+    moveReplica.process(cloudClient);
+
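+    // The source node should now host no cores while the target hosts two.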
+    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
+    checkNumOfCores(cloudClient, targetNode, 2);
+
+    waitForState("Wait for recovery finish failed",coll, clusterShape(2,2));
+    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
+    boolean found = false;
+    for (Replica newReplica : slice.getReplicas()) {
+      if (getDataDir(newReplica).equals(dataDir)) {
+        found = true;
+      }
+    }
+    assertTrue(found);
+
+    // data dir is reused so replication will be skipped
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
+      List<String> registryNames = manager.registryNames().stream()
+          .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
+      for (String registry : registryNames) {
+        Map<String, Metric> metrics = manager.registry(registry).getMetrics();
+        Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
+        if (counter != null) {
+          assertEquals(0, counter.getCount());
+        }
+      }
+    }
+  }
+
+
+  private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
+    assertEquals(nodeName + " does not have the expected number of cores", expectedCores, getNumOfCores(cloudClient, nodeName));
+  }
+
+  private int getNumOfCores(CloudSolrClient cloudClient, String nodeName) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
+      CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
+      return status.getCoreStatus().size();
+    }
+  }
+
+  private String getDataDir(Replica replica) throws IOException, SolrServerException {
+    try (HttpSolrClient coreclient = getHttpSolrClient(replica.getBaseUrl())) {
+      CoreStatus status = CoreAdminRequest.getCoreStatus(replica.getCoreName(), coreclient);
+      return status.getDataDirectory();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
new file mode 100644
index 0000000..6f7e717
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ReplicaPropertiesBase.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+// Collect useful operations for testing assigning properties to individual replicas
+// Could probably expand this to do something creative with getting random slices
+// and shards, but for now this will do.
+public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
+
+  public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
+    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    for (int idx = 0; idx < paramsIn.length; idx += 2) {
+      params.set(paramsIn[idx], paramsIn[idx + 1]);
+    }
+    QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    return client.request(request);
+  }
+
+  public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
+                                String property)
+      throws KeeperException, InterruptedException {
+    ClusterState clusterState = null;
+    Replica replica = null;
+    for (int idx = 0; idx < 300; ++idx) {
+      clusterState = client.getZkStateReader().getClusterState();
+      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
+      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
+      if (replica == null) {
+        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
+      }
+      if (StringUtils.isBlank(replica.getProperty(property))) return;
+      Thread.sleep(100);
+    }
+    fail("Property " + property + " not set correctly for collection/replica pair: " +
+        collectionName + "/" + replicaName + ". Replica props: " + replica.getProperties().toString() +
+        ". Cluster state is " + clusterState.toString());
+
+  }
+
+  // Verify that the named replica in the named collection reports the expected value
+  // for the given property.
+  public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
+                         String replicaName, String property, String val)
+      throws InterruptedException, KeeperException {
+    Replica replica = null;
+    ClusterState clusterState = null;
+
+    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
+      clusterState = client.getZkStateReader().getClusterState();
+      final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
+      replica = (docCollection == null) ? null : docCollection.getReplica(replicaName);
+      if (replica == null) {
+        fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
+      }
+      if (StringUtils.equals(val, replica.getProperty(property))) return;
+      Thread.sleep(100);
+    }
+
+    fail("Property '" + property + "' with value " + replica.getProperty(property) +
+        " not set correctly for collection/replica pair: " + collectionName + "/" + replicaName + " property map is " +
+        replica.getProperties().toString() + ".");
+
+  }
+
+  // Verify that
+  // 1> the property is set at most once across all the replicas in a slice.
+  // 2> the property is balanced evenly across all the nodes hosting the collection.
+  public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
+                                    String property) throws KeeperException, InterruptedException {
+    verifyUnique(client, collectionName, property, true);
+  }
+
+  public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
+                            String property) throws KeeperException, InterruptedException {
+    verifyUnique(client, collectionName, property, false);
+  }
+
+  public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
+      throws KeeperException, InterruptedException {
+
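+    // Poll for up to 30 seconds (300 iterations x 100ms) while the Overseer writes the updated state.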
+    DocCollection col = null;
+    for (int idx = 0; idx < 300; ++idx) {
+      ClusterState clusterState = client.getZkStateReader().getClusterState();
+
+      col = clusterState.getCollection(collectionName);
+      if (col == null) {
+        fail("Could not find collection " + collectionName);
+      }
+      Map<String, Integer> counts = new HashMap<>();
+      Set<String> uniqueNodes = new HashSet<>();
+      boolean allSlicesHaveProp = true;
+      boolean badSlice = false;
+      for (Slice slice : col.getSlices()) {
+        boolean thisSliceHasProp = false;
+        int propCount = 0;
+        for (Replica replica : slice.getReplicas()) {
+          uniqueNodes.add(replica.getNodeName());
+          String propVal = replica.getProperty(property);
+          if (StringUtils.isNotBlank(propVal)) {
+            ++propCount;
+            thisSliceHasProp = true;
+            counts.merge(replica.getNodeName(), 1, Integer::sum);
+          }
+        }
+        if (propCount > 1) badSlice = true;
+        allSlicesHaveProp = allSlicesHaveProp && thisSliceHasProp;
+      }
+      if (!balanced && !badSlice) {
+        return;
+      }
+      if (allSlicesHaveProp && balanced) {
+        // Check that the properties are evenly distributed.
+        int minProps = col.getSlices().size() / uniqueNodes.size();
+        int maxProps = minProps;
+
+        if (col.getSlices().size() % uniqueNodes.size() > 0) {
+          ++maxProps;
+        }
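+        // An even spread means every node holds either minProps or maxProps instances of the property.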
+        boolean doSleep = false;
+        for (Map.Entry<String, Integer> ent : counts.entrySet()) {
+          if (ent.getValue() != minProps && ent.getValue() != maxProps) {
+            doSleep = true;
+          }
+        }
+
+        if (!doSleep) {
+          assertTrue("We really shouldn't be calling this if there is no node with the property " + property,
+              counts.size() > 0);
+          return;
+        }
+      }
+      Thread.sleep(100);
+    }
+    fail("Collection " + collectionName + " does not have roles evenly distributed. Collection is: " + col.toString());
+  }
+
+}