Posted to commits@lucene.apache.org by va...@apache.org on 2018/01/16 20:39:45 UTC

[04/15] lucene-solr:branch_7x: SOLR-11817: Move Collections API classes to its own package

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
deleted file mode 100644
index 154673a..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestCollectionsAPIViaSolrCloudCluster.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test of the Collections API with the MiniSolrCloudCluster.
- */
-@LuceneTestCase.Slow
-public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private static final int numShards = 2;
-  private static final int numReplicas = 2;
-  private static final int maxShardsPerNode = 1;
-  private static final int nodeCount = 5;
-  private static final String configName = "solrCloudCollectionConfig";
-  private static final Map<String,String> collectionProperties  // ensure indexes survive core shutdown
-      = Collections.singletonMap("solr.directoryFactory", "solr.StandardDirectoryFactory");
-
-  @Override
-  public void setUp() throws Exception {
-    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
-    super.setUp();
-  }
-  
-  @Override
-  public void tearDown() throws Exception {
-    cluster.shutdown();
-    super.tearDown();
-  }
-
-  private void createCollection(String collectionName, String createNodeSet) throws Exception {
-    if (random().nextBoolean()) { // process asynchronously
-      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
-          .setMaxShardsPerNode(maxShardsPerNode)
-          .setCreateNodeSet(createNodeSet)
-          .setProperties(collectionProperties)
-          .processAndWait(cluster.getSolrClient(), 30);
-    }
-    else {
-      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
-          .setMaxShardsPerNode(maxShardsPerNode)
-          .setCreateNodeSet(createNodeSet)
-          .setProperties(collectionProperties)
-          .process(cluster.getSolrClient());
-    }
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish
-        (collectionName, cluster.getSolrClient().getZkStateReader(), true, true, 330);
-  }
-
-  @Test
-  public void testCollectionCreateSearchDelete() throws Exception {
-    final CloudSolrClient client = cluster.getSolrClient();
-    final String collectionName = "testcollection";
-
-    assertNotNull(cluster.getZkServer());
-    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
-    assertEquals(nodeCount, jettys.size());
-    for (JettySolrRunner jetty : jettys) {
-      assertTrue(jetty.isRunning());
-    }
-
-    // shut down a server
-    JettySolrRunner stoppedServer = cluster.stopJettySolrRunner(0);
-    assertTrue(stoppedServer.isStopped());
-    assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
-
-    // create a server
-    JettySolrRunner startedServer = cluster.startJettySolrRunner();
-    assertTrue(startedServer.isRunning());
-    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
-
-    // create collection
-    createCollection(collectionName, null);
-
-    // modify/query collection
-    new UpdateRequest().add("id", "1").commit(client, collectionName);
-    QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
-    assertEquals(1, rsp.getResults().getNumFound());
-
-    // remove a server not hosting any replicas
-    ZkStateReader zkStateReader = client.getZkStateReader();
-    zkStateReader.forceUpdateCollection(collectionName);
-    ClusterState clusterState = zkStateReader.getClusterState();
-    Map<String,JettySolrRunner> jettyMap = new HashMap<>();
-    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
-      jettyMap.put(key, jetty);
-    }
-    Collection<Slice> slices = clusterState.getCollection(collectionName).getSlices();
-    // remove every server that hosts a replica; whatever remains hosts none
-    for (Slice slice : slices) {
-      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
-      for (Replica replica : slice.getReplicas()) {
-        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
-      }
-    }
-    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
-    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
-    jettys = cluster.getJettySolrRunners();
-    for (int i = 0; i < jettys.size(); ++i) {
-      if (jettys.get(i).equals(jettyToStop)) {
-        cluster.stopJettySolrRunner(i);
-        assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
-      }
-    }
-
-    // re-create a server (to restore the original node count)
-    startedServer = cluster.startJettySolrRunner(jettyToStop);
-    assertTrue(startedServer.isRunning());
-    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
-
-    CollectionAdminRequest.deleteCollection(collectionName).process(client);
-    AbstractDistribZkTestBase.waitForCollectionToDisappear
-        (collectionName, client.getZkStateReader(), true, true, 330);
-
-    // create it again
-    createCollection(collectionName, null);
-
-    // check that there's no left-over state
-    assertEquals(0, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-
-    // modify/query collection
-    new UpdateRequest().add("id", "1").commit(client, collectionName);
-    assertEquals(1, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
-  }
-
-  @Test
-  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
-
-    final String collectionName = "testSolrCloudCollectionWithoutCores";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    assertNotNull(cluster.getZkServer());
-    assertFalse(cluster.getJettySolrRunners().isEmpty());
-
-    // create collection
-    createCollection(collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY);
-
-    // check the collection's corelessness
-    int coreCount = 0;
-    DocCollection docCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
-    for (Map.Entry<String,Slice> entry : docCollection.getSlicesMap().entrySet()) {
-      coreCount += entry.getValue().getReplicasMap().entrySet().size();
-    }
-    assertEquals(0, coreCount);
-
-    // delete the collection
-    CollectionAdminRequest.deleteCollection(collectionName).process(client);
-    AbstractDistribZkTestBase.waitForCollectionToDisappear
-        (collectionName, client.getZkStateReader(), true, true, 330);
-  }
-
-  @Test
-  public void testStopAllStartAll() throws Exception {
-
-    final String collectionName = "testStopAllStartAllCollection";
-    final CloudSolrClient client = cluster.getSolrClient();
-
-    assertNotNull(cluster.getZkServer());
-    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
-    assertEquals(nodeCount, jettys.size());
-    for (JettySolrRunner jetty : jettys) {
-      assertTrue(jetty.isRunning());
-    }
-
-    final SolrQuery query = new SolrQuery("*:*");
-    final SolrInputDocument doc = new SolrInputDocument();
-
-    // create collection
-    createCollection(collectionName, null);
-
-    ZkStateReader zkStateReader = client.getZkStateReader();
-
-    // modify collection
-    final int numDocs = 1 + random().nextInt(10);
-    for (int ii = 1; ii <= numDocs; ++ii) {
-      doc.setField("id", ""+ii);
-      client.add(collectionName, doc);
-      if (ii*2 == numDocs) client.commit(collectionName);
-    }
-    client.commit(collectionName);
-
-    // query collection
-    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
-
-    // the test itself
-    zkStateReader.forceUpdateCollection(collectionName);
-    final ClusterState clusterState = zkStateReader.getClusterState();
-
-    final Set<Integer> leaderIndices = new HashSet<>();
-    final Set<Integer> followerIndices = new HashSet<>();
-    {
-      final Map<String,Boolean> shardLeaderMap = new HashMap<>();
-      for (final Slice slice : clusterState.getCollection(collectionName).getSlices()) {
-        for (final Replica replica : slice.getReplicas()) {
-          shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
-        }
-        shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
-      }
-      for (int ii = 0; ii < jettys.size(); ++ii) {
-        final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
-        final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
-        final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
-        if (Boolean.TRUE.equals(isLeader)) {
-          leaderIndices.add(ii);
-        } else if (Boolean.FALSE.equals(isLeader)) {
-          followerIndices.add(ii);
-        } // else neither leader nor follower i.e. node without a replica (for our collection)
-      }
-    }
-    final List<Integer> leaderIndicesList = new ArrayList<>(leaderIndices);
-    final List<Integer> followerIndicesList = new ArrayList<>(followerIndices);
-
-    // first stop the followers (in no particular order)
-    Collections.shuffle(followerIndicesList, random());
-    for (Integer ii : followerIndicesList) {
-      if (!leaderIndices.contains(ii)) {
-        cluster.stopJettySolrRunner(jettys.get(ii));
-      }
-    }
-
-    // then stop the leaders (again in no particular order)
-    Collections.shuffle(leaderIndicesList, random());
-    for (Integer ii : leaderIndicesList) {
-      cluster.stopJettySolrRunner(jettys.get(ii));
-    }
-
-    // calculate restart order
-    final List<Integer> restartIndicesList = new ArrayList<>();
-    Collections.shuffle(leaderIndicesList, random());
-    restartIndicesList.addAll(leaderIndicesList);
-    Collections.shuffle(followerIndicesList, random());
-    restartIndicesList.addAll(followerIndicesList);
-    if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());
-
-    // and then restart jettys in that order
-    for (Integer ii : restartIndicesList) {
-      final JettySolrRunner jetty = jettys.get(ii);
-      if (!jetty.isRunning()) {
-        cluster.startJettySolrRunner(jetty);
-        assertTrue(jetty.isRunning());
-      }
-    }
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
-
-    zkStateReader.forceUpdateCollection(collectionName);
-
-    // re-query collection
-    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
-  }
-}

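The createCollection helper deleted above randomizes between the synchronous and
asynchronous forms of the same SolrJ request. A minimal sketch of that pattern,
assuming a running SolrCloud cluster; the collection and configset names here are
illustrative, not from the commit:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;

    static void createEitherWay(SolrClient client, boolean async) throws Exception {
      CollectionAdminRequest.Create create =
          CollectionAdminRequest.createCollection("example", "exampleConfig", 2, 2);
      if (async) {
        // Submits with an auto-generated async id, then polls REQUESTSTATUS
        // until the task finishes or 30 seconds elapse.
        RequestStatusState state = create.processAndWait(client, 30);
        if (state != RequestStatusState.COMPLETED) {
          throw new IllegalStateException("create did not complete: " + state);
        }
      } else {
        create.process(client); // blocks until the Overseer has handled the request
      }
    }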
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
deleted file mode 100644
index 40a6e30..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestHdfsCloudBackupRestore.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
-import static org.apache.solr.core.backup.BackupManager.*;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.cloud.hdfs.HdfsTestUtil;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.params.CollectionAdminParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.backup.BackupManager;
-import org.apache.solr.core.backup.repository.HdfsBackupRepository;
-import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the tests for HDFS integration for Solr backup/restore capability.
- */
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
-})
-public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
-  public static final String SOLR_XML = "<solr>\n" +
-      "\n" +
-      "  <str name=\"shareSchema\">${shareSchema:false}</str>\n" +
-      "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n" +
-      "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n" +
-      "\n" +
-      "  <shardHandlerFactory name=\"shardHandlerFactory\" class=\"HttpShardHandlerFactory\">\n" +
-      "    <str name=\"urlScheme\">${urlScheme:}</str>\n" +
-      "    <int name=\"socketTimeout\">${socketTimeout:90000}</int>\n" +
-      "    <int name=\"connTimeout\">${connTimeout:15000}</int>\n" +
-      "  </shardHandlerFactory>\n" +
-      "\n" +
-      "  <solrcloud>\n" +
-      "    <str name=\"host\">127.0.0.1</str>\n" +
-      "    <int name=\"hostPort\">${hostPort:8983}</int>\n" +
-      "    <str name=\"hostContext\">${hostContext:solr}</str>\n" +
-      "    <int name=\"zkClientTimeout\">${solr.zkclienttimeout:30000}</int>\n" +
-      "    <bool name=\"genericCoreNodeNames\">${genericCoreNodeNames:true}</bool>\n" +
-      "    <int name=\"leaderVoteWait\">10000</int>\n" +
-      "    <int name=\"distribUpdateConnTimeout\">${distribUpdateConnTimeout:45000}</int>\n" +
-      "    <int name=\"distribUpdateSoTimeout\">${distribUpdateSoTimeout:340000}</int>\n" +
-      "  </solrcloud>\n" +
-      "  \n" +
-      "  <backup>\n" +
-      "    <repository  name=\"hdfs\" class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\"> \n" +
-      "      <str name=\"location\">${solr.hdfs.default.backup.path}</str>\n" +
-      "      <str name=\"solr.hdfs.home\">${solr.hdfs.home:}</str>\n" +
-      "      <str name=\"solr.hdfs.confdir\">${solr.hdfs.confdir:}</str>\n" +
-      "    </repository>\n" +
-      "  </backup>\n" +
-      "  \n" +
-      "</solr>\n";
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static MiniDFSCluster dfsCluster;
-  private static String hdfsUri;
-  private static FileSystem fs;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
-    hdfsUri = HdfsTestUtil.getURI(dfsCluster);
-    try {
-      URI uri = new URI(hdfsUri);
-      Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
-      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
-      fs = FileSystem.get(uri, conf);
-
-      if (fs instanceof DistributedFileSystem) {
-        // Make sure dfs is not in safe mode
-        while (((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
-          log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
-          try {
-            Thread.sleep(5000);
-          } catch (InterruptedException e) {
-            Thread.interrupted();
-            // continue
-          }
-        }
-      }
-
-      fs.mkdirs(new org.apache.hadoop.fs.Path("/backup"));
-    } catch (IOException | URISyntaxException e) {
-      throw new RuntimeException(e);
-    }
-
-    System.setProperty("solr.hdfs.default.backup.path", "/backup");
-    System.setProperty("solr.hdfs.home", hdfsUri + "/solr");
-    useFactory("solr.StandardDirectoryFactory");
-
-    configureCluster(NUM_SHARDS)// nodes
-    .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-    .withSolrXml(SOLR_XML)
-    .configure();
-  }
-
-  @AfterClass
-  public static void teardownClass() throws Exception {
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty("solr.hdfs.default.backup.path");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
-    IOUtils.closeQuietly(fs);
-    fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
-  }
-
-  @Override
-  public String getCollectionName() {
-    return "hdfsbackuprestore";
-  }
-
-  @Override
-  public String getBackupRepoName() {
-    return "hdfs";
-  }
-
-  @Override
-  public String getBackupLocation() {
-    return null;
-  }
-
-  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
-    String backupName = "configonlybackup";
-    CloudSolrClient solrClient = cluster.getSolrClient();
-
-    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
-        .setRepositoryName(getBackupRepoName())
-        .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
-    backup.process(solrClient);
-
-    Map<String,String> params = new HashMap<>();
-    params.put("location", "/backup");
-    params.put("solr.hdfs.home", hdfsUri + "/solr");
-
-    HdfsBackupRepository repo = new HdfsBackupRepository();
-    repo.init(new NamedList<>(params));
-    BackupManager mgr = new BackupManager(repo, solrClient.getZkStateReader());
-
-    URI baseLoc = repo.createURI("/backup");
-
-    Properties props = mgr.readBackupProperties(baseLoc, backupName);
-    assertNotNull(props);
-    assertEquals(collectionName, props.getProperty(COLLECTION_NAME_PROP));
-    assertEquals(backupName, props.getProperty(BACKUP_NAME_PROP));
-    assertEquals(configName, props.getProperty(COLL_CONF));
-
-    DocCollection collectionState = mgr.readCollectionState(baseLoc, backupName, collectionName);
-    assertNotNull(collectionState);
-    assertEquals(collectionName, collectionState.getName());
-
-    URI configDirLoc = repo.resolve(baseLoc, backupName, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
-    assertTrue(repo.exists(configDirLoc));
-
-    Collection<String> expected = Arrays.asList(BACKUP_PROPS_FILE, ZK_STATE_DIR);
-    URI backupLoc = repo.resolve(baseLoc, backupName);
-    String[] dirs = repo.listAll(backupLoc);
-    for (String d : dirs) {
-      assertTrue(expected.contains(d));
-    }
-  }
-
-}

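The config-only backup exercised above is selected purely by the index-backup
strategy parameter. A minimal sketch of issuing it through SolrJ, assuming a
repository named "hdfs" registered in solr.xml as in the SOLR_XML above (the
collection and backup names mirror the test):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.common.params.CollectionAdminParams;

    static void configOnlyBackup(SolrClient client) throws Exception {
      // NO_INDEX_BACKUP_STRATEGY copies the collection's config and cluster state
      // into the repository but skips the index files entirely.
      CollectionAdminRequest.backupCollection("hdfsbackuprestore", "configonlybackup")
          .setRepositoryName("hdfs")
          .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY)
          .process(client);
    }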
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
deleted file mode 100644
index c0db46e..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestLocalFSCloudBackupRestore.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.junit.BeforeClass;
-
-/**
- * This class implements the tests for local file-system integration for Solr backup/restore capability.
- * Note that the Solr backup/restore still requires a "shared" file-system; it's just that in this case
- * the file-system is exposed via the local file-system API.
- */
-public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
-  private static String backupLocation;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    configureCluster(NUM_SHARDS)// nodes
-        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-        .configure();
-
-    boolean whitespacesInPath = random().nextBoolean();
-    if (whitespacesInPath) {
-      backupLocation = createTempDir("my backup").toAbsolutePath().toString();
-    } else {
-      backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
-    }
-  }
-
-  @Override
-  public String getCollectionName() {
-    return "backuprestore";
-  }
-
-  @Override
-  public String getBackupRepoName() {
-    return null;
-  }
-
-  @Override
-  public String getBackupLocation() {
-    return backupLocation;
-  }
-}

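With the local repository the backup location is just a directory path that every
node can see. A minimal sketch of the round trip the base class drives, with
illustrative collection and backup names:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    static void backupThenRestore(SolrClient client, String sharedDir) throws Exception {
      CollectionAdminRequest.backupCollection("backuprestore", "mybackup")
          .setLocation(sharedDir) // must be visible to every node
          .process(client);
      CollectionAdminRequest.restoreCollection("backuprestore_restored", "mybackup")
          .setLocation(sharedDir) // same location the backup was written to
          .process(client);
    }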
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java b/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
deleted file mode 100644
index f654e8f..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestReplicaProperties.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-@Slow
-public class TestReplicaProperties extends ReplicaPropertiesBase {
-
-  public static final String COLLECTION_NAME = "testcollection";
-
-  public TestReplicaProperties() {
-    schemaString = "schema15.xml";      // we need a string id
-    sliceCount = 2;
-  }
-
-  @Test
-  @ShardsFixed(num = 4)
-  public void test() throws Exception {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
-      // shards, replicationfactor, maxreplicaspernode
-      int shards = random().nextInt(7);
-      if (shards < 2) shards = 2;
-      int rFactor = random().nextInt(4);
-      if (rFactor < 2) rFactor = 2;
-      createCollection(null, COLLECTION_NAME, shards, rFactor, shards * rFactor + 1, client, null, "conf1");
-    }
-
-    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
-    waitForRecoveriesToFinish(COLLECTION_NAME, false);
-
-    listCollection();
-
-    clusterAssignPropertyTest();
-  }
-
-  private void listCollection() throws IOException, SolrServerException {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      ModifiableSolrParams params = new ModifiableSolrParams();
-      params.set("action", CollectionParams.CollectionAction.LIST.toString());
-      SolrRequest request = new QueryRequest(params);
-      request.setPath("/admin/collections");
-
-      NamedList<Object> rsp = client.request(request);
-      List<String> collections = (List<String>) rsp.get("collections");
-      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
-      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
-      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
-    }
-  }
-
-
-  private void clusterAssignPropertyTest() throws Exception {
-
-    try (CloudSolrClient client = createCloudClient(null)) {
-      client.connect();
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "property", "preferredLeader");
-      } catch (SolrException se) {
-        assertTrue("Should have seen missing required parameter 'collection' error",
-            se.getMessage().contains("Missing required parameter: collection"));
-      }
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "preferredLeader");
-
-      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "preferredleader");
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "property.newunique",
-          "shardUnique", "true");
-      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "property.newunique");
-
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "collection", COLLECTION_NAME,
-            "property", "whatever",
-            "shardUnique", "false");
-        fail("Should have thrown an exception here.");
-      } catch (SolrException se) {
-        assertTrue("Should have gotten a specific error message here",
-            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the " +
-                "property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
-      }
-      // Should be able to set non-unique-per-slice values in several places.
-      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
-      List<String> sliceList = new ArrayList<>(slices.keySet());
-      String c1_s1 = sliceList.get(0);
-      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
-      String c1_s1_r1 = replicasList.get(0);
-      String c1_s1_r2 = replicasList.get(1);
-
-      addProperty(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r1,
-          "property", "bogus1",
-          "property.value", "true");
-
-      addProperty(client,
-          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
-          "collection", COLLECTION_NAME,
-          "shard", c1_s1,
-          "replica", c1_s1_r2,
-          "property", "property.bogus1",
-          "property.value", "whatever");
-
-      try {
-        doPropertyAction(client,
-            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-            "collection", COLLECTION_NAME,
-            "property", "bogus1",
-            "shardUnique", "false");
-        fail("Should have thrown parameter error here");
-      } catch (SolrException se) {
-        assertTrue("Should have caught specific exception ",
-            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the property be " +
-                "pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
-      }
-
-      // Should have no effect despite the "shardUnique" param being set.
-
-      doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
-          "collection", COLLECTION_NAME,
-          "property", "property.bogus1",
-          "shardUnique", "true");
-
-      verifyPropertyVal(client, COLLECTION_NAME,
-          c1_s1_r1, "bogus1", "true");
-      verifyPropertyVal(client, COLLECTION_NAME,
-          c1_s1_r2, "property.bogus1", "whatever");
-
-      // At this point we've assigned a preferred leader. Make it happen and check that all the nodes that are
-      // leaders _also_ have the preferredLeader property set.
-
-
-      NamedList<Object> res = doPropertyAction(client,
-          "action", CollectionParams.CollectionAction.REBALANCELEADERS.toString(),
-          "collection", COLLECTION_NAME);
-
-      verifyLeaderAssignment(client, COLLECTION_NAME);
-
-    }
-  }
-
-  private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
-      throws InterruptedException, KeeperException {
-    String lastFailMsg = "";
-    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
-      lastFailMsg = "";
-      ClusterState clusterState = client.getZkStateReader().getClusterState();
-      for (Slice slice : clusterState.getCollection(collectionName).getSlices()) {
-        Boolean foundLeader = false;
-        Boolean foundPreferred = false;
-        for (Replica replica : slice.getReplicas()) {
-          Boolean isLeader = replica.getBool("leader", false);
-          Boolean isPreferred = replica.getBool("property.preferredleader", false);
-          if (isLeader != isPreferred) {
-            lastFailMsg = "Replica should NOT have preferredLeader != leader. Preferred: " + isPreferred.toString() +
-                " leader is " + isLeader.toString();
-          }
-          if (foundLeader && isLeader) {
-            lastFailMsg = "There should only be a single leader in _any_ shard! Replica " + replica.getName() +
-                " is the second leader in slice " + slice.getName();
-          }
-          if (foundPreferred && isPreferred) {
-            lastFailMsg = "There should only be a single preferredLeader in _any_ shard! Replica " + replica.getName() +
-                " is the second preferredLeader in slice " + slice.getName();
-          }
-          foundLeader = foundLeader ? foundLeader : isLeader;
-          foundPreferred = foundPreferred ? foundPreferred : isPreferred;
-        }
-      }
-      if (lastFailMsg.length() == 0) return;
-      Thread.sleep(100);
-    }
-    fail(lastFailMsg);
-  }
-
-  private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
-    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    for (int idx = 0; idx < paramsIn.length; idx += 2) {
-      params.set(paramsIn[idx], paramsIn[idx + 1]);
-    }
-    QueryRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    client.request(request);
-
-  }
-}
-

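The property actions above all go through the generic /admin/collections endpoint.
A minimal sketch of the BALANCESHARDUNIQUE call in isolation, using only the
classes the deleted test already imports:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.common.params.CollectionParams;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;

    // Asks the Overseer to spread the preferredLeader property so that exactly
    // one replica per shard carries it.
    static NamedList<Object> balancePreferredLeader(CloudSolrClient client, String collection)
        throws Exception {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString());
      params.set("collection", collection);
      params.set("property", "preferredLeader");
      QueryRequest request = new QueryRequest(params);
      request.setPath("/admin/collections");
      return client.request(request);
    }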
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
deleted file mode 100644
index a560e75..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/TestRequestStatusCollectionAPI.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.CommonAdminParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
-import org.junit.Test;
-
-import java.io.IOException;
-
-public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
-
-  public static final int MAX_WAIT_TIMEOUT_SECONDS = 90;
-
-  public TestRequestStatusCollectionAPI() {
-    schemaString = "schema15.xml";      // we need a string id
-  }
-
-  @Test
-  public void test() throws Exception {
-    ModifiableSolrParams params = new ModifiableSolrParams();
-
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection2");
-    params.set("numShards", 2);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1000");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    // Check for the request to be completed.
-
-    NamedList r = null;
-    NamedList status = null;
-    String message = null;
-
-    params = new ModifiableSolrParams();
-
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1000");
-
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("found [1000] in completed tasks", message);
-
-    // Check for a random (hopefully non-existent) request id
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "9999999");
-    try {
-      r = sendRequest(params);
-      status = (NamedList) r.get("status");
-      message = (String) status.get("msg");
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("Did not find [9999999] in any tasks queue", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.SPLITSHARD.toString());
-    params.set("collection", "collection2");
-    params.set("shard", "shard1");
-    params.set(CommonAdminParams.ASYNC, "1001");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    // Check for the request to be completed.
-    params = new ModifiableSolrParams();
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1001");
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("found [1001] in completed tasks", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection2");
-    params.set("numShards", 2);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1002");
-    try {
-      sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    params = new ModifiableSolrParams();
-
-    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
-    params.set(OverseerCollectionMessageHandler.REQUESTID, "1002");
-
-    try {
-      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-
-    assertEquals("found [1002] in failed tasks", message);
-
-    params = new ModifiableSolrParams();
-    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
-    params.set("name", "collection3");
-    params.set("numShards", 1);
-    params.set("replicationFactor", 1);
-    params.set("maxShardsPerNode", 100);
-    params.set("collection.configName", "conf1");
-    params.set(CommonAdminParams.ASYNC, "1002");
-    try {
-      r = sendRequest(params);
-    } catch (SolrServerException | IOException e) {
-      e.printStackTrace();
-    }
-
-    assertEquals("Task with the same requestid already exists.", r.get("error"));
-  }
-
-  /**
-   * Helper method to send a status request with a specific retry limit and return
-   * the status message (or null) from the response.
-   */
-  private String sendStatusRequestWithRetry(ModifiableSolrParams params, int maxCounter)
-      throws SolrServerException, IOException{
-    String message = null;
-    while (maxCounter-- > 0) {
-      final NamedList r = sendRequest(params);
-      final NamedList status = (NamedList) r.get("status");
-      final RequestStatusState state = RequestStatusState.fromKey((String) status.get("state"));
-      message = (String) status.get("msg");
-
-      if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
-        return message;
-      }
-
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-      }
-
-    }
-    // Timed out: return the last message seen.
-    return message;
-  }
-
-  protected NamedList sendRequest(ModifiableSolrParams params) throws SolrServerException, IOException {
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient).getBaseURL();
-    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
-
-    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 15000)) {
-      return baseServer.request(request);
-    }
-
-  }
-}

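The retry helper above is the standard polling loop for async Collections API
tasks. A compact sketch of the same loop, using the literal "requestid" parameter
(the value behind OverseerCollectionMessageHandler.REQUESTID):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;
    import org.apache.solr.common.params.CollectionParams;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;

    // Polls REQUESTSTATUS once a second until the task completes or fails,
    // returning the status message, or null if it is still running at timeout.
    static String waitForAsyncTask(SolrClient client, String requestId, int maxSeconds)
        throws Exception {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
      params.set("requestid", requestId);
      QueryRequest request = new QueryRequest(params);
      request.setPath("/admin/collections");
      for (int i = 0; i < maxSeconds; i++) {
        NamedList<?> status = (NamedList<?>) client.request(request).get("status");
        RequestStatusState state = RequestStatusState.fromKey((String) status.get("state"));
        if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
          return (String) status.get("msg");
        }
        Thread.sleep(1000);
      }
      return null;
    }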
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
new file mode 100644
index 0000000..058814c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AbstractCloudBackupRestoreTestCase.java
@@ -0,0 +1,348 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.lucene.util.TestUtil;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest.ClusterProp;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the logic required to test Solr cloud backup/restore capability.
+ */
+public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  protected static final int NUM_SHARDS = 2; // granted, we sometimes shard-split to get more
+
+  int replFactor;
+  int numTlogReplicas;
+  int numPullReplicas;
+
+  private static long docsSeed; // see indexDocs()
+
+  @BeforeClass
+  public static void createCluster() throws Exception {
+    docsSeed = random().nextLong();
+  }
+
+  /**
+   * @return The name of the collection to use.
+   */
+  public abstract String getCollectionName();
+
+  /**
+   * @return The name of the backup repository to use.
+   */
+  public abstract String getBackupRepoName();
+
+  /**
+   * @return The absolute path for the backup location.
+   *         Could return null.
+   */
+  public abstract String getBackupLocation();
+
+  @Test
+  public void test() throws Exception {
+    boolean isImplicit = random().nextBoolean();
+    boolean doSplitShardOperation = !isImplicit && random().nextBoolean();
+    replFactor = TestUtil.nextInt(random(), 1, 2);
+    numTlogReplicas = TestUtil.nextInt(random(), 0, 1);
+    numPullReplicas = TestUtil.nextInt(random(), 0, 1);
+    
+    CollectionAdminRequest.Create create = isImplicit ?
+      // NOTE: use shard list with same # of shards as NUM_SHARDS; we assume this later
+      CollectionAdminRequest.createCollectionWithImplicitRouter(getCollectionName(), "conf1", "shard1,shard2", replFactor, numTlogReplicas, numPullReplicas) :
+      CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);
+    
+    if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size() || random().nextBoolean()) {
+      create.setMaxShardsPerNode((int) Math.ceil(NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) / (double) cluster.getJettySolrRunners().size())); // divide as double so Math.ceil can round up; just to assert it survives the restoration
+      if (doSplitShardOperation) {
+        create.setMaxShardsPerNode(create.getMaxShardsPerNode() * 2);
+      }
+    }
+    if (random().nextBoolean()) {
+      create.setAutoAddReplicas(true);//just to assert it survives the restoration
+    }
+    Properties coreProps = new Properties();
+    coreProps.put("customKey", "customValue");//just to assert it survives the restoration
+    create.setProperties(coreProps);
+    if (isImplicit) { //implicit router
+      create.setRouterField("shard_s");
+    } else {//composite id router
+      if (random().nextBoolean()) {
+        create.setRouterField("shard_s");
+      }
+    }
+
+    CloudSolrClient solrClient = cluster.getSolrClient();
+    create.process(solrClient);
+
+    indexDocs(getCollectionName());
+
+    if (doSplitShardOperation) {
+      // shard split the first shard
+      int prevActiveSliceCount = getActiveSliceCount(getCollectionName());
+      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(getCollectionName());
+      splitShard.setShardName("shard1");
+      splitShard.process(solrClient);
+      // wait until we see one more active slice...
+      for (int i = 0; getActiveSliceCount(getCollectionName()) != prevActiveSliceCount + 1; i++) {
+        assertTrue(i < 30);
+        Thread.sleep(500);
+      }
+      // issue a hard commit.  Split shard does a soft commit which isn't good enough for the backup/snapshooter to see
+      solrClient.commit(getCollectionName());
+    }
+
+    testBackupAndRestore(getCollectionName());
+    testConfigBackupOnly("conf1", getCollectionName());
+    testInvalidPath(getCollectionName());
+  }
+
+  /**
+   * This test validates the backup of collection configuration using
+   *  {@linkplain CollectionAdminParams#NO_INDEX_BACKUP_STRATEGY}.
+   *
+   * @param configName The config name for the collection to be backed up.
+   * @param collectionName The name of the collection to be backed up.
+   * @throws Exception in case of errors.
+   */
+  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
+    // This is deliberately a no-op since we want to run this test for only one of the backup repository
+    // implementations (mainly to avoid redundant test execution). Currently the HDFS backup repository test
+    // implements it.
+  }
+
+  // This test verifies the system behavior when the backup location cluster property is configured with an invalid
+  // value for the specified repository (and the default backup location is not configured in solr.xml).
+  private void testInvalidPath(String collectionName) throws Exception {
+    // Execute this test only if the default backup location is NOT configured in solr.xml
+    if (getBackupLocation() == null) {
+      return;
+    }
+
+    String backupName = "invalidbackuprequest";
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    ClusterProp req = CollectionAdminRequest.setClusterProperty(CoreAdminParams.BACKUP_LOCATION, "/location/does/not/exist");
+    assertEquals(0, req.process(solrClient).getStatus());
+
+    // Do not specify the backup location.
+    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+        .setRepositoryName(getBackupRepoName());
+    try {
+      backup.process(solrClient);
+      fail("This request should have failed since the cluster property value for backup location property is invalid.");
+    } catch (SolrException ex) {
+      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
+    }
+
+    String restoreCollectionName = collectionName + "_invalidrequest";
+    CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
+        .setRepositoryName(getBackupRepoName());
+    try {
+      restore.process(solrClient);
+      fail("This request should have failed since the cluster property value for backup location property is invalid.");
+    } catch (SolrException ex) {
+      assertEquals(ErrorCode.SERVER_ERROR.code, ex.code());
+    }
+  }
+
+  private int getActiveSliceCount(String collectionName) {
+    return cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(collectionName).getActiveSlices().size();
+  }
+
+  private void indexDocs(String collectionName) throws Exception {
+    Random random = new Random(docsSeed);// use a constant seed for the whole test run so that we can easily re-index.
+    int numDocs = random.nextInt(100);
+    if (numDocs == 0) {
+      log.info("Indexing ZERO test docs");
+      return;
+    }
+    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
+    for (int i=0; i<numDocs; i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", i);
+      doc.addField("shard_s", "shard" + (1 + random.nextInt(NUM_SHARDS))); // for implicit router
+      docs.add(doc);
+    }
+    CloudSolrClient client = cluster.getSolrClient();
+    client.add(collectionName, docs);// batch
+    client.commit(collectionName);
+  }
+
+  private void testBackupAndRestore(String collectionName) throws Exception {
+    String backupLocation = getBackupLocation();
+    String backupName = "mytestbackup";
+
+    CloudSolrClient client = cluster.getSolrClient();
+    DocCollection backupCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
+
+    Map<String, Integer> origShardToDocCount = getShardToDocCountMap(client, backupCollection);
+    assert origShardToDocCount.isEmpty() == false;
+
+    log.info("Triggering Backup command");
+
+    {
+      CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
+      if (random().nextBoolean()) {
+        assertEquals(0, backup.process(client).getStatus());
+      } else {
+        assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(client, 30));//async
+      }
+    }
+
+    log.info("Triggering Restore command");
+
+    String restoreCollectionName = collectionName + "_restored";
+    boolean sameConfig = random().nextBoolean();
+
+    {
+      CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
+          .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
+
+
+      // Explicitly specify replicationFactor/pullReplicas/nrtReplicas/tlogReplicas.
+      // The values are still the same as the original; maybe test with values different from the original for better coverage.
+      if (random().nextBoolean())  {
+        restore.setReplicationFactor(replFactor);
+      }
+      if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
+        // may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more)
+        restore.setMaxShardsPerNode((int) Math.ceil(backupCollection.getReplicas().size() / (double) cluster.getJettySolrRunners().size()));
+      }
+      
+
+      if (rarely()) { // Try with createNodeSet configuration
+        int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
+        List<String> nodeStrs = new ArrayList<>(nodeSetSize);
+        Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
+        for (int i = 0; i < nodeSetSize ; i++) {
+          nodeStrs.add(iter.next().getNodeName());
+        }
+        restore.setCreateNodeSet(String.join(",", nodeStrs));
+        restore.setCreateNodeSetShuffle(usually());
+        // we need to double maxShardsPerNode value since we reduced number of available nodes by half.
+        if (restore.getMaxShardsPerNode() != null) {
+          restore.setMaxShardsPerNode(restore.getMaxShardsPerNode() * 2);
+        } else {
+          restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
+        }
+      }
+
+      Properties props = new Properties();
+      props.setProperty("customKey", "customVal");
+      restore.setProperties(props);
+
+      if (sameConfig==false) {
+        restore.setConfigName("customConfigName");
+      }
+      if (random().nextBoolean()) {
+        assertEquals(0, restore.process(client).getStatus());
+      } else {
+        assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(client, 30));//async
+      }
+      AbstractDistribZkTestBase.waitForRecoveriesToFinish(
+          restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
+    }
+
+    // Check that the restored collection has the same per-shard doc counts as the original
+    DocCollection restoreCollection = client.getZkStateReader().getClusterState().getCollection(restoreCollectionName);
+    assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
+    // Re-index the same docs (identical given the same random seed) and verify the counts are unchanged.
+    // This checks that the hash ranges / doc router were reconstituted correctly.
+    if (!(restoreCollection.getRouter() instanceof ImplicitDocRouter) && random().nextBoolean()) {
+      indexDocs(restoreCollectionName);
+      assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
+    }
+
+    assertEquals(backupCollection.getReplicationFactor(), restoreCollection.getReplicationFactor());
+    assertEquals(backupCollection.getAutoAddReplicas(), restoreCollection.getAutoAddReplicas());
+    assertEquals(backupCollection.getActiveSlices().iterator().next().getReplicas().size(),
+        restoreCollection.getActiveSlices().iterator().next().getReplicas().size());
+    assertEquals(sameConfig ? "conf1" : "customConfigName",
+        cluster.getSolrClient().getZkStateReader().readConfigName(restoreCollectionName));
+
+    Map<String, Integer> numReplicasByNodeName = new HashMap<>();
+    restoreCollection.getReplicas().forEach(x -> {
+      numReplicasByNodeName.put(x.getNodeName(), numReplicasByNodeName.getOrDefault(x.getNodeName(), 0) + 1);
+    });
+    numReplicasByNodeName.forEach((k, v) -> {
+      assertTrue("Node " + k + " has " + v + " replicas. Expected num replicas : " + restoreCollection.getMaxShardsPerNode() ,
+          v <= restoreCollection.getMaxShardsPerNode());
+    });
+
+    assertEquals("Different count of nrtReplicas. Backup collection state=" + backupCollection + "\nRestore " +
+        "collection state=" + restoreCollection, replFactor, restoreCollection.getNumNrtReplicas().intValue());
+    assertEquals("Different count of pullReplicas. Backup collection state=" + backupCollection + "\nRestore" +
+        " collection state=" + restoreCollection, numPullReplicas, restoreCollection.getNumPullReplicas().intValue());
+    assertEquals("Different count of TlogReplica. Backup collection state=" + backupCollection + "\nRestore" +
+        " collection state=" + restoreCollection, numTlogReplicas, restoreCollection.getNumTlogReplicas().intValue());
+
+    assertEquals("Restore collection should use stateFormat=2", 2, restoreCollection.getStateFormat());
+
+    // assert added core properties:
+    // DWS: did via manual inspection.
+    // TODO Find the applicable core.properties on the file system but how?
+  }
+
+  private Map<String, Integer> getShardToDocCountMap(CloudSolrClient client, DocCollection docCollection) throws SolrServerException, IOException {
+    Map<String,Integer> shardToDocCount = new TreeMap<>();
+    for (Slice slice : docCollection.getActiveSlices()) {
+      String shardName = slice.getName();
+      try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(slice.getLeader().getCoreUrl()).withHttpClient(client.getHttpClient()).build()) {
+        long docsInShard = leaderClient.query(new SolrQuery("*:*").setParam("distrib", "false"))
+            .getResults().getNumFound();
+        shardToDocCount.put(shardName, (int) docsInShard);
+      }
+    }
+    return shardToDocCount;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
new file mode 100644
index 0000000..d2b35e4
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AssignTest.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.impl.ZkDistribStateManager;
+import org.apache.solr.cloud.ZkTestServer;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.util.ExecutorUtil;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class AssignTest extends SolrTestCaseJ4 {
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  @Test
+  public void testAssignNode() throws Exception {
+    assumeWorkingMockito();
+    
+    SolrZkClient zkClient = mock(SolrZkClient.class);
+    Map<String, byte[]> zkClientData = new HashMap<>();
+    when(zkClient.setData(anyString(), any(), anyInt(), anyBoolean())).then(invocation -> {
+        zkClientData.put(invocation.getArgument(0), invocation.getArgument(1));
+        return null;
+      }
+    );
+    when(zkClient.getData(anyString(), any(), any(), anyBoolean())).then(invocation ->
+        zkClientData.get(invocation.getArgument(0)));
+    // TODO: fix this to be independent of ZK
+    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
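+    // the counter is per collection: the first assignment for a collection yields core_node1, a repeat yields core_node2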
+    String nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node1", nodeName);
+    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node1", nodeName);
+    nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
+    assertEquals("core_node2", nodeName);
+  }
+
+  @Test
+  public void testIdIsUnique() throws Exception {
+    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
+    ZkTestServer server = new ZkTestServer(zkDir);
+    Object fixedValue = new Object();
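+    // marker value; ConcurrentHashMap.put returning non-null means the id had already been handed out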
+    String[] collections = new String[]{"c1","c2","c3","c4","c5","c6","c7","c8","c9"};
+    Map<String, ConcurrentHashMap<Integer, Object>> collectionUniqueIds = new HashMap<>();
+    for (String c : collections) {
+      collectionUniqueIds.put(c, new ConcurrentHashMap<>());
+    }
+
+    ExecutorService executor = ExecutorUtil.newMDCAwareCachedThreadPool("threadpool");
+    try {
+      server.run();
+
+      try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
+        assertTrue(zkClient.isConnected());
+        zkClient.makePath("/", true);
+        for (String c : collections) {
+          zkClient.makePath("/collections/"+c, true);
+        }
+        // TODO: fix this to be independent of ZK
+        ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
+        List<Future<?>> futures = new ArrayList<>();
+        for (int i = 0; i < 1000; i++) {
+          futures.add(executor.submit(() -> {
+            String collection = collections[random().nextInt(collections.length)];
+            int id = Assign.incAndGetId(stateManager, collection, 0);
+            Object val = collectionUniqueIds.get(collection).put(id, fixedValue);
+            if (val != null) {
+              fail("ZkController do not generate unique id for " + collection);
+            }
+          }));
+        }
+        for (Future<?> future : futures) {
+          future.get();
+        }
+      }
+      assertEquals(1000, (long) collectionUniqueIds.values().stream()
+          .map(ConcurrentHashMap::size)
+          .reduce((m1, m2) -> m1 + m2).get());
+    } finally {
+      server.shutdown();
+      ExecutorUtil.shutdownAndAwaitTermination(executor);
+    }
+  }
+
+
+  @Test
+  public void testBuildCoreName() throws IOException, InterruptedException, KeeperException {
+    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
+    ZkTestServer server = new ZkTestServer(zkDir);
+    server.run();
+    try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
+      zkClient.makePath("/", true);
+      // TODO: fix this to be independent of ZK
+      ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
+      Map<String, Slice> slices = new HashMap<>();
+      slices.put("shard1", new Slice("shard1", new HashMap<>(), null));
+      slices.put("shard2", new Slice("shard2", new HashMap<>(), null));
+
+      DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
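+      // expected pattern: <collection>_<shard>_replica_<type letter><counter> (n = NRT, p = PULL, t = TLOG)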
+      assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));
+      assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildSolrCoreName(stateManager, docCollection, "shard2", Replica.Type.PULL));
+    } finally {
+      server.shutdown();
+    }
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
new file mode 100644
index 0000000..c084412
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionReloadTest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.util.RetryUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Verifies cluster state remains consistent after collection reload.
+ */
+@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
+public class CollectionReloadTest extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(1)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
+  
+  @Test
+  public void testReloadedLeaderStateAfterZkSessionLoss() throws Exception {
+
+    log.info("testReloadedLeaderStateAfterZkSessionLoss initialized OK ... running test logic");
+
+    final String testCollectionName = "c8n_1x1";
+    CollectionAdminRequest.createCollection(testCollectionName, "conf", 1, 1)
+        .process(cluster.getSolrClient());
+
+    Replica leader
+        = cluster.getSolrClient().getZkStateReader().getLeaderRetry(testCollectionName, "shard1", DEFAULT_TIMEOUT);
+
+    long coreStartTime = getCoreStatus(leader).getCoreStartTime().getTime();
+    CollectionAdminRequest.reloadCollection(testCollectionName).process(cluster.getSolrClient());
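+    // the reload is asynchronous per core; poll until the core start time advances past its pre-reload value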
+
+    RetryUtil.retryUntil("Timed out waiting for core to reload", 30, 1000, TimeUnit.MILLISECONDS, () -> {
+      long restartTime = 0;
+      try {
+        restartTime = getCoreStatus(leader).getCoreStartTime().getTime();
+      } catch (Exception e) {
+        log.warn("Exception getting core start time: {}", e.getMessage());
+        return false;
+      }
+      return restartTime > coreStartTime;
+    });
+
+    final int initialStateVersion = getCollectionState(testCollectionName).getZNodeVersion();
+
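+    // expire the leader's ZK session; the core must re-register as ACTIVE and bump the collection znode version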
+    cluster.expireZkSession(cluster.getReplicaJetty(leader));
+
+    waitForState("Timed out waiting for core to re-register as ACTIVE after session expiry", testCollectionName, (n, c) -> {
+      log.info("Collection state: {}", c.toString());
+      Replica expiredReplica = c.getReplica(leader.getName());
+      return expiredReplica.getState() == Replica.State.ACTIVE && c.getZNodeVersion() > initialStateVersion;
+    });
+
+    log.info("testReloadedLeaderStateAfterZkSessionLoss succeeded ... shutting down now!");
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1c6cc20e/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
new file mode 100644
index 0000000..f68fa9e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(3)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
+
+  @Before
+  public void deleteCollections() throws Exception {
+    cluster.deleteAllCollections();
+  }
+
+  @Test
+  public void testAddTooManyReplicas() throws Exception {
+    final String collectionName = "TooManyReplicasInSeveralFlavors";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setMaxShardsPerNode(1)
+        .process(cluster.getSolrClient());
+
+    // I have two replicas, one for each shard
+
+    // Curiously, I should be able to add a bunch of replicas if I specify the node, even more than maxShardsPerNode.
+    // Just get the first node any way we can, to use for the "node" parameter.
+    String nodeName = getAllNodeNames(collectionName).get(0);
+
+    // Add a replica using the "node" parameter (no "too many replicas check")
+    // this node should have 2 replicas on it
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(nodeName)
+        .process(cluster.getSolrClient());
+
+    // Three replicas so far, should be able to create another one "normally"
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .process(cluster.getSolrClient());
+
+    // This one should fail though, no "node" parameter specified
+    Exception e = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+          .process(cluster.getSolrClient());
+    });
+
+    assertTrue("Should have gotten the right error message back",
+          e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // Oddly, this next one should succeed just because setting property.name skips the check for nodes being "full up".
+    // TODO: Isn't this a bug?
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty("name", "bogus2")
+        .setNode(nodeName)
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    Slice slice = collectionState.getSlice("shard1");
+    Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
+    assertNotNull("Should have found a replica named 'bogus2'", replica);
+    assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
+
+    // Shard1 should have 4 replicas
+    assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
+
+    // And let's fail one more time, to ensure the math doesn't do weird stuff when we have more replicas
+    // than simple calcs would indicate.
+    Exception e2 = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+          .process(cluster.getSolrClient());
+    });
+
+    assertTrue("Should have gotten the right error message back",
+        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
+    waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
+      for (Replica r : c.getReplicas()) {
+        if (r.getState() != Replica.State.ACTIVE)
+          return false;
+      }
+      return true;
+    });
+  }
+
+  @Test
+  public void testAddShard() throws Exception {
+
+    String collectionName = "TooManyReplicasWhenAddingShards";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
+        .setMaxShardsPerNode(2)
+        .process(cluster.getSolrClient());
+
+    // We have three nodes and maxShardsPerNode is set to 2, so the cluster holds at most six replicas. We should
+    // be able to add two shards, each with two replicas, but fail on the third.
+    CollectionAdminRequest.createShard(collectionName, "shard1")
+        .process(cluster.getSolrClient());
+
+    // Add a second shard; this brings us to six replicas, filling the three-node maxShardsPerNode budget
+    CollectionAdminRequest.createShard(collectionName, "shard2")
+        .process(cluster.getSolrClient());
+
+    // Now fail to add the third as it should exceed maxShardsPerNode
+    Exception e = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createShard(collectionName, "shard3")
+          .process(cluster.getSolrClient());
+    });
+    assertTrue("Should have gotten the right error message back",
+        e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
+    List<String> nodes = getAllNodeNames(collectionName);
+
+    CollectionAdminRequest.createShard(collectionName, "shard4")
+        .setNodeSet(StringUtils.join(nodes, ","))
+        .process(cluster.getSolrClient());
+
+    // And just for yucks, ensure we fail the "regular" one again.
+    Exception e2 = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createShard(collectionName, "shard5")
+          .process(cluster.getSolrClient());
+    });
+    assertTrue("Should have gotten the right error message back",
+        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // And finally, ensure that all the replicas we expect are there. We should have shards shardstart, 1, 2 and 4,
+    // each with exactly two replicas.
+    waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
+      return DocCollection.isFullyActive(n, c, 4, 2);
+    });
+    Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
+    assertEquals("There should be exaclty four slices", slices.size(), 4);
+    assertNotNull("shardstart should exist", slices.get("shardstart"));
+    assertNotNull("shard1 should exist", slices.get("shard1"));
+    assertNotNull("shard2 should exist", slices.get("shard2"));
+    assertNotNull("shard4 should exist", slices.get("shard4"));
+    assertEquals("Shardstart should have exactly 2 replicas", 2, slices.get("shardstart").getReplicas().size());
+    assertEquals("Shard1 should have exactly 2 replicas", 2, slices.get("shard1").getReplicas().size());
+    assertEquals("Shard2 should have exactly 2 replicas", 2, slices.get("shard2").getReplicas().size());
+    assertEquals("Shard4 should have exactly 2 replicas", 2, slices.get("shard4").getReplicas().size());
+  }
+
+  @Test
+  public void testDownedShards() throws Exception {
+    String collectionName = "TooManyReplicasWhenAddingDownedNode";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
+        .setMaxShardsPerNode(2)
+        .process(cluster.getSolrClient());
+
+    // Shut down a Jetty, I really don't care which
+    JettySolrRunner jetty = cluster.getRandomJetty(random());
+    String deadNode = jetty.getBaseUrl().toString();
+    cluster.stopJettySolrRunner(jetty);
+
+    try {
+
+      // Adding a replica on a dead node should fail
+      Exception e1 = expectThrows(Exception.class, () -> {
+        CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
+            .setNode(deadNode)
+            .process(cluster.getSolrClient());
+      });
+      assertTrue("Should have gotten a message about shard not currently active: " + e1.toString(),
+          e1.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
+
+      // Should also die if we just add a shard
+      Exception e2 = expectThrows(Exception.class, () -> {
+        CollectionAdminRequest.createShard(collectionName, "shard1")
+            .setNodeSet(deadNode)
+            .process(cluster.getSolrClient());
+      });
+
+      assertTrue("Should have gotten a message about shard not currently active: " + e2.toString(),
+          e2.toString().contains("At least one of the node(s) specified [" + deadNode + "] are not currently active in"));
+    } finally {
+      cluster.startJettySolrRunner(jetty);
+    }
+  }
+
+  private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
+    DocCollection state = getCollectionState(collectionName);
+    return state.getReplicas().stream().map(Replica::getNodeName).distinct().collect(Collectors.toList());
+  }
+
+}