Posted to commits@lucene.apache.org by da...@apache.org on 2018/01/23 10:30:40 UTC

[10/41] lucene-solr:jira/solr-11702: SOLR-11817: Move Collections API classes to their own package

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
new file mode 100644
index 0000000..840f774
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionsAPIViaSolrCloudCluster.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test of the Collections API with the MiniSolrCloudCluster.
+ */
+@LuceneTestCase.Slow
+public class TestCollectionsAPIViaSolrCloudCluster extends SolrCloudTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static final int numShards = 2;
+  private static final int numReplicas = 2;
+  private static final int maxShardsPerNode = 1;
+  private static final int nodeCount = 5;
+  private static final String configName = "solrCloudCollectionConfig";
+  private static final Map<String,String> collectionProperties  // ensure indexes survive core shutdown
+      = Collections.singletonMap("solr.directoryFactory", "solr.StandardDirectoryFactory");
+
+  @Override
+  public void setUp() throws Exception {
+    configureCluster(nodeCount).addConfig(configName, configset("cloud-minimal")).configure();
+    super.setUp();
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    cluster.shutdown();
+    super.tearDown();
+  }
+
+  private void createCollection(String collectionName, String createNodeSet) throws Exception {
+    if (random().nextBoolean()) { // process asynchronously
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .setCreateNodeSet(createNodeSet)
+          .setProperties(collectionProperties)
+          .processAndWait(cluster.getSolrClient(), 30);
+    }
+    else {
+      CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
+          .setMaxShardsPerNode(maxShardsPerNode)
+          .setCreateNodeSet(createNodeSet)
+          .setProperties(collectionProperties)
+          .process(cluster.getSolrClient());
+    }
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish
+        (collectionName, cluster.getSolrClient().getZkStateReader(), true, true, 330);
+  }
+
+  @Test
+  public void testCollectionCreateSearchDelete() throws Exception {
+    final CloudSolrClient client = cluster.getSolrClient();
+    final String collectionName = "testcollection";
+
+    assertNotNull(cluster.getZkServer());
+    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
+    assertEquals(nodeCount, jettys.size());
+    for (JettySolrRunner jetty : jettys) {
+      assertTrue(jetty.isRunning());
+    }
+
+    // shut down a server
+    JettySolrRunner stoppedServer = cluster.stopJettySolrRunner(0);
+    assertTrue(stoppedServer.isStopped());
+    assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
+
+    // create a server
+    JettySolrRunner startedServer = cluster.startJettySolrRunner();
+    assertTrue(startedServer.isRunning());
+    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
+
+    // create collection
+    createCollection(collectionName, null);
+
+    // modify/query collection
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
+    assertEquals(1, rsp.getResults().getNumFound());
+
+    // remove a server not hosting any replicas
+    ZkStateReader zkStateReader = client.getZkStateReader();
+    zkStateReader.forceUpdateCollection(collectionName);
+    ClusterState clusterState = zkStateReader.getClusterState();
+    Map<String,JettySolrRunner> jettyMap = new HashMap<>();
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
+      jettyMap.put(key, jetty);
+    }
+    Collection<Slice> slices = clusterState.getCollection(collectionName).getSlices();
+    // remove every server that hosts a replica; what remains are replica-free nodes.
+    // Node names use "_solr" where the jetty base URL ends in "/solr", hence the replace() below.
+    for (Slice slice : slices) {
+      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
+      for (Replica replica : slice.getReplicas()) {
+        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
+      }
+    }
+    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
+    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
+    jettys = cluster.getJettySolrRunners();
+    for (int i = 0; i < jettys.size(); ++i) {
+      if (jettys.get(i).equals(jettyToStop)) {
+        cluster.stopJettySolrRunner(i);
+        assertEquals(nodeCount - 1, cluster.getJettySolrRunners().size());
+      }
+    }
+
+    // restart the stopped server (to restore the original node count)
+    startedServer = cluster.startJettySolrRunner(jettyToStop);
+    assertTrue(startedServer.isRunning());
+    assertEquals(nodeCount, cluster.getJettySolrRunners().size());
+
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
+
+    // create it again
+    createCollection(collectionName, null);
+
+    // check that there's no left-over state
+    assertEquals(0, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+
+    // modify/query collection
+    new UpdateRequest().add("id", "1").commit(client, collectionName);
+    assertEquals(1, client.query(collectionName, new SolrQuery("*:*")).getResults().getNumFound());
+  }
+
+  @Test
+  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
+
+    final String collectionName = "testSolrCloudCollectionWithoutCores";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    assertNotNull(cluster.getZkServer());
+    assertFalse(cluster.getJettySolrRunners().isEmpty());
+
+    // create collection
+    createCollection(collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY);
+
+    // check the collection's corelessness
+    int coreCount = 0;
+    DocCollection docCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
+    for (Map.Entry<String,Slice> entry : docCollection.getSlicesMap().entrySet()) {
+      coreCount += entry.getValue().getReplicasMap().entrySet().size();
+    }
+    assertEquals(0, coreCount);
+
+    // delete the collection
+    CollectionAdminRequest.deleteCollection(collectionName).process(client);
+    AbstractDistribZkTestBase.waitForCollectionToDisappear
+        (collectionName, client.getZkStateReader(), true, true, 330);
+  }
+
+  @Test
+  public void testStopAllStartAll() throws Exception {
+
+    final String collectionName = "testStopAllStartAllCollection";
+    final CloudSolrClient client = cluster.getSolrClient();
+
+    assertNotNull(cluster.getZkServer());
+    List<JettySolrRunner> jettys = cluster.getJettySolrRunners();
+    assertEquals(nodeCount, jettys.size());
+    for (JettySolrRunner jetty : jettys) {
+      assertTrue(jetty.isRunning());
+    }
+
+    final SolrQuery query = new SolrQuery("*:*");
+    final SolrInputDocument doc = new SolrInputDocument();
+
+    // create collection
+    createCollection(collectionName, null);
+
+    ZkStateReader zkStateReader = client.getZkStateReader();
+
+    // modify collection
+    final int numDocs = 1 + random().nextInt(10);
+    for (int ii = 1; ii <= numDocs; ++ii) {
+      doc.setField("id", ""+ii);
+      client.add(collectionName, doc);
+      if (ii*2 == numDocs) client.commit(collectionName);
+    }
+    client.commit(collectionName);
+
+    // query collection
+    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
+
+    // the test itself
+    zkStateReader.forceUpdateCollection(collectionName);
+    final ClusterState clusterState = zkStateReader.getClusterState();
+
+    final Set<Integer> leaderIndices = new HashSet<>();
+    final Set<Integer> followerIndices = new HashSet<>();
+    {
+      final Map<String,Boolean> shardLeaderMap = new HashMap<>();
+      for (final Slice slice : clusterState.getCollection(collectionName).getSlices()) {
+        for (final Replica replica : slice.getReplicas()) {
+          shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
+        }
+        shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
+      }
+      for (int ii = 0; ii < jettys.size(); ++ii) {
+        final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
+        final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
+        final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
+        if (Boolean.TRUE.equals(isLeader)) {
+          leaderIndices.add(ii);
+        } else if (Boolean.FALSE.equals(isLeader)) {
+          followerIndices.add(ii);
+        } // else neither leader nor follower i.e. node without a replica (for our collection)
+      }
+    }
+    final List<Integer> leaderIndicesList = new ArrayList<>(leaderIndices);
+    final List<Integer> followerIndicesList = new ArrayList<>(followerIndices);
+
+    // first stop the followers (in no particular order)
+    Collections.shuffle(followerIndicesList, random());
+    for (Integer ii : followerIndicesList) {
+      if (!leaderIndices.contains(ii)) {
+        cluster.stopJettySolrRunner(jettys.get(ii));
+      }
+    }
+
+    // then stop the leaders (again in no particular order)
+    Collections.shuffle(leaderIndicesList, random());
+    for (Integer ii : leaderIndicesList) {
+      cluster.stopJettySolrRunner(jettys.get(ii));
+    }
+
+    // calculate restart order
+    final List<Integer> restartIndicesList = new ArrayList<>();
+    Collections.shuffle(leaderIndicesList, random());
+    restartIndicesList.addAll(leaderIndicesList);
+    Collections.shuffle(followerIndicesList, random());
+    restartIndicesList.addAll(followerIndicesList);
+    if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());
+
+    // and then restart jettys in that order
+    for (Integer ii : restartIndicesList) {
+      final JettySolrRunner jetty = jettys.get(ii);
+      if (!jetty.isRunning()) {
+        cluster.startJettySolrRunner(jetty);
+        assertTrue(jetty.isRunning());
+      }
+    }
+    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
+
+    zkStateReader.forceUpdateCollection(collectionName);
+
+    // re-query collection
+    assertEquals(numDocs, client.query(collectionName, query).getResults().getNumFound());
+  }
+}
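
For context, the createCollection() helper above randomizes between SolrJ's synchronous and asynchronous submission modes. A minimal sketch of the two paths outside the test harness, assuming an already-connected CloudSolrClient (the collection and config names here are placeholders):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;

    public class CreateCollectionSketch {
      static void createBothWays(CloudSolrClient client) throws Exception {
        // Synchronous: process() blocks until the Collections API call returns.
        CollectionAdminRequest.createCollection("myColl", "myConf", 2, 2)
            .process(client);

        // Asynchronous: processAndWait() submits with an auto-generated async id,
        // then polls REQUESTSTATUS for up to 30 seconds.
        RequestStatusState state =
            CollectionAdminRequest.createCollection("myOtherColl", "myConf", 2, 2)
                .processAndWait(client, 30);
        if (state != RequestStatusState.COMPLETED) {
          throw new IllegalStateException("async create did not complete: " + state);
        }
      }
    }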

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
new file mode 100644
index 0000000..58ac17d
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.backup.BackupManager;
+import org.apache.solr.core.backup.repository.HdfsBackupRepository;
+import org.apache.solr.util.BadHdfsThreadsFilter;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_CONF;
+import static org.apache.solr.core.backup.BackupManager.BACKUP_NAME_PROP;
+import static org.apache.solr.core.backup.BackupManager.BACKUP_PROPS_FILE;
+import static org.apache.solr.core.backup.BackupManager.COLLECTION_NAME_PROP;
+import static org.apache.solr.core.backup.BackupManager.CONFIG_STATE_DIR;
+import static org.apache.solr.core.backup.BackupManager.ZK_STATE_DIR;
+
+/**
+ * This class implements the tests for HDFS integration for Solr backup/restore capability.
+ */
+@ThreadLeakFilters(defaultFilters = true, filters = {
+    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
+})
+public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
+  public static final String SOLR_XML = "<solr>\n" +
+      "\n" +
+      "  <str name=\"shareSchema\">${shareSchema:false}</str>\n" +
+      "  <str name=\"configSetBaseDir\">${configSetBaseDir:configsets}</str>\n" +
+      "  <str name=\"coreRootDirectory\">${coreRootDirectory:.}</str>\n" +
+      "\n" +
+      "  <shardHandlerFactory name=\"shardHandlerFactory\" class=\"HttpShardHandlerFactory\">\n" +
+      "    <str name=\"urlScheme\">${urlScheme:}</str>\n" +
+      "    <int name=\"socketTimeout\">${socketTimeout:90000}</int>\n" +
+      "    <int name=\"connTimeout\">${connTimeout:15000}</int>\n" +
+      "  </shardHandlerFactory>\n" +
+      "\n" +
+      "  <solrcloud>\n" +
+      "    <str name=\"host\">127.0.0.1</str>\n" +
+      "    <int name=\"hostPort\">${hostPort:8983}</int>\n" +
+      "    <str name=\"hostContext\">${hostContext:solr}</str>\n" +
+      "    <int name=\"zkClientTimeout\">${solr.zkclienttimeout:30000}</int>\n" +
+      "    <bool name=\"genericCoreNodeNames\">${genericCoreNodeNames:true}</bool>\n" +
+      "    <int name=\"leaderVoteWait\">10000</int>\n" +
+      "    <int name=\"distribUpdateConnTimeout\">${distribUpdateConnTimeout:45000}</int>\n" +
+      "    <int name=\"distribUpdateSoTimeout\">${distribUpdateSoTimeout:340000}</int>\n" +
+      "  </solrcloud>\n" +
+      "  \n" +
+      "  <backup>\n" +
+      "    <repository  name=\"hdfs\" class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\"> \n" +
+      "      <str name=\"location\">${solr.hdfs.default.backup.path}</str>\n" +
+      "      <str name=\"solr.hdfs.home\">${solr.hdfs.home:}</str>\n" +
+      "      <str name=\"solr.hdfs.confdir\">${solr.hdfs.confdir:}</str>\n" +
+      "    </repository>\n" +
+      "  </backup>\n" +
+      "  \n" +
+      "</solr>\n";
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static MiniDFSCluster dfsCluster;
+  private static String hdfsUri;
+  private static FileSystem fs;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
+    hdfsUri = HdfsTestUtil.getURI(dfsCluster);
+    try {
+      URI uri = new URI(hdfsUri);
+      Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      fs = FileSystem.get(uri, conf);
+
+      if (fs instanceof DistributedFileSystem) {
+        // Make sure dfs is not in safe mode
+        while (((DistributedFileSystem) fs).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
+          log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
+          try {
+            Thread.sleep(5000);
+          } catch (InterruptedException e) {
+            Thread.interrupted();
+            // continue
+          }
+        }
+      }
+
+      fs.mkdirs(new org.apache.hadoop.fs.Path("/backup"));
+    } catch (IOException | URISyntaxException e) {
+      throw new RuntimeException(e);
+    }
+
+    System.setProperty("solr.hdfs.default.backup.path", "/backup");
+    System.setProperty("solr.hdfs.home", hdfsUri + "/solr");
+    useFactory("solr.StandardDirectoryFactory");
+
+    configureCluster(NUM_SHARDS)// nodes
+    .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+    .withSolrXml(SOLR_XML)
+    .configure();
+  }
+
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    System.clearProperty("solr.hdfs.home");
+    System.clearProperty("solr.hdfs.default.backup.path");
+    System.clearProperty("test.build.data");
+    System.clearProperty("test.cache.data");
+    IOUtils.closeQuietly(fs);
+    fs = null;
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+
+  @Override
+  public String getCollectionName() {
+    return "hdfsbackuprestore";
+  }
+
+  @Override
+  public String getBackupRepoName() {
+    return "hdfs";
+  }
+
+  @Override
+  public String getBackupLocation() {
+    return null;
+  }
+
+  protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
+    String backupName = "configonlybackup";
+    CloudSolrClient solrClient = cluster.getSolrClient();
+
+    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName)
+        .setRepositoryName(getBackupRepoName())
+        .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
+    backup.process(solrClient);
+
+    Map<String,String> params = new HashMap<>();
+    params.put("location", "/backup");
+    params.put("solr.hdfs.home", hdfsUri + "/solr");
+
+    HdfsBackupRepository repo = new HdfsBackupRepository();
+    repo.init(new NamedList<>(params));
+    BackupManager mgr = new BackupManager(repo, solrClient.getZkStateReader());
+
+    URI baseLoc = repo.createURI("/backup");
+
+    Properties props = mgr.readBackupProperties(baseLoc, backupName);
+    assertNotNull(props);
+    assertEquals(collectionName, props.getProperty(COLLECTION_NAME_PROP));
+    assertEquals(backupName, props.getProperty(BACKUP_NAME_PROP));
+    assertEquals(configName, props.getProperty(COLL_CONF));
+
+    DocCollection collectionState = mgr.readCollectionState(baseLoc, backupName, collectionName);
+    assertNotNull(collectionState);
+    assertEquals(collectionName, collectionState.getName());
+
+    URI configDirLoc = repo.resolve(baseLoc, backupName, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
+    assertTrue(repo.exists(configDirLoc));
+
+    Collection<String> expected = Arrays.asList(BACKUP_PROPS_FILE, ZK_STATE_DIR);
+    URI backupLoc = repo.resolve(baseLoc, backupName);
+    String[] dirs = repo.listAll(backupLoc);
+    for (String d : dirs) {
+      assertTrue(expected.contains(d));
+    }
+  }
+
+}
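
The backup half of what AbstractCloudBackupRestoreTestCase exercises against this repository reduces to a pair of SolrJ calls. A sketch assuming the "hdfs" repository registered in SOLR_XML above (collection and backup names are placeholders):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class HdfsBackupRestoreSketch {
      static void backupAndRestore(CloudSolrClient client) throws Exception {
        // Back up into the named "hdfs" repository; its <backup> element in
        // solr.xml supplies the default location (solr.hdfs.default.backup.path).
        CollectionAdminRequest.backupCollection("myColl", "myBackup")
            .setRepositoryName("hdfs")
            .process(client);

        // Restore the backup into a new collection from the same repository.
        CollectionAdminRequest.restoreCollection("myRestoredColl", "myBackup")
            .setRepositoryName("hdfs")
            .process(client);
      }
    }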

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
new file mode 100644
index 0000000..587b9b1
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestLocalFSCloudBackupRestore.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import org.junit.BeforeClass;
+
+/**
+ * This class implements the tests for local file-system integration for Solr backup/restore capability.
+ * Note that Solr backup/restore still requires a "shared" file-system; it's just that in this case
+ * such a file-system is exposed via the local file-system API.
+ */
+public class TestLocalFSCloudBackupRestore extends AbstractCloudBackupRestoreTestCase {
+  private static String backupLocation;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    configureCluster(NUM_SHARDS)// nodes
+        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
+        .configure();
+
+    boolean whitespacesInPath = random().nextBoolean();
+    if (whitespacesInPath) {
+      backupLocation = createTempDir("my backup").toAbsolutePath().toString();
+    } else {
+      backupLocation = createTempDir("mybackup").toAbsolutePath().toString();
+    }
+  }
+
+  @Override
+  public String getCollectionName() {
+    return "backuprestore";
+  }
+
+  @Override
+  public String getBackupRepoName() {
+    return null;
+  }
+
+  @Override
+  public String getBackupLocation() {
+    return backupLocation;
+  }
+}
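
When no repository name is given, as in this test (getBackupRepoName() returns null), the backup target is the explicit location parameter instead. A sketch under that assumption (collection and backup names are placeholders):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class LocalFsBackupSketch {
      static void backupToDir(CloudSolrClient client, String backupLocation) throws Exception {
        // backupLocation plays the role of getBackupLocation() above; the
        // directory must be visible to every node, whitespace in the path included.
        CollectionAdminRequest.backupCollection("backuprestore", "localbackup")
            .setLocation(backupLocation)
            .process(client);
      }
    }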

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
new file mode 100644
index 0000000..d327aec
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestReplicaProperties.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+
+@Slow
+public class TestReplicaProperties extends ReplicaPropertiesBase {
+
+  public static final String COLLECTION_NAME = "testcollection";
+
+  public TestReplicaProperties() {
+    schemaString = "schema15.xml";      // we need a string id
+    sliceCount = 2;
+  }
+
+  @Test
+  @ShardsFixed(num = 4)
+  public void test() throws Exception {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      // Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
+      // shards, replicationfactor, maxreplicaspernode
+      int shards = random().nextInt(7);
+      if (shards < 2) shards = 2;
+      int rFactor = random().nextInt(4);
+      if (rFactor < 2) rFactor = 2;
+      createCollection(null, COLLECTION_NAME, shards, rFactor, shards * rFactor + 1, client, null, "conf1");
+    }
+
+    waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
+    waitForRecoveriesToFinish(COLLECTION_NAME, false);
+
+    listCollection();
+
+    clusterAssignPropertyTest();
+  }
+
+  private void listCollection() throws IOException, SolrServerException {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set("action", CollectionParams.CollectionAction.LIST.toString());
+      SolrRequest request = new QueryRequest(params);
+      request.setPath("/admin/collections");
+
+      NamedList<Object> rsp = client.request(request);
+      List<String> collections = (List<String>) rsp.get("collections");
+      assertTrue("control_collection was not found in list", collections.contains("control_collection"));
+      assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
+      assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
+    }
+  }
+
+
+  private void clusterAssignPropertyTest() throws Exception {
+
+    try (CloudSolrClient client = createCloudClient(null)) {
+      client.connect();
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "property", "preferredLeader");
+      } catch (SolrException se) {
+        assertTrue("Should have seen missing required parameter 'collection' error",
+            se.getMessage().contains("Missing required parameter: collection"));
+      }
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "preferredLeader");
+
+      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "preferredleader");
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "property.newunique",
+          "shardUnique", "true");
+      verifyUniqueAcrossCollection(client, COLLECTION_NAME, "property.newunique");
+
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "collection", COLLECTION_NAME,
+            "property", "whatever",
+            "shardUnique", "false");
+        fail("Should have thrown an exception here.");
+      } catch (SolrException se) {
+        assertTrue("Should have gotten a specific error message here",
+            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the " +
+                "property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
+      }
+      // Should be able to set non-unique-per-slice values in several places.
+      Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
+      List<String> sliceList = new ArrayList<>(slices.keySet());
+      String c1_s1 = sliceList.get(0);
+      List<String> replicasList = new ArrayList<>(slices.get(c1_s1).getReplicasMap().keySet());
+      String c1_s1_r1 = replicasList.get(0);
+      String c1_s1_r2 = replicasList.get(1);
+
+      addProperty(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r1,
+          "property", "bogus1",
+          "property.value", "true");
+
+      addProperty(client,
+          "action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString(),
+          "collection", COLLECTION_NAME,
+          "shard", c1_s1,
+          "replica", c1_s1_r2,
+          "property", "property.bogus1",
+          "property.value", "whatever");
+
+      try {
+        doPropertyAction(client,
+            "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+            "collection", COLLECTION_NAME,
+            "property", "bogus1",
+            "shardUnique", "false");
+        fail("Should have thrown parameter error here");
+      } catch (SolrException se) {
+        assertTrue("Should have caught specific exception ",
+            se.getMessage().contains("Balancing properties amongst replicas in a slice requires that the property be " +
+                "pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'"));
+      }
+
+      // Should have no effect despite the "shardUnique" param being set.
+
+      doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString(),
+          "collection", COLLECTION_NAME,
+          "property", "property.bogus1",
+          "shardUnique", "true");
+
+      verifyPropertyVal(client, COLLECTION_NAME,
+          c1_s1_r1, "bogus1", "true");
+      verifyPropertyVal(client, COLLECTION_NAME,
+          c1_s1_r2, "property.bogus1", "whatever");
+
+      // At this point we've assigned a preferred leader. Make it happen and check that all the nodes that are
+      // leaders _also_ have the preferredLeader property set.
+
+
+      NamedList<Object> res = doPropertyAction(client,
+          "action", CollectionParams.CollectionAction.REBALANCELEADERS.toString(),
+          "collection", COLLECTION_NAME);
+
+      verifyLeaderAssignment(client, COLLECTION_NAME);
+
+    }
+  }
+
+  private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
+      throws InterruptedException, KeeperException {
+    String lastFailMsg = "";
+    for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
+      lastFailMsg = "";
+      ClusterState clusterState = client.getZkStateReader().getClusterState();
+      for (Slice slice : clusterState.getCollection(collectionName).getSlices()) {
+        Boolean foundLeader = false;
+        Boolean foundPreferred = false;
+        for (Replica replica : slice.getReplicas()) {
+          Boolean isLeader = replica.getBool("leader", false);
+          Boolean isPreferred = replica.getBool("property.preferredleader", false);
+          if (isLeader != isPreferred) {
+            lastFailMsg = "Replica should NOT have preferredLeader != leader. Preferred: " + isPreferred.toString() +
+                " leader is " + isLeader.toString();
+          }
+          if (foundLeader && isLeader) {
+            lastFailMsg = "There should only be a single leader in _any_ shard! Replica " + replica.getName() +
+                " is the second leader in slice " + slice.getName();
+          }
+          if (foundPreferred && isPreferred) {
+            lastFailMsg = "There should only be a single preferredLeader in _any_ shard! Replica " + replica.getName() +
+                " is the second preferredLeader in slice " + slice.getName();
+          }
+          foundLeader = foundLeader || isLeader;
+          foundPreferred = foundPreferred || isPreferred;
+        }
+      }
+      if (lastFailMsg.length() == 0) return;
+      Thread.sleep(100);
+    }
+    fail(lastFailMsg);
+  }
+
+  private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
+    assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    for (int idx = 0; idx < paramsIn.length; idx += 2) {
+      params.set(paramsIn[idx], paramsIn[idx + 1]);
+    }
+    QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    client.request(request);
+
+  }
+}
+
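
The doPropertyAction()/addProperty() helpers above all wrap the same raw Collections API call shape: arbitrary params posted to /admin/collections via a QueryRequest. A minimal standalone sketch of that pattern for BALANCESHARDUNIQUE (the collection name is a placeholder):

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.common.params.CollectionParams;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;

    public class BalanceShardUniqueSketch {
      static NamedList<Object> balancePreferredLeader(CloudSolrClient client) throws Exception {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString());
        params.set("collection", "testcollection");
        params.set("property", "preferredLeader"); // a pre-defined unique property
        QueryRequest request = new QueryRequest(params);
        request.setPath("/admin/collections");
        return client.request(request);
      }
    }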

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
new file mode 100644
index 0000000..3d32d6c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestRequestStatusCollectionAPI.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud.api.collections;
+
+import java.io.IOException;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.junit.Test;
+
+public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
+
+  public static final int MAX_WAIT_TIMEOUT_SECONDS = 90;
+
+  public TestRequestStatusCollectionAPI() {
+    schemaString = "schema15.xml";      // we need a string id
+  }
+
+  @Test
+  public void test() throws Exception {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection2");
+    params.set("numShards", 2);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1000");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    // Check for the request to be completed.
+
+    NamedList r = null;
+    NamedList status = null;
+    String message = null;
+
+    params = new ModifiableSolrParams();
+
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1000");
+
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("found [1000] in completed tasks", message);
+
+    // Check for a random (hopefully non-existent) request id
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "9999999");
+    try {
+      r = sendRequest(params);
+      status = (NamedList) r.get("status");
+      message = (String) status.get("msg");
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("Did not find [9999999] in any tasks queue", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.SPLITSHARD.toString());
+    params.set("collection", "collection2");
+    params.set("shard", "shard1");
+    params.set(CommonAdminParams.ASYNC, "1001");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    // Check for the request to be completed.
+    params = new ModifiableSolrParams();
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1001");
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("found [1001] in completed tasks", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection2");
+    params.set("numShards", 2);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1002");
+    try {
+      sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    params = new ModifiableSolrParams();
+
+    params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
+    params.set(OverseerCollectionMessageHandler.REQUESTID, "1002");
+
+    try {
+      message = sendStatusRequestWithRetry(params, MAX_WAIT_TIMEOUT_SECONDS);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+
+    assertEquals("found [1002] in failed tasks", message);
+
+    params = new ModifiableSolrParams();
+    params.set(CollectionParams.ACTION, CollectionParams.CollectionAction.CREATE.toString());
+    params.set("name", "collection3");
+    params.set("numShards", 1);
+    params.set("replicationFactor", 1);
+    params.set("maxShardsPerNode", 100);
+    params.set("collection.configName", "conf1");
+    params.set(CommonAdminParams.ASYNC, "1002");
+    try {
+      r = sendRequest(params);
+    } catch (SolrServerException | IOException e) {
+      e.printStackTrace();
+    }
+
+    assertEquals("Task with the same requestid already exists.", r.get("error"));
+  }
+
+  /**
+   * Helper method to send a status request with a specific retry limit and return
+   * the message (or null) from the status response.
+   */
+  private String sendStatusRequestWithRetry(ModifiableSolrParams params, int maxCounter)
+      throws SolrServerException, IOException{
+    String message = null;
+    while (maxCounter-- > 0) {
+      final NamedList r = sendRequest(params);
+      final NamedList status = (NamedList) r.get("status");
+      final RequestStatusState state = RequestStatusState.fromKey((String) status.get("state"));
+      message = (String) status.get("msg");
+
+      if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
+        return message;
+      }
+
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        // Ignore the interruption and keep polling until maxCounter runs out.
+      }
+
+    }
+    // The request never reached a terminal state; return the last message seen.
+    return message;
+  }
+
+  protected NamedList sendRequest(ModifiableSolrParams params) throws SolrServerException, IOException {
+    SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.getSolrClient()).getBaseURL();
+    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
+
+    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 15000)) {
+      return baseServer.request(request);
+    }
+
+  }
+}
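
The retry helper above is the general recipe for any async Collections API call: poll REQUESTSTATUS until a terminal state. A self-contained sketch of that loop, mirroring sendStatusRequestWithRetry() (the requestid value is whatever ASYNC id was submitted):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.client.solrj.response.RequestStatusState;
    import org.apache.solr.common.params.CollectionParams;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;

    public class RequestStatusPollSketch {
      static RequestStatusState poll(SolrClient client, String requestId, int maxSeconds)
          throws Exception {
        RequestStatusState state = RequestStatusState.SUBMITTED;
        for (int i = 0; i < maxSeconds; i++) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("action", CollectionParams.CollectionAction.REQUESTSTATUS.toString());
          params.set("requestid", requestId);
          QueryRequest request = new QueryRequest(params);
          request.setPath("/admin/collections");
          NamedList<?> status = (NamedList<?>) client.request(request).get("status");
          state = RequestStatusState.fromKey((String) status.get("state"));
          if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
            return state;
          }
          Thread.sleep(1000); // one-second poll interval, as in the test
        }
        return state; // last observed (non-terminal) state on timeout
      }
    }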

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
index a842a87..2310a14 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
@@ -80,7 +80,7 @@ import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.REQUESTID;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID;
 
 /**
  * Simulated {@link SolrCloudManager}.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index 22f9fb9..86de8ff 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -47,11 +47,11 @@ import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.cloud.ActionThrottle;
-import org.apache.solr.cloud.AddReplicaCmd;
-import org.apache.solr.cloud.Assign;
-import org.apache.solr.cloud.CreateCollectionCmd;
-import org.apache.solr.cloud.CreateShardCmd;
-import org.apache.solr.cloud.SplitShardCmd;
+import org.apache.solr.cloud.api.collections.AddReplicaCmd;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cloud.api.collections.CreateCollectionCmd;
+import org.apache.solr.cloud.api.collections.CreateShardCmd;
+import org.apache.solr.cloud.api.collections.SplitShardCmd;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
 import org.apache.solr.cloud.overseer.ZkWriteCommand;
@@ -730,7 +730,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   }
 
   /**
-   * Move replica. This uses a similar algorithm as {@link org.apache.solr.cloud.MoveReplicaCmd#moveNormalReplica(ClusterState, NamedList, String, String, DocCollection, Replica, Slice, int, boolean)}.
+   * Move replica. This uses a similar algorithm as {@link org.apache.solr.cloud.api.collections.MoveReplicaCmd#moveNormalReplica(ClusterState, NamedList, String, String, DocCollection, Replica, Slice, int, boolean)}.
    * @param message operation details
    * @param results operation results.
    */
@@ -909,7 +909,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   }
 
   /**
-   * Delete a shard. This uses a similar algorithm as {@link org.apache.solr.cloud.DeleteShardCmd}
+   * Delete a shard. This uses a similar algorithm as {@link org.apache.solr.cloud.api.collections.DeleteShardCmd}
    * @param message operation details
    * @param results operation results
    */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
index c242809..cd45c15 100644
--- a/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/cdcr/BaseCdcrDistributedZkTest.java
@@ -43,7 +43,7 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.AbstractZkTestCase;
 import org.apache.solr.cloud.ChaosMonkey;
-import org.apache.solr.cloud.OverseerCollectionMessageHandler;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -74,9 +74,8 @@ import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.NUM_SLICES;
 import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_PROPS;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
@@ -448,9 +447,9 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
     for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
       if (entry.getValue() != null) params.set(entry.getKey(), String.valueOf(entry.getValue()));
     }
-    Integer numShards = (Integer) collectionProps.get(NUM_SLICES);
+    Integer numShards = (Integer) collectionProps.get(OverseerCollectionMessageHandler.NUM_SLICES);
     if (numShards == null) {
-      String shardNames = (String) collectionProps.get(SHARDS_PROP);
+      String shardNames = (String) collectionProps.get(OverseerCollectionMessageHandler.SHARDS_PROP);
       numShards = StrUtils.splitSmart(shardNames, ',').size();
     }
     Integer replicationFactor = (Integer) collectionProps.get(REPLICATION_FACTOR);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
deleted file mode 100644
index 58d499b..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.hdfs;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.carrotsearch.randomizedtesting.annotations.Nightly;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import com.codahale.metrics.Counter;
-import com.codahale.metrics.Metric;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.CoreStatus;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.cloud.CollectionsAPIDistributedZkTest;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkConfigManager;
-import org.apache.solr.metrics.SolrMetricManager;
-import org.apache.solr.util.BadHdfsThreadsFilter;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-@Slow
-@Nightly
-@ThreadLeakFilters(defaultFilters = true, filters = {
-    BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
-})
-public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
-
-  private static MiniDFSCluster dfsCluster;
-
-  @BeforeClass
-  public static void setupClass() throws Exception {
-    System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
-    System.setProperty("tests.hdfs.numdatanodes", "1");
-   
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
-
-    ZkConfigManager configManager = new ZkConfigManager(zkClient());
-    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
-    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf2");
-
-    System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
-  }
-
-  @AfterClass
-  public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
-    System.clearProperty("solr.hdfs.blockcache.blocksperbank");
-    System.clearProperty("tests.hdfs.numdatanodes");
-    System.clearProperty("solr.hdfs.home");
-  }
-
-  @Test
-  public void moveReplicaTest() throws Exception {
-    cluster.waitForAllNodes(5000);
-    String coll = "movereplicatest_coll";
-
-    CloudSolrClient cloudClient = cluster.getSolrClient();
-
-    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
-    create.setMaxShardsPerNode(2);
-    cloudClient.request(create);
-
-    for (int i = 0; i < 10; i++) {
-      cloudClient.add(coll, sdoc("id",String.valueOf(i)));
-      cloudClient.commit(coll);
-    }
-
-    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
-    Collections.shuffle(slices, random());
-    Slice slice = null;
-    Replica replica = null;
-    for (Slice s : slices) {
-      slice = s;
-      for (Replica r : s.getReplicas()) {
-        if (s.getLeader() != r) {
-          replica = r;
-        }
-      }
-    }
-    String dataDir = getDataDir(replica);
-
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
-    ArrayList<String> l = new ArrayList<>(liveNodes);
-    Collections.shuffle(l, random());
-    String targetNode = null;
-    for (String node : liveNodes) {
-      if (!replica.getNodeName().equals(node)) {
-        targetNode = node;
-        break;
-      }
-    }
-    assertNotNull(targetNode);
-
-    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
-    moveReplica.process(cloudClient);
-
-    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
-    checkNumOfCores(cloudClient, targetNode, 2);
-
-    waitForState("Wait for recovery finish failed",coll, clusterShape(2,2));
-    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
-    boolean found = false;
-    for (Replica newReplica : slice.getReplicas()) {
-      if (getDataDir(newReplica).equals(dataDir)) {
-        found = true;
-      }
-    }
-    assertTrue(found);
-
-    // the data dir is reused, so no index replication requests should have been issued
-    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
-      List<String> registryNames = manager.registryNames().stream()
-          .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
-      for (String registry : registryNames) {
-        Map<String, Metric> metrics = manager.registry(registry).getMetrics();
-        Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
-        if (counter != null) {
-          assertEquals(0, counter.getCount());
-        }
-      }
-    }
-  }
-
-  private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
-    assertEquals(nodeName + " does not have the expected number of cores", expectedCores, getNumOfCores(cloudClient, nodeName));
-  }
-
-  private int getNumOfCores(CloudSolrClient cloudClient, String nodeName) throws IOException, SolrServerException {
-    try (HttpSolrClient coreclient = getHttpSolrClient(cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
-      CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreclient);
-      return status.getCoreStatus().size();
-    }
-  }
-
-  private String getDataDir(Replica replica) throws IOException, SolrServerException {
-    try (HttpSolrClient coreclient = getHttpSolrClient(replica.getBaseUrl())) {
-      CoreStatus status = CoreAdminRequest.getCoreStatus(replica.getCoreName(), coreclient);
-      return status.getDataDirectory();
-    }
-  }
-}
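
For context, the test removed above exercises the MoveReplica Collections API
call against HDFS-backed indexes. A minimal standalone sketch of that call,
assuming a SolrCloud reachable at ZooKeeper host localhost:9983; the collection,
replica, and node names below are illustrative, not taken from this patch:

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class MoveReplicaSketch {
      public static void main(String[] args) throws Exception {
        // Builder-style client construction as of Solr 7.x.
        try (CloudSolrClient client =
                 new CloudSolrClient.Builder().withZkHost("localhost:9983").build()) {
          // Move one named replica of the collection onto the target node.
          CollectionAdminRequest.MoveReplica move = new CollectionAdminRequest.MoveReplica(
              "movereplicatest_coll", "core_node3", "127.0.0.1:8984_solr");
          move.process(client);
        }
      }
    }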

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index 9f0ff20..b031393 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -56,6 +56,7 @@ import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.CoreAdminResponse;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.RequestStatusState;
+import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
@@ -96,9 +97,6 @@ import org.noggit.JSONWriter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
 import static org.apache.solr.common.util.Utils.makeMap;
 
 /**
@@ -174,7 +172,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     }
   }
 
-  static class CloudSolrServerClient {
+  public static class CloudSolrServerClient {
     SolrClient solrClient;
     String shardName;
     int port;
@@ -186,6 +184,10 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
       this.solrClient = client;
     }
 
+    public SolrClient getSolrClient() {
+      return solrClient;
+    }
+
     @Override
     public int hashCode() {
       final int prime = 31;
@@ -1621,9 +1623,9 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
       if(entry.getValue() !=null) params.set(entry.getKey(), String.valueOf(entry.getValue()));
     }
-    Integer numShards = (Integer) collectionProps.get(NUM_SLICES);
+    Integer numShards = (Integer) collectionProps.get(OverseerCollectionMessageHandler.NUM_SLICES);
     if(numShards==null){
-      String shardNames = (String) collectionProps.get(SHARDS_PROP);
+      String shardNames = (String) collectionProps.get(OverseerCollectionMessageHandler.SHARDS_PROP);
       numShards = StrUtils.splitSmart(shardNames,',').size();
     }
     Integer numNrtReplicas = (Integer) collectionProps.get(ZkStateReader.NRT_REPLICAS);
@@ -1685,12 +1687,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     int numTlogReplicas = useTlogReplicas()?replicationFactor:0;
     return createCollection(collectionInfos, collectionName,
         Utils.makeMap(
-        NUM_SLICES, numShards,
-        ZkStateReader.NRT_REPLICAS, numNrtReplicas,
-        ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
-        ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        CREATE_NODE_SET, createNodeSetStr,
-        ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
+            OverseerCollectionMessageHandler.NUM_SLICES, numShards,
+            ZkStateReader.NRT_REPLICAS, numNrtReplicas,
+            ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
+            ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
+            OverseerCollectionMessageHandler.CREATE_NODE_SET, createNodeSetStr,
+            ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
         client, configSetName);
   }
 
@@ -1701,12 +1703,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     int numTlogReplicas = useTlogReplicas()?replicationFactor:0;
     return createCollection(collectionInfos, collectionName,
         Utils.makeMap(
-        NUM_SLICES, numShards,
-        ZkStateReader.NRT_REPLICAS, numNrtReplicas,
-        ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
-        ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        CREATE_NODE_SET, createNodeSetStr,
-        ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
+            OverseerCollectionMessageHandler.NUM_SLICES, numShards,
+            ZkStateReader.NRT_REPLICAS, numNrtReplicas,
+            ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
+            ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
+            OverseerCollectionMessageHandler.CREATE_NODE_SET, createNodeSetStr,
+            ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode),
         client, configName);
   }
 
@@ -1905,7 +1907,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
         ZkStateReader.NRT_REPLICAS, numNrtReplicas,
         ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
         ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
-        NUM_SLICES, numShards);
+        OverseerCollectionMessageHandler.NUM_SLICES, numShards);
     Map<String,List<Integer>> collectionInfos = new HashMap<>();
     createCollection(collectionInfos, collName, props, client);
   }
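
The hunks above replace static imports of the collection-handling constants with
references qualified by the class's new package, org.apache.solr.cloud.api.collections.
A minimal sketch of assembling collection properties in the same style; the shard
count, replica counts, and node set below are assumed values for illustration:

    import java.util.Map;

    import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
    import org.apache.solr.common.cloud.ZkStateReader;
    import org.apache.solr.common.util.Utils;

    public class CollectionPropsSketch {
      public static void main(String[] args) {
        // Keys are the constants the test base now references by class name.
        Map<String, Object> props = Utils.makeMap(
            OverseerCollectionMessageHandler.NUM_SLICES, 2,
            ZkStateReader.NRT_REPLICAS, 2,
            ZkStateReader.MAX_SHARDS_PER_NODE, 1,
            OverseerCollectionMessageHandler.CREATE_NODE_SET, "127.0.0.1:8983_solr");
        System.out.println(props);
      }
    }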

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index 7f4f0cb..360632c 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -419,7 +419,7 @@ public class MiniSolrCloudCluster {
     return jetty;
   }
 
-  protected JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
+  public JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
     jetty.stop();
     return jetty;
   }
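
Widening stopJettySolrRunner from protected to public lets test code outside the
org.apache.solr.cloud package, such as the relocated collections API tests, stop
cluster nodes directly. A small sketch under that assumption; the helper name and
the choice of the first node are illustrative only:

    import org.apache.solr.client.solrj.embedded.JettySolrRunner;
    import org.apache.solr.cloud.MiniSolrCloudCluster;

    public class NodeBounceSketch {
      // Stop and restart one node of the cluster, returning its runner.
      public static JettySolrRunner bounceFirstNode(MiniSolrCloudCluster cluster) throws Exception {
        JettySolrRunner jetty = cluster.getJettySolrRunners().get(0);
        cluster.stopJettySolrRunner(jetty); // callable from other packages after this change
        jetty.start();
        return jetty;
      }
    }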

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a3c4f738/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java b/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
index 90eea94..ac1c5e1 100644
--- a/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
+++ b/solr/test-framework/src/test/org/apache/solr/cloud/MiniSolrCloudClusterTest.java
@@ -73,7 +73,7 @@ public class MiniSolrCloudClusterTest extends LuceneTestCase {
 
     MiniSolrCloudCluster cluster = new MiniSolrCloudCluster(3, createTempDir(), JettyConfig.builder().build()) {
       @Override
-      protected JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
+      public JettySolrRunner stopJettySolrRunner(JettySolrRunner jetty) throws Exception {
         JettySolrRunner j = super.stopJettySolrRunner(jetty);
         if (jettyIndex.incrementAndGet() == 2)
           throw new IOException("Fake IOException on shutdown!");