Posted to commits@lucene.apache.org by sa...@apache.org on 2016/11/02 23:59:08 UTC

[11/50] [abbrv] lucene-solr:apiv2: SOLR-9132: Cut over some collections API and recovery tests

SOLR-9132: Cut over some collections API and recovery tests


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f56d111a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f56d111a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f56d111a

Branch: refs/heads/apiv2
Commit: f56d111adf46e127c62a3fd11fdae9b9725c1024
Parents: c8906b2
Author: Alan Woodward <ro...@apache.org>
Authored: Tue Sep 27 14:03:45 2016 +0100
Committer: Alan Woodward <ro...@apache.org>
Committed: Fri Oct 28 10:33:21 2016 +0100

----------------------------------------------------------------------
 .../client/solrj/embedded/JettySolrRunner.java  |   10 +
 .../configsets/cloud-dynamic/conf/schema.xml    |    2 +
 .../solr/configsets/cloud-hdfs/conf/schema.xml  |   28 +
 .../configsets/cloud-hdfs/conf/solrconfig.xml   |   50 +
 .../cloud-minimal-jmx/conf/schema.xml           |   28 +
 .../cloud-minimal-jmx/conf/solrconfig.xml       |   50 +
 .../cloud/CollectionTooManyReplicasTest.java    |  301 ++--
 .../cloud/CollectionsAPIDistributedZkTest.java  | 1296 +++++-------------
 .../solr/cloud/CreateCollectionCleanupTest.java |    5 +-
 .../apache/solr/cloud/CustomCollectionTest.java |  491 ++-----
 .../apache/solr/cloud/MigrateRouteKeyTest.java  |    1 +
 .../org/apache/solr/cloud/RecoveryZkTest.java   |  166 +--
 .../org/apache/solr/cloud/ShardSplitTest.java   |    6 +-
 .../solr/cloud/TestClusterProperties.java       |   45 +
 .../cloud/TestDeleteCollectionOnDownNodes.java  |   65 +
 .../HdfsCollectionsAPIDistributedZkTest.java    |   33 +-
 .../solr/cloud/hdfs/HdfsRecoveryZkTest.java     |   22 +-
 .../solrj/request/CollectionAdminRequest.java   |   16 +-
 .../client/solrj/request/UpdateRequest.java     |    8 +
 .../org/apache/solr/common/cloud/Slice.java     |   10 +
 .../apache/solr/cloud/MiniSolrCloudCluster.java |   35 +-
 .../apache/solr/cloud/SolrCloudTestCase.java    |    6 +-
 22 files changed, 1042 insertions(+), 1632 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
index a0ca7eb..f4887e6 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@@ -38,6 +38,8 @@ import java.util.Random;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.eclipse.jetty.server.Connector;
@@ -293,6 +295,10 @@ public class JettySolrRunner {
     return getSolrDispatchFilter().getCores();
   }
 
+  public String getNodeName() {
+    return getCoreContainer().getZkController().getNodeName();
+  }
+
   public boolean isRunning() {
     return server.isRunning();
   }
@@ -453,6 +459,10 @@ public class JettySolrRunner {
     }
   }
 
+  public SolrClient newClient() {
+    return new HttpSolrClient.Builder(getBaseUrl().toString()).build();
+  }
+
   public DebugFilter getDebugFilter() {
     return (DebugFilter)debugFilter.getFilter();
   }
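
The two helpers added above give tests a one-line way to identify a Jetty node and to talk to it directly over HTTP. A minimal sketch of how a test might use them; countDocsOnNode is a hypothetical helper and the collection/query are illustrative, not part of this commit:

import java.io.IOException;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;

public class NodeClientSketch {
  // Hypothetical helper: count the docs visible through one node.
  static long countDocsOnNode(JettySolrRunner jetty, String collection)
      throws SolrServerException, IOException {
    // newClient() builds an HttpSolrClient against this Jetty's base URL;
    // close it when done, since each client holds its own connection pool.
    try (SolrClient client = jetty.newClient()) {
      System.out.println("querying node " + jetty.getNodeName());
      return client.query(collection, new SolrQuery("*:*")).getResults().getNumFound();
    }
  }
}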

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml b/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
index 41822a3..af201c0 100644
--- a/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
+++ b/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
@@ -223,6 +223,8 @@
   <!-- points to the root document of a block of nested documents -->
   <field name="_root_" type="string" indexed="true" stored="true"/>
 
+  <field name="_route_" type="string" indexed="true" stored="true" multiValued="false"/>
+
   <field name="multi_int_with_docvals" type="tint" multiValued="true" docValues="true" indexed="false"/>
 
   <dynamicField name="*_coordinate" type="tdouble" indexed="true" stored="false"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/schema.xml b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/schema.xml
new file mode 100644
index 0000000..aab5e81
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/schema.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="minimal" version="1.1">
+  <fieldType name="string" class="solr.StrField"/>
+  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+  <dynamicField name="*" type="string" indexed="true" stored="true"/>
+  <!-- for versioning -->
+  <field name="_version_" type="long" indexed="true" stored="true"/>
+  <field name="_root_" type="int" indexed="true" stored="true" multiValued="false" required="false"/>
+  <field name="id" type="string" indexed="true" stored="true"/>
+  <uniqueKey>id</uniqueKey>
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
new file mode 100644
index 0000000..88290da
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Minimal solrconfig.xml with /select, /admin and /update only -->
+
+<config>
+
+  <directoryFactory name="DirectoryFactory"
+                    class="solr.HdfsDirectoryFactory"/>
+  <indexConfig>
+    <lockType>hdfs</lockType>
+  </indexConfig>
+
+  <schemaFactory class="ClassicIndexSchemaFactory"/>
+
+  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <commitWithin>
+      <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+    </commitWithin>
+    <updateLog></updateLog>
+  </updateHandler>
+
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="indent">true</str>
+      <str name="df">text</str>
+    </lst>
+
+  </requestHandler>
+</config>
+
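
The commitWithin block in this config decides whether commitWithin deadlines are honored with soft or hard commits. On the client side, the deadline rides along with the update request. A minimal sketch; the collection name is illustrative:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class CommitWithinSketch {
  static void addWithDeadline(SolrClient client) throws Exception {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "doc-2");
    UpdateRequest req = new UpdateRequest();
    req.add(doc);
    // ask Solr to make the doc searchable within one second; with
    // solr.commitwithin.softcommit=true above, this is a soft commit
    req.setCommitWithin(1000);
    req.process(client, "testCollection");
  }
}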

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/schema.xml b/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/schema.xml
new file mode 100644
index 0000000..aab5e81
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/schema.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="minimal" version="1.1">
+  <fieldType name="string" class="solr.StrField"/>
+  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+  <dynamicField name="*" type="string" indexed="true" stored="true"/>
+  <!-- for versioning -->
+  <field name="_version_" type="long" indexed="true" stored="true"/>
+  <field name="_root_" type="int" indexed="true" stored="true" multiValued="false" required="false"/>
+  <field name="id" type="string" indexed="true" stored="true"/>
+  <uniqueKey>id</uniqueKey>
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/solrconfig.xml b/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/solrconfig.xml
new file mode 100644
index 0000000..7f27c91
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/cloud-minimal-jmx/conf/solrconfig.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Minimal solrconfig.xml with JMX enabled -->
+
+<config>
+
+  <jmx/>
+
+  <dataDir>${solr.data.dir:}</dataDir>
+
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+  <schemaFactory class="ClassicIndexSchemaFactory"/>
+
+  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <commitWithin>
+      <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+    </commitWithin>
+    <updateLog></updateLog>
+  </updateHandler>
+
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="indent">true</str>
+      <str name="df">text</str>
+    </lst>
+
+  </requestHandler>
+</config>
+
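
The jmx element is what separates this configset from cloud-minimal: it registers Solr's MBeans with the JVM's platform MBeanServer. A minimal sketch of listing those beans from the test JVM; the "solr*" domain pattern is an assumption, since Solr registers cores under domains beginning with "solr":

import java.lang.management.ManagementFactory;
import java.util.Set;

import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxSketch {
  static void dumpSolrMBeans() throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // wildcard over every domain starting with "solr" (an assumption)
    Set<ObjectName> names = server.queryNames(new ObjectName("solr*:*"), null);
    for (ObjectName name : names) {
      System.out.println(name.getCanonicalName());
    }
  }
}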

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f56d111a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
index afc7c48..a1c2175 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionTooManyReplicasTest.java
@@ -16,186 +16,153 @@
  */
 package org.apache.solr.cloud;
 
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
+import java.util.stream.Collectors;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
+import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
 import org.apache.zookeeper.KeeperException;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 @Slow
-public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase {
+public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
 
-  public CollectionTooManyReplicasTest() {
-    sliceCount = 1;
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(3)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }
+
+  @Before
+  public void deleteCollections() throws Exception {
+    cluster.deleteAllCollections();
   }
 
   @Test
-  @ShardsFixed(num = 1)
   public void testAddTooManyReplicas() throws Exception {
-    String collectionName = "TooManyReplicasInSeveralFlavors";
-    CollectionAdminRequest.Create create = new CollectionAdminRequest.Create()
-        .setCollectionName(collectionName)
-        .setNumShards(2)
-        .setReplicationFactor(1)
-        .setMaxShardsPerNode(2)
-        .setStateFormat(2);
+    final String collectionName = "TooManyReplicasInSeveralFlavors";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .setMaxShardsPerNode(1)
+        .process(cluster.getSolrClient());
 
-    CollectionAdminResponse response = create.process(cloudClient);
-    assertEquals(0, response.getStatus());
-    assertTrue(response.isSuccess());
-    // Now I have the fixed Jetty plus the control instnace, I have two replicas, one for each shard
+    // I have two replicas, one for each shard
 
     // Curiously, I should be able to add a bunch of replicas if I specify the node, even more than maxShardsPerNode
     // Just get the first node any way we can.
     // Get a node to use for the "node" parameter.
-
     String nodeName = getAllNodeNames(collectionName).get(0);
+
     // Add a replica using the "node" parameter (no "too many replicas check")
     // this node should have 2 replicas on it
-    CollectionAdminRequest.AddReplica addReplicaNode = new CollectionAdminRequest.AddReplica()
-        .setCollectionName(collectionName)
-        .setShardName("shard1")
-        .setNode(nodeName);
-    response = addReplicaNode.process(cloudClient);
-    assertEquals(0, response.getStatus());
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(nodeName)
+        .process(cluster.getSolrClient());
 
     // Three replicas so far, should be able to create another one "normally"
-    CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica()
-        .setCollectionName(collectionName)
-        .setShardName("shard1");
-
-    response = addReplica.process(cloudClient);
-    assertEquals(0, response.getStatus());
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .process(cluster.getSolrClient());
 
     // This one should fail though, no "node" parameter specified
-    try {
-      addReplica.process(cloudClient);
-      fail("Should have thrown an error because the nodes are full");
-    } catch (HttpSolrClient.RemoteSolrException se) {
-      assertTrue("Should have gotten the right error message back",
-          se.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-    }
+    Exception e = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+          .process(cluster.getSolrClient());
+    });
+
+    assertTrue("Should have gotten the right error message back",
+          e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
 
     // Oddly, the next one should succeed because setting property.name skips the check for nodes being "full up"
-    Properties props = new Properties();
-    props.setProperty("name", "bogus2");
-    addReplicaNode.setProperties(props);
-    response = addReplicaNode.process(cloudClient);
-    assertEquals(0, response.getStatus());
-
-    ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.forceUpdateCollection(collectionName);
-    Slice slice = zkStateReader.getClusterState().getSlicesMap(collectionName).get("shard1");
-
-    Replica rep = null;
-    for (Replica rep1 : slice.getReplicas()) { // Silly compiler
-      if (rep1.get("core").equals("bogus2")) {
-        rep = rep1;
-        break;
-      }
-    }
-    assertNotNull("Should have found a replica named 'bogus2'", rep);
-    assertEquals("Replica should have been put on correct core", nodeName, rep.getNodeName());
+    // TODO: Isn't this a bug?
+    CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .withProperty("name", "bogus2")
+        .setNode(nodeName)
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    Slice slice = collectionState.getSlice("shard1");
+    Replica replica = getRandomReplica(slice, r -> r.getCoreName().equals("bogus2"));
+    assertNotNull("Should have found a replica named 'bogus2'", replica);
+    assertEquals("Replica should have been put on correct core", nodeName, replica.getNodeName());
 
     // Shard1 should have 4 replicas
     assertEquals("There should be 4 replicas for shard 1", 4, slice.getReplicas().size());
 
-    // And let's fail one more time because to insure that the math doesn't do weird stuff it we have more replicas
+    // And let's fail one more time to ensure that the math doesn't do weird stuff if we have more replicas
     // than simple calcs would indicate.
-    try {
-      addReplica.process(cloudClient);
-      fail("Should have thrown an error because the nodes are full");
-    } catch (HttpSolrClient.RemoteSolrException se) {
-      assertTrue("Should have gotten the right error message back",
-          se.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-    }
+    Exception e2 = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+          .process(cluster.getSolrClient());
+    });
+
+    assertTrue("Should have gotten the right error message back",
+        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
+
+    // wait for recoveries to finish, for a clean shutdown - see SOLR-9645
+    waitForState("Expected to see all replicas active", collectionName, (n, c) -> {
+      for (Replica r : c.getReplicas()) {
+        if (r.getState() != Replica.State.ACTIVE)
+          return false;
+      }
+      return true;
+    });
   }
 
   @Test
-  @ShardsFixed(num = 2)
   public void testAddShard() throws Exception {
+
     String collectionName = "TooManyReplicasWhenAddingShards";
-    CollectionAdminRequest.Create create = new CollectionAdminRequest.Create()
-        .setCollectionName(collectionName)
-        .setReplicationFactor(2)
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
         .setMaxShardsPerNode(2)
-        .setStateFormat(2)
-        .setRouterName("implicit")
-        .setShards("shardstart");
-
-    NamedList<Object> request = create.process(cloudClient).getResponse();
+        .process(cluster.getSolrClient());
 
-    assertTrue("Could not create the collection", request.get("success") != null);
     // We have three nodes and maxShardsPerNode is set to 2, so after the initial shardstart shard we should be
     // able to add 2 more shards with two replicas each, but fail on the third.
-
-    CollectionAdminRequest.CreateShard createShard = new CollectionAdminRequest.CreateShard()
-        .setCollectionName(collectionName)
-        .setShardName("shard1");
-    CollectionAdminResponse resp = createShard.process(cloudClient);
-    assertEquals(0, resp.getStatus());
+    CollectionAdminRequest.createShard(collectionName, "shard1")
+        .process(cluster.getSolrClient());
 
     // Now we should have one replica on each Jetty, add another to reach maxShardsPerNode
-
-    createShard = new CollectionAdminRequest.CreateShard()
-        .setCollectionName(collectionName)
-        .setShardName("shard2");
-    resp = createShard.process(cloudClient);
-    assertEquals(0, resp.getStatus());
-
+    CollectionAdminRequest.createShard(collectionName, "shard2")
+        .process(cluster.getSolrClient());
 
     // Now fail to add the third as it should exceed maxShardsPerNode
-    createShard = new CollectionAdminRequest.CreateShard()
-        .setCollectionName(collectionName)
-        .setShardName("shard3");
-    try {
-      createShard.process(cloudClient);
-      fail("Should have exceeded the max number of replicas allowed");
-    } catch (HttpSolrClient.RemoteSolrException se) {
-      assertTrue("Should have gotten the right error message back",
-          se.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-    }
+    Exception e = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createShard(collectionName, "shard3")
+          .process(cluster.getSolrClient());
+    });
+    assertTrue("Should have gotten the right error message back",
+        e.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
 
     // Hmmm, providing a nodeset also overrides the checks for max replicas, so prove it.
     List<String> nodes = getAllNodeNames(collectionName);
 
-    createShard = new CollectionAdminRequest.CreateShard()
-        .setCollectionName(collectionName)
-        .setShardName("shard4")
-        .setNodeSet(StringUtils.join(nodes, ","));
-    resp = createShard.process(cloudClient);
-    assertEquals(0, resp.getStatus());
+    CollectionAdminRequest.createShard(collectionName, "shard4")
+        .setNodeSet(StringUtils.join(nodes, ","))
+        .process(cluster.getSolrClient());
 
     // And just for yucks, ensure we fail the "regular" one again.
-    createShard = new CollectionAdminRequest.CreateShard()
-        .setCollectionName(collectionName)
-        .setShardName("shard5");
-    try {
-      createShard.process(cloudClient);
-      fail("Should have exceeded the max number of replicas allowed");
-    } catch (HttpSolrClient.RemoteSolrException se) {
-      assertTrue("Should have gotten the right error message back",
-          se.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
-    }
+    Exception e2 = expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.createShard(collectionName, "shard5")
+          .process(cluster.getSolrClient());
+    });
+    assertTrue("Should have gotten the right error message back",
+        e2.getMessage().contains("given the current number of live nodes and a maxShardsPerNode of"));
 
     // And finally, ensure that all the replicas we expect are there. We should have shards shardstart, 1, 2 and 4, and each
     // should have exactly two replicas
-    ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.forceUpdateCollection(collectionName);
-    Map<String, Slice> slices = zkStateReader.getClusterState().getSlicesMap(collectionName);
+    waitForState("Expected shards shardstart, 1, 2 and 4, each with two active replicas", collectionName, (n, c) -> {
+      return DocCollection.isFullyActive(n, c, 4, 2);
+    });
+    Map<String, Slice> slices = getCollectionState(collectionName).getSlicesMap();
     assertEquals("There should be exaclty four slices", slices.size(), 4);
     assertNotNull("shardstart should exist", slices.get("shardstart"));
     assertNotNull("shard1 should exist", slices.get("shard1"));
@@ -209,82 +176,46 @@ public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase
   }
 
   @Test
-  @ShardsFixed(num = 2)
   public void testDownedShards() throws Exception {
     String collectionName = "TooManyReplicasWhenAddingDownedNode";
-    CollectionAdminRequest.Create create = new CollectionAdminRequest.Create()
-        .setCollectionName(collectionName)
-        .setReplicationFactor(1)
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
         .setMaxShardsPerNode(2)
-        .setStateFormat(2)
-        .setRouterName("implicit")
-        .setShards("shardstart");
-
-    NamedList<Object> request = create.process(cloudClient).getResponse();
-
-    assertTrue("Could not create the collection", request.get("success") != null);
-    try (SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(),
-        AbstractZkTestCase.TIMEOUT)) {
+        .process(cluster.getSolrClient());
 
-      List<String> liveNodes = zkClient.getChildren("/live_nodes", null, true);
+    // Shut down a Jetty, I really don't care which
+    JettySolrRunner jetty = cluster.getRandomJetty(random());
+    String deadNode = jetty.getBaseUrl().toString();
+    cluster.stopJettySolrRunner(jetty);
 
-      // Shut down a Jetty, I really don't care which
-      JettySolrRunner downJetty = jettys.get(r.nextInt(2));
+    try {
 
-      downJetty.stop();
-      List<String> liveNodesNow = null;
-      for (int idx = 0; idx < 150; ++idx) {
-        liveNodesNow = zkClient.getChildren("/live_nodes", null, true);
-        if (liveNodesNow.size() != liveNodes.size()) break;
-        Thread.sleep(100);
-      }
-      List<String> deadNodes = new ArrayList<>(liveNodes);
-      assertTrue("Should be a downed node", deadNodes.removeAll(liveNodesNow));
-      liveNodes.removeAll(deadNodes);
-
-      //OK, we've killed a node. Insure we get errors when we ask to create a replica or shard that involves it.
-      // First try adding a  replica to the downed node.
-      CollectionAdminRequest.AddReplica addReplicaNode = new CollectionAdminRequest.AddReplica()
-          .setCollectionName(collectionName)
-          .setShardName("shardstart")
-          .setNode(deadNodes.get(0));
-
-      try {
-        addReplicaNode.process(cloudClient);
-        fail("Should have gotten an exception");
-      } catch (HttpSolrClient.RemoteSolrException se) {
-        assertTrue("Should have gotten a message about shard not ",
-            se.getMessage().contains("At least one of the node(s) specified are not currently active, no action taken."));
-      }
+      // Adding a replica on a dead node should fail
+      Exception e1 = expectThrows(Exception.class, () -> {
+        CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
+            .setNode(deadNode)
+            .process(cluster.getSolrClient());
+      });
+      assertTrue("Should have gotten a message about shard not ",
+          e1.getMessage().contains("At least one of the node(s) specified are not currently active, no action taken."));
 
       // Should also die if we just add a shard
-      CollectionAdminRequest.CreateShard createShard = new CollectionAdminRequest.CreateShard()
-          .setCollectionName(collectionName)
-          .setShardName("shard1")
-          .setNodeSet(deadNodes.get(0));
-      try {
-        createShard.process(cloudClient);
-        fail("Should have gotten an exception");
-      } catch (HttpSolrClient.RemoteSolrException se) {
-        assertTrue("Should have gotten a message about shard not ",
-            se.getMessage().contains("At least one of the node(s) specified are not currently active, no action taken."));
-      }
-      //downJetty.start();
+      Exception e2 = expectThrows(Exception.class, () -> {
+        CollectionAdminRequest.createShard(collectionName, "shard1")
+            .setNodeSet(deadNode)
+            .process(cluster.getSolrClient());
+      });
+
+      assertTrue("Should have gotten a message about shard not ",
+          e2.getMessage().contains("At least one of the node(s) specified are not currently active, no action taken."));
+    }
+    finally {
+      cluster.startJettySolrRunner(jetty);
     }
   }
 
   private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
-    ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.forceUpdateCollection(collectionName);
-    Slice slice = zkStateReader.getClusterState().getSlicesMap(collectionName).get("shard1");
-
-    List<String> nodes = new ArrayList<>();
-    for (Replica rep : slice.getReplicas()) {
-      nodes.add(rep.getNodeName());
-    }
-
-    assertTrue("Should have some nodes!", nodes.size() > 0);
-    return nodes;
+    DocCollection state = getCollectionState(collectionName);
+    return state.getReplicas().stream().map(Replica::getNodeName).distinct().collect(Collectors.toList());
   }
 
 }
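
Taken together, the rewritten test above shows the shape shared by every test this commit cuts over from AbstractFullDistribZkTestBase to SolrCloudTestCase: a cluster built once in @BeforeClass, builder-style CollectionAdminRequest calls, expectThrows for failure paths, and waitForState to block on published cluster state instead of polling ZooKeeper by hand. A stripped-down skeleton of that pattern; the class and collection names are illustrative:

import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.DocCollection;
import org.junit.BeforeClass;
import org.junit.Test;

public class CutOverPatternSketch extends SolrCloudTestCase {

  @BeforeClass
  public static void setupCluster() throws Exception {
    configureCluster(2)                               // shared MiniSolrCloudCluster
        .addConfig("conf", configset("cloud-minimal"))
        .configure();
  }

  @Test
  public void testPattern() throws Exception {
    CollectionAdminRequest.createCollection("sketch", "conf", 1, 2)
        .process(cluster.getSolrClient());
    // block until the published state matches, rather than sleeping
    waitForState("Expected a fully active 1x2 collection", "sketch",
        (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
  }
}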