Posted to commits@lucene.apache.org by is...@apache.org on 2020/07/17 00:34:14 UTC

[lucene-solr] branch jira/solr-14656-master updated: SOLR-14656: Remove autoscaling framework, part 3

This is an automated email from the ASF dual-hosted git repository.

ishan pushed a commit to branch jira/solr-14656-master
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git


The following commit(s) were added to refs/heads/jira/solr-14656-master by this push:
     new 9d39021  SOLR-14656: Remove autoscaling framework, part 3
9d39021 is described below

commit 9d39021865c1d1cf5b4ee27509ed83af2b17afee
Author: Ishan Chattopadhyaya <is...@apache.org>
AuthorDate: Fri Jul 17 06:04:00 2020 +0530

    SOLR-14656: Remove autoscaling framework, part 3
---
 .../test/org/apache/solr/cloud/CloudTestUtils.java | 196 --------------
 .../apache/solr/cloud/ReplaceNodeNoTargetTest.java | 127 ---------
 .../cloud/RoutingToNodesWithPropertiesTest.java    | 242 -----------------
 .../collections/CollectionTooManyReplicasTest.java |  13 +-
 .../ConcurrentCreateCollectionTest.java            | 292 ---------------------
 5 files changed, 7 insertions(+), 863 deletions(-)

diff --git a/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java b/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
deleted file mode 100644
index 9c8e6ff..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.request.RequestWriter;
-import org.apache.solr.client.solrj.request.RequestWriter.StringPayloadContentWriter;
-import org.apache.solr.client.solrj.request.V2Request;
-import org.apache.solr.client.solrj.response.SolrResponseBase;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.util.TimeOut;
-import org.junit.Assert;
-
-import static org.apache.solr.common.params.CommonParams.JSON_MIME;
-
-
-/**
- * Some useful methods for SolrCloud tests.
- */
-public class CloudTestUtils {
-
-  public static final int DEFAULT_TIMEOUT = 90;
-
-  /**
-   * Wait for a particular named trigger to be scheduled.
-   * <p>
-   * This is a convenience method that polls the autoscaling API looking for a trigger with the 
-   * specified name using the {@link #DEFAULT_TIMEOUT}.  It is particularly useful for tests 
-   * that want to know when the Overseer has finished scheduling the automatic triggers on startup.
-   * </p>
-   *
-   * @param cloudManager current instance of {@link SolrCloudManager}
-   * @param triggerName the name of the trigger we need to see scheduled in order to return successfully
-   * @see #suspendTrigger
-   */
-  public static long waitForTriggerToBeScheduled(final SolrCloudManager cloudManager,
-                                                 final String triggerName)
-    throws InterruptedException, TimeoutException, IOException {
-
-    TimeOut timeout = new TimeOut(DEFAULT_TIMEOUT, TimeUnit.SECONDS, cloudManager.getTimeSource());
-    while (!timeout.hasTimedOut()) {
-      final SolrResponse response = cloudManager.request(AutoScalingRequest.create(SolrRequest.METHOD.GET, null));
-      @SuppressWarnings({"unchecked"})
-      final Map<String,?> triggers = (Map<String,?>) response.getResponse().get("triggers");
-      Assert.assertNotNull("null triggers in response from autoscaling request", triggers);
-      
-      if ( triggers.containsKey(triggerName) ) {
-        return timeout.timeElapsed(TimeUnit.MILLISECONDS);
-      }
-      timeout.sleep(100);
-    }
-    throw new TimeoutException("Never saw trigger with name: " + triggerName);
-  }
-
-  /**
-   * Suspends the trigger with the specified name
-   * <p>
-   * This is a convenience method that sends a <code>suspend-trigger</code> command to the autoscaling
-   * API for the specified trigger.  It is particularly useful for tests that may need to disable automatic
-   * triggers such as <code>.scheduled_maintenance</code> in order to test their own
-   * triggers.
-   * </p>
-   *
-   * @param cloudManager current instance of {@link SolrCloudManager}
-   * @param triggerName the name of the trigger to suspend.  This must already be scheduled.
-   * @see #assertAutoScalingRequest
-   * @see #waitForTriggerToBeScheduled
-   */
-  public static void suspendTrigger(final SolrCloudManager cloudManager,
-                                    final String triggerName) throws IOException {
-    assertAutoScalingRequest(cloudManager, "{'suspend-trigger' : {'name' : '"+triggerName+"'} }");
-  }
-
-  /**
-   * Creates &amp; executes an autoscaling request against the current cluster, asserting that 
-   * the result is a success.
-   * 
-   * @param cloudManager current instance of {@link SolrCloudManager}
-   * @param json The request to POST to the AutoScaling Handler
-   * @see AutoScalingRequest#create
-   */
-  public static void assertAutoScalingRequest(final SolrCloudManager cloudManager,
-                                              final String json) throws IOException {
-    // TODO: a lot of code that directly uses AutoScalingRequest.create should use this method
-    
-    @SuppressWarnings({"rawtypes"})
-    final SolrRequest req = AutoScalingRequest.create(SolrRequest.METHOD.POST, json);
-    final SolrResponse rsp = cloudManager.request(req);
-    final String result = rsp.getResponse().get("result").toString();
-    Assert.assertEquals("Unexpected result from auto-scaling command: " + json + " -> " + rsp,
-                        "success", result);
-  }
-
-  
-  /**
-   * Helper class for sending (JSON) autoscaling requests that can randomize between V1 and V2 requests
-   */
-  @SuppressWarnings({"rawtypes"})
-  // nocommit remove this
-  public static class AutoScalingRequest extends SolrRequest {
-    private SolrParams params = null;
-    /**
-     * Creates a request using a randomized root path (V1 vs V2)
-     *
-     * @param m HTTP Method to use
-     * @param message JSON payload, may be null
-     */
-    @SuppressWarnings({"rawtypes"})
-    public static SolrRequest create(SolrRequest.METHOD m, String message) {
-      return create(m, null, message);
-    }
-    /**
-     * Creates a request using a randomized root path (V1 vs V2)
-     *
-     * @param m HTTP Method to use
-     * @param subPath optional sub-path under <code>"$ROOT/autoscaling"</code>. may be null, 
-     *        otherwise must start with "/"
-     * @param message JSON payload, may be null
-     */
-    @SuppressWarnings({"rawtypes"})
-    public static SolrRequest create(SolrRequest.METHOD m, String subPath, String message) {
-      return create(m,subPath,message,null);
-
-    }
-    @SuppressWarnings({"rawtypes"})
-    public static SolrRequest create(SolrRequest.METHOD m, String subPath, String message, SolrParams params) {
-      final boolean useV1 = LuceneTestCase.random().nextBoolean();
-      String path = useV1 ? "/admin/autoscaling" : "/cluster/autoscaling";
-      if (null != subPath) {
-        assert subPath.startsWith("/");
-        path += subPath;
-      }
-      return useV1
-          ? new AutoScalingRequest(m, path, message).withParams(params)
-          : new V2Request.Builder(path).withMethod(m).withParams(params).withPayload(message).build();
-
-    }
-
-    protected final String message;
-
-    /**
-     * Simple request
-     * @param m HTTP Method to use
-     * @param path path to send request to
-     * @param message JSON payload, may be null
-     */
-    private AutoScalingRequest(METHOD m, String path, String message) {
-      super(m, path);
-      this.message = message;
-    }
-
-
-    AutoScalingRequest withParams(SolrParams params){
-      this.params = params;
-      return this;
-    }
-    @Override
-    public SolrParams getParams() {
-      return params;
-    }
-
-    @Override
-    public RequestWriter.ContentWriter getContentWriter(String expectedType) {
-      return message == null ? null : new StringPayloadContentWriter(message, JSON_MIME);
-    }
-
-    @Override
-    protected SolrResponse createResponse(SolrClient client) {
-      return new SolrResponseBase();
-    }
-  }
-}
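
For context, here is a minimal sketch of how tests typically consumed the helpers deleted above. The wrapper class and method name are hypothetical; the CloudTestUtils calls, their signatures, and the ".scheduled_maintenance" trigger reference are taken from the file as it stood before this commit.

    import java.io.IOException;
    import java.util.concurrent.TimeoutException;

    import org.apache.solr.client.solrj.cloud.SolrCloudManager;
    import org.apache.solr.cloud.CloudTestUtils;

    // Hypothetical harness; only the CloudTestUtils calls come from the deleted file.
    public class AutoscalingHelperUsageSketch {
      static void example(SolrCloudManager cloudManager)
          throws IOException, InterruptedException, TimeoutException {
        // Block until the Overseer has scheduled the named trigger (DEFAULT_TIMEOUT = 90s).
        CloudTestUtils.waitForTriggerToBeScheduled(cloudManager, ".scheduled_maintenance");
        // Suspend it so it cannot interfere with the test's own triggers.
        CloudTestUtils.suspendTrigger(cloudManager, ".scheduled_maintenance");
        // POST an autoscaling command and assert the handler reports "success".
        CloudTestUtils.assertAutoScalingRequest(cloudManager,
            "{'set-cluster-policy': [{'replica':'<2', 'shard': '#EACH', 'node': '#ANY'}]}");
      }
    }
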
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeNoTargetTest.java b/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeNoTargetTest.java
deleted file mode 100644
index cd5d4be..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/ReplaceNodeNoTargetTest.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Set;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.cloud.CloudTestUtils.AutoScalingRequest;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.cloud.ReplaceNodeTest.createReplaceNodeRequest;
-
-@LuceneTestCase.AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-11067")
-public class ReplaceNodeNoTargetTest extends SolrCloudTestCase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(6)
-        .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-dynamic").resolve("conf"))
-        .configure();
-  }
-
-  protected String getSolrXml() {
-    return "solr.xml";
-  }
-
-
-  @Test
-  @LuceneTestCase.AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-11067")
-  public void test() throws Exception {
-    String coll = "replacenodetest_coll_notarget";
-    if (log.isInfoEnabled()) {
-      log.info("total_jettys: {}", cluster.getJettySolrRunners().size());
-    }
-
-    CloudSolrClient cloudClient = cluster.getSolrClient();
-    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
-    ArrayList<String> l = new ArrayList<>(liveNodes);
-    Collections.shuffle(l, random());
-    String node2bdecommissioned = l.get(0);
-    CloudSolrClient solrClient = cluster.getSolrClient();
-    String setClusterPolicyCommand = "{" +
-        " 'set-cluster-policy': [" +
-        "      {'replica':'<5', 'shard': '#EACH', 'node': '#ANY'}]}";
-    @SuppressWarnings({"rawtypes"})
-    SolrRequest req = AutoScalingRequest.create(SolrRequest.METHOD.POST, setClusterPolicyCommand);
-    solrClient.request(req);
-
-    log.info("Creating collection...");
-    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 5, 2, 0, 0);
-    cloudClient.request(create);
-    cluster.waitForActiveCollection(coll, 5, 10);
-
-    if (log.isInfoEnabled()) {
-      log.info("Current core status list for node we plan to decommision: {} => {}",
-          node2bdecommissioned,
-          getCoreStatusForNamedNode(cloudClient, node2bdecommissioned).getCoreStatus());
-      log.info("Decommisioning node: {}", node2bdecommissioned);
-    }
-
-    createReplaceNodeRequest(node2bdecommissioned, null, null).processAsync("001", cloudClient);
-    CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("001");
-    boolean success = false;
-    CollectionAdminRequest.RequestStatusResponse rsp = null;
-    for (int i = 0; i < 300; i++) {
-      rsp = requestStatus.process(cloudClient);
-      if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
-        success = true;
-        break;
-      }
-      assertFalse("async replace node request aparently failed: " + rsp.toString(),
-                  
-                  rsp.getRequestStatus() == RequestStatusState.FAILED);
-      Thread.sleep(50);
-    }
-    assertTrue("async replace node request should have finished successfully by now, last status: " + rsp,
-               success);
-    CoreAdminResponse status = getCoreStatusForNamedNode(cloudClient, node2bdecommissioned);
-    assertEquals("Expected no cores for decommisioned node: "
-                 + status.getCoreStatus().toString(),
-                 0, status.getCoreStatus().size());
-  }
-
-  /**
-   * Given a cloud client and a nodename, build an HTTP client for that node, and ask it for its core status
-   */
-  private CoreAdminResponse getCoreStatusForNamedNode(final CloudSolrClient cloudClient,
-                                                      final String nodeName) throws Exception {
-    
-    try (HttpSolrClient coreclient = getHttpSolrClient
-         (cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName))) {
-      return CoreAdminRequest.getStatus(null, coreclient);
-    }
-  }
-
-}
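
The core-status helper documented in the deleted test above does not depend on autoscaling. Here is a minimal standalone sketch of the same pattern; the class and method names are hypothetical, and it assumes a plain HttpSolrClient.Builder in place of the SolrCloudTestCase getHttpSolrClient helper.

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.CoreAdminRequest;
    import org.apache.solr.client.solrj.response.CoreAdminResponse;

    // Hypothetical standalone version of getCoreStatusForNamedNode from the deleted test.
    final class CoreStatusSketch {
      static CoreAdminResponse coreStatusForNode(CloudSolrClient cloudClient, String nodeName)
          throws Exception {
        String baseUrl = cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName);
        try (HttpSolrClient coreClient = new HttpSolrClient.Builder(baseUrl).build()) {
          // A null core name asks the node for the status of every core it hosts.
          return CoreAdminRequest.getStatus(null, coreClient);
        }
      }
    }
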
diff --git a/solr/core/src/test/org/apache/solr/cloud/RoutingToNodesWithPropertiesTest.java b/solr/core/src/test/org/apache/solr/cloud/RoutingToNodesWithPropertiesTest.java
deleted file mode 100644
index 4f9cbfc..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/RoutingToNodesWithPropertiesTest.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.NodeStateProvider;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.ShardParams;
-import org.apache.solr.common.util.CommonTestInjection;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.util.TimeSource;
-import org.apache.solr.handler.component.TrackingShardHandlerFactory;
-import org.apache.solr.util.TestInjection;
-import org.apache.solr.util.TimeOut;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.cloud.rule.ImplicitSnitch.SYSPROP;
-
-public class RoutingToNodesWithPropertiesTest extends SolrCloudTestCase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final String PROP_NAME = SYSPROP + "zone";
-  final static String COLLECTION = "coll";
-
-  private final List<String> zone1Nodes = new ArrayList<>();
-  private final List<String> zone2Nodes = new ArrayList<>();
-  private final LinkedList<TrackingShardHandlerFactory.ShardRequestAndParams> zone1Queue = new LinkedList<>();
-  private final LinkedList<TrackingShardHandlerFactory.ShardRequestAndParams> zone2Queue = new LinkedList<>();
-
-  @Before
-  public void setupCluster() throws Exception {
-    CommonTestInjection.setAdditionalProps(ImmutableMap.of("zone", "us-west1"));
-    configureCluster(2)
-        .withSolrXml(TEST_PATH().resolve("solr-trackingshardhandler.xml"))
-        .addConfig("config", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
-        .configure();
-
-    zone1Nodes.addAll(cluster.getJettySolrRunners().stream().map(JettySolrRunner::getNodeName).collect(Collectors.toSet()));
-    CommonTestInjection.setAdditionalProps(ImmutableMap.of("zone", "us-west2"));
-    zone2Nodes.add(cluster.startJettySolrRunner().getNodeName());
-    zone2Nodes.add(cluster.startJettySolrRunner().getNodeName());
-
-    String commands =  "{set-cluster-policy :[{" +
-        "    'replica':'#EQUAL'," +
-        "    'shard':'#EACH'," +
-        "    'sysprop.zone':'#EACH'}]}";
-
-    @SuppressWarnings({"rawtypes"})
-    SolrRequest req = CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, commands);
-    cluster.getSolrClient().request(req);
-
-    CollectionAdminRequest.createCollection(COLLECTION, 2, 2)
-        .process(cluster.getSolrClient());
-    cluster.waitForActiveCollection(COLLECTION, 2, 4);
-
-    // Check replica placement: each shard should end up with one replica in each zone
-    for (Slice slice : getCollectionState(COLLECTION).getSlices()) {
-      int numReplicaInZone1 = 0;
-      int numReplicaInZone2 = 0;
-      for (Replica replica : slice.getReplicas()) {
-        if (zone1Nodes.contains(replica.getNodeName()))
-          numReplicaInZone1++;
-        if (zone2Nodes.contains(replica.getNodeName()))
-          numReplicaInZone2++;
-      }
-
-      assertEquals(1, numReplicaInZone1);
-      assertEquals(1, numReplicaInZone2);
-    }
-
-    // check that the injected sysprops are visible via the NodeStateProvider
-    try (SolrCloudManager cloudManager = new SolrClientCloudManager(new ZkDistributedQueueFactory(cluster.getZkClient()),
-        cluster.getSolrClient())) {
-      for (String zone1Node: zone1Nodes) {
-        NodeStateProvider nodeStateProvider = cloudManager.getNodeStateProvider();
-        Map<String, Object> map  = nodeStateProvider.getNodeValues(zone1Node, Collections.singletonList(PROP_NAME));
-        assertEquals("us-west1", map.get(PROP_NAME));
-      }
-
-      for (String zone2Node: zone2Nodes) {
-        NodeStateProvider nodeStateProvider = cloudManager.getNodeStateProvider();
-        Map<String, Object> map = nodeStateProvider.getNodeValues(zone2Node, Collections.singletonList(PROP_NAME));
-        assertEquals("us-west2", map.get(PROP_NAME));
-      }
-
-      for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-        if (zone1Nodes.contains(jetty.getNodeName())) {
-          ((TrackingShardHandlerFactory)jetty.getCoreContainer().getShardHandlerFactory()).setTrackingQueue(zone1Queue);
-        } else {
-          ((TrackingShardHandlerFactory)jetty.getCoreContainer().getShardHandlerFactory()).setTrackingQueue(zone2Queue);
-        }
-      }
-
-      for (int i = 0; i < 20; i++) {
-        new UpdateRequest()
-            .add("id", String.valueOf(i))
-            .process(cluster.getSolrClient(), COLLECTION);
-      }
-
-      new UpdateRequest()
-          .commit(cluster.getSolrClient(), COLLECTION);
-    }
-  }
-
-  @After
-  public void after() {
-    TestInjection.reset();
-  }
-
-  @Test
-  public void test() throws Exception {
-    final int NUM_TRY = 10;
-    CollectionAdminRequest
-        .setClusterProperty(ZkStateReader.DEFAULT_SHARD_PREFERENCES, ShardParams.SHARDS_PREFERENCE_NODE_WITH_SAME_SYSPROP +":"+PROP_NAME)
-        .process(cluster.getSolrClient());
-    {
-      TimeOut timeOut = new TimeOut(20, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-      timeOut.waitFor("Timeout waiting for sysprops are cached in all nodes", () -> {
-        int total = 0;
-        for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
-          total += runner.getCoreContainer().getZkController().getSysPropsCacher().getCacheSize();
-        }
-        return total == cluster.getJettySolrRunners().size() * cluster.getJettySolrRunners().size();
-      });
-    }
-
-    for (int i = 0; i <  NUM_TRY; i++) {
-      SolrQuery qRequest = new SolrQuery("*:*");
-      ModifiableSolrParams qParams = new ModifiableSolrParams();
-      qParams.add(ShardParams.SHARDS_INFO, "true");
-      qRequest.add(qParams);
-      QueryResponse qResponse = cluster.getSolrClient().query(COLLECTION, qRequest);
-
-      Object shardsInfo = qResponse.getResponse().get(ShardParams.SHARDS_INFO);
-      assertNotNull("Unable to obtain "+ShardParams.SHARDS_INFO, shardsInfo);
-      SimpleOrderedMap<?> shardsInfoMap = (SimpleOrderedMap<?>)shardsInfo;
-      String firstReplicaAddr = ((SimpleOrderedMap) shardsInfoMap.getVal(0)).get("shardAddress").toString();
-      String secondReplicaAddr = ((SimpleOrderedMap) shardsInfoMap.getVal(1)).get("shardAddress").toString();
-      boolean firstReplicaInZone1 = false;
-      boolean secondReplicaInZone1 = false;
-      for (String zone1Node : zone1Nodes) {
-        zone1Node = zone1Node.replace("_solr", "");
-        firstReplicaInZone1 = firstReplicaInZone1 || firstReplicaAddr.contains(zone1Node);
-        secondReplicaInZone1 = secondReplicaInZone1 || secondReplicaAddr.contains(zone1Node);
-      }
-
-      assertEquals(firstReplicaInZone1, secondReplicaInZone1);
-    }
-
-    // stricter assertions using TrackingShardHandlerFactory
-    assertRoutingToSameZone();
-
-    // Cachers should stop running
-    CollectionAdminRequest
-        .setClusterProperty(ZkStateReader.DEFAULT_SHARD_PREFERENCES, ShardParams.SHARDS_PREFERENCE_REPLICA_TYPE+":PULL")
-        .process(cluster.getSolrClient());
-    {
-      TimeOut timeOut = new TimeOut(20, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-      timeOut.waitFor("Timeout waiting for sysPropsCache stop", () -> {
-        int numNodeStillRunningCache = 0;
-        for (JettySolrRunner runner: cluster.getJettySolrRunners()) {
-          if (runner.getCoreContainer().getZkController().getSysPropsCacher().isRunning()) {
-            numNodeStillRunningCache++;
-          }
-        }
-        return numNodeStillRunningCache == 0;
-      });
-    }
-
-    // Test disabling the default shard preferences
-    CollectionAdminRequest
-        .setClusterProperty(ZkStateReader.DEFAULT_SHARD_PREFERENCES, null)
-        .process(cluster.getSolrClient());
-    {
-      TimeOut timeOut = new TimeOut(20, TimeUnit.SECONDS, TimeSource.NANO_TIME);
-      timeOut.waitFor("Timeout waiting cluster properties get updated", () -> {
-        int numNodeGetUpdatedPref = 0;
-        int numNodeStillRunningCache = 0;
-        for (JettySolrRunner runner: cluster.getJettySolrRunners()) {
-          if (runner.getCoreContainer().getZkController()
-              .getZkStateReader().getClusterProperties().containsKey(ZkStateReader.DEFAULT_SHARD_PREFERENCES)) {
-            numNodeGetUpdatedPref++;
-          }
-          if (runner.getCoreContainer().getZkController().getSysPropsCacher().isRunning()) {
-            numNodeStillRunningCache++;
-          }
-        }
-        return numNodeGetUpdatedPref == 0 && numNodeStillRunningCache == 0;
-      });
-    }
-
-  }
-
-  private void assertRoutingToSameZone() {
-    for (TrackingShardHandlerFactory.ShardRequestAndParams sreq: zone1Queue) {
-      String firstNode = sreq.shard.split("\\|")[0];
-      assertTrue(zone1Nodes.stream().anyMatch(s -> firstNode.contains(s.replace('_','/'))));
-    }
-    for (TrackingShardHandlerFactory.ShardRequestAndParams sreq: zone2Queue) {
-      String firstNode = sreq.shard.split("\\|")[0];
-      assertTrue(zone2Nodes.stream().anyMatch(s -> firstNode.contains(s.replace('_','/'))));
-    }
-  }
-}
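
In the deleted test above, the autoscaling policy is only used to spread replicas across zones; the sysprop-based routing itself relies on the shards.preference cluster property. A condensed sketch of that routing setup follows, with a hypothetical class and method name; the constants and request types are the ones the deleted test used, and "sysprop.zone" matches its PROP_NAME.

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    import org.apache.solr.client.solrj.response.QueryResponse;
    import org.apache.solr.common.cloud.ZkStateReader;
    import org.apache.solr.common.params.ShardParams;

    // Hypothetical condensation of the routing scenario covered by the deleted test.
    final class SysPropRoutingSketch {
      static QueryResponse queryPreferringSameZone(CloudSolrClient client, String collection)
          throws Exception {
        // Prefer replicas whose node shares the "zone" sysprop with the querying node.
        CollectionAdminRequest
            .setClusterProperty(ZkStateReader.DEFAULT_SHARD_PREFERENCES,
                ShardParams.SHARDS_PREFERENCE_NODE_WITH_SAME_SYSPROP + ":sysprop.zone")
            .process(client);
        // Ask for per-shard routing details so the serving node can be inspected.
        SolrQuery query = new SolrQuery("*:*");
        query.set(ShardParams.SHARDS_INFO, true);
        return client.query(collection, query);
      }
    }
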
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
index 6489f1c..46a3655 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/CollectionTooManyReplicasTest.java
@@ -21,10 +21,8 @@ import java.util.Map;
 import java.util.stream.Collectors;
 
 import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.cloud.CloudTestUtils;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
@@ -32,6 +30,7 @@ import org.apache.solr.common.cloud.Slice;
 import org.apache.zookeeper.KeeperException;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 @Slow
@@ -50,6 +49,7 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
   }
 
   @Test
+  @Ignore // Since maxShardsPerNode was removed in SOLR-12847 and the autoscaling framework was removed in SOLR-14656, this test is broken
   public void testAddTooManyReplicas() throws Exception {
 
     final String collectionName = "TooManyReplicasInSeveralFlavors";
@@ -70,8 +70,8 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
         .process(cluster.getSolrClient());
 
     // equivalent to maxShardsPerNode=1
-    String commands =  "{ set-cluster-policy: [ {replica: '<2', shard: '#ANY', node: '#ANY', strict: true} ] }";
-    cluster.getSolrClient().request(CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, commands));
+    // String commands =  "{ set-cluster-policy: [ {replica: '<2', shard: '#ANY', node: '#ANY', strict: true} ] }";
+    // cluster.getSolrClient().request(CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, commands));
 
     // this should fail because the policy prevents it
     Exception e = expectThrows(Exception.class, () -> {
@@ -116,10 +116,11 @@ public class CollectionTooManyReplicasTest extends SolrCloudTestCase {
   }
 
   @Test
+  @Ignore // Since maxShardsPerNode was removed in SOLR-12847 and the autoscaling framework was removed in SOLR-14656, this test is broken
   public void testAddShard() throws Exception {
     // equivalent to maxShardsPerNode=2
-    String commands =  "{ set-cluster-policy: [ {replica: '<3', shard: '#ANY', node: '#ANY', strict: true} ] }";
-    cluster.getSolrClient().request(CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, commands));
+    // String commands =  "{ set-cluster-policy: [ {replica: '<3', shard: '#ANY', node: '#ANY', strict: true} ] }";
+    // cluster.getSolrClient().request(CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, commands));
 
     String collectionName = "TooManyReplicasWhenAddingShards";
     CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 2)
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentCreateCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentCreateCollectionTest.java
deleted file mode 100644
index cc3bbd6..0000000
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ConcurrentCreateCollectionTest.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.api.collections;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.cloud.CloudTestUtils;
-import org.apache.solr.cloud.SolrCloudTestCase;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-public class ConcurrentCreateCollectionTest extends SolrCloudTestCase {
-  
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  static int NODES = 2;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    configureCluster(NODES)
-         .addConfig("conf", configset("cloud-minimal"))
-        //.addConfig("conf", configset("_default"))
-        .configure();
-  }
-
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-  }
-
-  @After
-  @Override
-  public void tearDown() throws Exception {
-    super.tearDown();
-    cluster.deleteAllCollections();
-  }
-
-
-  private CollectionAdminRequest.Create createCollectionRequest(String cname, int numShards, int numReplicas) throws Exception {
-    CollectionAdminRequest.Create creq = CollectionAdminRequest
-        //  .createCollection(cname, "conf", NODES - 1, NODES - 1)
-        .createCollection(cname, "conf", numShards, numReplicas);
-    creq.setWaitForFinalState(true);
-    creq.setAutoAddReplicas(true);
-    return creq;
-  }
-
-  public void testConcurrentCreatePlacement() throws Exception {
-    final int nThreads = 2;
-    final int createsPerThread = 1;
-    final int nShards = 1;
-    final int repFactor = 2;
-    final boolean useClusterPolicy = false;
-    final boolean useCollectionPolicy = true;
-    final boolean startUnbalanced = true; // can help make a smaller test that can still reproduce an issue.
-    final int unbalancedSize = 1; // the number of replicas to create first
-    final boolean stopNode = false;  // only applicable when startUnbalanced==true... stops a node during first collection creation, then restarts
-
-    final CloudSolrClient client = cluster.getSolrClient();
-
-
-    if (startUnbalanced) {
-      /*** This produces a failure (multiple replicas of single shard on same node) when run with NODES=4 and
-       final int nThreads = 2;
-       final int createsPerThread = 1;
-       final int nShards = 2;
-       final int repFactor = 2;
-       final boolean useClusterPolicy = false;
-       final boolean useCollectionPolicy = true;
-       final boolean startUnbalanced = true;
-       // NOTE: useClusterPolicy=true seems to fix it! So does putting both creates in a single thread!
-       // NOTE: even creating a single replica to start with causes failure later on.
-
-       Also reproduced with smaller cluster: NODES=2 and
-       final int nThreads = 2;
-       final int createsPerThread = 1;
-       final int nShards = 1;
-       final int repFactor = 2;
-       final boolean useClusterPolicy = false;
-       final boolean useCollectionPolicy = true;
-       final boolean startUnbalanced = true;
-
-       Also, with NODES=3:
-       final int nThreads = 2;
-       final int createsPerThread = 1;
-       final int nShards = 1;
-       final int repFactor = 2;
-       final boolean useClusterPolicy = false;
-       final boolean useCollectionPolicy = true;
-       final boolean startUnbalanced = false;
-
-       // Also succeeded in replicating a bug where all 5 replicas were on a single node: CORES=5, nThreads=5, repFactor=5,
-       //    unbalancedSize = 16 (4 replicas on each of the up nodes), stopNode=true
-       ***/
-
-
-      JettySolrRunner downJetty = cluster.getJettySolrRunners().get(0);
-      if (stopNode) {
-        cluster.stopJettySolrRunner(downJetty);
-      }
-
-      String cname = "STARTCOLLECTION";
-      CollectionAdminRequest.Create creq = CollectionAdminRequest
-          //  .createCollection(cname, "conf", NODES - 1, NODES - 1)
-          .createCollection(cname, "conf", unbalancedSize, 1);
-      creq.setWaitForFinalState(true);
-      // creq.setAutoAddReplicas(true);
-      if (useCollectionPolicy) { creq.setPolicy("policy1"); }
-      creq.process(client);
-
-      if (stopNode) {
-        // this will start it with a new port.... does it matter?
-        cluster.startJettySolrRunner(downJetty);
-      }
-    }
-
-
-
-    if (useClusterPolicy) {
-      String setClusterPolicyCommand = "{" +
-          " 'set-cluster-policy': [" +
-          // "      {'cores':'<100', 'node':'#ANY'}," +
-          "      {'replica':'<2', 'shard': '#EACH', 'node': '#ANY'}," +
-          // "      {'replica':'<2', 'node': '#ANY'}," +
-          "    ]" +
-          "}";
-
-      @SuppressWarnings({"rawtypes"})
-      SolrRequest req = CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, setClusterPolicyCommand);
-      client.request(req);
-    }
-
-    if (useCollectionPolicy) {
-      // NOTE: the mere act of setting this named policy prevents LegacyAssignStrategy from being used, even if the policy is
-      // not used during collection creation.
-      String commands =  "{set-policy : {" +
-          " policy1 : [{replica:'<2' , node:'#ANY'}]" +
-          ",policy2 : [{replica:'<2' , shard:'#EACH', node:'#ANY'}]" +
-          "}}";
-      client.request(CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, commands));
-
-      /*** take defaults for cluster preferences
-      String cmd = "{" +
-          " 'set-cluster-preferences': [" +
-          // "      {'cores':'<100', 'node':'#ANY'}," +
-          "      {minimize:cores}" +
-          "    ]" +
-          "}";
-
-      SolrRequest req = CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.POST, cmd);
-      client.request(req);
-       ***/
-    }
-
-    /***
-    SolrRequest req = CloudTestUtils.AutoScalingRequest.create(SolrRequest.METHOD.GET, null);
-    SolrResponse response = req.process(client);
-    log.info("######### AUTOSCALE {}", response);
-     ***/
-
-
-    byte[] data = client.getZkStateReader().getZkClient().getData("/autoscaling.json", null, null, true);
-    if (log.isInfoEnabled()) {
-      log.info("AUTOSCALE DATA: {}", new String(data, "UTF-8"));
-    }
-
-    final AtomicInteger collectionNum = new AtomicInteger();
-    Thread[] indexThreads = new Thread[nThreads];
-
-    for (int i=0; i<nThreads; i++) {
-      indexThreads[i] = new Thread(() -> {
-        try {
-          for (int j=0; j<createsPerThread; j++) {
-            int num = collectionNum.incrementAndGet();
-            // Thread.sleep(num*1000);
-            String collectionName = "collection" + num;
-            CollectionAdminRequest.Create createReq = CollectionAdminRequest
-                .createCollection(collectionName, "conf", nShards, repFactor)
-                // .setMaxShardsPerNode(1) // should be default
-                ;
-            createReq.setWaitForFinalState(false);
-            if (useCollectionPolicy) {
-              createReq.setPolicy("policy1");
-            }
-            createReq.setAutoAddReplicas(true);
-
-            createReq.process(client);
-            // cluster.waitForActiveCollection(collectionName, 1, repFactor);
-            // Thread.sleep(10000);
-          }
-        } catch (Exception e) {
-          fail(e.getMessage());
-        }
-      });
-    }
-
-    for (Thread thread : indexThreads) {
-      thread.start();
-    }
-
-    for (Thread thread : indexThreads) {
-      thread.join();
-    }
-
-    int expectedTotalReplicas = unbalancedSize + nThreads * createsPerThread * nShards * repFactor;
-    int expectedPerNode = expectedTotalReplicas / NODES;
-    boolean expectBalanced = (expectedPerNode * NODES == expectedTotalReplicas);
-
-    Map<String,List<Replica>> replicaMap = new HashMap<>();
-    ClusterState cstate = client.getZkStateReader().getClusterState();
-    for (DocCollection collection : cstate.getCollectionsMap().values()) {
-      for (Replica replica : collection.getReplicas()) {
-        String url = replica.getBaseUrl();
-        List<Replica> replicas = replicaMap.get(url);
-        if (replicas == null) {
-          replicas = new ArrayList<>();
-          replicaMap.put(url, replicas);
-        }
-        replicas.add(replica);
-      }
-    }
-
-    // check if nodes are balanced
-    boolean failed = false;
-    for (List<Replica> replicas : replicaMap.values()) {
-      if (replicas.size() != expectedPerNode ) {
-        if (expectBalanced) {
-          failed = true;
-        }
-        log.error("UNBALANCED CLUSTER: expected replicas per node {} but got {}", expectedPerNode, replicas.size());
-      }
-    }
-
-    // check if there were multiple replicas of the same shard placed on the same node
-    for (DocCollection collection : cstate.getCollectionsMap().values()) {
-      for (Slice slice : collection.getSlices()) {
-        Map<String, Replica> nodeToReplica = new HashMap<>();
-        for (Replica replica : slice.getReplicas()) {
-          Replica prev = nodeToReplica.put(replica.getBaseUrl(), replica);
-          if (prev != null) {
-            failed = true;
-            // NOTE: with a replication factor > 2, this will print multiple times per bad slice.
-            log.error("MULTIPLE REPLICAS OF SINGLE SHARD ON SAME NODE: r1={} r2={}", prev, replica);
-          }
-        }
-      }
-    }
-
-    if (failed) {
-      log.error("Cluster state {}", cstate.getCollectionsMap());
-    }
-
-    assertEquals(replicaMap.size(),  NODES);  // make sure something was created
-
-    assertTrue(!failed);
-  }
-
-
-  
-}
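
The placement invariant asserted by the deleted test above (no two replicas of the same shard on one node) does not itself require the autoscaling framework. Below is a minimal condensation of that check; the class and method names are hypothetical, while the ClusterState accessors are the ones the deleted test used.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.Slice;

    // Hypothetical condensation of the shard-collision check from the deleted test.
    final class PlacementInvariantSketch {
      static boolean hasSameShardCollision(ClusterState clusterState) {
        for (DocCollection collection : clusterState.getCollectionsMap().values()) {
          for (Slice slice : collection.getSlices()) {
            Map<String, Replica> nodeToReplica = new HashMap<>();
            for (Replica replica : slice.getReplicas()) {
              // A non-null previous mapping means two replicas of this shard share a node.
              if (nodeToReplica.put(replica.getBaseUrl(), replica) != null) {
                return true;
              }
            }
          }
        }
        return false;
      }
    }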