Posted to commits@lucene.apache.org by ab...@apache.org on 2018/04/09 14:58:51 UTC

lucene-solr:jira/solr-12181: SOLR-12181: Add integration tests for split shard and merge shards (unsupported).

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-12181 957c1d2c8 -> 2f13a21f2


SOLR-12181: Add integration tests for split shard and merge shards (unsupported).


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2f13a21f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2f13a21f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2f13a21f

Branch: refs/heads/jira/solr-12181
Commit: 2f13a21f239447ae4d65537b2ed851602886e081
Parents: 957c1d2
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Apr 9 16:58:08 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Apr 9 16:58:08 2018 +0200

----------------------------------------------------------------------
 .../cloud/autoscaling/ComputePlanAction.java    |   7 +-
 .../cloud/autoscaling/IndexSizeTrigger.java     |   7 +-
 .../org/apache/solr/cloud/CloudTestUtils.java   |  14 +-
 .../cloud/autoscaling/IndexSizeTriggerTest.java | 268 +++++++++++++++++--
 .../cloud/autoscaling/sim/SimCloudManager.java  |  73 ++++-
 .../sim/SimClusterStateProvider.java            | 182 ++++++++-----
 .../autoscaling/sim/SimSolrCloudTestCase.java   |  54 +---
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |   4 +
 .../cloud/autoscaling/UnsupportedSuggester.java |   5 +
 .../apache/solr/cloud/SolrCloudTestCase.java    |   2 +-
 10 files changed, 461 insertions(+), 155 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
index 57aa310..4a9c744 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
@@ -219,11 +219,8 @@ public class ComputePlanAction extends TriggerActionBase {
         for (Map.Entry<Suggester.Hint, Object> e : op.getHints().entrySet()) {
           suggester = suggester.hint(e.getKey(), e.getValue());
         }
-        if (++start >= ops.size()) {
-          event.getProperties().remove(START);
-        } else {
-          event.getProperties().put(START, start);
-        }
+        start++;
+        event.getProperties().put(START, start);
         break;
       case SCHEDULED:
         String preferredOp = (String) event.getProperty(AutoScalingParams.PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
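
A minimal, self-contained sketch of the cursor behavior this hunk changes (the ops list and the START key are hypothetical stand-ins, not the real TriggerEvent API): the offset is now stored unconditionally, even when it runs past the end of the list, where it was previously removed.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical stand-in for the TriggerEvent property handling; the ops
    // list and the START key mimic, not reuse, the real Solr types.
    public class StartCursorSketch {
      static final String START = "__start__";

      // Attempt the next op, always advancing and persisting the cursor --
      // before this commit the cursor was removed once it ran past the end.
      static String nextOp(List<String> ops, Map<String, Object> eventProps) {
        int start = (Integer) eventProps.getOrDefault(START, 0);
        if (start >= ops.size()) {
          return null; // nothing left to attempt
        }
        String op = ops.get(start);
        start++;
        eventProps.put(START, start); // stored unconditionally after this change
        return op;
      }

      public static void main(String[] args) {
        List<String> ops = new ArrayList<>(List.of("op-a", "op-b"));
        Map<String, Object> props = new HashMap<>();
        System.out.println(nextOp(ops, props)); // op-a
        System.out.println(nextOp(ops, props)); // op-b
        System.out.println(nextOp(ops, props)); // null, cursor stays at 2
      }
    }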

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
index c3fde1e..7bfda9a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
@@ -169,7 +169,7 @@ public class IndexSizeTrigger extends TriggerBase {
       return;
     }
 
-    // replica name / info + size
+    // replica name / info + size, retrieved from leaders only
     Map<String, ReplicaInfo> currentSizes = new HashMap<>();
 
     try {
@@ -235,7 +235,9 @@ public class IndexSizeTrigger extends TriggerBase {
             // verify that it's a Number
             if (!(size instanceof Number)) {
               log.warn("invalid size value - not a number: '" + size + "' is " + size.getClass().getName());
+              return;
             }
+            info = (ReplicaInfo)info.clone();
             info.getVariables().put(SIZE_PROP, ((Number) size).longValue());
             currentSizes.put(info.getCore(), info);
           }
@@ -295,6 +297,7 @@ public class IndexSizeTrigger extends TriggerBase {
       if (replicas.size() < 2) {
         return;
       }
+      // sort by increasing size
       replicas.sort((r1, r2) -> {
         long delta = (Long) r1.getVariable(SIZE_PROP) - (Long) r2.getVariable(SIZE_PROP);
         if (delta > 0) {
@@ -305,7 +308,7 @@ public class IndexSizeTrigger extends TriggerBase {
           return 0;
         }
       });
-      // take top two
+      // take top two smallest
       TriggerEvent.Op op = new TriggerEvent.Op(belowOp);
       op.addHint(Suggester.Hint.COLL_SHARD, new Pair(coll, replicas.get(0).getShard()));
       op.addHint(Suggester.Hint.COLL_SHARD, new Pair(coll, replicas.get(1).getShard()));
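
The comparator above orders replicas by increasing SIZE_PROP and the trigger then takes the two smallest shards as merge candidates. A standalone sketch of that selection, where SizedShard is a hypothetical stand-in for ReplicaInfo plus its SIZE_PROP variable:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    // SizedShard is a hypothetical stand-in for ReplicaInfo plus its SIZE_PROP
    // variable; the ordering matches the comparator in the hunk above.
    public class SmallestShardsSketch {
      record SizedShard(String shard, long sizeDocs) {}

      // Pick the two smallest shards as merge candidates, or none if fewer
      // than two shards are below the threshold.
      static List<SizedShard> pickTwoSmallest(List<SizedShard> belowThreshold) {
        if (belowThreshold.size() < 2) {
          return List.of();
        }
        List<SizedShard> sorted = new ArrayList<>(belowThreshold);
        sorted.sort(Comparator.comparingLong(SizedShard::sizeDocs)); // increasing size
        return sorted.subList(0, 2);
      }

      public static void main(String[] args) {
        List<SizedShard> shards = List.of(
            new SizedShard("shard1", 3),
            new SizedShard("shard2", 12),
            new SizedShard("shard3", 1));
        System.out.println(pickTwoSmallest(shards)); // shard3 (1 doc), shard1 (3 docs)
      }
    }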

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java b/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
index 04c90b1..d51e6ca 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CloudTestUtils.java
@@ -114,18 +114,24 @@ public class CloudTestUtils {
    */
   public static CollectionStatePredicate clusterShape(int expectedShards, int expectedReplicas) {
     return (liveNodes, collectionState) -> {
-      if (collectionState == null)
+      if (collectionState == null) {
+        log.debug("-- null collection");
         return false;
-      if (collectionState.getSlices().size() != expectedShards)
+      }
+      if (collectionState.getActiveSlices().size() != expectedShards) {
+        log.debug("-- wrong number of active slices, expected=" + expectedShards + ", found=" + collectionState.getSlices().size());
         return false;
-      for (Slice slice : collectionState) {
+      }
+      for (Slice slice : collectionState.getActiveSlices()) {
         int activeReplicas = 0;
         for (Replica replica : slice) {
           if (replica.isActive(liveNodes))
             activeReplicas++;
         }
-        if (activeReplicas != expectedReplicas)
+        if (activeReplicas != expectedReplicas) {
+          log.debug("-- wrong number of active replicas in slice " + slice.getName() + ", expected=" + expectedReplicas + ", found=" + activeReplicas);
           return false;
+        }
       }
       return true;
     };
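
The predicate now counts only active slices and active replicas, which matters after a split because the parent slice is marked INACTIVE but remains in the collection state (see the SimClusterStateProvider hunk below). A simplified stand-in, with a plain map in place of the real DocCollection/Slice/Replica types:

    import java.util.List;
    import java.util.Map;
    import java.util.function.BiPredicate;

    // Simplified stand-in for the predicate above: a collection is modeled as
    // a map of active slice name -> per-replica "is active" flags, instead of
    // the real DocCollection/Slice/Replica types.
    public class ClusterShapeSketch {
      static BiPredicate<Integer, Map<String, List<Boolean>>> clusterShape(
          int expectedShards, int expectedReplicas) {
        return (liveNodeCount, activeSlices) -> {
          if (activeSlices == null || activeSlices.size() != expectedShards) {
            return false; // null state or wrong number of active slices
          }
          // every active slice must have exactly the expected active replicas
          return activeSlices.values().stream()
              .allMatch(flags -> flags.stream().filter(a -> a).count() == expectedReplicas);
        };
      }

      public static void main(String[] args) {
        Map<String, List<Boolean>> afterSplit = Map.of(
            "shard1_0", List.of(true, true),
            "shard1_1", List.of(true, true),
            "shard2_0", List.of(true, true),
            "shard2_1", List.of(true, true));
        System.out.println(clusterShape(4, 2).test(2, afterSplit)); // true
      }
    }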

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
index 7966291..19edfbf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
@@ -19,6 +19,7 @@ package org.apache.solr.cloud.autoscaling;
 
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -41,20 +42,25 @@ import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.cloud.autoscaling.sim.SimCloudManager;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.util.LogLevel;
 import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.solr.cloud.autoscaling.AutoScalingHandlerTest.createAutoScalingRequest;
+import static org.apache.solr.common.cloud.ZkStateReader.SOLR_AUTOSCALING_CONF_PATH;
 
 /**
  *
@@ -68,23 +74,31 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
   private static TimeSource timeSource;
   private static SolrResourceLoader loader;
 
+  private static int SPEED;
+
   private AutoScaling.TriggerEventProcessor noFirstRunProcessor = event -> {
     fail("Did not expect the processor to fire on first run! event=" + event);
     return true;
   };
   private static final long WAIT_FOR_DELTA_NANOS = TimeUnit.MILLISECONDS.toNanos(2);
 
+  static Map<String, List<CapturedEvent>> listenerEvents = new ConcurrentHashMap<>();
+  static CountDownLatch listenerCreated = new CountDownLatch(1);
+  static CountDownLatch finished = new CountDownLatch(1);
+
   @BeforeClass
   public static void setupCluster() throws Exception {
     configureCluster(2)
         .addConfig("conf", configset("cloud-minimal"))
         .configure();
-    if (random().nextBoolean() && false) {
+    if (random().nextBoolean()) {
       cloudManager = cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getSolrCloudManager();
       solrClient = cluster.getSolrClient();
       loader = cluster.getJettySolrRunner(0).getCoreContainer().getResourceLoader();
+      SPEED = 1;
     } else {
-      cloudManager = SimCloudManager.createCluster(2, TimeSource.get("simTime:50"));
+      SPEED = 50;
+      cloudManager = SimCloudManager.createCluster(2, TimeSource.get("simTime:" + SPEED));
       // wait for defaults to be applied - due to accelerated time sometimes we may miss this
       cloudManager.getTimeSource().sleep(10000);
       AutoScalingConfig cfg = cloudManager.getDistribStateManager().getAutoScalingConfig();
@@ -97,23 +111,18 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
 
   @After
   public void restoreDefaults() throws Exception {
-    SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST,
-        "{'set-trigger' : " + AutoScaling.SCHEDULED_MAINTENANCE_TRIGGER_DSL + "}");
-    NamedList<Object> response = solrClient.request(req);
-    assertEquals(response.get("result").toString(), "success");
-    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
-    if (autoScalingConfig.getTriggerListenerConfigs().containsKey("foo")) {
-      String cmd = "{" +
-          "'remove-listener' : {'name' : 'foo'}" +
-          "}";
-      response = solrClient.request(createAutoScalingRequest(SolrRequest.METHOD.POST, cmd));
-      assertEquals(response.get("result").toString(), "success");
-    }
     if (cloudManager instanceof SimCloudManager) {
+      log.info(((SimCloudManager) cloudManager).dumpClusterState(true));
       ((SimCloudManager) cloudManager).getSimClusterStateProvider().simDeleteAllCollections();
+      ((SimCloudManager) cloudManager).simResetOpCounts();
     } else {
       cluster.deleteAllCollections();
     }
+    cloudManager.getDistribStateManager().setData(SOLR_AUTOSCALING_CONF_PATH, Utils.toJSON(new ZkNodeProps()), -1);
+    cloudManager.getTimeSource().sleep(5000);
+    listenerEvents.clear();
+    listenerCreated = new CountDownLatch(1);
+    finished = new CountDownLatch(1);
   }
 
   @AfterClass
@@ -127,7 +136,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
 
   @Test
   public void testTrigger() throws Exception {
-    String collectionName = "collection1";
+    String collectionName = "testTrigger_collection";
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
         "conf", 2, 2).setMaxShardsPerNode(2);
     create.process(solrClient);
@@ -146,7 +155,7 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
         SolrInputDocument doc = new SolrInputDocument("id", "id-" + i);
         solrClient.add(collectionName, doc);
       }
-      solrClient.commit();
+      solrClient.commit(collectionName);
 
       AtomicBoolean fired = new AtomicBoolean(false);
       AtomicReference<TriggerEvent> eventRef = new AtomicReference<>();
@@ -197,6 +206,233 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
     }
   }
 
+  public static class CapturingTriggerListener extends TriggerListenerBase {
+    @Override
+    public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, AutoScalingConfig.TriggerListenerConfig config) throws TriggerValidationException {
+      super.configure(loader, cloudManager, config);
+      listenerCreated.countDown();
+    }
+
+    @Override
+    public synchronized void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName,
+                                     ActionContext context, Throwable error, String message) {
+      List<CapturedEvent> lst = listenerEvents.computeIfAbsent(config.name, s -> new ArrayList<>());
+      CapturedEvent ev = new CapturedEvent(timeSource.getTimeNs(), context, config, stage, actionName, event, message);
+      log.info("=======> " + ev);
+      lst.add(ev);
+    }
+  }
+
+  public static class FinishedProcessingListener extends TriggerListenerBase {
+
+    @Override
+    public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context, Throwable error, String message) throws Exception {
+      finished.countDown();
+    }
+  }
+
+  @Test
+  public void testSplitIntegration() throws Exception {
+    String collectionName = "testSplitIntegration_collection";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
+        "conf", 2, 2).setMaxShardsPerNode(2);
+    create.process(solrClient);
+    CloudTestUtils.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
+        CloudTestUtils.clusterShape(2, 2));
+
+    long waitForSeconds = 3 + random().nextInt(5);
+    String setTriggerCommand = "{" +
+        "'set-trigger' : {" +
+        "'name' : 'index_size_trigger'," +
+        "'event' : 'indexSize'," +
+        "'waitFor' : '" + waitForSeconds + "s'," +
+        "'unit' : 'docs'," +
+        "'above' : 10," +
+        "'below' : 4," +
+        "'enabled' : true," +
+        "'actions' : [{'name' : 'compute_plan', 'class' : 'solr.ComputePlanAction'}," +
+        "{'name' : 'execute_plan', 'class' : '" + ExecutePlanAction.class.getName() + "'}]" +
+        "}}";
+    SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST, setTriggerCommand);
+    NamedList<Object> response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    String setListenerCommand = "{" +
+        "'set-listener' : " +
+        "{" +
+        "'name' : 'capturing'," +
+        "'trigger' : 'index_size_trigger'," +
+        "'stage' : ['STARTED','ABORTED','SUCCEEDED','FAILED']," +
+        "'beforeAction' : ['compute_plan','execute_plan']," +
+        "'afterAction' : ['compute_plan','execute_plan']," +
+        "'class' : '" + CapturingTriggerListener.class.getName() + "'" +
+        "}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, setListenerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    setListenerCommand = "{" +
+        "'set-listener' : " +
+        "{" +
+        "'name' : 'finished'," +
+        "'trigger' : 'index_size_trigger'," +
+        "'stage' : ['SUCCEEDED']," +
+        "'class' : '" + FinishedProcessingListener.class.getName() + "'" +
+        "}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, setListenerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+
+    for (int i = 0; i < 25; i++) {
+      SolrInputDocument doc = new SolrInputDocument("id", "id-" + i);
+      solrClient.add(collectionName, doc);
+    }
+    solrClient.commit(collectionName);
+
+    timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
+
+    boolean await = finished.await(60000 / SPEED, TimeUnit.MILLISECONDS);
+    assertTrue("did not finish processing in time", await);
+    CloudTestUtils.waitForState(cloudManager, collectionName, 10, TimeUnit.SECONDS, CloudTestUtils.clusterShape(4, 2));
+    assertEquals(1, listenerEvents.size());
+    List<CapturedEvent> events = listenerEvents.get("capturing");
+    assertNotNull("'capturing' events not found", events);
+    assertEquals("events: " + events, 6, events.size());
+    assertEquals(TriggerEventProcessorStage.STARTED, events.get(0).stage);
+    assertEquals(TriggerEventProcessorStage.BEFORE_ACTION, events.get(1).stage);
+    assertEquals(TriggerEventProcessorStage.AFTER_ACTION, events.get(2).stage);
+    assertEquals(TriggerEventProcessorStage.BEFORE_ACTION, events.get(3).stage);
+    assertEquals(TriggerEventProcessorStage.AFTER_ACTION, events.get(4).stage);
+    assertEquals(TriggerEventProcessorStage.SUCCEEDED, events.get(5).stage);
+    // check ops
+    List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>) events.get(4).event.getProperty(TriggerEvent.REQUESTED_OPS);
+    assertNotNull("should contain requestedOps", ops);
+    assertEquals("number of ops", 2, ops.size());
+    boolean shard1 = false;
+    boolean shard2 = false;
+    for (TriggerEvent.Op op : ops) {
+      assertEquals(CollectionParams.CollectionAction.SPLITSHARD, op.getAction());
+      Set<Pair<String, String>> hints = (Set<Pair<String, String>>)op.getHints().get(Suggester.Hint.COLL_SHARD);
+      assertNotNull("hints", hints);
+      assertEquals("hints", 1, hints.size());
+      Pair<String, String> p = hints.iterator().next();
+      assertEquals(collectionName, p.first());
+      if (p.second().equals("shard1")) {
+        shard1 = true;
+      } else if (p.second().equals("shard2")) {
+        shard2 = true;
+      } else {
+        fail("unexpected shard name " + p.second());
+      }
+    }
+    assertTrue("shard1 should be split", shard1);
+    assertTrue("shard2 should be split", shard2);
+
+  }
+
+  @Test
+  public void testMergeIntegration() throws Exception {
+    String collectionName = "testMergeIntegration_collection";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
+        "conf", 2, 2).setMaxShardsPerNode(2);
+    create.process(solrClient);
+    CloudTestUtils.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
+        CloudTestUtils.clusterShape(2, 2));
+
+    for (int i = 0; i < 10; i++) {
+      SolrInputDocument doc = new SolrInputDocument("id", "id-" + (i * 100));
+      solrClient.add(collectionName, doc);
+    }
+    solrClient.commit(collectionName);
+
+    long waitForSeconds = 3 + random().nextInt(5);
+    String setTriggerCommand = "{" +
+        "'set-trigger' : {" +
+        "'name' : 'index_size_trigger'," +
+        "'event' : 'indexSize'," +
+        "'waitFor' : '" + waitForSeconds + "s'," +
+        "'unit' : 'docs'," +
+        "'above' : 40," +
+        "'below' : 4," +
+        "'enabled' : true," +
+        "'actions' : [{'name' : 'compute_plan', 'class' : 'solr.ComputePlanAction'}," +
+        "{'name' : 'execute_plan', 'class' : '" + ExecutePlanAction.class.getName() + "'}]" +
+        "}}";
+    SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST, setTriggerCommand);
+    NamedList<Object> response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    String setListenerCommand = "{" +
+        "'set-listener' : " +
+        "{" +
+        "'name' : 'capturing'," +
+        "'trigger' : 'index_size_trigger'," +
+        "'stage' : ['STARTED','ABORTED','SUCCEEDED','FAILED']," +
+        "'beforeAction' : ['compute_plan','execute_plan']," +
+        "'afterAction' : ['compute_plan','execute_plan']," +
+        "'class' : '" + CapturingTriggerListener.class.getName() + "'" +
+        "}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, setListenerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    setListenerCommand = "{" +
+        "'set-listener' : " +
+        "{" +
+        "'name' : 'finished'," +
+        "'trigger' : 'index_size_trigger'," +
+        "'stage' : ['SUCCEEDED']," +
+        "'class' : '" + FinishedProcessingListener.class.getName() + "'" +
+        "}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, setListenerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    // delete some docs to trigger a merge
+    for (int i = 0; i < 5; i++) {
+      solrClient.deleteById(collectionName, "id-" + (i * 100));
+    }
+    solrClient.commit(collectionName);
+
+    timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
+
+    boolean await = finished.await(60000 / SPEED, TimeUnit.MILLISECONDS);
+    assertTrue("did not finish processing in time", await);
+    assertEquals(1, listenerEvents.size());
+    List<CapturedEvent> events = listenerEvents.get("capturing");
+    assertNotNull("'capturing' events not found", events);
+    assertEquals("events: " + events, 6, events.size());
+    assertEquals(TriggerEventProcessorStage.STARTED, events.get(0).stage);
+    assertEquals(TriggerEventProcessorStage.BEFORE_ACTION, events.get(1).stage);
+    assertEquals(TriggerEventProcessorStage.AFTER_ACTION, events.get(2).stage);
+    assertEquals(TriggerEventProcessorStage.BEFORE_ACTION, events.get(3).stage);
+    assertEquals(TriggerEventProcessorStage.AFTER_ACTION, events.get(4).stage);
+    assertEquals(TriggerEventProcessorStage.SUCCEEDED, events.get(5).stage);
+    // check ops
+    List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>) events.get(4).event.getProperty(TriggerEvent.REQUESTED_OPS);
+    assertNotNull("should contain requestedOps", ops);
+    assertTrue("number of ops: " + ops, ops.size() > 0);
+    for (TriggerEvent.Op op : ops) {
+      assertEquals(CollectionParams.CollectionAction.MERGESHARDS, op.getAction());
+      Set<Pair<String, String>> hints = (Set<Pair<String, String>>)op.getHints().get(Suggester.Hint.COLL_SHARD);
+      assertNotNull("hints", hints);
+      assertEquals("hints", 2, hints.size());
+      Pair<String, String> p = hints.iterator().next();
+      assertEquals(collectionName, p.first());
+    }
+
+    // TODO: fix this once MERGESHARDS is supported
+    List<TriggerEvent.Op> unsupportedOps = (List<TriggerEvent.Op>)events.get(2).context.get("properties.unsupportedOps");
+    assertNotNull("should have unsupportedOps", unsupportedOps);
+    assertEquals(unsupportedOps.toString() + "\n" + ops, ops.size(), unsupportedOps.size());
+    unsupportedOps.forEach(op -> assertEquals(CollectionParams.CollectionAction.MERGESHARDS, op.getAction()));
+  }
+
   private Map<String, Object> createTriggerProps(long waitForSeconds) {
     Map<String, Object> props = new HashMap<>();
     props.put("event", "indexSize");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
index adfb76a..9641552 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
@@ -24,8 +24,10 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentSkipListMap;
@@ -42,6 +44,7 @@ import org.apache.solr.client.solrj.cloud.DistributedQueueFactory;
 import org.apache.solr.client.solrj.cloud.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.NodeStateProvider;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -57,6 +60,7 @@ import org.apache.solr.cloud.autoscaling.OverseerTriggerThread;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.cloud.rule.ImplicitSnitch;
@@ -242,6 +246,67 @@ public class SimCloudManager implements SolrCloudManager {
     return values;
   }
 
+  public String dumpClusterState(boolean withCollections) throws Exception {
+    StringBuilder sb = new StringBuilder();
+    sb.append("#######################################\n");
+    sb.append("############ CLUSTER STATE ############\n");
+    sb.append("#######################################\n");
+    sb.append("## Live nodes:\t\t" + getLiveNodesSet().size() + "\n");
+    int emptyNodes = 0;
+    int maxReplicas = 0;
+    int minReplicas = Integer.MAX_VALUE;
+    Map<String, Map<Replica.State, AtomicInteger>> replicaStates = new TreeMap<>();
+    int numReplicas = 0;
+    for (String node : getLiveNodesSet().get()) {
+      List<ReplicaInfo> replicas = getSimClusterStateProvider().simGetReplicaInfos(node);
+      numReplicas += replicas.size();
+      if (replicas.size() > maxReplicas) {
+        maxReplicas = replicas.size();
+      }
+      if (minReplicas > replicas.size()) {
+        minReplicas = replicas.size();
+      }
+      for (ReplicaInfo ri : replicas) {
+        replicaStates.computeIfAbsent(ri.getCollection(), c -> new TreeMap<>())
+            .computeIfAbsent(ri.getState(), s -> new AtomicInteger())
+            .incrementAndGet();
+      }
+      if (replicas.isEmpty()) {
+        emptyNodes++;
+      }
+    }
+    if (minReplicas == Integer.MAX_VALUE) {
+      minReplicas = 0;
+    }
+    sb.append("## Empty nodes:\t" + emptyNodes + "\n");
+    Set<String> deadNodes = getSimNodeStateProvider().simGetDeadNodes();
+    sb.append("## Dead nodes:\t\t" + deadNodes.size() + "\n");
+    deadNodes.forEach(n -> sb.append("##\t\t" + n + "\n"));
+    sb.append("## Collections:\t" + getSimClusterStateProvider().simListCollections() + "\n");
+    if (withCollections) {
+      ClusterState state = clusterStateProvider.getClusterState();
+      state.forEachCollection(coll -> sb.append(coll.toString() + "\n"));
+    }
+    sb.append("## Max replicas per node:\t" + maxReplicas + "\n");
+    sb.append("## Min replicas per node:\t" + minReplicas + "\n");
+    sb.append("## Total replicas:\t\t" + numReplicas + "\n");
+    replicaStates.forEach((c, map) -> {
+      AtomicInteger repCnt = new AtomicInteger();
+      map.forEach((s, cnt) -> repCnt.addAndGet(cnt.get()));
+      sb.append("## * " + c + "\t\t" + repCnt.get() + "\n");
+      map.forEach((s, cnt) -> sb.append("##\t\t- " + String.format(Locale.ROOT, "%-12s  %4d", s, cnt.get()) + "\n"));
+    });
+    sb.append("######### Solr op counts ##########\n");
+    simGetOpCounts().forEach((k, cnt) -> sb.append("##\t\t- " + String.format(Locale.ROOT, "%-14s  %4d", k, cnt.get()) + "\n"));
+    sb.append("######### Autoscaling event counts ###########\n");
+    Map<String, Map<String, AtomicInteger>> counts = simGetEventCounts();
+    counts.forEach((trigger, map) -> {
+      sb.append("## * Trigger: " + trigger + "\n");
+      map.forEach((s, cnt) -> sb.append("##\t\t- " + String.format(Locale.ROOT, "%-11s  %4d", s, cnt.get()) + "\n"));
+    });
+    return sb.toString();
+  }
+
   /**
    * Get the instance of {@link SolrResourceLoader} that is used by the cluster components.
    */
@@ -576,8 +641,12 @@ public class SimCloudManager implements SolrCloudManager {
           }
           break;
         case DELETE:
-          clusterStateProvider.simDeleteCollection(req.getParams().get(CommonParams.NAME),
-              req.getParams().get(CommonAdminParams.ASYNC), results);
+          try {
+            clusterStateProvider.simDeleteCollection(req.getParams().get(CommonParams.NAME),
+                req.getParams().get(CommonAdminParams.ASYNC), results);
+          } catch (Exception e) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
+          }
           break;
         case LIST:
           results.add("collections", clusterStateProvider.simListCollections());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index d535908..b8da5b0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -244,7 +244,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @return true if a node existed and was removed
    */
   public boolean simRemoveNode(String nodeId) throws Exception {
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       Set<String> collections = new HashSet<>();
       // mark every replica on that node as down
@@ -299,14 +299,14 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     liveNodes.add(nodeId);
     createEphemeralLiveNode(nodeId);
     Set<String> collections = new HashSet<>();
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       setReplicaStates(nodeId, Replica.State.RECOVERING, collections);
     } finally {
       lock.unlock();
     }
     cloudManager.getTimeSource().sleep(1000);
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       setReplicaStates(nodeId, Replica.State.ACTIVE, collections);
     } finally {
@@ -392,7 +392,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       throw new Exception("Wrong node (not " + nodeId + "): " + replicaInfo);
     }
 
-    lock.lock();
+    lock.lockInterruptibly();
     try {
 
       opDelay(replicaInfo.getCollection(), CollectionParams.CollectionAction.ADDREPLICA.name());
@@ -438,7 +438,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    */
   public void simRemoveReplica(String nodeId, String coreNodeName) throws Exception {
     List<ReplicaInfo> replicas = nodeReplicaMap.computeIfAbsent(nodeId, n -> new ArrayList<>());
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       for (int i = 0; i < replicas.size(); i++) {
         if (coreNodeName.equals(replicas.get(i).getName())) {
@@ -688,11 +688,11 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @param async async id
    * @param results results of the operation
    */
-  public void simDeleteCollection(String collection, String async, NamedList results) throws IOException {
+  public void simDeleteCollection(String collection, String async, NamedList results) throws Exception {
     if (async != null) {
       results.add(CoreAdminParams.REQUESTID, async);
     }
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       collProperties.remove(collection);
       sliceProperties.remove(collection);
@@ -730,7 +730,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * Remove all collections.
    */
   public void simDeleteAllCollections() throws Exception {
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       nodeReplicaMap.clear();
       collProperties.clear();
@@ -805,7 +805,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     String collectionName = message.getStr(COLLECTION_PROP);
     String sliceName = message.getStr(SHARD_ID_PROP);
     ClusterState clusterState = getClusterState();
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       ZkWriteCommand cmd = new CollectionMutator(cloudManager).createShard(clusterState, message);
       if (cmd.noop) {
@@ -873,6 +873,10 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     AtomicReference<String> sliceName = new AtomicReference<>();
     sliceName.set(message.getStr(SHARD_ID_PROP));
     String splitKey = message.getStr("split.key");
+
+    // always invalidate cached collection states to get up-to-date metrics
+    collectionsStatesRef.set(null);
+
     ClusterState clusterState = getClusterState();
     DocCollection collection = clusterState.getCollection(collectionName);
     Slice parentSlice = SplitShardCmd.getParentSlice(clusterState, collectionName, sliceName, splitKey);
@@ -896,10 +900,11 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     if (sessionWrapper != null) sessionWrapper.release();
 
     // adjust numDocs / deletedDocs / maxDoc
-    String numDocsStr = parentSlice.getLeader().getStr("SEARCHER..searcher.numDocs", "0");
+    String numDocsStr = parentSlice.getLeader().getStr("SEARCHER.searcher.numDocs", "0");
     long numDocs = Long.parseLong(numDocsStr);
     long newNumDocs = numDocs / subSlices.size();
     long remainder = numDocs % subSlices.size();
+    String remainderSlice = null;
 
     for (ReplicaPosition replicaPosition : replicaPositions) {
       String subSliceName = replicaPosition.shard;
@@ -912,22 +917,31 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       replicaProps.put(ZkStateReader.BASE_URL_PROP, Utils.getBaseUrlForNodeName(subShardNodeName, "http"));
 
       long replicasNumDocs = newNumDocs;
-      if (subSlices.get(0).equals(subSliceName)) { // only add to the first
+      if (remainderSlice == null) {
+        remainderSlice = subSliceName;
+      }
+      if (remainderSlice.equals(subSliceName)) { // only add to one sub slice
         replicasNumDocs += remainder;
       }
-      replicaProps.put("SEARCHER..searcher.numDocs", replicasNumDocs);
-      replicaProps.put("SEARCHER..searcher.maxDoc", replicasNumDocs);
-      replicaProps.put("SEARCHER..searcher.deletedDocs", 0);
+      replicaProps.put("SEARCHER.searcher.numDocs", replicasNumDocs);
+      replicaProps.put("SEARCHER.searcher.maxDoc", replicasNumDocs);
+      replicaProps.put("SEARCHER.searcher.deletedDocs", 0);
 
       ReplicaInfo ri = new ReplicaInfo("core_node" + Assign.incAndGetId(stateManager, collectionName, 0),
           solrCoreName, collectionName, replicaPosition.shard, replicaPosition.type, subShardNodeName, replicaProps);
       simAddReplica(replicaPosition.node, ri, false);
     }
     // mark the old slice as inactive
-    Map<String, Object> props = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
-        .computeIfAbsent(sliceName.get(), s -> new ConcurrentHashMap<>());
-    props.put(ZkStateReader.STATE_PROP, Slice.State.INACTIVE.toString());
-    props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+    lock.lockInterruptibly();
+    try {
+      Map<String, Object> props = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
+          .computeIfAbsent(sliceName.get(), s -> new ConcurrentHashMap<>());
+      props.put(ZkStateReader.STATE_PROP, Slice.State.INACTIVE.toString());
+      props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+      // XXX also mark replicas as down? currently SplitShardCmd doesn't do this
+    } finally {
+      lock.unlock();
+    }
     // add slice props
     for (int i = 0; i < subRanges.size(); i++) {
       String subSlice = subSlices.get(i);
@@ -937,8 +951,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       sliceProps.put(Slice.RANGE, range);
       sliceProps.put(Slice.PARENT, sliceName.get());
       sliceProps.put(ZkStateReader.STATE_PROP, Slice.State.ACTIVE.toString());
-      props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
+      sliceProps.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
     }
+    collectionsStatesRef.set(null);
     simRunLeaderElection(Collections.singleton(collectionName), true);
     results.add("success", "");
 
@@ -967,7 +982,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
 
     opDelay(collectionName, CollectionParams.CollectionAction.DELETESHARD.name());
 
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       sliceProperties.computeIfAbsent(collectionName, coll -> new ConcurrentHashMap<>()).remove(sliceName);
       nodeReplicaMap.forEach((n, replicas) -> {
@@ -1005,7 +1020,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @return
    * @throws SolrException
    */
-  public UpdateResponse simUpdate(UpdateRequest req) throws SolrException, IOException {
+  public UpdateResponse simUpdate(UpdateRequest req) throws SolrException, InterruptedException, IOException {
     String collection = req.getCollection();
     if (collection == null) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection not set");
@@ -1013,66 +1028,89 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     if (!simListCollections().contains(collection)) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection '" + collection + "' doesn't exist");
     }
+    // always reset first to get the current metrics
+    collectionsStatesRef.set(null);
     DocCollection coll = getClusterState().getCollection(collection);
     DocRouter router = coll.getRouter();
-    // process updates first
-
-    List<String> deletes = req.getDeleteById();
-    if (deletes != null && !deletes.isEmpty()) {
-      for (String id : deletes) {
-        Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
-        String numDocsStr = s.getLeader().getProperty("SEARCHER.searcher.numDocs");
-        if (numDocsStr != null && Long.parseLong(numDocsStr) > 0) {
-          try {
-            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", 1, true, false);
-            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", -1, true, false);
-          } catch (Exception e) {
-            throw new IOException(e);
-          }
-        }
-      }
-    }
-    deletes = req.getDeleteQuery();
-    if (deletes != null && !deletes.isEmpty()) {
-      for (String q : deletes) {
-        if (!"*:*".equals(q)) {
-          throw new UnsupportedOperationException("Only '*:*' query is supported in deleteByQuery");
-        }
-        for (Slice s : coll.getSlices()) {
-          String numDocsStr = s.getLeader().getProperty("SEARCHER.searcher.numDocs");
+
+    boolean modified = false;
+
+    lock.lockInterruptibly();
+    try {
+      List<String> deletes = req.getDeleteById();
+      if (deletes != null && !deletes.isEmpty()) {
+        for (String id : deletes) {
+          Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
+          String numDocsStr = s.getLeader().getStr("SEARCHER.searcher.numDocs");
           if (numDocsStr == null) {
+            LOG.debug("-- no docs in " + s.getLeader());
             continue;
           }
           long numDocs = Long.parseLong(numDocsStr);
           if (numDocs == 0) {
+            LOG.debug("-- attempting to delete nonexistent doc " + id + " from " + s.getLeader());
             continue;
           }
+          if (numDocsStr != null) {
+            modified = true;
+            try {
+              simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", 1, true, false);
+              simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", -1, true, false);
+            } catch (Exception e) {
+              throw new IOException(e);
+            }
+          }
+        }
+      }
+      deletes = req.getDeleteQuery();
+      if (deletes != null && !deletes.isEmpty()) {
+        for (String q : deletes) {
+          if (!"*:*".equals(q)) {
+            throw new UnsupportedOperationException("Only '*:*' query is supported in deleteByQuery");
+          }
+          for (Slice s : coll.getSlices()) {
+            String numDocsStr = s.getLeader().getStr("SEARCHER.searcher.numDocs");
+            if (numDocsStr == null) {
+              continue;
+            }
+            long numDocs = Long.parseLong(numDocsStr);
+            if (numDocs == 0) {
+              continue;
+            }
+            modified = true;
+            try {
+              simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", numDocs, false, false);
+              simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 0, false, false);
+            } catch (Exception e) {
+              throw new IOException(e);
+            }
+          }
+        }
+      }
+      List<SolrInputDocument> docs = req.getDocuments();
+      if (docs != null && !docs.isEmpty()) {
+        for (SolrInputDocument doc : docs) {
+          String id = (String) doc.getFieldValue("id");
+          if (id == null) {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Document without id: " + doc);
+          }
+          Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
+          modified = true;
           try {
-            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", numDocs, false, false);
-            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 0, false, false);
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 1, true, false);
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.maxDoc", 1, true, false);
+            // Policy reuses this value and expects it to be in GB units!!!
+            // simSetShardValue(collection, s.getName(), "INDEX.sizeInBytes", 500, true, false);
           } catch (Exception e) {
             throw new IOException(e);
           }
         }
       }
-    }
-    List<SolrInputDocument> docs = req.getDocuments();
-    if (docs != null && !docs.isEmpty()) {
-      for (SolrInputDocument doc : docs) {
-        String id = (String) doc.getFieldValue("id");
-        if (id == null) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Document without id: " + doc);
-        }
-        Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
-        try {
-          simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 1, true, false);
-          simSetShardValue(collection, s.getName(), "SEARCHER.searcher.maxDoc", 1, true, false);
-          // Policy reuses this value and expects it to be in GB units!!!
-          // simSetShardValue(collection, s.getName(), "INDEX.sizeInBytes", 500, true, false);
-        } catch (Exception e) {
-          throw new IOException(e);
-        }
+      if (modified) {
+        collectionsStatesRef.set(null);
       }
+    } finally {
+      lock.unlock();
     }
     return new UpdateResponse();
   }
@@ -1099,7 +1137,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @param properties properties to set
    */
   public void simSetClusterProperties(Map<String, Object> properties) throws Exception {
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       clusterProperties.clear();
       if (properties != null) {
@@ -1118,7 +1156,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @param value property value
    */
   public void simSetClusterProperty(String key, Object value) throws Exception {
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       if (value != null) {
         clusterProperties.put(key, value);
@@ -1137,7 +1175,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @param properties properties
    */
   public void simSetCollectionProperties(String coll, Map<String, Object> properties) throws Exception {
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       if (properties == null) {
         collProperties.remove(coll);
@@ -1160,7 +1198,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    */
   public void simSetCollectionProperty(String coll, String key, String value) throws Exception {
     Map<String, Object> props = collProperties.computeIfAbsent(coll, c -> new HashMap<>());
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       if (value == null) {
         props.remove(key);
@@ -1181,7 +1219,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    */
   public void simSetSliceProperties(String coll, String slice, Map<String, Object> properties) throws Exception {
     Map<String, Object> sliceProps = sliceProperties.computeIfAbsent(coll, c -> new HashMap<>()).computeIfAbsent(slice, s -> new HashMap<>());
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       sliceProps.clear();
       if (properties != null) {
@@ -1309,9 +1347,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * List collections.
    * @return list of existing collections.
    */
-  public List<String> simListCollections() {
+  public List<String> simListCollections() throws InterruptedException {
     final Set<String> collections = new HashSet<>();
-    lock.lock();
+    lock.lockInterruptibly();
     try {
       nodeReplicaMap.forEach((n, replicas) -> {
         replicas.forEach(ri -> collections.add(ri.getCollection()));
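
Every lock() call in this provider becomes lockInterruptibly(), and the affected methods now declare InterruptedException: a simulator thread blocked on the shared lock can be interrupted (for example by test teardown) instead of hanging indefinitely. The pattern in isolation:

    import java.util.concurrent.locks.ReentrantLock;

    // The lock() -> lockInterruptibly() pattern in isolation: a thread blocked
    // waiting for the shared simulator lock now responds to interruption (for
    // example from test teardown) instead of hanging indefinitely.
    public class InterruptibleLockSketch {
      private final ReentrantLock lock = new ReentrantLock();

      public void mutateState() throws InterruptedException {
        lock.lockInterruptibly(); // throws InterruptedException while waiting
        try {
          // ... mutate shared simulated cluster state ...
        } finally {
          lock.unlock();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        new InterruptibleLockSketch().mutateState(); // uncontended: acquires immediately
      }
    }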

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
index f18234a..1c56b74 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimSolrCloudTestCase.java
@@ -79,59 +79,7 @@ public class SimSolrCloudTestCase extends SolrTestCaseJ4 {
   public void tearDown() throws Exception {
     super.tearDown();
     if (cluster != null) {
-      log.info("\n");
-      log.info("#############################################");
-      log.info("############ FINAL CLUSTER STATS ############");
-      log.info("#############################################\n");
-      log.info("## Live nodes:\t\t" + cluster.getLiveNodesSet().size());
-      int emptyNodes = 0;
-      int maxReplicas = 0;
-      int minReplicas = Integer.MAX_VALUE;
-      Map<String, Map<Replica.State, AtomicInteger>> replicaStates = new TreeMap<>();
-      int numReplicas = 0;
-      for (String node : cluster.getLiveNodesSet().get()) {
-        List<ReplicaInfo> replicas = cluster.getSimClusterStateProvider().simGetReplicaInfos(node);
-        numReplicas += replicas.size();
-        if (replicas.size() > maxReplicas) {
-          maxReplicas = replicas.size();
-        }
-        if (minReplicas > replicas.size()) {
-          minReplicas = replicas.size();
-        }
-        for (ReplicaInfo ri : replicas) {
-          replicaStates.computeIfAbsent(ri.getCollection(), c -> new TreeMap<>())
-              .computeIfAbsent(ri.getState(), s -> new AtomicInteger())
-              .incrementAndGet();
-        }
-        if (replicas.isEmpty()) {
-          emptyNodes++;
-        }
-      }
-      if (minReplicas == Integer.MAX_VALUE) {
-        minReplicas = 0;
-      }
-      log.info("## Empty nodes:\t" + emptyNodes);
-      Set<String> deadNodes = cluster.getSimNodeStateProvider().simGetDeadNodes();
-      log.info("## Dead nodes:\t\t" + deadNodes.size());
-      deadNodes.forEach(n -> log.info("##\t\t" + n));
-      log.info("## Collections:\t" + cluster.getSimClusterStateProvider().simListCollections());
-      log.info("## Max replicas per node:\t" + maxReplicas);
-      log.info("## Min replicas per node:\t" + minReplicas);
-      log.info("## Total replicas:\t\t" + numReplicas);
-      replicaStates.forEach((c, map) -> {
-        AtomicInteger repCnt = new AtomicInteger();
-        map.forEach((s, cnt) -> repCnt.addAndGet(cnt.get()));
-        log.info("## * " + c + "\t\t" + repCnt.get());
-        map.forEach((s, cnt) -> log.info("##\t\t- " + String.format(Locale.ROOT, "%-12s  %4d", s, cnt.get())));
-      });
-      log.info("######### Final Solr op counts ##########");
-      cluster.simGetOpCounts().forEach((k, cnt) -> log.info("##\t\t- " + String.format(Locale.ROOT, "%-14s  %4d", k, cnt.get())));
-      log.info("######### Autoscaling event counts ###########");
-      Map<String, Map<String, AtomicInteger>> counts = cluster.simGetEventCounts();
-      counts.forEach((trigger, map) -> {
-        log.info("## * Trigger: " + trigger);
-        map.forEach((s, cnt) -> log.info("##\t\t- " + String.format(Locale.ROOT, "%-11s  %4d", s, cnt.get())));
-      });
+      log.info(cluster.dumpClusterState(false));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
index 8c1fba3..e1d8281 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
@@ -66,6 +66,10 @@ public class ReplicaInfo implements MapWriter {
     this.node = node;
   }
 
+  public Object clone() {
+    return new ReplicaInfo(name, core, collection, shard, type, node, variables);
+  }
+
   @Override
   public void writeMap(EntryWriter ew) throws IOException {
     ew.put(name, (MapWriter) ew1 -> {
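
This clone() supports the IndexSizeTrigger change above, which copies a ReplicaInfo before writing SIZE_PROP into its variables so that per-event state does not end up in a shared instance. A hypothetical illustration of the hazard; whether the real constructor copies the variables map deeply is an assumption here, so this sketch copies it explicitly:

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical illustration of the hazard the clone avoids; Info stands in
    // for ReplicaInfo, and the defensive map copy is an explicit assumption
    // here rather than a claim about the real constructor.
    public class CloneBeforeMutateSketch {
      static final String SIZE_PROP = "__indexSize__";

      record Info(String core, Map<String, Object> variables) {
        Info copy() {
          return new Info(core, new HashMap<>(variables)); // defensive copy
        }
      }

      public static void main(String[] args) {
        Info shared = new Info("core_node1", new HashMap<>());
        Info perEvent = shared.copy();
        perEvent.variables().put(SIZE_PROP, 1234L); // mutate only the copy
        System.out.println(shared.variables().containsKey(SIZE_PROP)); // false
      }
    }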

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java
index 825a24a..9d44ae4 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java
@@ -42,6 +42,11 @@ public class UnsupportedSuggester extends Suggester {
   }
 
   @Override
+  public CollectionParams.CollectionAction getAction() {
+    return action;
+  }
+
+  @Override
   SolrRequest init() {
     log.warn("Unsupported suggester for action " + action + " with hints " + hints + " - no suggestion available");
     return null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2f13a21f/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
index b8bf1f4..b39c64c 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/SolrCloudTestCase.java
@@ -272,7 +272,7 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
 
   /**
    * Return a {@link CollectionStatePredicate} that returns true if a collection has the expected
-   * number of shards and replicas
+   * number of active shards and active replicas
    */
   public static CollectionStatePredicate clusterShape(int expectedShards, int expectedReplicas) {
     return (liveNodes, collectionState) -> {