You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ab...@apache.org on 2018/04/05 13:09:29 UTC

[1/2] lucene-solr:jira/solr-12181: SOLR-12181: WIP patch.

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-12181 [created] 957c1d2c8


SOLR-12181: WIP patch.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/c882af72
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/c882af72
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/c882af72

Branch: refs/heads/jira/solr-12181
Commit: c882af72127704ce82848e1d93b529df8fbde5c2
Parents: 34b83ed
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Wed Apr 4 17:00:05 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Wed Apr 4 17:00:05 2018 +0200

----------------------------------------------------------------------
 .../solr/cloud/autoscaling/AutoScaling.java     |   3 +
 .../cloud/autoscaling/ComputePlanAction.java    |   1 +
 .../cloud/autoscaling/IndexSizeTrigger.java     | 357 +++++++++++++++++++
 .../solr/cloud/autoscaling/MetricTrigger.java   |   6 +-
 .../solr/cloud/autoscaling/TriggerEvent.java    |  16 +-
 .../cloud/autoscaling/TriggerEventType.java     |   3 +-
 .../solr/common/params/CollectionParams.java    |   4 +-
 7 files changed, 382 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c882af72/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java
index 68282a7..93f449a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoScaling.java
@@ -180,6 +180,9 @@ public class AutoScaling {
         case SCHEDULED:
           t = new ScheduledTrigger(name);
         break;
+        case INDEXSIZE:
+          t = new IndexSizeTrigger(name);
+          break;
         default:
           throw new IllegalArgumentException("Unknown event type: " + type + " in trigger: " + name);
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c882af72/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
index 8f3175c..610edf7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
@@ -203,6 +203,7 @@ public class ComputePlanAction extends TriggerActionBase {
         break;
       case SEARCHRATE:
       case METRIC:
+      case INDEXSIZE:
         List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>)event.getProperty(TriggerEvent.REQUESTED_OPS, Collections.emptyList());
         int start = (Integer)event.getProperty(START, 0);
         if (ops.isEmpty() || start >= ops.size()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c882af72/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
new file mode 100644
index 0000000..fa406cc
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.autoscaling;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
+import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
+import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.util.Pair;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.SolrResourceLoader;
+import org.apache.solr.metrics.SolrCoreMetricManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ *
+ */
+public class IndexSizeTrigger extends TriggerBase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public static final String ABOVE_PROP = "above";
+  public static final String ABOVE_OP_PROP = "aboveOp";
+  public static final String BELOW_PROP = "below";
+  public static final String BELOW_OP_PROP = "belowOp";
+  public static final String UNIT_PROP = "unit";
+  public static final String COLLECTIONS_PROP = "collections";
+
+  public static final String SIZE_PROP = "__indexSize__";
+  public static final String ABOVE_SIZE_PROP = "aboveSize";
+  public static final String BELOW_SIZE_PROP = "belowSize";
+
+  public enum Unit { bytes, docs }
+
+  private long above, below;
+  private CollectionParams.CollectionAction aboveOp, belowOp;
+  private Unit unit;
+  private final Set<String> collections = new HashSet<>();
+  private final Map<String, Long> lastEventMap = new ConcurrentHashMap<>();
+
+  public IndexSizeTrigger(String name) {
+    super(TriggerEventType.INDEXSIZE, name);
+    TriggerUtils.validProperties(validProperties,
+        ABOVE_PROP, BELOW_PROP, UNIT_PROP, COLLECTIONS_PROP);
+  }
+
+  @Override
+  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
+    super.configure(loader, cloudManager, properties);
+    String unitStr = String.valueOf(properties.getOrDefault(UNIT_PROP, Unit.bytes));
+    try {
+      unit = Unit.valueOf(unitStr.toLowerCase(Locale.ROOT));
+    } catch (Exception e) {
+      throw new TriggerValidationException(getName(), UNIT_PROP, "invalid unit, must be one of " + Arrays.toString(Unit.values()));
+    }
+    String aboveStr = String.valueOf(properties.getOrDefault(ABOVE_PROP, Long.MAX_VALUE));
+    String belowStr = String.valueOf(properties.getOrDefault(BELOW_PROP, -1));
+    try {
+      above = Long.parseLong(aboveStr);
+      if (above <= 0) {
+        throw new Exception("value must be > 0");
+      }
+    } catch (Exception e) {
+      throw new TriggerValidationException(getName(), ABOVE_PROP, "invalid value '" + aboveStr + "': " + e.toString());
+    }
+    try {
+      below = Long.parseLong(aboveStr);
+      if (below < 0) {
+        below = -1;
+      }
+    } catch (Exception e) {
+      throw new TriggerValidationException(getName(), BELOW_PROP, "invalid value '" + belowStr + "': " + e.toString());
+    }
+    if (below > 0 && (below * 2 > above)) {
+      throw new TriggerValidationException(getName(), BELOW_PROP,
+          "invalid value " + below + ", should be less than half of '" + ABOVE_PROP + "' value, which is " + above);
+    }
+    String collectionsString = (String) properties.get(COLLECTIONS_PROP);
+    if (collectionsString != null && !collectionsString.isEmpty()) {
+      collections.addAll(StrUtils.splitSmart(collectionsString, ','));
+    }
+    String aboveOpStr = String.valueOf(properties.getOrDefault(ABOVE_OP_PROP, CollectionParams.CollectionAction.SPLITSHARD.toLower()));
+    // TODO: this is a placeholder until SOLR-9407 is implemented
+    String belowOpStr = String.valueOf(properties.getOrDefault(BELOW_OP_PROP, CollectionParams.CollectionAction.MERGESHARDS.toLower()));
+    aboveOp = CollectionParams.CollectionAction.get(aboveOpStr);
+    if (aboveOp == null) {
+      throw new TriggerValidationException(getName(), ABOVE_OP_PROP, "unrecognized value of " + ABOVE_OP_PROP + ": '" + aboveOpStr + "'");
+    }
+    belowOp = CollectionParams.CollectionAction.get(belowOpStr);
+    if (belowOp == null) {
+      throw new TriggerValidationException(getName(), BELOW_OP_PROP, "unrecognized value of " + BELOW_OP_PROP + ": '" + belowOpStr + "'");
+    }
+  }
+
+  @Override
+  protected Map<String, Object> getState() {
+    Map<String, Object> state = new HashMap<>();
+    state.put("lastEventMap", lastEventMap);
+    return state;
+  }
+
+  @Override
+  protected void setState(Map<String, Object> state) {
+    this.lastEventMap.clear();
+    Map<String, Long> replicaVsTime = (Map<String, Long>)state.get("lastEventMap");
+    if (replicaVsTime != null) {
+      this.lastEventMap.putAll(replicaVsTime);
+    }
+  }
+
+  @Override
+  public void restoreState(AutoScaling.Trigger old) {
+    assert old.isClosed();
+    if (old instanceof IndexSizeTrigger) {
+    } else {
+      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
+          "Unable to restore state from an unknown type of trigger");
+    }
+  }
+
+  @Override
+  public void run() {
+    synchronized(this) {
+      if (isClosed) {
+        log.warn(getName() + " ran but was already closed");
+        return;
+      }
+    }
+    AutoScaling.TriggerEventProcessor processor = processorRef.get();
+    if (processor == null) {
+      return;
+    }
+
+    // replica name / info + size
+    Map<String, ReplicaInfo> currentSizes = new HashMap<>();
+
+    try {
+      ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
+      for (String node : clusterState.getLiveNodes()) {
+        Map<String, ReplicaInfo> metricTags = new HashMap<>();
+        // coll, shard, replica
+        Map<String, Map<String, List<ReplicaInfo>>> infos = cloudManager.getNodeStateProvider().getReplicaInfo(node, Collections.emptyList());
+        infos.forEach((coll, shards) -> {
+          if (!collections.isEmpty() && !collections.contains(coll)) {
+            return;
+          }
+          DocCollection docCollection = clusterState.getCollection(coll);
+
+          shards.forEach((sh, replicas) -> {
+            // check only the leader
+            Replica r = docCollection.getSlice(sh).getLeader();
+            // no leader - don't do anything
+            if (r == null) {
+              return;
+            }
+            // find ReplicaInfo
+            ReplicaInfo info = null;
+            for (ReplicaInfo ri : replicas) {
+              if (r.getCoreName().equals(ri.getCore())) {
+                info = ri;
+                break;
+              }
+            }
+            if (info == null) {
+              // should not happen
+              log.warn("could not find replica info for replica " + r);
+              return;
+            }
+            // we have to translate to the metrics registry name, which uses "_replica_nN" as suffix
+            String replicaName = Utils.parseMetricsReplicaName(coll, info.getCore());
+            if (replicaName == null) { // should never happen???
+              replicaName = info.getName(); // which is actually coreNode name...
+            }
+            String registry = SolrCoreMetricManager.createRegistryName(true, coll, sh, replicaName, null);
+            String tag;
+            switch (unit) {
+              case bytes:
+                tag = "metrics:" + registry + ":INDEX.size";
+                break;
+              case docs:
+                tag = "metrics:" + registry + "SEARCHER.searcher.numDocs";
+                break;
+              default:
+                throw new UnsupportedOperationException("Unit " + unit + " not supported");
+            }
+            metricTags.put(tag, info);
+          });
+        });
+        if (metricTags.isEmpty()) {
+          continue;
+        }
+        Map<String, Object> sizes = cloudManager.getNodeStateProvider().getNodeValues(node, metricTags.keySet());
+        sizes.forEach((tag, size) -> {
+          ReplicaInfo info = metricTags.get(tag);
+          if (info == null) {
+            log.warn("Missing replica info for response tag " + tag);
+          } else {
+            // verify that it's a Number
+            if (!(size instanceof Number)) {
+              log.warn("invalid size value - not a number: '" + size + "' is " + size.getClass().getName());
+            }
+            info.getVariables().put(SIZE_PROP, ((Number) size).longValue());
+            currentSizes.put(info.getCore(), info);
+          }
+        });
+      }
+    } catch (IOException e) {
+      log.warn("Error running trigger " + getName(), e);
+      return;
+    }
+
+    long now = cloudManager.getTimeSource().getTimeNs();
+
+    // now check thresholds
+
+    // collection / list(info)
+    Map<String, List<ReplicaInfo>> aboveSize = new HashMap<>();
+    currentSizes.entrySet().stream()
+        .filter(e -> (Long)e.getValue().getVariable(SIZE_PROP) > above &&
+            waitForElapsed(e.getKey(), now, lastEventMap))
+        .forEach(e -> {
+          ReplicaInfo info = e.getValue();
+          List<ReplicaInfo> infos = aboveSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
+          infos.add(info);
+        });
+    // collection / list(info)
+    Map<String, List<ReplicaInfo>> belowSize = new HashMap<>();
+    currentSizes.entrySet().stream()
+        .filter(e -> (Long)e.getValue().getVariable(SIZE_PROP) < below &&
+            waitForElapsed(e.getKey(), now, lastEventMap))
+        .forEach(e -> {
+          ReplicaInfo info = e.getValue();
+          List<ReplicaInfo> infos = belowSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
+          infos.add(info);
+        });
+
+    if (aboveSize.isEmpty() && belowSize.isEmpty()) {
+      return;
+    }
+
+    // find the earliest time when a condition was exceeded
+    final AtomicLong eventTime = new AtomicLong(now);
+
+    // calculate ops
+    final List<TriggerEvent.Op> ops = new ArrayList<>();
+    aboveSize.forEach((coll, replicas) -> {
+      replicas.forEach(r -> {
+        TriggerEvent.Op op = new TriggerEvent.Op(aboveOp);
+        op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(coll, r.getShard()));
+        ops.add(op);
+        Long time = lastEventMap.get(r.getCore());
+        if (time != null && eventTime.get() > time) {
+          eventTime.set(time);
+        }
+      });
+    });
+    belowSize.forEach((coll, replicas) -> {
+      if (replicas.size() < 2) {
+        return;
+      }
+      replicas.sort((r1, r2) -> {
+        long delta = (Long) r1.getVariable(SIZE_PROP) - (Long) r2.getVariable(SIZE_PROP);
+        if (delta > 0) {
+          return 1;
+        } else if (delta < 0) {
+          return -1;
+        } else {
+          return 0;
+        }
+      });
+      // take top two
+      TriggerEvent.Op op = new TriggerEvent.Op(belowOp);
+      op.addHint(Suggester.Hint.COLL_SHARD, new Pair(coll, replicas.get(0).getShard()));
+      op.addHint(Suggester.Hint.COLL_SHARD, new Pair(coll, replicas.get(1).getShard()));
+      ops.add(op);
+      Long time = lastEventMap.get(replicas.get(0).getCore());
+      if (time != null && eventTime.get() > time) {
+        eventTime.set(time);
+      }
+      time = lastEventMap.get(replicas.get(1).getCore());
+      if (time != null && eventTime.get() > time) {
+        eventTime.set(time);
+      }
+    });
+
+    if (ops.isEmpty()) {
+      return;
+    }
+    if (processor.process(new IndexSizeEvent(getName(), eventTime.get(), ops, aboveSize, belowSize))) {
+      // update last event times
+      aboveSize.forEach((coll, replicas) -> {
+        replicas.forEach(r -> lastEventMap.put(r.getCore(), now));
+      });
+      belowSize.forEach((coll, replicas) -> {
+        lastEventMap.put(replicas.get(0).getCore(), now);
+        lastEventMap.put(replicas.get(1).getCore(), now);
+      });
+    }
+  }
+
+  private boolean waitForElapsed(String name, long now, Map<String, Long> lastEventMap) {
+    Long lastTime = lastEventMap.computeIfAbsent(name, s -> now);
+    long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
+    log.trace("name={}, lastTime={}, elapsed={}", name, lastTime, elapsed);
+    if (TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS) < getWaitForSecond()) {
+      return false;
+    }
+    return true;
+  }
+
+  public static class IndexSizeEvent extends TriggerEvent {
+    public IndexSizeEvent(String source, long eventTime, List<Op> ops, Map<String, List<ReplicaInfo>> aboveSize,
+                          Map<String, List<ReplicaInfo>> belowSize) {
+      super(TriggerEventType.INDEXSIZE, source, eventTime, null);
+      properties.put(TriggerEvent.REQUESTED_OPS, ops);
+      properties.put(ABOVE_SIZE_PROP, aboveSize);
+      properties.put(BELOW_SIZE_PROP, belowSize);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c882af72/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
index 9fdf8dc..9058a9a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
@@ -203,12 +203,12 @@ public class MetricTrigger extends TriggerBase {
       List<Op> ops = new ArrayList<>(hotNodes.size());
       for (String n : hotNodes.keySet()) {
         Op op = new Op(CollectionParams.CollectionAction.get(preferredOp));
-        op.setHint(Suggester.Hint.SRC_NODE, n);
+        op.addHint(Suggester.Hint.SRC_NODE, n);
         if (!collection.equals(Policy.ANY)) {
           if (!shard.equals(Policy.ANY)) {
-            op.setHint(Suggester.Hint.COLL_SHARD, new Pair<>(collection, shard));
+            op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(collection, shard));
           } else {
-            op.setHint(Suggester.Hint.COLL, collection);
+            op.addHint(Suggester.Hint.COLL, collection);
           }
         }
         ops.add(op);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c882af72/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java
index e4a4b3d..907309d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/TriggerEvent.java
@@ -17,9 +17,13 @@
 package org.apache.solr.cloud.autoscaling;
 
 import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.EnumMap;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
@@ -49,11 +53,17 @@ public class TriggerEvent implements MapWriter {
 
     public Op(CollectionParams.CollectionAction action, Suggester.Hint hint, Object hintValue) {
       this.action = action;
-      this.hints.put(hint, hintValue);
+      addHint(hint, hintValue);
     }
 
-    public void setHint(Suggester.Hint hint, Object value) {
-      hints.put(hint, value);
+    public void addHint(Suggester.Hint hint, Object value) {
+      hint.validator.accept(value);
+      if (hint.multiValued) {
+        Collection<?> values = value instanceof Collection ? (Collection) value : Collections.singletonList(value);
+        ((Set) hints.computeIfAbsent(hint, h -> new HashSet<>())).addAll(values);
+      } else {
+        hints.put(hint, value == null ? null : String.valueOf(value));
+      }
     }
 
     public CollectionParams.CollectionAction getAction() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c882af72/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/TriggerEventType.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/TriggerEventType.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/TriggerEventType.java
index 96bc773..a983bf0 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/TriggerEventType.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/TriggerEventType.java
@@ -28,5 +28,6 @@ public enum TriggerEventType {
   SEARCHRATE,
   INDEXRATE,
   INVALID,
-  METRIC
+  METRIC,
+  INDEXSIZE
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c882af72/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
index f473ee4..6fb348f 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
@@ -119,7 +119,9 @@ public interface CollectionParams {
     REPLACENODE(true, LockLevel.NONE),
     DELETENODE(true, LockLevel.NONE),
     MOCK_REPLICA_TASK(false, LockLevel.REPLICA),
-    NONE(false, LockLevel.NONE)
+    NONE(false, LockLevel.NONE),
+    // TODO: not implemented yet
+    MERGESHARDS(true, LockLevel.SHARD)
     ;
     public final boolean isWrite;
 


[2/2] lucene-solr:jira/solr-12181: SOLR-12181: Add missing support in the simulator to track update and delete requests. Add unit test.

Posted by ab...@apache.org.
SOLR-12181: Add missing support in the simulator to track update and delete requests.
Add unit test.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/957c1d2c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/957c1d2c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/957c1d2c

Branch: refs/heads/jira/solr-12181
Commit: 957c1d2c8e6ebd8b39679c40a107c262731c1f20
Parents: c882af7
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Thu Apr 5 15:08:26 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Thu Apr 5 15:08:26 2018 +0200

----------------------------------------------------------------------
 .../autoscaling/AutoAddReplicasPlanAction.java  |   4 +-
 .../cloud/autoscaling/ComputePlanAction.java    |  13 +-
 .../cloud/autoscaling/IndexSizeTrigger.java     |   9 +-
 .../cloud/autoscaling/IndexSizeTriggerTest.java | 220 +++++++++++++++++++
 .../cloud/autoscaling/NodeAddedTriggerTest.java |   2 +-
 .../cloud/autoscaling/sim/SimCloudManager.java  |  30 ++-
 .../sim/SimClusterStateProvider.java            | 155 ++++++++++++-
 .../autoscaling/sim/SimNodeStateProvider.java   |   4 +-
 .../cloud/autoscaling/sim/TestLargeCluster.java |   2 +-
 .../autoscaling/sim/TestTriggerIntegration.java |   2 +-
 .../client/solrj/cloud/autoscaling/Policy.java  |   3 +
 .../cloud/autoscaling/SplitShardSuggester.java  |  43 ++++
 .../cloud/autoscaling/UnsupportedSuggester.java |  54 +++++
 13 files changed, 511 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java
index febd6bd..4189aa4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/AutoAddReplicasPlanAction.java
@@ -33,7 +33,7 @@ import org.apache.solr.common.cloud.ZkStateReader;
 public class AutoAddReplicasPlanAction extends ComputePlanAction {
 
   @Override
-  protected Suggester getSuggester(Policy.Session session, TriggerEvent event, SolrCloudManager cloudManager) {
+  protected Suggester getSuggester(Policy.Session session, TriggerEvent event, ActionContext context, SolrCloudManager cloudManager) {
     // for backward compatibility
     ClusterStateProvider stateProvider = cloudManager.getClusterStateProvider();
     String autoAddReplicas = stateProvider.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, (String) null);
@@ -41,7 +41,7 @@ public class AutoAddReplicasPlanAction extends ComputePlanAction {
       return NoneSuggester.get(session);
     }
 
-    Suggester suggester = super.getSuggester(session, event, cloudManager);
+    Suggester suggester = super.getSuggester(session, event, context, cloudManager);
     ClusterState clusterState;
     try {
       clusterState = stateProvider.getClusterState();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
index 610edf7..57aa310 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ComputePlanAction.java
@@ -34,6 +34,7 @@ import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
 import org.apache.solr.client.solrj.cloud.autoscaling.PolicyHelper;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
+import org.apache.solr.client.solrj.cloud.autoscaling.UnsupportedSuggester;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.params.AutoScalingParams;
@@ -88,7 +89,7 @@ public class ComputePlanAction extends TriggerActionBase {
         log.trace("-- state: {}", clusterState);
       }
       try {
-        Suggester intialSuggester = getSuggester(session, event, cloudManager);
+        Suggester intialSuggester = getSuggester(session, event, context, cloudManager);
         Suggester suggester = intialSuggester;
         int maxOperations = getMaxNumOps(event, autoScalingConf, clusterState);
         int requestedOperations = getRequestedNumOps(event);
@@ -112,7 +113,7 @@ public class ComputePlanAction extends TriggerActionBase {
           if (suggester.getSession() != null) {
             session = suggester.getSession();
           }
-          suggester = getSuggester(session, event, cloudManager);
+          suggester = getSuggester(session, event, context, cloudManager);
 
           // break on first null op
           // unless a specific number of ops was requested
@@ -190,7 +191,7 @@ public class ComputePlanAction extends TriggerActionBase {
 
   private static final String START = "__start__";
 
-  protected Suggester getSuggester(Policy.Session session, TriggerEvent event, SolrCloudManager cloudManager) {
+  protected Suggester getSuggester(Policy.Session session, TriggerEvent event, ActionContext context, SolrCloudManager cloudManager) {
     Suggester suggester;
     switch (event.getEventType()) {
       case NODEADDED:
@@ -211,6 +212,10 @@ public class ComputePlanAction extends TriggerActionBase {
         }
         TriggerEvent.Op op = ops.get(start);
         suggester = session.getSuggester(op.getAction());
+        if (suggester instanceof UnsupportedSuggester) {
+          List<TriggerEvent.Op> unsupportedOps = (List<TriggerEvent.Op>)context.getProperties().computeIfAbsent("unsupportedOps", k -> new ArrayList<TriggerEvent.Op>());
+          unsupportedOps.add(op);
+        }
         for (Map.Entry<Suggester.Hint, Object> e : op.getHints().entrySet()) {
           suggester = suggester.hint(e.getKey(), e.getValue());
         }
@@ -226,7 +231,7 @@ public class ComputePlanAction extends TriggerActionBase {
         suggester = session.getSuggester(action);
         break;
       default:
-        throw new UnsupportedOperationException("No support for events other than nodeAdded, nodeLost, searchRate and metric. Received: " + event.getEventType());
+        throw new UnsupportedOperationException("No support for events other than nodeAdded, nodeLost, searchRate, metric and indexSize. Received: " + event.getEventType());
     }
     return suggester;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
index fa406cc..c3fde1e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
@@ -100,13 +100,15 @@ public class IndexSizeTrigger extends TriggerBase {
       throw new TriggerValidationException(getName(), ABOVE_PROP, "invalid value '" + aboveStr + "': " + e.toString());
     }
     try {
-      below = Long.parseLong(aboveStr);
+      below = Long.parseLong(belowStr);
       if (below < 0) {
         below = -1;
       }
     } catch (Exception e) {
       throw new TriggerValidationException(getName(), BELOW_PROP, "invalid value '" + belowStr + "': " + e.toString());
     }
+    // below must be at least 2x smaller than above, otherwise splitting a shard
+    // would immediately put the shard below the threshold and cause the mergeshards action
     if (below > 0 && (below * 2 > above)) {
       throw new TriggerValidationException(getName(), BELOW_PROP,
           "invalid value " + below + ", should be less than half of '" + ABOVE_PROP + "' value, which is " + above);
@@ -198,8 +200,7 @@ public class IndexSizeTrigger extends TriggerBase {
               }
             }
             if (info == null) {
-              // should not happen
-              log.warn("could not find replica info for replica " + r);
+              // probably replica is not on this node
               return;
             }
             // we have to translate to the metrics registry name, which uses "_replica_nN" as suffix
@@ -214,7 +215,7 @@ public class IndexSizeTrigger extends TriggerBase {
                 tag = "metrics:" + registry + ":INDEX.size";
                 break;
               case docs:
-                tag = "metrics:" + registry + "SEARCHER.searcher.numDocs";
+                tag = "metrics:" + registry + ":SEARCHER.searcher.numDocs";
                 break;
               default:
                 throw new UnsupportedOperationException("Unit " + unit + " not supported");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
new file mode 100644
index 0000000..7966291
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.autoscaling;
+
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
+import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
+import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.cloud.CloudTestUtils;
+import org.apache.solr.cloud.SolrCloudTestCase;
+import org.apache.solr.cloud.autoscaling.sim.SimCloudManager;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Pair;
+import org.apache.solr.common.util.TimeSource;
+import org.apache.solr.core.SolrResourceLoader;
+import org.apache.solr.util.LogLevel;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.solr.cloud.autoscaling.AutoScalingHandlerTest.createAutoScalingRequest;
+
+/**
+ *
+ */
+@LogLevel("org.apache.solr.cloud.autoscaling=DEBUG")
+public class IndexSizeTriggerTest extends SolrCloudTestCase {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static SolrCloudManager cloudManager;
+  private static SolrClient solrClient;
+  private static TimeSource timeSource;
+  private static SolrResourceLoader loader;
+
+  private AutoScaling.TriggerEventProcessor noFirstRunProcessor = event -> {
+    fail("Did not expect the processor to fire on first run! event=" + event);
+    return true;
+  };
+  private static final long WAIT_FOR_DELTA_NANOS = TimeUnit.MILLISECONDS.toNanos(2);
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+    if (random().nextBoolean() && false) { // TODO: drop "&& false" once this WIP test also passes on a real (non-simulated) cluster
+      cloudManager = cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getSolrCloudManager();
+      solrClient = cluster.getSolrClient();
+      loader = cluster.getJettySolrRunner(0).getCoreContainer().getResourceLoader();
+    } else {
+      cloudManager = SimCloudManager.createCluster(2, TimeSource.get("simTime:50"));
+      // wait for defaults to be applied - due to accelerated time sometimes we may miss this
+      cloudManager.getTimeSource().sleep(10000);
+      AutoScalingConfig cfg = cloudManager.getDistribStateManager().getAutoScalingConfig();
+      assertFalse("autoscaling config is empty", cfg.isEmpty());
+      solrClient = ((SimCloudManager)cloudManager).simGetSolrClient();
+      loader = ((SimCloudManager) cloudManager).getLoader();
+    }
+    timeSource = cloudManager.getTimeSource();
+  }
+
+  @After
+  public void restoreDefaults() throws Exception {
+    SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST,
+        "{'set-trigger' : " + AutoScaling.SCHEDULED_MAINTENANCE_TRIGGER_DSL + "}");
+    NamedList<Object> response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+    AutoScalingConfig autoScalingConfig = cloudManager.getDistribStateManager().getAutoScalingConfig();
+    if (autoScalingConfig.getTriggerListenerConfigs().containsKey("foo")) {
+      String cmd = "{" +
+          "'remove-listener' : {'name' : 'foo'}" +
+          "}";
+      response = solrClient.request(createAutoScalingRequest(SolrRequest.METHOD.POST, cmd));
+      assertEquals(response.get("result").toString(), "success");
+    }
+    if (cloudManager instanceof SimCloudManager) {
+      ((SimCloudManager) cloudManager).getSimClusterStateProvider().simDeleteAllCollections();
+    } else {
+      cluster.deleteAllCollections();
+    }
+  }
+
+  @AfterClass
+  public static void teardown() throws Exception {
+    if (cloudManager instanceof SimCloudManager) {
+      cloudManager.close();
+    }
+    solrClient = null;
+    cloudManager = null;
+  }
+
+  @Test
+  public void testTrigger() throws Exception {
+    String collectionName = "collection1";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
+        "conf", 2, 2).setMaxShardsPerNode(2);
+    create.process(solrClient);
+    CloudTestUtils.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
+        CloudTestUtils.clusterShape(2, 2));
+
+    long waitForSeconds = 3 + random().nextInt(5);
+    Map<String, Object> props = createTriggerProps(waitForSeconds);
+    try (IndexSizeTrigger trigger = new IndexSizeTrigger("index_size_trigger")) {
+      trigger.configure(loader, cloudManager, props);
+      trigger.init();
+      trigger.setProcessor(noFirstRunProcessor);
+      trigger.run();
+
+      for (int i = 0; i < 25; i++) {
+        SolrInputDocument doc = new SolrInputDocument("id", "id-" + i);
+        solrClient.add(collectionName, doc);
+      }
+      solrClient.commit();
+
+      AtomicBoolean fired = new AtomicBoolean(false);
+      AtomicReference<TriggerEvent> eventRef = new AtomicReference<>();
+      trigger.setProcessor(event -> {
+        if (fired.compareAndSet(false, true)) {
+          eventRef.set(event);
+          long currentTimeNanos = timeSource.getTimeNs();
+          long eventTimeNanos = event.getEventTime();
+          long waitForNanos = TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS) - WAIT_FOR_DELTA_NANOS;
+          if (currentTimeNanos - eventTimeNanos <= waitForNanos) {
+            fail("processor was fired before the configured waitFor period: currentTimeNanos=" + currentTimeNanos + ", eventTimeNanos=" +  eventTimeNanos + ",waitForNanos=" + waitForNanos);
+          }
+        } else {
+          fail("IndexSizeTrigger was fired more than once!");
+        }
+        return true;
+      });
+      trigger.run();
+      TriggerEvent ev = eventRef.get();
+      // waitFor delay - should not produce any event yet
+      assertNull("waitFor not elapsed but produced an event", ev);
+      timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
+      trigger.run();
+      ev = eventRef.get();
+      assertNotNull("should have fired an event", ev);
+      List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>) ev.getProperty(TriggerEvent.REQUESTED_OPS);
+      assertNotNull("should contain requestedOps", ops);
+      assertEquals("number of ops", 2, ops.size());
+      boolean shard1 = false;
+      boolean shard2 = false;
+      for (TriggerEvent.Op op : ops) {
+        assertEquals(CollectionParams.CollectionAction.SPLITSHARD, op.getAction());
+        Set<Pair<String, String>> hints = (Set<Pair<String, String>>)op.getHints().get(Suggester.Hint.COLL_SHARD);
+        assertNotNull("hints", hints);
+        assertEquals("hints", 1, hints.size());
+        Pair<String, String> p = hints.iterator().next();
+        assertEquals(collectionName, p.first());
+        if (p.second().equals("shard1")) {
+          shard1 = true;
+        } else if (p.second().equals("shard2")) {
+          shard2 = true;
+        } else {
+          fail("unexpected shard name " + p.second());
+        }
+      }
+      assertTrue("shard1 should be split", shard1);
+      assertTrue("shard2 should be split", shard2);
+    }
+  }
+
+  private Map<String, Object> createTriggerProps(long waitForSeconds) {
+    Map<String, Object> props = new HashMap<>();
+    props.put("event", "indexSize");
+    props.put("waitFor", waitForSeconds);
+    props.put("enabled", true);
+    props.put(IndexSizeTrigger.UNIT_PROP, IndexSizeTrigger.Unit.docs.toString());
+    props.put(IndexSizeTrigger.ABOVE_PROP, 10);
+    props.put(IndexSizeTrigger.BELOW_PROP, 2);
+    List<Map<String, String>> actions = new ArrayList<>(3);
+    Map<String, String> map = new HashMap<>(2);
+    map.put("name", "compute_plan");
+    map.put("class", "solr.ComputePlanAction");
+    actions.add(map);
+    map = new HashMap<>(2);
+    map.put("name", "execute_plan");
+    map.put("class", "solr.ExecutePlanAction");
+    actions.add(map);
+    props.put("actions", actions);
+    return props;
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/test/org/apache/solr/cloud/autoscaling/NodeAddedTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/NodeAddedTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/NodeAddedTriggerTest.java
index 2d084b8..cd52785 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/NodeAddedTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/NodeAddedTriggerTest.java
@@ -91,7 +91,7 @@ public class NodeAddedTriggerTest extends SolrCloudTestCase {
           long eventTimeNanos = event.getEventTime();
           long waitForNanos = TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS) - WAIT_FOR_DELTA_NANOS;
           if (currentTimeNanos - eventTimeNanos <= waitForNanos) {
-            fail("NodeAddedListener was fired before the configured waitFor period: currentTimeNanos=" + currentTimeNanos + ", eventTimeNanos=" +  eventTimeNanos + ",waitForNanos=" + waitForNanos);
+            fail("processor was fired before the configured waitFor period: currentTimeNanos=" + currentTimeNanos + ", eventTimeNanos=" +  eventTimeNanos + ",waitForNanos=" + waitForNanos);
           }
         } else {
           fail("NodeAddedTrigger was fired more than once!");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
index 04dc96f..adfb76a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
@@ -43,7 +43,9 @@ import org.apache.solr.client.solrj.cloud.DistribStateManager;
 import org.apache.solr.client.solrj.cloud.NodeStateProvider;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.RequestWriter;
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.RequestStatusState;
@@ -333,6 +335,17 @@ public class SimCloudManager implements SolrCloudManager {
     return new SolrClient() {
       @Override
       public NamedList<Object> request(SolrRequest request, String collection) throws SolrServerException, IOException {
+        if (collection != null) {
+          if (request instanceof AbstractUpdateRequest) {
+            ((AbstractUpdateRequest)request).setParam("collection", collection);
+          } else if (request instanceof QueryRequest) {
+            ModifiableSolrParams params = new ModifiableSolrParams(request.getParams());
+            params.set("collection", collection);
+            request = new QueryRequest(params);
+          } else {
+            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "when collection != null only UpdateRequest and QueryRequest are supported: request=" + request + ", collection=" + collection);
+          }
+        }
         SolrResponse rsp = SimCloudManager.this.request(request);
         return rsp.getResponse();
       }
@@ -508,14 +521,17 @@ public class SimCloudManager implements SolrCloudManager {
       incrementCount("update");
       // support only updates to the system collection
       UpdateRequest ureq = (UpdateRequest)req;
-      if (ureq.getCollection() == null || !ureq.getCollection().equals(CollectionAdminParams.SYSTEM_COLL)) {
-        throw new UnsupportedOperationException("Only .system updates are supported but got: " + req);
-      }
-      List<SolrInputDocument> docs = ureq.getDocuments();
-      if (docs != null) {
-        systemColl.addAll(docs);
+      String collection = ureq.getCollection();
+      if (collection != null && !collection.equals(CollectionAdminParams.SYSTEM_COLL)) {
+        // simulate an update
+        return clusterStateProvider.simUpdate(ureq);
+      } else {
+        List<SolrInputDocument> docs = ureq.getDocuments();
+        if (docs != null) {
+          systemColl.addAll(docs);
+        }
+        return new UpdateResponse();
       }
-      return new UpdateResponse();
     }
     // support only a specific subset of collection admin ops
     if (!(req instanceof CollectionAdminRequest)) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index f6762fc..d535908 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -47,6 +47,8 @@ import org.apache.solr.client.solrj.cloud.autoscaling.Suggestion;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
 import org.apache.solr.client.solrj.cloud.autoscaling.VersionedData;
 import org.apache.solr.client.solrj.impl.ClusterStateProvider;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.apache.solr.client.solrj.response.UpdateResponse;
 import org.apache.solr.cloud.ActionThrottle;
 import org.apache.solr.cloud.api.collections.AddReplicaCmd;
 import org.apache.solr.cloud.api.collections.Assign;
@@ -57,6 +59,7 @@ import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
 import org.apache.solr.cloud.overseer.ZkWriteCommand;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.DocRouter;
@@ -638,6 +641,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
           replicaNum.getAndIncrement());
       try {
         replicaProps.put(ZkStateReader.CORE_NAME_PROP, coreName);
+        replicaProps.put("SEARCHER.searcher.deletedDocs", 0);
+        replicaProps.put("SEARCHER.searcher.numDocs", 0);
+        replicaProps.put("SEARCHER.searcher.maxDoc", 0);
         ReplicaInfo ri = new ReplicaInfo("core_node" + Assign.incAndGetId(stateManager, collectionName, 0),
             coreName, collectionName, pos.shard, pos.type, pos.node, replicaProps);
         cloudManager.submit(() -> {
@@ -662,6 +668,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
         }
       });
     });
+    // force recreation of collection states
+    collectionsStatesRef.set(null);
     simRunLeaderElection(Collections.singleton(collectionName), true);
     if (waitForFinalState) {
       boolean finished = finalStateLatch.await(cloudManager.getTimeSource().convertDelay(TimeUnit.SECONDS, 60, TimeUnit.MILLISECONDS),
@@ -887,6 +895,12 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     PolicyHelper.SessionWrapper sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
     if (sessionWrapper != null) sessionWrapper.release();
 
+    // adjust numDocs / deletedDocs / maxDoc
+    String numDocsStr = parentSlice.getLeader().getStr("SEARCHER.searcher.numDocs", "0");
+    long numDocs = Long.parseLong(numDocsStr);
+    long newNumDocs = numDocs / subSlices.size();
+    long remainder = numDocs % subSlices.size();
+
     for (ReplicaPosition replicaPosition : replicaPositions) {
       String subSliceName = replicaPosition.shard;
       String subShardNodeName = replicaPosition.node;
@@ -897,6 +911,14 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       replicaProps.put(ZkStateReader.REPLICA_TYPE, replicaPosition.type.toString());
       replicaProps.put(ZkStateReader.BASE_URL_PROP, Utils.getBaseUrlForNodeName(subShardNodeName, "http"));
 
+      long replicasNumDocs = newNumDocs;
+      if (subSlices.get(0).equals(subSliceName)) { // only add to the first
+        replicasNumDocs += remainder;
+      }
+      replicaProps.put("SEARCHER.searcher.numDocs", replicasNumDocs);
+      replicaProps.put("SEARCHER.searcher.maxDoc", replicasNumDocs);
+      replicaProps.put("SEARCHER.searcher.deletedDocs", 0);
+
       ReplicaInfo ri = new ReplicaInfo("core_node" + Assign.incAndGetId(stateManager, collectionName, 0),
           solrCoreName, collectionName, replicaPosition.shard, replicaPosition.type, subShardNodeName, replicaProps);
       simAddReplica(replicaPosition.node, ri, false);
@@ -967,6 +989,95 @@ public class SimClusterStateProvider implements ClusterStateProvider {
   }
 
   /**
+   * Simulate an update by increasing replica metrics.
+   * <p>The following core metrics are updated:
+   * <ul>
+   *   <li><code>SEARCHER.searcher.numDocs</code>, <code>SEARCHER.searcher.maxDoc</code>, <code>SEARCHER.searcher.deletedDocs</code></li>
+   * </ul>
+   * </p>
+   * <p>IMPORTANT limitations:
+   * <ul>
+   *   <li>document replacements are always counted as new docs</li>
+   *   <li>delete by ID always succeeds (unless there are 0 documents)</li>
+   *   <li>deleteByQuery never matches unless the query is <code>*:*</code></li>
+   * </ul></p>
+   * @param req the update request to simulate (added docs, delete-by-id, delete-by-query)
+   * @return an (empty) {@link UpdateResponse} if the simulated update succeeded
+   * @throws SolrException if the target collection is not set or does not exist
+   */
+  public UpdateResponse simUpdate(UpdateRequest req) throws SolrException, IOException {
+    String collection = req.getCollection();
+    if (collection == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection not set");
+    }
+    if (!simListCollections().contains(collection)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection '" + collection + "' doesn't exist");
+    }
+    DocCollection coll = getClusterState().getCollection(collection);
+    DocRouter router = coll.getRouter();
+    // process updates first
+
+    List<String> deletes = req.getDeleteById();
+    if (deletes != null && !deletes.isEmpty()) {
+      for (String id : deletes) {
+        Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
+        String numDocsStr = s.getLeader().getProperty("SEARCHER.searcher.numDocs");
+        if (numDocsStr != null && Long.parseLong(numDocsStr) > 0) {
+          try {
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", 1, true, false);
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", -1, true, false);
+          } catch (Exception e) {
+            throw new IOException(e);
+          }
+        }
+      }
+    }
+    deletes = req.getDeleteQuery();
+    if (deletes != null && !deletes.isEmpty()) {
+      for (String q : deletes) {
+        if (!"*:*".equals(q)) {
+          throw new UnsupportedOperationException("Only '*:*' query is supported in deleteByQuery");
+        }
+        for (Slice s : coll.getSlices()) {
+          String numDocsStr = s.getLeader().getProperty("SEARCHER.searcher.numDocs");
+          if (numDocsStr == null) {
+            continue;
+          }
+          long numDocs = Long.parseLong(numDocsStr);
+          if (numDocs == 0) {
+            continue;
+          }
+          try {
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", numDocs, false, false);
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 0, false, false);
+          } catch (Exception e) {
+            throw new IOException(e);
+          }
+        }
+      }
+    }
+    List<SolrInputDocument> docs = req.getDocuments();
+    if (docs != null && !docs.isEmpty()) {
+      for (SolrInputDocument doc : docs) {
+        String id = (String) doc.getFieldValue("id");
+        if (id == null) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Document without id: " + doc);
+        }
+        Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
+        try {
+          simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 1, true, false);
+          simSetShardValue(collection, s.getName(), "SEARCHER.searcher.maxDoc", 1, true, false);
+          // Policy reuses this value and expects it to be in GB units!!!
+          // simSetShardValue(collection, s.getName(), "INDEX.sizeInBytes", 500, true, false);
+        } catch (Exception e) {
+          throw new IOException(e);
+        }
+      }
+    }
+    return new UpdateResponse();
+  }
+
+  /**
    * Saves cluster properties to clusterprops.json.
    * @return current properties
    */
@@ -1089,7 +1200,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @param value property value
    */
   public void simSetCollectionValue(String collection, String key, Object value) throws Exception {
-    simSetCollectionValue(collection, key, value, false);
+    simSetCollectionValue(collection, key, value, false, false);
   }
 
   /**
@@ -1100,8 +1211,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @param divide if the value is a {@link Number} and this param is true, then the value will be evenly
    *               divided by the number of replicas.
    */
-  public void simSetCollectionValue(String collection, String key, Object value, boolean divide) throws Exception {
-    simSetShardValue(collection, null, key, value, divide);
+  public void simSetCollectionValue(String collection, String key, Object value, boolean delta, boolean divide) throws Exception {
+    simSetShardValue(collection, null, key, value, delta, divide);
   }
 
   /**
@@ -1112,7 +1223,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @param value property value
    */
   public void simSetShardValue(String collection, String shard, String key, Object value) throws Exception {
-    simSetShardValue(collection, shard, key, value, false);
+    simSetShardValue(collection, shard, key, value, false, false);
   }
 
   /**
@@ -1121,10 +1232,12 @@ public class SimClusterStateProvider implements ClusterStateProvider {
    * @param shard shard name. If null then all shards will be affected.
    * @param key property name
    * @param value property value
+   * @param delta if true then treat the numeric value as delta to add to the existing value
+   *              (or set the value to delta if missing)
    * @param divide if the value is a {@link Number} and this is true, then the value will be evenly
    *               divided by the number of replicas.
    */
-  public void simSetShardValue(String collection, String shard, String key, Object value, boolean divide) throws Exception {
+  public void simSetShardValue(String collection, String shard, String key, Object value, boolean delta, boolean divide) throws Exception {
     List<ReplicaInfo> infos = new ArrayList<>();
     nodeReplicaMap.forEach((n, replicas) -> {
       replicas.forEach(r -> {
@@ -1140,14 +1253,38 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection " + collection + " doesn't exist.");
     }
     if (divide && value != null && (value instanceof Number)) {
-      value = ((Number)value).doubleValue() / infos.size();
+      if ((value instanceof Long) || (value instanceof Integer)) {
+        value = ((Number) value).longValue() / infos.size();
+      } else {
+        value = ((Number) value).doubleValue() / infos.size();
+      }
     }
     for (ReplicaInfo r : infos) {
       synchronized (r) {
         if (value == null) {
           r.getVariables().remove(key);
         } else {
-          r.getVariables().put(key, value);
+          if (delta) {
+            Object prevValue = r.getVariables().get(key);
+            if (prevValue != null) {
+              if ((prevValue instanceof Number) && (value instanceof Number)) {
+                if (((prevValue instanceof Long) || (prevValue instanceof Integer)) &&
+                    ((value instanceof Long) || (value instanceof Integer))) {
+                  Long newValue = ((Number)prevValue).longValue() + ((Number)value).longValue();
+                  r.getVariables().put(key, newValue);
+                } else {
+                  Double newValue = ((Number)prevValue).doubleValue() + ((Number)value).doubleValue();
+                  r.getVariables().put(key, newValue);
+                }
+              } else {
+                throw new UnsupportedOperationException("delta cannot be applied to non-numeric values: " + prevValue + " and " + value);
+              }
+            } else {
+              r.getVariables().put(key, value);
+            }
+          } else {
+            r.getVariables().put(key, value);
+          }
         }
       }
     }
@@ -1263,7 +1400,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
           slices.put(s, slice);
         });
         Map<String, Object> collProps = collProperties.computeIfAbsent(coll, c -> new ConcurrentHashMap<>());
-        DocCollection dc = new DocCollection(coll, slices, collProps, DocRouter.DEFAULT, clusterStateVersion, ZkStateReader.CLUSTER_STATE);
+        Map<String, Object> routerProp = (Map<String, Object>) collProps.getOrDefault(DocCollection.DOC_ROUTER, Collections.singletonMap("name", DocRouter.DEFAULT_NAME));
+        DocRouter router = DocRouter.getDocRouter((String)routerProp.getOrDefault("name", DocRouter.DEFAULT_NAME));
+        DocCollection dc = new DocCollection(coll, slices, collProps, router, clusterStateVersion, ZkStateReader.CLUSTER_STATE);
         res.put(coll, dc);
       });
       collectionsStatesRef.set(res);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java
index 6d1f68a..b9169eb 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimNodeStateProvider.java
@@ -41,7 +41,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Simulated {@link NodeStateProvider}.
  * Note: in order to setup node-level metrics use {@link #simSetNodeValues(String, Map)}. However, in order
- * to setup core-level metrics use {@link SimClusterStateProvider#simSetCollectionValue(String, String, Object, boolean)}.
+ * to setup core-level metrics use {@link SimClusterStateProvider#simSetCollectionValue(String, String, Object, boolean, boolean)}.
  */
 public class SimNodeStateProvider implements NodeStateProvider {
   private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -204,7 +204,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
 
   /**
    * Simulate getting replica metrics values. This uses per-replica properties set in
-   * {@link SimClusterStateProvider#simSetCollectionValue(String, String, Object, boolean)} and
+   * {@link SimClusterStateProvider#simSetCollectionValue(String, String, Object, boolean, boolean)} and
    * similar methods.
    * @param node node id
    * @param tags metrics names

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
index 14ac40f..2c4d8d3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
@@ -540,7 +540,7 @@ public class TestLargeCluster extends SimSolrCloudTestCase {
 
     String metricName = "QUERY./select.requestTimes:1minRate";
     // simulate search traffic
-    cluster.getSimClusterStateProvider().simSetShardValue(collectionName, "shard1", metricName, 40, true);
+    cluster.getSimClusterStateProvider().simSetShardValue(collectionName, "shard1", metricName, 40, false, true);
 
     // now define the trigger. doing it earlier may cause partial events to be generated (where only some
     // nodes / replicas exceeded the threshold).

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java
index 31e3636..c898dbc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestTriggerIntegration.java
@@ -1192,7 +1192,7 @@ public class TestTriggerIntegration extends SimSolrCloudTestCase {
 //      solrClient.query(COLL1, query);
 //    }
 
-    cluster.getSimClusterStateProvider().simSetCollectionValue(COLL1, "QUERY./select.requestTimes:1minRate", 500, true);
+    cluster.getSimClusterStateProvider().simSetCollectionValue(COLL1, "QUERY./select.requestTimes:1minRate", 500, false, true);
 
     boolean await = triggerFiredLatch.await(20000 / SPEED, TimeUnit.MILLISECONDS);
     assertTrue("The trigger did not fire at all", await);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
index bee69c8..9496b0f 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/Policy.java
@@ -466,7 +466,10 @@ public class Policy implements MapWriter {
 
   static {
     ops.put(CollectionAction.ADDREPLICA, () -> new AddReplicaSuggester());
+    ops.put(CollectionAction.DELETEREPLICA, () -> new UnsupportedSuggester(CollectionAction.DELETEREPLICA));
     ops.put(CollectionAction.MOVEREPLICA, () -> new MoveReplicaSuggester());
+    ops.put(CollectionAction.SPLITSHARD, () -> new SplitShardSuggester());
+    ops.put(CollectionAction.MERGESHARDS, () -> new UnsupportedSuggester(CollectionAction.MERGESHARDS));
   }
 
   public Map<String, List<Clause>> getPolicies() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java
new file mode 100644
index 0000000..dc371d2
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/SplitShardSuggester.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.cloud.autoscaling;
+
+import java.util.Collections;
+import java.util.Set;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.common.util.Pair;
+
+/**
+ * This suggester produces a SPLITSHARD request using the provided {@link Hint#COLL_SHARD} value.
+ */
+class SplitShardSuggester extends Suggester {
+
+  @Override
+  SolrRequest init() {
+    Set<Pair<String, String>> shards = (Set<Pair<String, String>>) hints.getOrDefault(Hint.COLL_SHARD, Collections.emptySet());
+    if (shards.isEmpty()) {
+      throw new RuntimeException("split-shard requires 'collection' and 'shard'");
+    }
+    if (shards.size() > 1) {
+      throw new RuntimeException("split-shard requires exactly one pair of 'collection' and 'shard'");
+    }
+    Pair<String, String> collShard = shards.iterator().next();
+    return CollectionAdminRequest.splitShard(collShard.first()).setShardName(collShard.second());
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/957c1d2c/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java
new file mode 100644
index 0000000..825a24a
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/UnsupportedSuggester.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.cloud.autoscaling;
+
+import java.lang.invoke.MethodHandles;
+
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.common.params.CollectionParams;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This suggester simply logs the request but does not produce any suggestions.
+ */
+public class UnsupportedSuggester extends Suggester {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private final CollectionParams.CollectionAction action;
+
+  public static UnsupportedSuggester get(Policy.Session session, CollectionParams.CollectionAction action) {
+    UnsupportedSuggester suggester = new UnsupportedSuggester(action);
+    suggester._init(session);
+    return suggester;
+  }
+
+  public UnsupportedSuggester(CollectionParams.CollectionAction action) {
+    this.action = action;
+  }
+
+  @Override
+  SolrRequest init() {
+    log.warn("Unsupported suggester for action " + action + " with hints " + hints + " - no suggestion available");
+    return null;
+  }
+
+  @Override
+  public SolrRequest getSuggestion() {
+    return null;
+  }
+}