Posted to commits@lucene.apache.org by da...@apache.org on 2018/10/23 00:05:58 UTC

[37/52] [abbrv] [partial] lucene-solr:jira/gradle: Add gradle support for Solr

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/ExecutePlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ExecutePlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/ExecutePlanAction.java
deleted file mode 100644
index 6179bcc..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/ExecutePlanAction.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.lang3.exception.ExceptionUtils;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.cloud.autoscaling.AlreadyExistsException;
-import org.apache.solr.client.solrj.cloud.DistribStateManager;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.Utils;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is responsible for executing cluster operations read from the {@link ActionContext}'s properties
- * with the key name "operations".
- */
-public class ExecutePlanAction extends TriggerActionBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static final String PREFIX = "op-";
-
-  static final int DEFAULT_TASK_TIMEOUT_SECONDS = 120;
-
-  @Override
-  public void process(TriggerEvent event, ActionContext context) throws Exception {
-    log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
-    SolrCloudManager cloudManager = context.getCloudManager();
-    List<SolrRequest> operations = (List<SolrRequest>) context.getProperty("operations");
-    if (operations == null || operations.isEmpty()) {
-      log.info("No operations to execute for event: {}", event);
-      return;
-    }
-    try {
-      int counter = 0;
-      for (SolrRequest operation : operations) {
-        log.debug("Executing operation: {}", operation.getParams());
-        try {
-          SolrResponse response = null;
-          if (operation instanceof CollectionAdminRequest.AsyncCollectionAdminRequest) {
-            CollectionAdminRequest.AsyncCollectionAdminRequest req = (CollectionAdminRequest.AsyncCollectionAdminRequest) operation;
-            // waitForFinalState so that the end effects of operations are visible
-            req.setWaitForFinalState(true);
-            String asyncId = event.getSource() + '/' + event.getId() + '/' + counter++;
-            String znode = saveAsyncId(cloudManager.getDistribStateManager(), event, asyncId);
-            log.trace("Saved requestId: {} in znode: {}", asyncId, znode);
-            // TODO: find a better way of using async calls using dataProvider API !!!
-            req.setAsyncId(asyncId);
-            SolrResponse asyncResponse = cloudManager.request(req);
-            if (asyncResponse.getResponse().get("error") != null) {
-              throw new IOException(String.valueOf(asyncResponse.getResponse().get("error")));
-            }
-            asyncId = (String)asyncResponse.getResponse().get("requestid");
-            CollectionAdminRequest.RequestStatusResponse statusResponse = waitForTaskToFinish(cloudManager, asyncId,
-                DEFAULT_TASK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
-            if (statusResponse != null) {
-              RequestStatusState state = statusResponse.getRequestStatus();
-              if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED || state == RequestStatusState.NOT_FOUND) {
-                try {
-                  cloudManager.getDistribStateManager().removeData(znode, -1);
-                } catch (Exception e) {
-                  log.warn("Unexpected exception while trying to delete znode: {}", znode, e);
-                }
-              }
-              response = statusResponse;
-            }
-          } else {
-            response = cloudManager.request(operation);
-          }
-          NamedList<Object> result = response.getResponse();
-          context.getProperties().compute("responses", (s, o) -> {
-            List<NamedList<Object>> responses = (List<NamedList<Object>>) o;
-            if (responses == null)  responses = new ArrayList<>(operations.size());
-            responses.add(result);
-            return responses;
-          });
-        } catch (IOException e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Unexpected exception executing operation: " + operation.getParams(), e);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "ExecutePlanAction was interrupted", e);
-        } catch (Exception e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Unexpected exception executing operation: " + operation.getParams(), e);
-        }
-      }
-    } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Unexpected exception while processing event: " + event, e);
-    }
-  }
-
-
-  static CollectionAdminRequest.RequestStatusResponse waitForTaskToFinish(SolrCloudManager cloudManager, String requestId, long duration, TimeUnit timeUnit) throws IOException, InterruptedException {
-    long timeoutSeconds = timeUnit.toSeconds(duration);
-    RequestStatusState state = RequestStatusState.NOT_FOUND;
-    CollectionAdminRequest.RequestStatusResponse statusResponse = null;
-    for (int i = 0; i < timeoutSeconds; i += 5) {
-      try {
-        statusResponse = (CollectionAdminRequest.RequestStatusResponse)cloudManager.request(CollectionAdminRequest.requestStatus(requestId));
-        state = statusResponse.getRequestStatus();
-        if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
-          log.trace("Task with requestId={} finished with state={} in {}s", requestId, state, i);
-          cloudManager.request(CollectionAdminRequest.deleteAsyncId(requestId));
-          return statusResponse;
-        } else if (state == RequestStatusState.NOT_FOUND) {
-          // the request for this id was never actually submitted! no harm done, just bail out
-          log.warn("Task with requestId={} was not found on overseer", requestId);
-          cloudManager.request(CollectionAdminRequest.deleteAsyncId(requestId));
-          return statusResponse;
-        }
-      } catch (Exception e) {
-        Throwable rootCause = ExceptionUtils.getRootCause(e);
-        if (rootCause instanceof IllegalStateException && rootCause.getMessage().contains("Connection pool shut down"))  {
-          throw e;
-        }
-        if (rootCause instanceof TimeoutException && rootCause.getMessage().contains("Could not connect to ZooKeeper")) {
-          throw e;
-        }
-        if (rootCause instanceof SolrServerException) {
-          throw e;
-        }
-        log.error("Unexpected exception while querying status of requestId={}", requestId, e);
-        throw e;
-      }
-      if (i > 0 && i % 25 == 0) {
-        log.trace("Task with requestId={} still not complete after {}s. Last state={}", requestId, i, state);
-      }
-      cloudManager.getTimeSource().sleep(5000);
-    }
-    log.debug("Task with requestId={} did not complete within {}s. Last state={}", requestId, timeoutSeconds, state);
-    return statusResponse;
-  }
-
-  /**
-   * Saves the given asyncId in ZK as a persistent sequential node.
-   *
-   * @return the path of the newly created node in ZooKeeper
-   */
-  private String saveAsyncId(DistribStateManager stateManager, TriggerEvent event, String asyncId) throws InterruptedException, AlreadyExistsException, IOException, KeeperException {
-    String parentPath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/" + event.getSource() + "/" + getName();
-    try {
-      stateManager.makePath(parentPath);
-    } catch (AlreadyExistsException e) {
-      // ignore
-    }
-    return stateManager.createData(parentPath + "/" + PREFIX, Utils.toJSON(Collections.singletonMap("requestid", asyncId)), CreateMode.PERSISTENT_SEQUENTIAL);
-  }
-
-}

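The submit-then-poll pattern in ExecutePlanAction above can be reproduced directly with SolrJ. A minimal sketch, assuming a node at http://localhost:8983/solr; the collection and shard names are placeholders:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.RequestStatusState;

public class AsyncSplitSketch {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // submit the operation asynchronously; the returned id tracks the task
      String requestId = CollectionAdminRequest.splitShard("myCollection")
          .setShardName("shard1")
          .processAsync(client);
      // poll every 5 seconds until the task reaches a terminal state
      RequestStatusState state;
      do {
        Thread.sleep(5000L);
        state = CollectionAdminRequest.requestStatus(requestId)
            .process(client)
            .getRequestStatus();
      } while (state == RequestStatusState.SUBMITTED || state == RequestStatusState.RUNNING);
      // clean up the stored status, mirroring waitForTaskToFinish above
      CollectionAdminRequest.deleteAsyncId(requestId).process(client);
      System.out.println("split finished with state " + state);
    }
  }
}
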
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
deleted file mode 100644
index b4f9bf0..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/HttpTriggerListener.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import java.util.StringJoiner;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
-import org.apache.solr.client.solrj.impl.HttpClientUtil;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.util.PropertiesUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Simple HTTP callback that POSTs event data to a URL.
- * URL, payload and headers may contain property substitution patterns, with the following properties available:
- * <ul>
- *   <li>config.* - listener configuration</li>
- *   <li>event.* - event properties</li>
- *   <li>stage - current stage of event processing</li>
- *   <li>actionName - optional current action name</li>
- *   <li>context.* - optional {@link ActionContext} properties</li>
- *   <li>error - optional error string (from {@link Throwable#toString()})</li>
- *   <li>message - optional message</li>
- * </ul>
- * The following listener configuration is supported:
- * <ul>
- *   <li>url - a URL template</li>
- *   <li>payload - string, optional payload template. If absent, a JSON map of all properties listed above is used.</li>
- *   <li>contentType - string, optional payload content type. If absent, <code>application/json</code> is used.</li>
- *   <li>header.* - string, optional header template(s). The property name without the "header." prefix becomes the literal header name.</li>
- *   <li>timeout - int, optional connection and socket timeout in milliseconds. Default is 60 seconds.</li>
- *   <li>followRedirects - boolean, optional setting to follow redirects. Default is false.</li>
- * </ul>
- */
-public class HttpTriggerListener extends TriggerListenerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private String urlTemplate;
-  private String payloadTemplate;
-  private String contentType;
-  private Map<String, String> headerTemplates = new HashMap<>();
-  private int timeout = HttpClientUtil.DEFAULT_CONNECT_TIMEOUT;
-  private boolean followRedirects;
-
-  public HttpTriggerListener() {
-    super();
-    TriggerUtils.requiredProperties(requiredProperties, validProperties, "url");
-    TriggerUtils.validProperties(validProperties, "payload", "contentType", "timeout", "followRedirects");
-    validPropertyPrefixes.add("header.");
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, AutoScalingConfig.TriggerListenerConfig config) throws TriggerValidationException {
-    super.configure(loader, cloudManager, config);
-    urlTemplate = (String)config.properties.get("url");
-    payloadTemplate = (String)config.properties.get("payload");
-    contentType = (String)config.properties.get("contentType");
-    config.properties.forEach((k, v) -> {
-      if (k.startsWith("header.")) {
-        headerTemplates.put(k.substring(7), String.valueOf(v));
-      }
-    });
-    timeout = PropertiesUtil.toInteger(String.valueOf(config.properties.get("timeout")), HttpClientUtil.DEFAULT_CONNECT_TIMEOUT);
-    followRedirects = PropertiesUtil.toBoolean(String.valueOf(config.properties.get("followRedirects")));
-  }
-
-  @Override
-  public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context, Throwable error, String message) {
-    Properties properties = new Properties();
-    properties.setProperty("stage", stage.toString());
-    // if the configuration used "actionName" but we're in a non-action-related stage then PropertiesUtil
-    // would throw an exception on the missing value - so replace it with an empty string
-    if (actionName == null) {
-      actionName = "";
-    }
-    properties.setProperty("actionName", actionName);
-    if (context != null) {
-      context.getProperties().forEach((k, v) -> {
-        properties.setProperty("context." + k, String.valueOf(v));
-      });
-    }
-    if (error != null) {
-      properties.setProperty("error", error.toString());
-    } else {
-      properties.setProperty("error", "");
-    }
-    if (message != null) {
-      properties.setProperty("message", message);
-    } else {
-      properties.setProperty("message", "");
-    }
-    // add event properties
-    properties.setProperty("event.id", event.getId());
-    properties.setProperty("event.source", event.getSource());
-    properties.setProperty("event.eventTime", String.valueOf(event.eventTime));
-    properties.setProperty("event.eventType", event.getEventType().toString());
-    event.getProperties().forEach((k, v) -> {
-      properties.setProperty("event.properties." + k, String.valueOf(v));
-    });
-    // add config properties
-    properties.setProperty("config.name", config.name);
-    properties.setProperty("config.trigger", config.trigger);
-    properties.setProperty("config.listenerClass", config.listenerClass);
-    properties.setProperty("config.beforeActions", String.join(",", config.beforeActions));
-    properties.setProperty("config.afterActions", String.join(",", config.afterActions));
-    StringJoiner joiner = new StringJoiner(",");
-    config.stages.forEach(s -> joiner.add(s.toString()));
-    properties.setProperty("config.stages", joiner.toString());
-    config.properties.forEach((k, v) -> {
-      properties.setProperty("config.properties." + k, String.valueOf(v));
-    });
-    String url = PropertiesUtil.substituteProperty(urlTemplate, properties);
-    String payload;
-    String type;
-    if (payloadTemplate != null) {
-      payload = PropertiesUtil.substituteProperty(payloadTemplate, properties);
-      if (contentType != null) {
-        type = contentType;
-      } else {
-        type = "application/json";
-      }
-    } else {
-      payload = Utils.toJSONString(properties);
-      type = "application/json";
-    }
-    Map<String, String> headers = new HashMap<>();
-    headerTemplates.forEach((k, v) -> {
-      String headerVal = PropertiesUtil.substituteProperty(v, properties);
-      if (!headerVal.isEmpty()) {
-        headers.put(k, headerVal);
-      }
-    });
-    headers.put("Content-Type", type);
-    try {
-      cloudManager.httpRequest(url, SolrRequest.METHOD.POST, headers, payload, timeout, followRedirects);
-    } catch (IOException e) {
-      log.warn("Exception sending request for event {}", event, e);
-    }
-  }
-}

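The listener above is registered through the autoscaling API. A sketch of a set-listener payload POSTed to /admin/autoscaling, assuming an existing trigger named node_lost_trigger; the callback URL and header value are placeholders:

{
  "set-listener": {
    "name": "http_notifier",
    "trigger": "node_lost_trigger",
    "stage": ["STARTED", "SUCCEEDED", "FAILED"],
    "class": "solr.HttpTriggerListener",
    "url": "http://hooks.example.com/autoscaling/${config.name}/${stage}",
    "header.X-Trigger": "${config.trigger}"
  }
}

Because no payload template is given, the listener POSTs the full JSON property map described in the javadoc, with Content-Type application/json.
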
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
deleted file mode 100644
index 6fca29a..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/InactiveShardPlanAction.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class checks whether there are shards that have been inactive for a long
- * time (which usually means they are leftovers from shard splitting) and requests their removal
- * once their cleanup TTL period has elapsed.
- * <p>Shard delete requests are put into the {@link ActionContext}'s properties
- * with the key name "operations". The value is a List of SolrRequest objects.</p>
- */
-public class InactiveShardPlanAction extends TriggerActionBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String TTL_PROP = "ttl";
-
-  public static final int DEFAULT_TTL_SECONDS = 3600 * 24 * 2;
-
-  private int cleanupTTL;
-
-  public InactiveShardPlanAction() {
-    super();
-    TriggerUtils.validProperties(validProperties, TTL_PROP);
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    String cleanupStr = String.valueOf(properties.getOrDefault(TTL_PROP, String.valueOf(DEFAULT_TTL_SECONDS)));
-    try {
-      cleanupTTL = Integer.parseInt(cleanupStr);
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), TTL_PROP, "invalid value '" + cleanupStr + "': " + e.toString());
-    }
-    if (cleanupTTL < 0) {
-      throw new TriggerValidationException(getName(), TTL_PROP, "invalid value '" + cleanupStr + "', should be >= 0.");
-    }
-  }
-
-  @Override
-  public void process(TriggerEvent event, ActionContext context) throws Exception {
-    SolrCloudManager cloudManager = context.getCloudManager();
-    ClusterState state = cloudManager.getClusterStateProvider().getClusterState();
-    Map<String, List<String>> cleanup = new LinkedHashMap<>();
-    Map<String, List<String>> inactive = new LinkedHashMap<>();
-    Map<String, Map<String, Object>> staleLocks = new LinkedHashMap<>();
-    state.forEachCollection(coll ->
-      coll.getSlices().forEach(s -> {
-        if (Slice.State.INACTIVE.equals(s.getState())) {
-          inactive.computeIfAbsent(coll.getName(), c -> new ArrayList<>()).add(s.getName());
-          String tstampStr = s.getStr(ZkStateReader.STATE_TIMESTAMP_PROP);
-          if (tstampStr == null || tstampStr.isEmpty()) {
-            return;
-          }
-          long timestamp = Long.parseLong(tstampStr);
-          // this timestamp uses epoch time
-          long currentTime = cloudManager.getTimeSource().getEpochTimeNs();
-          long delta = TimeUnit.NANOSECONDS.toSeconds(currentTime - timestamp);
-          log.debug("{}/{}: tstamp={}, time={}, delta={}", coll.getName(), s.getName(), timestamp, currentTime, delta);
-          if (delta > cleanupTTL) {
-            log.debug("-- delete inactive {} / {}", coll.getName(), s.getName());
-            List<SolrRequest> operations = (List<SolrRequest>)context.getProperties().computeIfAbsent("operations", k -> new ArrayList<>());
-            operations.add(CollectionAdminRequest.deleteShard(coll.getName(), s.getName()));
-            cleanup.computeIfAbsent(coll.getName(), c -> new ArrayList<>()).add(s.getName());
-          }
-        }
-        // check for stale shard split locks
-        String parentPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + coll.getName();
-        List<String> locks;
-        try {
-          locks = cloudManager.getDistribStateManager().listData(parentPath).stream()
-              .filter(name -> name.endsWith("-splitting"))
-              .collect(Collectors.toList());
-          for (String lock : locks) {
-            try {
-              String lockPath = parentPath + "/" + lock;
-              Map<String, Object> lockData = Utils.getJson(cloudManager.getDistribStateManager(), lockPath);
-              String tstampStr = (String)lockData.get(ZkStateReader.STATE_TIMESTAMP_PROP);
-              if (tstampStr == null || tstampStr.isEmpty()) {
-                return;
-              }
-              long timestamp = Long.parseLong(tstampStr);
-              // this timestamp uses epoch time
-              long currentTime = cloudManager.getTimeSource().getEpochTimeNs();
-              long delta = TimeUnit.NANOSECONDS.toSeconds(currentTime - timestamp);
-              log.debug("{}/{}: locktstamp={}, time={}, delta={}", coll.getName(), lock, timestamp, currentTime, delta);
-              if (delta > cleanupTTL) {
-                log.debug("-- delete inactive split lock for {}/{}, delta={}", coll.getName(), lock, delta);
-                cloudManager.getDistribStateManager().removeData(lockPath, -1);
-                lockData.put("currentTimeNs", currentTime);
-                lockData.put("deltaSec", delta);
-                lockData.put("ttlSec", cleanupTTL);
-                staleLocks.put(coll.getName() + "/" + lock, lockData);
-              } else {
-                log.debug("-- lock {}/{} still active (delta={})", coll.getName(), lock, delta);
-              }
-            } catch (NoSuchElementException nse) {
-              // already removed by someone else - ignore
-            }
-          }
-        } catch (Exception e) {
-          log.warn("Exception checking for inactive shard split locks in {}", parentPath, e);
-        }
-      })
-    );
-    Map<String, Object> results = new LinkedHashMap<>();
-    if (!cleanup.isEmpty()) {
-      results.put("inactive", inactive);
-      results.put("cleanup", cleanup);
-    }
-    if (!staleLocks.isEmpty()) {
-      results.put("staleLocks", staleLocks);
-    }
-    if (!results.isEmpty()) {
-      context.getProperties().put(getName(), results);
-    }
-  }
-}
\ No newline at end of file

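InactiveShardPlanAction only enqueues delete operations under the "operations" key; pairing it with ExecutePlanAction actually executes them. A sketch of a set-trigger payload, assuming a daily scheduled trigger and an illustratively short ttl of 600 seconds:

{
  "set-trigger": {
    "name": "inactive_shard_cleanup",
    "event": "scheduled",
    "startTime": "NOW",
    "every": "+1DAY",
    "enabled": true,
    "actions": [
      {"name": "inactive_shard_plan", "class": "solr.InactiveShardPlanAction", "ttl": 600},
      {"name": "execute_plan", "class": "solr.ExecutePlanAction"}
    ]
  }
}
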
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
deleted file mode 100644
index 25083ae..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.ReplicaInfo;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.metrics.SolrCoreMetricManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Fires {@link TriggerEventType#INDEXSIZE} events when a shard's index size, in bytes or documents, crosses the configured thresholds.
- */
-public class IndexSizeTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  public static final String ABOVE_BYTES_PROP = "aboveBytes";
-  public static final String ABOVE_DOCS_PROP = "aboveDocs";
-  public static final String ABOVE_OP_PROP = "aboveOp";
-  public static final String BELOW_BYTES_PROP = "belowBytes";
-  public static final String BELOW_DOCS_PROP = "belowDocs";
-  public static final String BELOW_OP_PROP = "belowOp";
-  public static final String COLLECTIONS_PROP = "collections";
-  public static final String MAX_OPS_PROP = "maxOps";
-
-  public static final String BYTES_SIZE_PROP = "__bytes__";
-  public static final String DOCS_SIZE_PROP = "__docs__";
-  public static final String ABOVE_SIZE_PROP = "aboveSize";
-  public static final String BELOW_SIZE_PROP = "belowSize";
-  public static final String VIOLATION_PROP = "violationType";
-
-  public static final int DEFAULT_MAX_OPS = 10;
-
-  public enum Unit { bytes, docs }
-
-  private long aboveBytes, aboveDocs, belowBytes, belowDocs;
-  private int maxOps;
-  private CollectionParams.CollectionAction aboveOp, belowOp;
-  private final Set<String> collections = new HashSet<>();
-  private final Map<String, Long> lastAboveEventMap = new ConcurrentHashMap<>();
-  private final Map<String, Long> lastBelowEventMap = new ConcurrentHashMap<>();
-
-  public IndexSizeTrigger(String name) {
-    super(TriggerEventType.INDEXSIZE, name);
-    TriggerUtils.validProperties(validProperties,
-        ABOVE_BYTES_PROP, ABOVE_DOCS_PROP, ABOVE_OP_PROP, BELOW_BYTES_PROP, BELOW_DOCS_PROP,
-        BELOW_OP_PROP, COLLECTIONS_PROP, MAX_OPS_PROP);
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    String aboveStr = String.valueOf(properties.getOrDefault(ABOVE_BYTES_PROP, Long.MAX_VALUE));
-    String belowStr = String.valueOf(properties.getOrDefault(BELOW_BYTES_PROP, -1));
-    try {
-      aboveBytes = Long.parseLong(aboveStr);
-      if (aboveBytes <= 0) {
-        throw new Exception("value must be > 0");
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), ABOVE_BYTES_PROP, "invalid value '" + aboveStr + "': " + e.toString());
-    }
-    try {
-      belowBytes = Long.parseLong(belowStr);
-      if (belowBytes < 0) {
-        belowBytes = -1;
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), BELOW_BYTES_PROP, "invalid value '" + belowStr + "': " + e.toString());
-    }
-    // below must be at least 2x smaller than above, otherwise splitting a shard
-    // would immediately put the shard below the threshold and cause the mergeshards action
-    if (belowBytes > 0 && (belowBytes * 2 > aboveBytes)) {
-      throw new TriggerValidationException(getName(), BELOW_BYTES_PROP,
-          "invalid value " + belowBytes + ", should be less than half of '" + ABOVE_BYTES_PROP + "' value, which is " + aboveBytes);
-    }
-    // do the same for docs bounds
-    aboveStr = String.valueOf(properties.getOrDefault(ABOVE_DOCS_PROP, Long.MAX_VALUE));
-    belowStr = String.valueOf(properties.getOrDefault(BELOW_DOCS_PROP, -1));
-    try {
-      aboveDocs = Long.parseLong(aboveStr);
-      if (aboveDocs <= 0) {
-        throw new Exception("value must be > 0");
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), ABOVE_DOCS_PROP, "invalid value '" + aboveStr + "': " + e.toString());
-    }
-    try {
-      belowDocs = Long.parseLong(belowStr);
-      if (belowDocs < 0) {
-        belowDocs = -1;
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), BELOW_DOCS_PROP, "invalid value '" + belowStr + "': " + e.toString());
-    }
-    // below must be at least 2x smaller than above, otherwise splitting a shard
-    // would immediately put the shard below the threshold and cause the mergeshards action
-    if (belowDocs > 0 && (belowDocs * 2 > aboveDocs)) {
-      throw new TriggerValidationException(getName(), BELOW_DOCS_PROP,
-          "invalid value " + belowDocs + ", should be less than half of '" + ABOVE_DOCS_PROP + "' value, which is " + aboveDocs);
-    }
-
-    String collectionsString = (String) properties.get(COLLECTIONS_PROP);
-    if (collectionsString != null && !collectionsString.isEmpty()) {
-      collections.addAll(StrUtils.splitSmart(collectionsString, ','));
-    }
-    String aboveOpStr = String.valueOf(properties.getOrDefault(ABOVE_OP_PROP, CollectionParams.CollectionAction.SPLITSHARD.toLower()));
-    // TODO: this is a placeholder until SOLR-9407 is implemented
-    String belowOpStr = String.valueOf(properties.getOrDefault(BELOW_OP_PROP, CollectionParams.CollectionAction.MERGESHARDS.toLower()));
-    aboveOp = CollectionParams.CollectionAction.get(aboveOpStr);
-    if (aboveOp == null) {
-      throw new TriggerValidationException(getName(), ABOVE_OP_PROP, "unrecognized value: '" + aboveOpStr + "'");
-    }
-    belowOp = CollectionParams.CollectionAction.get(belowOpStr);
-    if (belowOp == null) {
-      throw new TriggerValidationException(getName(), BELOW_OP_PROP, "unrecognized value: '" + belowOpStr + "'");
-    }
-    String maxOpsStr = String.valueOf(properties.getOrDefault(MAX_OPS_PROP, DEFAULT_MAX_OPS));
-    try {
-      maxOps = Integer.parseInt(maxOpsStr);
-      if (maxOps < 1) {
-        throw new Exception("must be >= 1");
-      }
-    } catch (Exception e) {
-      throw new TriggerValidationException(getName(), MAX_OPS_PROP, "invalid value: '" + maxOpsStr + "': " + e.getMessage());
-    }
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    Map<String, Object> state = new HashMap<>();
-    state.put("lastAboveEventMap", lastAboveEventMap);
-    state.put("lastBelowEventMap", lastBelowEventMap);
-    return state;
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    this.lastAboveEventMap.clear();
-    this.lastBelowEventMap.clear();
-    Map<String, Long> replicaVsTime = (Map<String, Long>)state.get("lastAboveEventMap");
-    if (replicaVsTime != null) {
-      this.lastAboveEventMap.putAll(replicaVsTime);
-    }
-    replicaVsTime = (Map<String, Long>)state.get("lastBelowEventMap");
-    if (replicaVsTime != null) {
-      this.lastBelowEventMap.putAll(replicaVsTime);
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof IndexSizeTrigger) {
-      IndexSizeTrigger that = (IndexSizeTrigger)old;
-      assert this.name.equals(that.name);
-      this.lastAboveEventMap.clear();
-      this.lastBelowEventMap.clear();
-      this.lastAboveEventMap.putAll(that.lastAboveEventMap);
-      this.lastBelowEventMap.putAll(that.lastBelowEventMap);
-    } else {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  public void run() {
-    synchronized(this) {
-      if (isClosed) {
-        log.warn("{} ran but was already closed", getName());
-        return;
-      }
-    }
-    AutoScaling.TriggerEventProcessor processor = processorRef.get();
-    if (processor == null) {
-      return;
-    }
-
-    // replica name / info + size, retrieved from leaders only
-    Map<String, ReplicaInfo> currentSizes = new HashMap<>();
-
-    try {
-      ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
-      for (String node : clusterState.getLiveNodes()) {
-        Map<String, ReplicaInfo> metricTags = new HashMap<>();
-        // coll, shard, replica
-        Map<String, Map<String, List<ReplicaInfo>>> infos = cloudManager.getNodeStateProvider().getReplicaInfo(node, Collections.emptyList());
-        infos.forEach((coll, shards) -> {
-          if (!collections.isEmpty() && !collections.contains(coll)) {
-            return;
-          }
-          DocCollection docCollection = clusterState.getCollection(coll);
-
-          shards.forEach((sh, replicas) -> {
-            // check only the leader replica of each active shard
-            Slice s = docCollection.getSlice(sh);
-            if (s.getState() != Slice.State.ACTIVE) {
-              return;
-            }
-            Replica r = s.getLeader();
-            // no leader - don't do anything
-            if (r == null) {
-              return;
-            }
-            // find ReplicaInfo
-            ReplicaInfo info = null;
-            for (ReplicaInfo ri : replicas) {
-              if (r.getCoreName().equals(ri.getCore())) {
-                info = ri;
-                break;
-              }
-            }
-            if (info == null) {
-              // the replica is probably not on this node
-              return;
-            }
-            // we have to translate to the metrics registry name, which uses "_replica_nN" as suffix
-            String replicaName = Utils.parseMetricsReplicaName(coll, info.getCore());
-            if (replicaName == null) { // should never happen???
-              replicaName = info.getName(); // which is actually coreNode name...
-            }
-            String registry = SolrCoreMetricManager.createRegistryName(true, coll, sh, replicaName, null);
-            String tag = "metrics:" + registry + ":INDEX.sizeInBytes";
-            metricTags.put(tag, info);
-            tag = "metrics:" + registry + ":SEARCHER.searcher.numDocs";
-            metricTags.put(tag, info);
-          });
-        });
-        if (metricTags.isEmpty()) {
-          continue;
-        }
-        Map<String, Object> sizes = cloudManager.getNodeStateProvider().getNodeValues(node, metricTags.keySet());
-        sizes.forEach((tag, size) -> {
-          final ReplicaInfo info = metricTags.get(tag);
-          if (info == null) {
-            log.warn("Missing replica info for response tag {}", tag);
-          } else {
-            // verify that it's a Number
-            if (!(size instanceof Number)) {
-              log.warn("invalid size value - not a number: '{}' is {}", size, size.getClass().getName());
-              return;
-            }
-
-            ReplicaInfo currentInfo = currentSizes.computeIfAbsent(info.getCore(), k -> (ReplicaInfo)info.clone());
-            if (tag.contains("INDEX")) {
-              currentInfo.getVariables().put(BYTES_SIZE_PROP, ((Number) size).longValue());
-            } else {
-              currentInfo.getVariables().put(DOCS_SIZE_PROP, ((Number) size).longValue());
-            }
-          }
-        });
-      }
-    } catch (IOException e) {
-      log.warn("Error running trigger {}", getName(), e);
-      return;
-    }
-
-    long now = cloudManager.getTimeSource().getTimeNs();
-
-    // now check thresholds
-
-    // collection / list(info)
-    Map<String, List<ReplicaInfo>> aboveSize = new HashMap<>();
-
-    Set<String> splittable = new HashSet<>();
-
-    currentSizes.forEach((coreName, info) -> {
-      if ((Long)info.getVariable(BYTES_SIZE_PROP) > aboveBytes ||
-          (Long)info.getVariable(DOCS_SIZE_PROP) > aboveDocs) {
-        if (waitForElapsed(coreName, now, lastAboveEventMap)) {
-          List<ReplicaInfo> infos = aboveSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
-          if (!infos.contains(info)) {
-            if ((Long)info.getVariable(BYTES_SIZE_PROP) > aboveBytes) {
-              info.getVariables().put(VIOLATION_PROP, ABOVE_BYTES_PROP);
-            } else {
-              info.getVariables().put(VIOLATION_PROP, ABOVE_DOCS_PROP);
-            }
-            infos.add(info);
-            splittable.add(info.getName());
-          }
-        }
-      } else {
-        // no violation - clear waitForElapsed
-        lastAboveEventMap.remove(coreName);
-      }
-    });
-
-    // collection / list(info)
-    Map<String, List<ReplicaInfo>> belowSize = new HashMap<>();
-
-    currentSizes.forEach((coreName, info) -> {
-      if (((Long)info.getVariable(BYTES_SIZE_PROP) < belowBytes ||
-          (Long)info.getVariable(DOCS_SIZE_PROP) < belowDocs) &&
-          // make sure we don't produce conflicting ops
-          !splittable.contains(info.getName())) {
-        if (waitForElapsed(coreName, now, lastBelowEventMap)) {
-          List<ReplicaInfo> infos = belowSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
-          if (!infos.contains(info)) {
-            if ((Long)info.getVariable(BYTES_SIZE_PROP) < belowBytes) {
-              info.getVariables().put(VIOLATION_PROP, BELOW_BYTES_PROP);
-            } else {
-              info.getVariables().put(VIOLATION_PROP, BELOW_DOCS_PROP);
-            }
-            infos.add(info);
-          }
-        }
-      } else {
-        // no violation - clear waitForElapsed
-        lastBelowEventMap.remove(coreName);
-      }
-    });
-
-    if (aboveSize.isEmpty() && belowSize.isEmpty()) {
-      log.trace("NO VIOLATIONS: Now={}", now);
-      log.trace("lastAbove={}", lastAboveEventMap);
-      log.trace("lastBelow={}", lastBelowEventMap);
-      return;
-    }
-
-    // find the earliest time when a condition was exceeded
-    final AtomicLong eventTime = new AtomicLong(now);
-
-    // calculate ops
-    final List<TriggerEvent.Op> ops = new ArrayList<>();
-    aboveSize.forEach((coll, replicas) -> {
-      // sort by decreasing size to first split the largest ones
-      // XXX see the comment below about using DOCS_SIZE_PROP in lieu of BYTES_SIZE_PROP
-      replicas.sort((r1, r2) -> {
-        long delta = (Long) r1.getVariable(DOCS_SIZE_PROP) - (Long) r2.getVariable(DOCS_SIZE_PROP);
-        if (delta > 0) {
-          return -1;
-        } else if (delta < 0) {
-          return 1;
-        } else {
-          return 0;
-        }
-      });
-      replicas.forEach(r -> {
-        if (ops.size() >= maxOps) {
-          return;
-        }
-        TriggerEvent.Op op = new TriggerEvent.Op(aboveOp);
-        op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(coll, r.getShard()));
-        ops.add(op);
-        Long time = lastAboveEventMap.get(r.getCore());
-        if (time != null && eventTime.get() > time) {
-          eventTime.set(time);
-        }
-      });
-    });
-    belowSize.forEach((coll, replicas) -> {
-      if (replicas.size() < 2) {
-        return;
-      }
-      if (ops.size() >= maxOps) {
-        return;
-      }
-      // sort by increasing size
-      replicas.sort((r1, r2) -> {
-        // XXX this is not quite correct - if BYTES_SIZE_PROP decided that replica got here
-        // then we should be sorting by BYTES_SIZE_PROP. However, since DOCS and BYTES are
-        // loosely correlated it's simpler to sort just by docs (which better reflects the "too small"
-        // condition than index size, due to possibly existing deleted docs that still occupy space)
-        long delta = (Long) r1.getVariable(DOCS_SIZE_PROP) - (Long) r2.getVariable(DOCS_SIZE_PROP);
-        if (delta > 0) {
-          return 1;
-        } else if (delta < 0) {
-          return -1;
-        } else {
-          return 0;
-        }
-      });
-
-      // TODO: MERGESHARDS is not implemented yet. For now take the top two smallest shards
-      // TODO: but in the future we probably need to get ones with adjacent ranges.
-
-      // TODO: generate as many MERGESHARDS as needed to consume all belowSize shards
-      TriggerEvent.Op op = new TriggerEvent.Op(belowOp);
-      op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(coll, replicas.get(0).getShard()));
-      op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(coll, replicas.get(1).getShard()));
-      ops.add(op);
-      Long time = lastBelowEventMap.get(replicas.get(0).getCore());
-      if (time != null && eventTime.get() > time) {
-        eventTime.set(time);
-      }
-      time = lastBelowEventMap.get(replicas.get(1).getCore());
-      if (time != null && eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-
-    if (ops.isEmpty()) {
-      return;
-    }
-    if (processor.process(new IndexSizeEvent(getName(), eventTime.get(), ops, aboveSize, belowSize))) {
-      // update last event times
-      aboveSize.forEach((coll, replicas) -> {
-        replicas.forEach(r -> lastAboveEventMap.put(r.getCore(), now));
-      });
-      belowSize.forEach((coll, replicas) -> {
-        if (replicas.size() < 2) {
-          return;
-        }
-        lastBelowEventMap.put(replicas.get(0).getCore(), now);
-        lastBelowEventMap.put(replicas.get(1).getCore(), now);
-      });
-    }
-  }
-
-  private boolean waitForElapsed(String name, long now, Map<String, Long> lastEventMap) {
-    Long lastTime = lastEventMap.computeIfAbsent(name, s -> now);
-    long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
-    log.trace("name={}, lastTime={}, elapsed={}", name, lastTime, elapsed);
-    if (elapsed < getWaitForSecond()) {
-      return false;
-    }
-    return true;
-  }
-
-  public static class IndexSizeEvent extends TriggerEvent {
-    public IndexSizeEvent(String source, long eventTime, List<Op> ops, Map<String, List<ReplicaInfo>> aboveSize,
-                          Map<String, List<ReplicaInfo>> belowSize) {
-      super(TriggerEventType.INDEXSIZE, source, eventTime, null);
-      properties.put(TriggerEvent.REQUESTED_OPS, ops);
-      // avoid passing very large amounts of data here - just use replica names
-      TreeMap<String, String> above = new TreeMap<>();
-      aboveSize.forEach((coll, replicas) ->
-          replicas.forEach(r -> above.put(r.getCore(), "docs=" + r.getVariable(DOCS_SIZE_PROP) + ", bytes=" + r.getVariable(BYTES_SIZE_PROP))));
-      properties.put(ABOVE_SIZE_PROP, above);
-      TreeMap<String, String> below = new TreeMap<>();
-      belowSize.forEach((coll, replicas) ->
-          replicas.forEach(r -> below.put(r.getCore(), "docs=" + r.getVariable(DOCS_SIZE_PROP) + ", bytes=" + r.getVariable(BYTES_SIZE_PROP))));
-      properties.put(BELOW_SIZE_PROP, below);
-    }
-  }
-
-}

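The property constants at the top of IndexSizeTrigger map one-to-one onto its configuration. A sketch of a set-trigger payload with illustrative thresholds; note that configure() rejects a belowDocs value that is not less than half of aboveDocs:

{
  "set-trigger": {
    "name": "index_size_trigger",
    "event": "indexSize",
    "aboveDocs": 100000000,
    "belowDocs": 10000000,
    "aboveOp": "SPLITSHARD",
    "maxOps": 10,
    "waitFor": "1h",
    "actions": [
      {"name": "compute_plan", "class": "solr.ComputePlanAction"},
      {"name": "execute_plan", "class": "solr.ExecutePlanAction"}
    ]
  }
}
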
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
deleted file mode 100644
index a7dcf63..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/LoggingListener.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of {@link TriggerListener} that reports
- * events to a log.
- */
-public class LoggingListener extends TriggerListenerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Override
-  public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context,
-                      Throwable error, String message) {
-    log.info("{}: stage={}, actionName={}, event={}, error={}, message={}", config.name, stage, actionName, event, error, message);
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
deleted file mode 100644
index 9058a9a..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/MetricTrigger.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.stream.Collectors;
-
-import org.apache.solr.client.solrj.cloud.autoscaling.Policy;
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.params.AutoScalingParams;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.util.Pair;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.AutoScalingParams.ABOVE;
-import static org.apache.solr.common.params.AutoScalingParams.BELOW;
-import static org.apache.solr.common.params.AutoScalingParams.METRIC;
-import static org.apache.solr.common.params.AutoScalingParams.PREFERRED_OP;
-
-public class MetricTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private String metric;
-  private Number above, below;
-  private String collection, shard, node, preferredOp;
-
-  private final Map<String, Long> lastNodeEvent = new ConcurrentHashMap<>();
-
-  public MetricTrigger(String name) {
-    super(TriggerEventType.METRIC, name);
-    TriggerUtils.requiredProperties(requiredProperties, validProperties, METRIC);
-    TriggerUtils.validProperties(validProperties, ABOVE, BELOW, PREFERRED_OP,
-        AutoScalingParams.COLLECTION,
-        AutoScalingParams.SHARD,
-        AutoScalingParams.NODE);
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    this.metric = (String) properties.get(METRIC);
-    this.above = (Number) properties.get(ABOVE);
-    this.below = (Number) properties.get(BELOW);
-    this.collection = (String) properties.getOrDefault(AutoScalingParams.COLLECTION, Policy.ANY);
-    shard = (String) properties.getOrDefault(AutoScalingParams.SHARD, Policy.ANY);
-    if (collection.equals(Policy.ANY) && !shard.equals(Policy.ANY)) {
-      throw new TriggerValidationException("shard", "When 'shard' is other than #ANY, the collection name must also be other than #ANY");
-    }
-    node = (String) properties.getOrDefault(AutoScalingParams.NODE, Policy.ANY);
-    preferredOp = (String) properties.getOrDefault(PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    return Collections.<String, Object>singletonMap("lastNodeEvent", lastNodeEvent);
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    lastNodeEvent.clear();
-    Map<String, Long> nodeTimes = (Map<String, Long>) state.get("lastNodeEvent");
-    if (nodeTimes != null) {
-      lastNodeEvent.putAll(nodeTimes);
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof MetricTrigger) {
-      MetricTrigger that = (MetricTrigger) old;
-      assert this.name.equals(that.name);
-      this.lastNodeEvent.clear();
-      this.lastNodeEvent.putAll(that.lastNodeEvent);
-    } else {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  public void run() {
-    AutoScaling.TriggerEventProcessor processor = processorRef.get();
-    if (processor == null) {
-      return;
-    }
-
-    Set<String> liveNodes = null;
-    if (node.equals(Policy.ANY)) {
-      if (collection.equals(Policy.ANY)) {
-        liveNodes = cloudManager.getClusterStateProvider().getLiveNodes();
-      } else {
-        final Set<String> nodes = new HashSet<>();
-        ClusterState.CollectionRef ref = cloudManager.getClusterStateProvider().getState(collection);
-        DocCollection docCollection;
-        if (ref == null || (docCollection = ref.get()) == null) {
-          log.warn("MetricTrigger could not find collection: {}", collection);
-          return;
-        }
-        if (shard.equals(Policy.ANY)) {
-          docCollection.getReplicas().forEach(replica -> {
-            nodes.add(replica.getNodeName());
-          });
-        } else {
-          Slice slice = docCollection.getSlice(shard);
-          if (slice == null) {
-            log.warn("MetricTrigger could not find collection: {} shard: {}", collection, shard);
-            return;
-          }
-          slice.getReplicas().forEach(replica -> nodes.add(replica.getNodeName()));
-        }
-        liveNodes = nodes;
-      }
-    } else {
-      liveNodes = Collections.singleton(node);
-    }
-
-    Map<String, Number> rates = new HashMap<>(liveNodes.size());
-    for (String node : liveNodes) {
-      Map<String, Object> values = cloudManager.getNodeStateProvider().getNodeValues(node, Collections.singletonList(metric));
-      values.forEach((tag, rate) -> rates.computeIfAbsent(node, s -> (Number) rate));
-    }
-
-    long now = cloudManager.getTimeSource().getTimeNs();
-    // check for exceeded rates and filter out those with less than waitFor from previous events
-    Map<String, Number> hotNodes = rates.entrySet().stream()
-        .filter(entry -> waitForElapsed(entry.getKey(), now, lastNodeEvent))
-        .filter(entry -> (below != null && Double.compare(entry.getValue().doubleValue(), below.doubleValue()) < 0) || (above != null && Double.compare(entry.getValue().doubleValue(), above.doubleValue()) > 0))
-        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
-
-    if (hotNodes.isEmpty()) return;
-
-    final AtomicLong eventTime = new AtomicLong(now);
-    hotNodes.forEach((n, r) -> {
-      long time = lastNodeEvent.get(n);
-      if (eventTime.get() > time) {
-        eventTime.set(time);
-      }
-    });
-
-    if (processor.process(new MetricBreachedEvent(getName(), collection, shard, preferredOp, eventTime.get(), metric, hotNodes))) {
-      hotNodes.keySet().forEach(node -> lastNodeEvent.put(node, now));
-    }
-  }
-
-  private boolean waitForElapsed(String name, long now, Map<String, Long> lastEventMap) {
-    Long lastTime = lastEventMap.computeIfAbsent(name, s -> now);
-    long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
-    log.trace("name={}, lastTime={}, elapsed={}", name, lastTime, elapsed);
-    if (elapsed < getWaitForSecond()) {
-      return false;
-    }
-    return true;
-  }
-
-  public static class MetricBreachedEvent extends TriggerEvent {
-    public MetricBreachedEvent(String source, String collection, String shard, String preferredOp, long eventTime, String metric, Map<String, Number> hotNodes) {
-      super(TriggerEventType.METRIC, source, eventTime, null);
-      properties.put(METRIC, metric);
-      properties.put(AutoScalingParams.NODE, hotNodes);
-      if (!collection.equals(Policy.ANY)) {
-        properties.put(AutoScalingParams.COLLECTION, collection);
-      }
-      if (!shard.equals(Policy.ANY))  {
-        properties.put(AutoScalingParams.SHARD, shard);
-      }
-      properties.put(PREFERRED_OP, preferredOp);
-
-      // specify requested ops
-      List<Op> ops = new ArrayList<>(hotNodes.size());
-      for (String n : hotNodes.keySet()) {
-        Op op = new Op(CollectionParams.CollectionAction.get(preferredOp));
-        op.addHint(Suggester.Hint.SRC_NODE, n);
-        if (!collection.equals(Policy.ANY)) {
-          if (!shard.equals(Policy.ANY)) {
-            op.addHint(Suggester.Hint.COLL_SHARD, new Pair<>(collection, shard));
-          } else {
-            op.addHint(Suggester.Hint.COLL, collection);
-          }
-        }
-        ops.add(op);
-      }
-      properties.put(TriggerEvent.REQUESTED_OPS, ops);
-    }
-  }
-}
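
For reference, the hot-node selection in MetricTrigger.run() above reduces to two stream filters: a per-node waitFor gate (waitForElapsed) and a threshold test against the optional above/below bounds. The following self-contained sketch reproduces that filtering outside of Solr; the threshold, the waitFor value, and the sample node rates are illustrative assumptions, not Solr API.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

public class HotNodeFilterSketch {
  // Illustrative configuration; in the trigger these come from its properties.
  static final Double ABOVE = 100.0;        // fire when the metric exceeds this
  static final Double BELOW = null;         // lower bound unused in this example
  static final long WAIT_FOR_SECONDS = 60;  // the trigger's waitFor, in seconds

  // Same gate as the trigger's waitForElapsed: first sighting seeds the map,
  // so a node must stay hot for a full waitFor window before it can fire.
  static boolean waitForElapsed(String node, long now, Map<String, Long> lastEvent) {
    long lastTime = lastEvent.computeIfAbsent(node, n -> now);
    return TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS) >= WAIT_FOR_SECONDS;
  }

  public static void main(String[] args) {
    long now = System.nanoTime();
    Map<String, Long> lastNodeEvent = new HashMap<>();
    // node1 last fired two minutes ago, node2 only ten seconds ago.
    lastNodeEvent.put("node1", now - TimeUnit.SECONDS.toNanos(120));
    lastNodeEvent.put("node2", now - TimeUnit.SECONDS.toNanos(10));

    Map<String, Number> rates = new HashMap<>();
    rates.put("node1", 150.0);  // over ABOVE, waitFor elapsed -> hot
    rates.put("node2", 150.0);  // over ABOVE, but fired too recently -> filtered out

    Map<String, Number> hotNodes = rates.entrySet().stream()
        .filter(e -> waitForElapsed(e.getKey(), now, lastNodeEvent))
        .filter(e -> (BELOW != null && e.getValue().doubleValue() < BELOW)
                  || (ABOVE != null && e.getValue().doubleValue() > ABOVE))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

    System.out.println(hotNodes);  // prints {node1=150.0}
  }
}

Because computeIfAbsent seeds a node's last-event time on first sight, a node has to remain over the threshold for a full waitFor window before its first event can fire.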

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeAddedTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeAddedTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeAddedTrigger.java
deleted file mode 100644
index 6202944..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeAddedTrigger.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.AutoScalingParams.PREFERRED_OP;
-
-/**
- * Trigger for the {@link TriggerEventType#NODEADDED} event
- */
-public class NodeAddedTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private Set<String> lastLiveNodes = new HashSet<>();
-
-  private Map<String, Long> nodeNameVsTimeAdded = new HashMap<>();
-
-  private String preferredOp;
-
-  public NodeAddedTrigger(String name) {
-    super(TriggerEventType.NODEADDED, name);
-    TriggerUtils.validProperties(validProperties, PREFERRED_OP);
-  }
-
-  @Override
-  public void init() throws Exception {
-    super.init();
-    lastLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-    log.debug("NodeAddedTrigger {} - Initial livenodes: {}", name, lastLiveNodes);
-    log.debug("NodeAddedTrigger {} instantiated with properties: {}", name, properties);
-    // pick up added nodes for which marker paths were created
-    try {
-      List<String> added = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH);
-      added.forEach(n -> {
-        // don't add nodes that have since gone away
-        if (lastLiveNodes.contains(n)) {
-          log.debug("Adding node from marker path: {}", n);
-          nodeNameVsTimeAdded.put(n, cloudManager.getTimeSource().getTimeNs());
-        }
-        removeMarker(n);
-      });
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.warn("Exception retrieving nodeLost markers", e);
-    }
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    preferredOp = (String) properties.getOrDefault(PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-    preferredOp = preferredOp.toLowerCase(Locale.ROOT);
-    // verify
-    CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
-    switch (action) {
-      case ADDREPLICA:
-      case MOVEREPLICA:
-      case NONE:
-        break;
-      default:
-        throw new TriggerValidationException("Unsupported preferredOperation=" + preferredOp + " specified for node added trigger");
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof NodeAddedTrigger) {
-      NodeAddedTrigger that = (NodeAddedTrigger) old;
-      assert this.name.equals(that.name);
-      this.lastLiveNodes.clear();
-      this.lastLiveNodes.addAll(that.lastLiveNodes);
-      this.nodeNameVsTimeAdded.clear();
-      this.nodeNameVsTimeAdded.putAll(that.nodeNameVsTimeAdded);
-    } else  {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    Map<String,Object> state = new HashMap<>();
-    state.put("lastLiveNodes", lastLiveNodes);
-    state.put("nodeNameVsTimeAdded", nodeNameVsTimeAdded);
-    return state;
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    this.lastLiveNodes.clear();
-    this.nodeNameVsTimeAdded.clear();
-    Collection<String> lastLiveNodes = (Collection<String>)state.get("lastLiveNodes");
-    if (lastLiveNodes != null) {
-      this.lastLiveNodes.addAll(lastLiveNodes);
-    }
-    Map<String,Long> nodeNameVsTimeAdded = (Map<String,Long>)state.get("nodeNameVsTimeAdded");
-    if (nodeNameVsTimeAdded != null) {
-      this.nodeNameVsTimeAdded.putAll(nodeNameVsTimeAdded);
-    }
-  }
-
-  @Override
-  public void run() {
-    try {
-      synchronized (this) {
-        if (isClosed) {
-          log.warn("NodeAddedTrigger ran but was already closed");
-          throw new RuntimeException("Trigger has been closed");
-        }
-      }
-      log.debug("Running NodeAddedTrigger {}", name);
-
-      Set<String> newLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-      log.debug("Found livenodes: {}", newLiveNodes.size());
-
-      // have any nodes that we were tracking been removed from the cluster?
-      // if so, remove them from the tracking map
-      Set<String> trackingKeySet = nodeNameVsTimeAdded.keySet();
-      trackingKeySet.retainAll(newLiveNodes);
-
-      // have any new nodes been added?
-      Set<String> copyOfNew = new HashSet<>(newLiveNodes);
-      copyOfNew.removeAll(lastLiveNodes);
-      copyOfNew.forEach(n -> {
-        long eventTime = cloudManager.getTimeSource().getTimeNs();
-        log.debug("Tracking new node: {} at time {}", n, eventTime);
-        nodeNameVsTimeAdded.put(n, eventTime);
-      });
-
-      // has enough time expired to trigger events for a node?
-      List<String> nodeNames = new ArrayList<>();
-      List<Long> times = new ArrayList<>();
-      for (Iterator<Map.Entry<String, Long>> it = nodeNameVsTimeAdded.entrySet().iterator(); it.hasNext(); ) {
-        Map.Entry<String, Long> entry = it.next();
-        String nodeName = entry.getKey();
-        Long timeAdded = entry.getValue();
-        long now = cloudManager.getTimeSource().getTimeNs();
-        if (TimeUnit.SECONDS.convert(now - timeAdded, TimeUnit.NANOSECONDS) >= getWaitForSecond()) {
-          nodeNames.add(nodeName);
-          times.add(timeAdded);
-        }
-      }
-      AutoScaling.TriggerEventProcessor processor = processorRef.get();
-      if (!nodeNames.isEmpty()) {
-        if (processor != null) {
-          log.debug("NodeAddedTrigger {} firing registered processor for nodes: {} added at times {}, now={}", name,
-              nodeNames, times, cloudManager.getTimeSource().getTimeNs());
-          if (processor.process(new NodeAddedEvent(getEventType(), getName(), times, nodeNames, preferredOp))) {
-            // remove from tracking set only if the fire was accepted
-            nodeNames.forEach(n -> {
-              nodeNameVsTimeAdded.remove(n);
-              removeMarker(n);
-            });
-          }
-        } else  {
-          nodeNames.forEach(n -> {
-            nodeNameVsTimeAdded.remove(n);
-            removeMarker(n);
-          });
-        }
-      }
-      lastLiveNodes = new HashSet<>(newLiveNodes);
-    } catch (RuntimeException e) {
-      log.error("Unexpected exception in NodeAddedTrigger", e);
-    }
-  }
-
-  private void removeMarker(String nodeName) {
-    String path = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
-    try {
-      log.debug("NodeAddedTrigger {} - removing marker path: {}", name, path);
-      if (stateManager.hasData(path)) {
-        stateManager.removeData(path, -1);
-      }
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.debug("Exception removing nodeAdded marker " + nodeName, e);
-    }
-
-  }
-
-  public static class NodeAddedEvent extends TriggerEvent {
-
-    public NodeAddedEvent(TriggerEventType eventType, String source, List<Long> times, List<String> nodeNames, String preferredOp) {
-      // use the oldest time as the time of the event
-      super(eventType, source, times.get(0), null);
-      properties.put(NODE_NAMES, nodeNames);
-      properties.put(EVENT_TIMES, times);
-      properties.put(PREFERRED_OP, preferredOp);
-    }
-  }
-}
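
For reference, NodeAddedTrigger's detection above is plain set arithmetic: retain only tracked nodes that are still live, treat anything in the new live set but missing from the previous snapshot as newly added, and timestamp it until waitFor elapses. A compact sketch of that bookkeeping, with made-up node names and an illustrative waitFor (nothing here is Solr API):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

public class NodeAddedDetectionSketch {
  static final long WAIT_FOR_SECONDS = 30;  // illustrative waitFor

  public static void main(String[] args) {
    Set<String> lastLiveNodes = new HashSet<>(Arrays.asList("node1", "node2"));
    Set<String> newLiveNodes = new HashSet<>(Arrays.asList("node1", "node2", "node3"));
    Map<String, Long> nodeNameVsTimeAdded = new HashMap<>();

    // Drop tracked nodes that have since left the cluster.
    nodeNameVsTimeAdded.keySet().retainAll(newLiveNodes);

    // Anything live now but not in the previous snapshot starts the clock.
    Set<String> added = new HashSet<>(newLiveNodes);
    added.removeAll(lastLiveNodes);
    long now = System.nanoTime();
    added.forEach(n -> nodeNameVsTimeAdded.put(n, now));

    // A node only fires once it has been up for the full waitFor window;
    // node3 was just added, so on this pass it does not fire yet.
    nodeNameVsTimeAdded.forEach((node, timeAdded) -> {
      long elapsed = TimeUnit.SECONDS.convert(System.nanoTime() - timeAdded,
          TimeUnit.NANOSECONDS);
      System.out.printf("%s: up %ds, fires=%b%n", node, elapsed,
          elapsed >= WAIT_FOR_SECONDS);
    });

    // The snapshot carried over to the next run.
    lastLiveNodes = new HashSet<>(newLiveNodes);
    System.out.println("next snapshot: " + lastLiveNodes);
  }
}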

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0ae21ad0/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java
deleted file mode 100644
index ddb4913..0000000
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/NodeLostTrigger.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.cloud.autoscaling;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.solr.client.solrj.cloud.SolrCloudManager;
-import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventType;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.core.SolrResourceLoader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.AutoScalingParams.PREFERRED_OP;
-
-/**
- * Trigger for the {@link TriggerEventType#NODELOST} event
- */
-public class NodeLostTrigger extends TriggerBase {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  private Set<String> lastLiveNodes = new HashSet<>();
-
-  private Map<String, Long> nodeNameVsTimeRemoved = new HashMap<>();
-
-  private String preferredOp;
-
-  public NodeLostTrigger(String name) {
-    super(TriggerEventType.NODELOST, name);
-    TriggerUtils.validProperties(validProperties, PREFERRED_OP);
-  }
-
-  @Override
-  public void init() throws Exception {
-    super.init();
-    lastLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-    log.debug("NodeLostTrigger {} - Initial livenodes: {}", name, lastLiveNodes);
-    // pick up lost nodes for which marker paths were created
-    try {
-      List<String> lost = stateManager.listData(ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH);
-      lost.forEach(n -> {
-        // don't add nodes that have since come back
-        if (!lastLiveNodes.contains(n)) {
-          log.debug("Adding lost node from marker path: {}", n);
-          nodeNameVsTimeRemoved.put(n, cloudManager.getTimeSource().getTimeNs());
-        }
-        removeMarker(n);
-      });
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.warn("Exception retrieving nodeLost markers", e);
-    }
-  }
-
-  @Override
-  public void configure(SolrResourceLoader loader, SolrCloudManager cloudManager, Map<String, Object> properties) throws TriggerValidationException {
-    super.configure(loader, cloudManager, properties);
-    preferredOp = (String) properties.getOrDefault(PREFERRED_OP, CollectionParams.CollectionAction.MOVEREPLICA.toLower());
-    preferredOp = preferredOp.toLowerCase(Locale.ROOT);
-    // verify
-    CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(preferredOp);
-    switch (action) {
-      case MOVEREPLICA:
-      case DELETENODE:
-      case NONE:
-        break;
-      default:
-        throw new TriggerValidationException("Unsupported preferredOperation=" + preferredOp + " specified for node lost trigger");
-    }
-  }
-
-  @Override
-  public void restoreState(AutoScaling.Trigger old) {
-    assert old.isClosed();
-    if (old instanceof NodeLostTrigger) {
-      NodeLostTrigger that = (NodeLostTrigger) old;
-      assert this.name.equals(that.name);
-      this.lastLiveNodes.clear();
-      this.lastLiveNodes.addAll(that.lastLiveNodes);
-      this.nodeNameVsTimeRemoved.clear();
-      this.nodeNameVsTimeRemoved.putAll(that.nodeNameVsTimeRemoved);
-    } else  {
-      throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
-          "Unable to restore state from an unknown type of trigger");
-    }
-  }
-
-  @Override
-  protected Map<String, Object> getState() {
-    Map<String,Object> state = new HashMap<>();
-    state.put("lastLiveNodes", lastLiveNodes);
-    state.put("nodeNameVsTimeRemoved", nodeNameVsTimeRemoved);
-    return state;
-  }
-
-  @Override
-  protected void setState(Map<String, Object> state) {
-    this.lastLiveNodes.clear();
-    this.nodeNameVsTimeRemoved.clear();
-    Collection<String> lastLiveNodes = (Collection<String>)state.get("lastLiveNodes");
-    if (lastLiveNodes != null) {
-      this.lastLiveNodes.addAll(lastLiveNodes);
-    }
-    Map<String,Long> nodeNameVsTimeRemoved = (Map<String,Long>)state.get("nodeNameVsTimeRemoved");
-    if (nodeNameVsTimeRemoved != null) {
-      this.nodeNameVsTimeRemoved.putAll(nodeNameVsTimeRemoved);
-    }
-  }
-
-  @Override
-  public void run() {
-    try {
-      synchronized (this) {
-        if (isClosed) {
-          log.warn("NodeLostTrigger ran but was already closed");
-          return;
-        }
-      }
-
-      Set<String> newLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
-      log.debug("Running NodeLostTrigger: {} with currently live nodes: {}", name, newLiveNodes.size());
-
-      // have any nodes that we were tracking been added to the cluster?
-      // if so, remove them from the tracking map
-      Set<String> trackingKeySet = nodeNameVsTimeRemoved.keySet();
-      trackingKeySet.removeAll(newLiveNodes);
-
-      // have any nodes been removed?
-      Set<String> copyOfLastLiveNodes = new HashSet<>(lastLiveNodes);
-      copyOfLastLiveNodes.removeAll(newLiveNodes);
-      copyOfLastLiveNodes.forEach(n -> {
-        log.debug("Tracking lost node: {}", n);
-        nodeNameVsTimeRemoved.put(n, cloudManager.getTimeSource().getTimeNs());
-      });
-
-      // has enough time expired to trigger events for a node?
-      List<String> nodeNames = new ArrayList<>();
-      List<Long> times = new ArrayList<>();
-      for (Iterator<Map.Entry<String, Long>> it = nodeNameVsTimeRemoved.entrySet().iterator(); it.hasNext(); ) {
-        Map.Entry<String, Long> entry = it.next();
-        String nodeName = entry.getKey();
-        Long timeRemoved = entry.getValue();
-        long now = cloudManager.getTimeSource().getTimeNs();
-        if (TimeUnit.SECONDS.convert(now - timeRemoved, TimeUnit.NANOSECONDS) >= getWaitForSecond()) {
-          nodeNames.add(nodeName);
-          times.add(timeRemoved);
-        }
-      }
-      // fire!
-      AutoScaling.TriggerEventProcessor processor = processorRef.get();
-      if (!nodeNames.isEmpty()) {
-        if (processor != null) {
-          log.debug("NodeLostTrigger firing registered processor for lost nodes: {}", nodeNames);
-          if (processor.process(new NodeLostEvent(getEventType(), getName(), times, nodeNames, preferredOp)))  {
-            // remove from tracking set only if the fire was accepted
-            nodeNames.forEach(n -> {
-              nodeNameVsTimeRemoved.remove(n);
-              removeMarker(n);
-            });
-          } else  {
-            log.debug("NodeLostTrigger processor for lost nodes: {} is not ready, will try later", nodeNames);
-          }
-        } else  {
-          nodeNames.forEach(n -> {
-            nodeNameVsTimeRemoved.remove(n);
-            removeMarker(n);
-          });
-        }
-      }
-      lastLiveNodes = new HashSet<>(newLiveNodes);
-    } catch (RuntimeException e) {
-      log.error("Unexpected exception in NodeLostTrigger", e);
-    }
-  }
-
-  private void removeMarker(String nodeName) {
-    String path = ZkStateReader.SOLR_AUTOSCALING_NODE_LOST_PATH + "/" + nodeName;
-    try {
-      if (stateManager.hasData(path)) {
-        stateManager.removeData(path, -1);
-      }
-    } catch (NoSuchElementException e) {
-      // ignore
-    } catch (Exception e) {
-      log.warn("Exception removing nodeLost marker " + nodeName, e);
-    }
-  }
-
-  public static class NodeLostEvent extends TriggerEvent {
-
-    public NodeLostEvent(TriggerEventType eventType, String source, List<Long> times, List<String> nodeNames, String preferredOp) {
-      // use the oldest time as the time of the event
-      super(eventType, source, times.get(0), null);
-      properties.put(NODE_NAMES, nodeNames);
-      properties.put(EVENT_TIMES, times);
-      properties.put(PREFERRED_OP, preferredOp);
-    }
-  }
-}
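
Both node triggers validate preferredOp the same way at configure time: normalize the configured value, map it to a CollectionAction, and reject anything outside a small whitelist so a misconfiguration fails up front rather than at event time. A minimal stand-alone illustration of that pattern follows; the validate helper and its hard-coded whitelist mirror the node-lost switch above but are examples, not Solr API.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;

public class PreferredOpValidationSketch {
  // Whitelist mirroring the node-lost trigger's switch: MOVEREPLICA, DELETENODE, NONE.
  static final Set<String> ALLOWED =
      new HashSet<>(Arrays.asList("movereplica", "deletenode", "none"));

  // Normalizes and validates the configured op; defaults to "movereplica"
  // just as the trigger falls back to CollectionAction.MOVEREPLICA.
  static String validate(Object configured) {
    String op = configured == null
        ? "movereplica"
        : configured.toString().toLowerCase(Locale.ROOT);
    if (!ALLOWED.contains(op)) {
      throw new IllegalArgumentException(
          "Unsupported preferredOperation=" + op + " specified for node lost trigger");
    }
    return op;
  }

  public static void main(String[] args) {
    System.out.println(validate("DELETENODE"));  // normalized to "deletenode"
    System.out.println(validate(null));          // falls back to "movereplica"
    try {
      validate("splitshard");                    // outside the whitelist
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());        // rejected at configure time
    }
  }
}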