Posted to commits@lucene.apache.org by ab...@apache.org on 2019/02/27 18:26:30 UTC

[lucene-solr] 02/03: SOLR-11127: Initial version (depends on SOLR-13271)

This is an automated email from the ASF dual-hosted git repository.

ab pushed a commit to branch jira/solr-11127-2
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit efb8e61bcba7f0da4d77443365527fb18c9387b0
Author: Andrzej Bialecki <ab...@apache.org>
AuthorDate: Tue Feb 26 16:59:45 2019 +0100

    SOLR-11127: Initial version (depends on SOLR-13271)
---
 .../cloud/api/collections/DeleteCollectionCmd.java |   2 +-
 .../OverseerCollectionMessageHandler.java          |  35 +--
 .../api/collections/ReindexCollectionCmd.java      | 309 +++++++++++++++++++++
 .../solr/handler/admin/CollectionsHandler.java     |  10 +
 .../solrj/request/CollectionAdminRequest.java      |  14 +
 .../solr/common/params/CollectionParams.java       |   3 +-
 6 files changed, 338 insertions(+), 35 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
index e5f6f2d..7177f03 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
@@ -181,7 +181,7 @@ public class DeleteCollectionCmd implements OverseerCollectionMessageHandler.Cmd
     }
   }
 
-  private String referencedByAlias(String collection, Aliases aliases) {
+  public static String referencedByAlias(String collection, Aliases aliases) {
     Objects.requireNonNull(aliases);
     return aliases.getCollectionAliasListMap().entrySet().stream()
         .filter(e -> e.getValue().contains(collection))
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
index 03ec81c..4a458a2 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -31,39 +31,7 @@ import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
 import static org.apache.solr.common.params.CollectionAdminParams.COLLECTION;
 import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
 import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.ALIASPROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.BACKUP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATEALIAS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESNAPSHOT;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEALIAS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETENODE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESNAPSHOT;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MAINTAINROUTEDALIAS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MIGRATE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MIGRATESTATEFORMAT;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOCK_COLL_TASK;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOCK_REPLICA_TASK;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOCK_SHARD_TASK;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MODIFYCOLLECTION;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.OVERSEERSTATUS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.REBALANCELEADERS;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.RELOAD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.REPLACENODE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.RESTORE;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.SPLITSHARD;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.UTILIZENODE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
 import static org.apache.solr.common.params.CommonParams.NAME;
 import static org.apache.solr.common.util.Utils.makeMap;
@@ -274,6 +242,7 @@ public class OverseerCollectionMessageHandler implements OverseerMessageHandler,
         .put(DELETEREPLICA, new DeleteReplicaCmd(this))
         .put(ADDREPLICA, new AddReplicaCmd(this))
         .put(MOVEREPLICA, new MoveReplicaCmd(this))
+        .put(REINDEX_COLLECTION, new ReindexCollectionCmd(this))
         .put(UTILIZENODE, new UtilizeNodeCmd(this))
         .build()
     ;
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReindexCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReindexCollectionCmd.java
new file mode 100644
index 0000000..fa65ecb
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReindexCollectionCmd.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.api.collections;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collections;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.solr.client.solrj.SolrResponse;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.cloud.Overseer;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
+import org.apache.solr.common.params.CollectionParams;
+import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.TimeOut;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Reindex a collection, usually in order to change the index schema.
+ * <p>WARNING: Reindexing is a potentially lossy operation - some indexed data that is not available as
+ * stored fields may be irretrievably lost, so users should use this command with caution, evaluating
+ * the potential impact by using different source and target collection names first, and preserving
+ * the source collection until the evaluation is complete.</p>
+ * <p>Reindexing follows these steps:</p>
+ * <ol>
+ *    <li>create a temporary collection using the most recent schema of the source collection
+ *    (or the one specified in the parameters, which must already exist).</li>
+ *    <li>copy the source documents to the temporary collection, reconstructing them from their stored
+ *    fields and reindexing them using the specified schema. NOTE: some data
+ *    loss may occur if the original stored field data is not available!</li>
+ *    <li>if the target collection name is not specified then the same name as the source is assumed,
+ *    and at this step the source collection is permanently removed.</li>
+ *    <li>create the target collection from scratch with the specified name (or the same as the source if not
+ *    specified), using the new specified schema. NOTE: if the target name was not specified or is the same
+ *    as the source collection then the original collection has already been deleted in the previous step and it
+ *    is not possible to roll back the changes if the process is interrupted. The (possibly incomplete) data
+ *    is still available in the temporary collection.</li>
+ *    <li>copy the documents from the temporary collection to the target collection, using the specified schema.</li>
+ *    <li>delete temporary collection(s) and optionally delete the source collection if it still exists.</li>
+ * </ol>
+ */
+public class ReindexCollectionCmd implements OverseerCollectionMessageHandler.Cmd {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  public static final String ABORT = "abort";
+  public static final String KEEP_SOURCE = "keepSource";
+  public static final String TARGET = "target";
+  public static final String TMP_COL_PREFIX = ".reindex_";
+  public static final String CHK_COL_PREFIX = ".reindex_ck_";
+  public static final String REINDEX_PROP = CollectionAdminRequest.PROPERTY_PREFIX + "reindex";
+  public static final String REINDEX_PHASE_PROP = CollectionAdminRequest.PROPERTY_PREFIX + "reindex_phase";
+  public static final String READONLY_PROP = CollectionAdminRequest.PROPERTY_PREFIX + ZkStateReader.READ_ONLY_PROP;
+
+  private final OverseerCollectionMessageHandler ocmh;
+
+  public enum State {
+    IDLE,
+    RUNNING,
+    ABORTED,
+    FINISHED;
+
+    public String toLower() {
+      return toString().toLowerCase(Locale.ROOT);
+    }
+
+    public static State get(String p) {
+      if (p == null) {
+        return null;
+      }
+      p = p.toLowerCase(Locale.ROOT);
+      if (p.startsWith(CollectionAdminRequest.PROPERTY_PREFIX)) {
+        p = p.substring(CollectionAdminRequest.PROPERTY_PREFIX.length());
+      }
+      return states.get(p);
+    }
+    static Map<String, State> states = Collections.unmodifiableMap(
+        Stream.of(State.values()).collect(Collectors.toMap(State::toLower, Function.identity())));
+  }
+
+  public ReindexCollectionCmd(OverseerCollectionMessageHandler ocmh) {
+    this.ocmh = ocmh;
+  }
+
+  @Override
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList results) throws Exception {
+
+    log.info("*** called: {}", message);
+
+    String collection = message.getStr(CommonParams.NAME);
+    if (collection == null || clusterState.getCollectionOrNull(collection) == null) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection name must be specified and must exist");
+    }
+    String target = message.getStr(TARGET);
+    if (target == null) {
+      target = collection;
+    }
+    boolean keepSource = message.getBool(KEEP_SOURCE, false);
+    if (keepSource && target.equals(collection)) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't specify keepSource=true when target is the same as source");
+    }
+    boolean abort = message.getBool(ABORT, false);
+    DocCollection coll = clusterState.getCollection(collection);
+    if (abort) {
+      ZkNodeProps props = new ZkNodeProps(
+          Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.MODIFYCOLLECTION.toLower(),
+          ZkStateReader.COLLECTION_PROP, collection,
+          REINDEX_PROP, State.ABORTED.toLower());
+      ocmh.overseer.offerStateUpdate(Utils.toJSON(props));
+      results.add(State.ABORTED.toLower(), collection);
+      // if needed the cleanup will be performed by the running instance of the command
+      return;
+    }
+    // check it's not already running
+    State state = State.get(coll.getStr(REINDEX_PROP, State.IDLE.toLower()));
+    if (state == State.RUNNING) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Reindex is already running for collection " + collection);
+    }
+    // set the running flag
+    ZkNodeProps props = new ZkNodeProps(
+        Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.MODIFYCOLLECTION.toLower(),
+        ZkStateReader.COLLECTION_PROP, collection,
+        REINDEX_PROP, State.RUNNING.toLower());
+    ocmh.overseer.offerStateUpdate(Utils.toJSON(props));
+
+    boolean aborted = false;
+    Integer rf = coll.getReplicationFactor();
+    Integer numNrt = coll.getNumNrtReplicas();
+    Integer numTlog = coll.getNumTlogReplicas();
+    Integer numPull = coll.getNumPullReplicas();
+    int numShards = coll.getActiveSlices().size();
+
+    String configName = message.getStr(ZkStateReader.CONFIGNAME_PROP, ocmh.zkStateReader.readConfigName(collection));
+    String tmpCollection = TMP_COL_PREFIX + collection;
+    String chkCollection = CHK_COL_PREFIX + collection;
+
+    try {
+      // 0. set up temp and checkpoint collections - delete first if necessary
+      NamedList<Object> cmdResults = new NamedList<>();
+      ZkNodeProps cmd;
+      if (clusterState.getCollectionOrNull(tmpCollection) != null) {
+        // delete the tmp collection
+        cmd = new ZkNodeProps(
+            CommonParams.NAME, tmpCollection,
+            CoreAdminParams.DELETE_METRICS_HISTORY, "true"
+        );
+        ocmh.commandMap.get(CollectionParams.CollectionAction.DELETE).call(clusterState, cmd, cmdResults);
+        // nocommit error checking
+      }
+      if (clusterState.getCollectionOrNull(chkCollection) != null) {
+        // delete the checkpoint collection
+        cmd = new ZkNodeProps(
+            CommonParams.NAME, chkCollection,
+            CoreAdminParams.DELETE_METRICS_HISTORY, "true"
+        );
+        ocmh.commandMap.get(CollectionParams.CollectionAction.DELETE).call(clusterState, cmd, cmdResults);
+        // nocommit error checking
+      }
+
+      if (maybeAbort(collection)) {
+        aborted = true;
+        return;
+      }
+
+      // create the tmp collection - use RF=1
+      cmd = new ZkNodeProps(
+          CommonParams.NAME, tmpCollection,
+          ZkStateReader.NUM_SHARDS_PROP, String.valueOf(numShards),
+          ZkStateReader.REPLICATION_FACTOR, "1",
+          CollectionAdminParams.COLL_CONF, configName,
+          CommonAdminParams.WAIT_FOR_FINAL_STATE, "true"
+      );
+      ocmh.commandMap.get(CollectionParams.CollectionAction.CREATE).call(clusterState, cmd, cmdResults);
+      // nocommit error checking
+
+      // create the checkpoint collection - use RF=1 and 1 shard
+      cmd = new ZkNodeProps(
+          CommonParams.NAME, chkCollection,
+          ZkStateReader.NUM_SHARDS_PROP, "1",
+          ZkStateReader.REPLICATION_FACTOR, "1",
+          CollectionAdminParams.COLL_CONF, configName,
+          CommonAdminParams.WAIT_FOR_FINAL_STATE, "true"
+      );
+      ocmh.commandMap.get(CollectionParams.CollectionAction.CREATE).call(clusterState, cmd, cmdResults);
+      // nocommit error checking
+      // wait for a while until we see both collections
+      TimeOut waitUntil = new TimeOut(30, TimeUnit.SECONDS, ocmh.timeSource);
+      boolean created = false;
+      while (!waitUntil.hasTimedOut()) {
+        waitUntil.sleep(100);
+        // this also refreshes our local var clusterState
+        clusterState = ocmh.cloudManager.getClusterStateProvider().getClusterState();
+        created = clusterState.hasCollection(tmpCollection) && clusterState.hasCollection(chkCollection);
+        if (created) break;
+      }
+      if (!created) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create temporary collection(s)");
+      }
+      if (maybeAbort(collection)) {
+        aborted = true;
+        return;
+      }
+
+      // 1. put the collection in read-only mode
+      cmd = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.MODIFYCOLLECTION.toLower(),
+          ZkStateReader.COLLECTION_PROP, collection,
+          READONLY_PROP, "true");
+      ocmh.overseer.offerStateUpdate(Utils.toJSON(cmd));
+
+      // 2. copy the documents to tmp
+      // Recipe taken from: http://joelsolr.blogspot.com/2016/10/solr-63-batch-jobs-parallel-etl-and.html
+      ModifiableSolrParams q = new ModifiableSolrParams();
+      q.set(CommonParams.QT, "/stream");
+      q.set("expr",
+          "daemon(id=\"" + tmpCollection + "\"," +
+              "terminate=\"true\"," +
+              "commit(" + tmpCollection + "," +
+              "update(" + tmpCollection + "," +
+              "batchSize=100," +
+              "topic(" + chkCollection + "," +
+              collection + "," +
+              "q=\"*:*\"," +
+              "fl=\"*\"," +
+              "id=\"topic_" + tmpCollection + "\"," +
+              // some of the documents, e.g. in .system, contain large blobs
+              "rows=\"100\"," +
+              "initialCheckpoint=\"0\"))))");
+      SolrResponse rsp = ocmh.cloudManager.request(new QueryRequest(q));
+
+      // wait for the daemon to finish
+
+      // 5. set up an alias to use the tmp collection as the target name
+
+      // 6. optionally delete the source collection
+
+      // 7. delete the checkpoint collection
+
+      // nocommit error checking
+    } catch (Exception e) {
+      aborted = true;
+    } finally {
+      if (aborted) {
+        // nocommit - cleanup
+
+        // 1. kill the daemons
+        // 2. cleanup tmp / chk collections IFF the source collection still exists and is not empty
+        // 3. cleanup collection state
+        results.add(State.ABORTED.toLower(), collection);
+      }
+    }
+  }
+
+  private boolean maybeAbort(String collection) throws Exception {
+    DocCollection coll = ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollectionOrNull(collection);
+    if (coll == null) {
+      // collection no longer present - abort
+      return true;
+    }
+    State state = State.get(coll.getStr(REINDEX_PROP, State.RUNNING.toLower()));
+    return state == State.ABORTED;
+  }
+
+  private String getDaemonUrl(SolrResponse rsp) {
+    return null;
+  }
+
+  private void cleanup(String collection, String daemonUrl) throws Exception {
+
+    if (daemonUrl != null) {
+      // kill the daemon
+    }
+    ClusterState clusterState = ocmh.cloudManager.getClusterStateProvider().getClusterState();
+
+  }
+}
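
For reference, the document copy in step 2 follows the daemon/topic Streaming Expressions recipe linked in the code above. A minimal SolrJ sketch of submitting the same expression as an ordinary client request (rather than from inside the Overseer command) could look like the following; the collection names and the ZooKeeper address are illustrative placeholders, not part of this patch.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;

public class ReindexCopySketch {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
      String source = "books";          // collection being reindexed (hypothetical)
      String tmp = ".reindex_books";    // TMP_COL_PREFIX + source
      String chk = ".reindex_ck_books"; // CHK_COL_PREFIX + source, holds topic() checkpoints
      // daemon() keeps pulling batches from topic() over the source collection and
      // pushes them through update() and commit() into the tmp collection until the
      // topic is exhausted; this mirrors the expression built in step 2 above.
      String expr =
          "daemon(id=\"" + tmp + "\",terminate=\"true\"," +
              "commit(" + tmp + "," +
              "update(" + tmp + ",batchSize=100," +
              "topic(" + chk + "," + source + ",q=\"*:*\",fl=\"*\"," +
              "id=\"topic_" + tmp + "\",rows=\"100\",initialCheckpoint=\"0\"))))";
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set(CommonParams.QT, "/stream"); // route the request to the /stream handler
      params.set("expr", expr);
      System.out.println(client.request(new QueryRequest(params), source));
    }
  }
}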
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 3e026af..7b7b1e5 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -52,6 +52,7 @@ import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
 import org.apache.solr.cloud.ZkController.NotInClusterStateException;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.cloud.ZkShardTerms;
+import org.apache.solr.cloud.api.collections.ReindexCollectionCmd;
 import org.apache.solr.cloud.overseer.SliceMutator;
 import org.apache.solr.cloud.rule.ReplicaAssigner;
 import org.apache.solr.cloud.rule.Rule;
@@ -524,6 +525,15 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
 
     RELOAD_OP(RELOAD, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
 
+    REINDEX_COLLECTION_OP(REINDEX_COLLECTION, (req, rsp, h) -> {
+      Map<String, Object> m = copy(req.getParams().required(), null, NAME);
+      copy(req.getParams(), m,
+          ReindexCollectionCmd.ABORT,
+          ReindexCollectionCmd.KEEP_SOURCE,
+          ReindexCollectionCmd.TARGET);
+      return m;
+    }),
+
     SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
       String collection = req.getParams().required().get("collection");
       String shard = req.getParams().required().get("shard");
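
The new REINDEX_COLLECTION_OP requires the name parameter and passes through abort, keepSource and target. A hedged sketch of driving the action through the plain Collections API with a generic SolrJ request follows; the parameter values and the ZooKeeper address are hypothetical.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class ReindexApiSketch {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "REINDEX_COLLECTION"); // CollectionAction added by this patch
      params.set("name", "books");                // required: source collection (hypothetical)
      params.set("target", "books2");             // ReindexCollectionCmd.TARGET
      params.set("keepSource", "true");           // ReindexCollectionCmd.KEEP_SOURCE
      SolrRequest<?> req =
          new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params);
      System.out.println(client.request(req));
    }
  }
}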
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index ea42590..a326abe 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -782,6 +782,20 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
   }
 
   /**
+   * Returns a SolrRequest to reindex a collection
+   */
+  public static ReindexCollection reindexCollection(String collection) {
+    return new ReindexCollection(collection);
+  }
+
+  public static class ReindexCollection extends AsyncCollectionSpecificAdminRequest {
+
+    private ReindexCollection(String collection) {
+      super(CollectionAction.REINDEX_COLLECTION, collection);
+    }
+  }
+
+  /**
    * Returns a SolrRequest to delete a collection
    */
   public static Delete deleteCollection(String collection) {
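
A minimal usage sketch for the new SolrJ request type follows. In this initial version ReindexCollection exposes no setters for target, keepSource or abort, so only the basic call is shown; the collection name "books" and the ZooKeeper address are placeholders.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

public class ReindexSolrjSketch {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
      // Sends action=REINDEX_COLLECTION&name=books to the Collections API.
      CollectionAdminResponse rsp =
          CollectionAdminRequest.reindexCollection("books").process(client);
      System.out.println(rsp.getResponse());
    }
  }
}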
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
index dee2f5f..6176263 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionParams.java
@@ -121,7 +121,8 @@ public interface CollectionParams {
     MOCK_REPLICA_TASK(false, LockLevel.REPLICA),
     NONE(false, LockLevel.NONE),
     // TODO: not implemented yet
-    MERGESHARDS(true, LockLevel.SHARD)
+    MERGESHARDS(true, LockLevel.SHARD),
+    REINDEX_COLLECTION(true, LockLevel.COLLECTION)
     ;
     public final boolean isWrite;