Posted to issues@solr.apache.org by GitBox <gi...@apache.org> on 2022/03/01 01:06:59 UTC

[GitHub] [solr] risdenk commented on a change in pull request #705: SOLR-14920: Spotless formatting for core - non-test only

risdenk commented on a change in pull request #705:
URL: https://github.com/apache/solr/pull/705#discussion_r816289962



##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
##########
@@ -150,107 +157,141 @@ void deleteReplicaBasedOnCount(ClusterState clusterState,
       Slice shardSlice = entry.getKey();
       String shardId = shardSlice.getName();
       Set<String> replicaNames = entry.getValue();
-      Set<Replica> replicas = replicaNames.stream()
-          .map(name -> shardSlice.getReplica(name)).collect(Collectors.toSet());
+      Set<Replica> replicas =
+          replicaNames.stream()
+              .map(name -> shardSlice.getReplica(name))
+              .collect(Collectors.toSet());
       assignStrategy.verifyDeleteReplicas(ccc.getSolrCloudManager(), coll, shardId, replicas);
     }
 
     for (Map.Entry<Slice, Set<String>> entry : shardToReplicasMapping.entrySet()) {
       Slice shardSlice = entry.getKey();
       String shardId = shardSlice.getName();
       Set<String> replicas = entry.getValue();
-      //callDeleteReplica on all replicas
-      for (String replica: replicas) {
+      // callDeleteReplica on all replicas
+      for (String replica : replicas) {
         log.debug("Deleting replica {}  for shard {} based on count {}", replica, shardId, count);
         // don't verify with the placement plugin - we already did it
         deleteCore(coll, shardId, replica, message, results, onComplete, parallel, false);
       }
       results.add("shard_id", shardId);
       results.add("replicas_deleted", replicas);
     }
-
   }
 
-
-  /**
-   * Pick replicas to be deleted. Avoid picking the leader.
-   */
-  private Set<String> pickReplicasTobeDeleted(Slice slice, String shard, String collectionName, int count) {
+  /** Pick replicas to be deleted. Avoid picking the leader. */
+  private Set<String> pickReplicasTobeDeleted(
+      Slice slice, String shard, String collectionName, int count) {
     validateReplicaAvailability(slice, shard, collectionName, count);
     Collection<Replica> allReplicas = slice.getReplicas();
     Set<String> replicasToBeRemoved = new HashSet<String>();
     Replica leader = slice.getLeader();
-    for (Replica replica: allReplicas) {
+    for (Replica replica : allReplicas) {
       if (count == 0) {
         break;
       }
-      //Try avoiding to pick up the leader to minimize activity on the cluster.
+      // Try avoiding to pick up the leader to minimize activity on the cluster.
       if (leader.getCoreName().equals(replica.getCoreName())) {
         continue;
       }
       replicasToBeRemoved.add(replica.getName());
-      count --;
+      count--;
     }
     return replicasToBeRemoved;
   }
 
   /**
-   * Validate if there is less replicas than requested to remove. Also error out if there is
-   * only one replica available
+   * Validate if there is less replicas than requested to remove. Also error out if there is only
+   * one replica available
    */
-  private void validateReplicaAvailability(Slice slice, String shard, String collectionName, int count) {
-    //If there is a specific shard passed, validate if there any or just 1 replica left
+  private void validateReplicaAvailability(
+      Slice slice, String shard, String collectionName, int count) {
+    // If there is a specific shard passed, validate if there any or just 1 replica left
     if (slice != null) {
       Collection<Replica> allReplicasForShard = slice.getReplicas();
       if (allReplicasForShard == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No replicas found  in shard/collection: " +
-                shard + "/"  + collectionName);
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "No replicas found  in shard/collection: " + shard + "/" + collectionName);
       }
 
-
       if (allReplicasForShard.size() == 1) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There is only one replica available in shard/collection: " +
-                shard + "/" + collectionName + ". Cannot delete that.");
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "There is only one replica available in shard/collection: "
+                + shard
+                + "/"
+                + collectionName
+                + ". Cannot delete that.");
       }
 
       if (allReplicasForShard.size() <= count) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There are lesser num replicas requested to be deleted than are available in shard/collection : " +
-                shard + "/"  + collectionName  + " Requested: "  + count + " Available: " + allReplicasForShard.size() + ".");
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "There are lesser num replicas requested to be deleted than are available in shard/collection : "
+                + shard
+                + "/"
+                + collectionName
+                + " Requested: "
+                + count
+                + " Available: "
+                + allReplicasForShard.size()
+                + ".");
       }
     }
   }
 
-  void deleteCore(DocCollection coll,
-                  String shardId,
-                  String replicaName,
-                  ZkNodeProps message,
-                  NamedList<Object> results,
-                  Runnable onComplete,
-                  boolean parallel,
-                  boolean verifyPlacement) throws KeeperException, IOException, InterruptedException {
+  void deleteCore(
+      DocCollection coll,
+      String shardId,
+      String replicaName,
+      ZkNodeProps message,
+      NamedList<Object> results,
+      Runnable onComplete,
+      boolean parallel,
+      boolean verifyPlacement)
+      throws KeeperException, IOException, InterruptedException {
 
     Slice slice = coll.getSlice(shardId);
     Replica replica = slice.getReplica(replicaName);
     if (replica == null) {
       ArrayList<String> l = new ArrayList<>();
-      for (Replica r : slice.getReplicas())
-        l.add(r.getName());
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " +  replicaName + " in shard/collection : " +
-              shardId  + "/" + coll.getName() + " available replicas are " +  StrUtils.join(l, ','));
+      for (Replica r : slice.getReplicas()) l.add(r.getName());
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "Invalid replica : "
+              + replicaName
+              + " in shard/collection : "
+              + shardId
+              + "/"
+              + coll.getName()
+              + " available replicas are "
+              + StrUtils.join(l, ','));
     }
 
-    // If users are being safe and only want to remove a shard if it is down, they can specify onlyIfDown=true
+    // If users are being safe and only want to remove a shard if it is down, they can specify
+    // onlyIfDown=true
     // on the command.

Review comment:
       Fix this
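       One way to re-wrap this by hand so the sentence stays together (a sketch only; assumes the formatter's apparent ~100-column limit, final wording is the author's call):

    // If users are being safe and only want to remove a shard if it is down, they can
    // specify onlyIfDown=true on the command.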

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -110,132 +109,176 @@ public DistributedCollectionConfigSetCommandRunner(CoreContainer coreContainer)
     this.coreContainer = coreContainer;
 
     if (log.isInfoEnabled()) {
-      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer is started regardless
+      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer
+      // is started regardless
       // of how Collection API is handled, so it doesn't really know...
-      log.info("Creating DistributedCollectionConfigSetCommandRunner. Collection and ConfigSet APIs are running distributed (not Overseer based)");
+      log.info(
+          "Creating DistributedCollectionConfigSetCommandRunner. Collection and ConfigSet APIs are running distributed (not Overseer based)");
     }
 
     // TODO we should look at how everything is getting closed when the node is shutdown.
-    //  But it seems that CollectionsHandler (that creates instances of this class) is not really closed, so maybe it doesn't matter?
-    // With distributed Collection API execution, each node will have such an executor but given how thread pools work,
+    //  But it seems that CollectionsHandler (that creates instances of this class) is not really
+    // closed, so maybe it doesn't matter?
+    // With distributed Collection API execution, each node will have such an executor but given how
+    // thread pools work,
     // threads will only be created if needed (including the corePoolSize threads).
-    distributedCollectionApiExecutorService = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
-        new SynchronousQueue<>(),
-        new SolrNamedThreadFactory("DistributedCollectionApiExecutorService"));
-
-    commandsExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 20, 0L, TimeUnit.MILLISECONDS,
-        new SynchronousQueue<>(),
-        new SolrNamedThreadFactory("DistributedCollectionApiCommandExecutor"));
-
-    ccc = new DistributedCollectionCommandContext(this.coreContainer, this.distributedCollectionApiExecutorService);
+    distributedCollectionApiExecutorService =
+        new ExecutorUtil.MDCAwareThreadPoolExecutor(
+            5,
+            10,
+            0L,
+            TimeUnit.MILLISECONDS,
+            new SynchronousQueue<>(),
+            new SolrNamedThreadFactory("DistributedCollectionApiExecutorService"));
+
+    commandsExecutor =
+        new ExecutorUtil.MDCAwareThreadPoolExecutor(
+            5,
+            20,
+            0L,
+            TimeUnit.MILLISECONDS,
+            new SynchronousQueue<>(),
+            new SolrNamedThreadFactory("DistributedCollectionApiCommandExecutor"));
+
+    ccc =
+        new DistributedCollectionCommandContext(
+            this.coreContainer, this.distributedCollectionApiExecutorService);
     commandMapper = new CollApiCmds.CommandMap(ccc);
-    asyncTaskTracker = new DistributedApiAsyncTracker(ccc.getZkStateReader().getZkClient(), ZK_ASYNC_ROOT);
+    asyncTaskTracker =
+        new DistributedApiAsyncTracker(ccc.getZkStateReader().getZkClient(), ZK_ASYNC_ROOT);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#getAsyncTaskRequestStatus(String)}
-   */
-  public Pair<RequestStatusState, OverseerSolrResponse> getAsyncTaskRequestStatus(String asyncId) throws Exception {
+  /** See {@link DistributedApiAsyncTracker#getAsyncTaskRequestStatus(String)} */
+  public Pair<RequestStatusState, OverseerSolrResponse> getAsyncTaskRequestStatus(String asyncId)
+      throws Exception {
     return asyncTaskTracker.getAsyncTaskRequestStatus(asyncId);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#deleteSingleAsyncId(String)}
-   */
+  /** See {@link DistributedApiAsyncTracker#deleteSingleAsyncId(String)} */
   public boolean deleteSingleAsyncId(String asyncId) throws Exception {
     return asyncTaskTracker.deleteSingleAsyncId(asyncId);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#deleteAllAsyncIds()}
-   */
+  /** See {@link DistributedApiAsyncTracker#deleteAllAsyncIds()} */
   public void deleteAllAsyncIds() throws Exception {
     asyncTaskTracker.deleteAllAsyncIds();
   }
 
-
   /**
-   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to overseer queue and
-   * instead calls this method, this method is expected to do the equivalent of what Overseer does in
-   * {@link org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage}.
-   * <p>
-   * The steps leading to that call in the Overseer execution path are (and the equivalent is done here):
+   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to
+   * overseer queue and instead calls this method, this method is expected to do the equivalent of
+   * what Overseer does in {@link
+   * org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage}.
+   *
+   * <p>The steps leading to that call in the Overseer execution path are (and the equivalent is
+   * done here):
+   *
    * <ul>
-   * <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK queue, grabs the
-   * corresponding locks (write lock on the config set target of the API command and a read lock on the base config set
-   * if any - the case for config set creation) then executes the command using an executor service (it also checks the
-   * asyncId if any is specified but async calls are not supported for Config Set API calls).</li>
-   * <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an executor thread) a call is made to
-   * {@link org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage} which does a few checks and calls the
-   * appropriate Config Set method.
+   *   <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK
+   *       queue, grabs the corresponding locks (write lock on the config set target of the API
+   *       command and a read lock on the base config set if any - the case for config set creation)
+   *       then executes the command using an executor service (it also checks the asyncId if any is
+   *       specified but async calls are not supported for Config Set API calls).
+   *   <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an
+   *       executor thread) a call is made to {@link
+   *       org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage} which does a few
+   *       checks and calls the appropriate Config Set method.
    * </ul>
    */
-  public void runConfigSetCommand(SolrQueryResponse rsp, ConfigSetsHandler.ConfigSetOperation operation, Map<String, Object> result, long timeoutMs) throws Exception {
-    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it through this method earlier).
+  public void runConfigSetCommand(
+      SolrQueryResponse rsp,
+      ConfigSetsHandler.ConfigSetOperation operation,
+      Map<String, Object> result,
+      long timeoutMs)
+      throws Exception {
+    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it
+    // through this method earlier).
     // See stopAndWaitForPendingTasksToComplete() below
     if (shuttingDown) {
-      throw new SolrException(SolrException.ErrorCode.CONFLICT, "Solr is shutting down, no more Config Set API tasks may be executed");
+      throw new SolrException(
+          SolrException.ErrorCode.CONFLICT,
+          "Solr is shutting down, no more Config Set API tasks may be executed");
     }
 
     ConfigSetParams.ConfigSetAction action = operation.getAction();
 
     // never null
     String configSetName = (String) result.get(NAME);
     // baseConfigSetName will be null if we're not creating a new config set
-    String baseConfigSetName = ConfigSetCmds.getBaseConfigSetName(action, (String) result.get(ConfigSetCmds.BASE_CONFIGSET));
+    String baseConfigSetName =
+        ConfigSetCmds.getBaseConfigSetName(
+            action, (String) result.get(ConfigSetCmds.BASE_CONFIGSET));
 
     if (log.isInfoEnabled()) {
       log.info("Running Config Set API locally for " + action + " " + configSetName); // nowarn
     }
 
-    ConfigSetCommandRunner commandRunner = new ConfigSetCommandRunner(new ZkNodeProps(result), action, configSetName, baseConfigSetName);
+    ConfigSetCommandRunner commandRunner =
+        new ConfigSetCommandRunner(
+            new ZkNodeProps(result), action, configSetName, baseConfigSetName);
     final Future<Void> taskFuture;
     try {
       taskFuture = commandsExecutor.submit(commandRunner);
     } catch (RejectedExecutionException ree) {
-      throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Too many executing commands", ree);
+      throw new SolrException(
+          SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Too many executing commands", ree);
     }
 
-    // Wait for a while... Just like Overseer based Config Set API (wait can timeout but actual command execution does not)
+    // Wait for a while... Just like Overseer based Config Set API (wait can timeout but actual
+    // command execution does not)
     try {
       taskFuture.get(timeoutMs, TimeUnit.MILLISECONDS);
     } catch (TimeoutException te) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, action + " " + configSetName + " timed out after " + timeoutMs + "ms");
+      throw new SolrException(
+          SolrException.ErrorCode.SERVER_ERROR,
+          action + " " + configSetName + " timed out after " + timeoutMs + "ms");
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, action + " " + configSetName + " interrupted", e);
+      throw new SolrException(
+          SolrException.ErrorCode.SERVER_ERROR, action + " " + configSetName + " interrupted", e);
     }
   }
 
   /**
-   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to overseer queue and
-   * instead calls this method, this method is expected to do the equivalent of what Overseer does in
-   * {@link org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler#processMessage}.
-   * <p>
-   * The steps leading to that call in the Overseer execution path are (and the equivalent is done here):
+   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to
+   * overseer queue and instead calls this method, this method is expected to do the equivalent of
+   * what Overseer does in {@link
+   * org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler#processMessage}.
+   *
+   * <p>The steps leading to that call in the Overseer execution path are (and the equivalent is
+   * done here):
+   *
    * <ul>
-   * <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK queue, grabs the
-   * corresponding lock (Collection API calls do locking to prevent non compatible concurrent modifications of a collection),
-   * marks the async id of the task as running then executes the command using an executor service</li>
-   * <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an executor thread) a call is made to
-   * {@link org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler#processMessage} which sets the logging
-   * context, calls {@link CollApiCmds.CollectionApiCommand#call}
+   *   <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK
+   *       queue, grabs the corresponding lock (Collection API calls do locking to prevent non
+   *       compatible concurrent modifications of a collection), marks the async id of the task as
+   *       running then executes the command using an executor service
+   *   <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an
+   *       executor thread) a call is made to {@link
+   *       org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler#processMessage}
+   *       which sets the logging context, calls {@link CollApiCmds.CollectionApiCommand#call}
    * </ul>
    */
-  public OverseerSolrResponse runCollectionCommand(ZkNodeProps message, CollectionParams.CollectionAction action, long timeoutMs) {
-    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it through this method earlier).
+  public OverseerSolrResponse runCollectionCommand(
+      ZkNodeProps message, CollectionParams.CollectionAction action, long timeoutMs) {
+    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it
+    // through this method earlier).
     // See stopAndWaitForPendingTasksToComplete() below

Review comment:
       Fix this
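       A possible manual reflow that keeps the sentence on contiguous lines (sketch, same ~100-column assumption):

    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it
    // through this method earlier). See stopAndWaitForPendingTasksToComplete() below.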

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -376,12 +451,15 @@ public OverseerSolrResponse call() {
           }
         } finally {
           try {
-            // TODO If the Collection API command failed because the collection does not exist, we've just created some lock directory
-            //  structure for a non existent collection... Maybe try to remove it here? No big deal for now as leftover nodes in the
+            // TODO If the Collection API command failed because the collection does not exist,
+            // we've just created some lock directory
+            //  structure for a non existent collection... Maybe try to remove it here? No big deal
+            // for now as leftover nodes in the
             //  lock hierarchy do no harm, and there shouldn't be too many of those.

Review comment:
       Fix this
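       For reference, the TODO could be hand-wrapped along these lines (a sketch, not a definitive layout):

            // TODO If the Collection API command failed because the collection does not exist,
            //  we've just created some lock directory structure for a non existent collection...
            //  Maybe try to remove it here? No big deal for now as leftover nodes in the lock
            //  hierarchy do no harm, and there shouldn't be too many of those.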

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAlias.java
##########
@@ -340,56 +389,79 @@ private String createAllRequiredCollections(AddUpdateCommand cmd, CandidateColle
     } while (true);
   }
 
-  private CandidateCollection doSynchronous(AddUpdateCommand cmd, CandidateCollection targetCollectionDesc, CoreContainer coreContainer) {
-    ensureCollection(targetCollectionDesc.getCreationCollection(), coreContainer); // *should* throw if fails for some reason but...
+  private CandidateCollection doSynchronous(
+      AddUpdateCommand cmd, CandidateCollection targetCollectionDesc, CoreContainer coreContainer) {
+    ensureCollection(
+        targetCollectionDesc.getCreationCollection(),
+        coreContainer); // *should* throw if fails for some reason but...
     ZkController zkController = coreContainer.getZkController();
     updateParsedCollectionAliases(zkController.zkStateReader, true);
-    List<String> observedCols = zkController.zkStateReader.aliasesManager.getAliases().getCollectionAliasListMap().get(getAliasName());
+    List<String> observedCols =
+        zkController
+            .zkStateReader
+            .aliasesManager
+            .getAliases()
+            .getCollectionAliasListMap()
+            .get(getAliasName());
     if (!observedCols.contains(targetCollectionDesc.creationCollection)) {
       // if collection creation did not occur we've failed. Bail out.
-      throw new SolrException(SERVER_ERROR, "After we attempted to create " + targetCollectionDesc.creationCollection + " it did not exist");
+      throw new SolrException(
+          SERVER_ERROR,
+          "After we attempted to create "
+              + targetCollectionDesc.creationCollection
+              + " it did not exist");
     }
-    // then recalculate the candiate, which may result in continuation or termination the loop calling this method
+    // then recalculate the candiate, which may result in continuation or termination the loop
+    // calling this method
     targetCollectionDesc = findCandidateGivenValue(cmd);
     return targetCollectionDesc;
   }
 
-  private String doPreemptive(CandidateCollection targetCollectionDesc, SolrCore core, CoreContainer coreContainer) {
+  private String doPreemptive(
+      CandidateCollection targetCollectionDesc, SolrCore core, CoreContainer coreContainer) {
 
     if (!this.preemptiveCreateOnceAlready) {
-      preemptiveAsync(() -> {
-        try {
-          ensureCollection(targetCollectionDesc.creationCollection, coreContainer);
-        } catch (Exception e) {
-          log.error("Async creation of a collection for routed Alias {} failed!", this.getAliasName(), e);
-        }
-      }, core);
+      preemptiveAsync(
+          () -> {
+            try {
+              ensureCollection(targetCollectionDesc.creationCollection, coreContainer);
+            } catch (Exception e) {
+              log.error(
+                  "Async creation of a collection for routed Alias {} failed!",
+                  this.getAliasName(),
+                  e);
+            }
+          },
+          core);
     }
     return targetCollectionDesc.destinationCollection;
   }
 
   /**
    * Calculate the head collection (i.e. the most recent one for a TRA) if this routed alias has an
-   * implicit order, or if the collection is unordered return the appropriate collection name
-   * for the value in the current document. This method should never return null.
+   * implicit order, or if the collection is unordered return the appropriate collection name for
+   * the value in the current document. This method should never return null.
    */
-  abstract protected String getHeadCollectionIfOrdered(AddUpdateCommand cmd);
+  protected abstract String getHeadCollectionIfOrdered(AddUpdateCommand cmd);
 
   private void preemptiveAsync(Runnable r, SolrCore core) {
     preemptiveCreateOnceAlready = true;
     core.runAsync(r);
   }
 
   private SolrException unknownCreateType() {
-    return new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown creation type while adding " +
-        "document to a Time Routed Alias! This is a bug caused when a creation type has been added but " +
-        "not all code has been updated to handle it.");
+    return new SolrException(
+        SolrException.ErrorCode.SERVER_ERROR,
+        "Unknown creation type while adding "
+            + "document to a Time Routed Alias! This is a bug caused when a creation type has been added but "
+            + "not all code has been updated to handle it.");
   }
 
   void ensureCollection(String targetCollection, CoreContainer coreContainer) {
     CollectionsHandler collectionsHandler = coreContainer.getCollectionsHandler();
 
-    // Invoke MANINTAIN_ROUTED_ALIAS (in the Overseer, locked by alias name).  It will create the collection
+    // Invoke MANINTAIN_ROUTED_ALIAS (in the Overseer, locked by alias name).  It will create the
+    // collection
     //   and update the alias contingent on the requested collection name not already existing.
     //   otherwise it will return (without error).

Review comment:
       Fix this
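       A possible hand-wrapped version (sketch; it also happens to fix the pre-existing "MANINTAIN" misspelling, since the action elsewhere in this PR is MAINTAINROUTEDALIAS - keeping the original spelling is equally valid):

    // Invoke MAINTAINROUTEDALIAS (in the Overseer, locked by alias name). It will create the
    //   collection and update the alias contingent on the requested collection name not already
    //   existing, otherwise it will return (without error).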

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
##########
@@ -548,8 +676,10 @@ public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList<O
 
       if (repFactor == 1) {
         // A commit is needed so that documents are visible when the sub-shard replicas come up
-        // (Note: This commit used to be after the state switch, but was brought here before the state switch
-        //  as per SOLR-13945 so that sub shards don't come up empty, momentarily, after being marked active) 
+        // (Note: This commit used to be after the state switch, but was brought here before the
+        // state switch
+        //  as per SOLR-13945 so that sub shards don't come up empty, momentarily, after being
+        // marked active)

Review comment:
       Fix this
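       One possible manual reflow of the note (sketch only, assuming the ~100-column limit):

        // A commit is needed so that documents are visible when the sub-shard replicas come up
        // (Note: This commit used to be after the state switch, but was brought here before the
        //  state switch as per SOLR-13945 so that sub shards don't come up empty, momentarily,
        //  after being marked active)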

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -110,132 +109,176 @@ public DistributedCollectionConfigSetCommandRunner(CoreContainer coreContainer)
     this.coreContainer = coreContainer;
 
     if (log.isInfoEnabled()) {
-      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer is started regardless
+      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer
+      // is started regardless
       // of how Collection API is handled, so it doesn't really know...
-      log.info("Creating DistributedCollectionConfigSetCommandRunner. Collection and ConfigSet APIs are running distributed (not Overseer based)");
+      log.info(
+          "Creating DistributedCollectionConfigSetCommandRunner. Collection and ConfigSet APIs are running distributed (not Overseer based)");
     }
 
     // TODO we should look at how everything is getting closed when the node is shutdown.
-    //  But it seems that CollectionsHandler (that creates instances of this class) is not really closed, so maybe it doesn't matter?
-    // With distributed Collection API execution, each node will have such an executor but given how thread pools work,
+    //  But it seems that CollectionsHandler (that creates instances of this class) is not really
+    // closed, so maybe it doesn't matter?
+    // With distributed Collection API execution, each node will have such an executor but given how
+    // thread pools work,
     // threads will only be created if needed (including the corePoolSize threads).
-    distributedCollectionApiExecutorService = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
-        new SynchronousQueue<>(),
-        new SolrNamedThreadFactory("DistributedCollectionApiExecutorService"));
-
-    commandsExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 20, 0L, TimeUnit.MILLISECONDS,
-        new SynchronousQueue<>(),
-        new SolrNamedThreadFactory("DistributedCollectionApiCommandExecutor"));
-
-    ccc = new DistributedCollectionCommandContext(this.coreContainer, this.distributedCollectionApiExecutorService);
+    distributedCollectionApiExecutorService =
+        new ExecutorUtil.MDCAwareThreadPoolExecutor(
+            5,
+            10,
+            0L,
+            TimeUnit.MILLISECONDS,
+            new SynchronousQueue<>(),
+            new SolrNamedThreadFactory("DistributedCollectionApiExecutorService"));
+
+    commandsExecutor =
+        new ExecutorUtil.MDCAwareThreadPoolExecutor(
+            5,
+            20,
+            0L,
+            TimeUnit.MILLISECONDS,
+            new SynchronousQueue<>(),
+            new SolrNamedThreadFactory("DistributedCollectionApiCommandExecutor"));
+
+    ccc =
+        new DistributedCollectionCommandContext(
+            this.coreContainer, this.distributedCollectionApiExecutorService);
     commandMapper = new CollApiCmds.CommandMap(ccc);
-    asyncTaskTracker = new DistributedApiAsyncTracker(ccc.getZkStateReader().getZkClient(), ZK_ASYNC_ROOT);
+    asyncTaskTracker =
+        new DistributedApiAsyncTracker(ccc.getZkStateReader().getZkClient(), ZK_ASYNC_ROOT);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#getAsyncTaskRequestStatus(String)}
-   */
-  public Pair<RequestStatusState, OverseerSolrResponse> getAsyncTaskRequestStatus(String asyncId) throws Exception {
+  /** See {@link DistributedApiAsyncTracker#getAsyncTaskRequestStatus(String)} */
+  public Pair<RequestStatusState, OverseerSolrResponse> getAsyncTaskRequestStatus(String asyncId)
+      throws Exception {
     return asyncTaskTracker.getAsyncTaskRequestStatus(asyncId);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#deleteSingleAsyncId(String)}
-   */
+  /** See {@link DistributedApiAsyncTracker#deleteSingleAsyncId(String)} */
   public boolean deleteSingleAsyncId(String asyncId) throws Exception {
     return asyncTaskTracker.deleteSingleAsyncId(asyncId);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#deleteAllAsyncIds()}
-   */
+  /** See {@link DistributedApiAsyncTracker#deleteAllAsyncIds()} */
   public void deleteAllAsyncIds() throws Exception {
     asyncTaskTracker.deleteAllAsyncIds();
   }
 
-
   /**
-   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to overseer queue and
-   * instead calls this method, this method is expected to do the equivalent of what Overseer does in
-   * {@link org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage}.
-   * <p>
-   * The steps leading to that call in the Overseer execution path are (and the equivalent is done here):
+   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to
+   * overseer queue and instead calls this method, this method is expected to do the equivalent of
+   * what Overseer does in {@link
+   * org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage}.
+   *
+   * <p>The steps leading to that call in the Overseer execution path are (and the equivalent is
+   * done here):
+   *
    * <ul>
-   * <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK queue, grabs the
-   * corresponding locks (write lock on the config set target of the API command and a read lock on the base config set
-   * if any - the case for config set creation) then executes the command using an executor service (it also checks the
-   * asyncId if any is specified but async calls are not supported for Config Set API calls).</li>
-   * <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an executor thread) a call is made to
-   * {@link org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage} which does a few checks and calls the
-   * appropriate Config Set method.
+   *   <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK
+   *       queue, grabs the corresponding locks (write lock on the config set target of the API
+   *       command and a read lock on the base config set if any - the case for config set creation)
+   *       then executes the command using an executor service (it also checks the asyncId if any is
+   *       specified but async calls are not supported for Config Set API calls).
+   *   <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an
+   *       executor thread) a call is made to {@link
+   *       org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage} which does a few
+   *       checks and calls the appropriate Config Set method.
    * </ul>
    */
-  public void runConfigSetCommand(SolrQueryResponse rsp, ConfigSetsHandler.ConfigSetOperation operation, Map<String, Object> result, long timeoutMs) throws Exception {
-    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it through this method earlier).
+  public void runConfigSetCommand(
+      SolrQueryResponse rsp,
+      ConfigSetsHandler.ConfigSetOperation operation,
+      Map<String, Object> result,
+      long timeoutMs)
+      throws Exception {
+    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it
+    // through this method earlier).
     // See stopAndWaitForPendingTasksToComplete() below
     if (shuttingDown) {
-      throw new SolrException(SolrException.ErrorCode.CONFLICT, "Solr is shutting down, no more Config Set API tasks may be executed");
+      throw new SolrException(
+          SolrException.ErrorCode.CONFLICT,
+          "Solr is shutting down, no more Config Set API tasks may be executed");
     }
 
     ConfigSetParams.ConfigSetAction action = operation.getAction();
 
     // never null
     String configSetName = (String) result.get(NAME);
     // baseConfigSetName will be null if we're not creating a new config set
-    String baseConfigSetName = ConfigSetCmds.getBaseConfigSetName(action, (String) result.get(ConfigSetCmds.BASE_CONFIGSET));
+    String baseConfigSetName =
+        ConfigSetCmds.getBaseConfigSetName(
+            action, (String) result.get(ConfigSetCmds.BASE_CONFIGSET));
 
     if (log.isInfoEnabled()) {
       log.info("Running Config Set API locally for " + action + " " + configSetName); // nowarn
     }
 
-    ConfigSetCommandRunner commandRunner = new ConfigSetCommandRunner(new ZkNodeProps(result), action, configSetName, baseConfigSetName);
+    ConfigSetCommandRunner commandRunner =
+        new ConfigSetCommandRunner(
+            new ZkNodeProps(result), action, configSetName, baseConfigSetName);
     final Future<Void> taskFuture;
     try {
       taskFuture = commandsExecutor.submit(commandRunner);
     } catch (RejectedExecutionException ree) {
-      throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Too many executing commands", ree);
+      throw new SolrException(
+          SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Too many executing commands", ree);
     }
 
-    // Wait for a while... Just like Overseer based Config Set API (wait can timeout but actual command execution does not)
+    // Wait for a while... Just like Overseer based Config Set API (wait can timeout but actual
+    // command execution does not)
     try {
       taskFuture.get(timeoutMs, TimeUnit.MILLISECONDS);
     } catch (TimeoutException te) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, action + " " + configSetName + " timed out after " + timeoutMs + "ms");
+      throw new SolrException(
+          SolrException.ErrorCode.SERVER_ERROR,
+          action + " " + configSetName + " timed out after " + timeoutMs + "ms");
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, action + " " + configSetName + " interrupted", e);
+      throw new SolrException(
+          SolrException.ErrorCode.SERVER_ERROR, action + " " + configSetName + " interrupted", e);
     }
   }
 
   /**
-   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to overseer queue and
-   * instead calls this method, this method is expected to do the equivalent of what Overseer does in
-   * {@link org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler#processMessage}.
-   * <p>
-   * The steps leading to that call in the Overseer execution path are (and the equivalent is done here):
+   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to
+   * overseer queue and instead calls this method, this method is expected to do the equivalent of
+   * what Overseer does in {@link
+   * org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler#processMessage}.
+   *
+   * <p>The steps leading to that call in the Overseer execution path are (and the equivalent is
+   * done here):
+   *
    * <ul>
-   * <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK queue, grabs the
-   * corresponding lock (Collection API calls do locking to prevent non compatible concurrent modifications of a collection),
-   * marks the async id of the task as running then executes the command using an executor service</li>
-   * <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an executor thread) a call is made to
-   * {@link org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler#processMessage} which sets the logging
-   * context, calls {@link CollApiCmds.CollectionApiCommand#call}
+   *   <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK
+   *       queue, grabs the corresponding lock (Collection API calls do locking to prevent non
+   *       compatible concurrent modifications of a collection), marks the async id of the task as
+   *       running then executes the command using an executor service
+   *   <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an
+   *       executor thread) a call is made to {@link
+   *       org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler#processMessage}
+   *       which sets the logging context, calls {@link CollApiCmds.CollectionApiCommand#call}
    * </ul>
    */
-  public OverseerSolrResponse runCollectionCommand(ZkNodeProps message, CollectionParams.CollectionAction action, long timeoutMs) {
-    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it through this method earlier).
+  public OverseerSolrResponse runCollectionCommand(
+      ZkNodeProps message, CollectionParams.CollectionAction action, long timeoutMs) {
+    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it
+    // through this method earlier).
     // See stopAndWaitForPendingTasksToComplete() below
     if (shuttingDown) {
-      throw new SolrException(SolrException.ErrorCode.CONFLICT, "Solr is shutting down, no more Collection API tasks may be executed");
+      throw new SolrException(
+          SolrException.ErrorCode.CONFLICT,
+          "Solr is shutting down, no more Collection API tasks may be executed");
     }
 
     final String asyncId = message.getStr(ASYNC);
 
     if (log.isInfoEnabled()) {
-      log.info("Running Collection API locally for " + action.name() + " asyncId=" + asyncId); // nowarn
+      log.info(
+          "Running Collection API locally for " + action.name() + " asyncId=" + asyncId); // nowarn
     }
 
-    // Following the call below returning true, we must eventually cancel or complete the task. Happens either in the
+    // Following the call below returning true, we must eventually cancel or complete the task.
+    // Happens either in the
     // CollectionCommandRunner below or in the catch when the runner would not execute.

Review comment:
       Fix this
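       A hand-wrapped alternative (sketch; exact wrap points are the author's call):

    // Following the call below returning true, we must eventually cancel or complete the task.
    // Happens either in the CollectionCommandRunner below or in the catch when the runner would
    // not execute.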

##########
File path: solr/core/src/java/org/apache/solr/cloud/overseer/ZkStateWriter.java
##########
@@ -238,26 +252,37 @@ public ClusterState writePendingUpdates(Map<String, ZkWriteCommand> updates) thr
                 log.debug("going to update_collection {} version: {}", path, c.getZNodeVersion());
               }
               Stat stat = reader.getZkClient().setData(path, data, c.getZNodeVersion(), true);
-              DocCollection newCollection = new DocCollection(name, c.getSlicesMap(), c.getProperties(), c.getRouter(), stat.getVersion());
+              DocCollection newCollection =
+                  new DocCollection(
+                      name, c.getSlicesMap(), c.getProperties(), c.getRouter(), stat.getVersion());
               clusterState = clusterState.copyWith(name, newCollection);
             } else {
               log.debug("going to create_collection {}", path);
               reader.getZkClient().create(path, data, CreateMode.PERSISTENT, true);
-              DocCollection newCollection = new DocCollection(name, c.getSlicesMap(), c.getProperties(), c.getRouter(), 0);
+              DocCollection newCollection =
+                  new DocCollection(name, c.getSlicesMap(), c.getProperties(), c.getRouter(), 0);
               clusterState = clusterState.copyWith(name, newCollection);
             }
           }
 
-          // When dealing with a per replica collection that did not do any update to the per replica states znodes but did
-          // update state.json, we add then remove a dummy node to change the cversion of the parent znode.
-          // This is not needed by Solr, there's no code watching the children and not watching the state.json node itself.
-          // It would be useful for external code watching the collection's Zookeeper state.json node children but not the node itself.
+          // When dealing with a per replica collection that did not do any update to the per
+          // replica states znodes but did
+          // update state.json, we add then remove a dummy node to change the cversion of the parent
+          // znode.
+          // This is not needed by Solr, there's no code watching the children and not watching the
+          // state.json node itself.
+          // It would be useful for external code watching the collection's Zookeeper state.json
+          // node children but not the node itself.

Review comment:
       Fix this
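       One way the whole comment block could be reflowed by hand (a sketch under the same ~100-column assumption):

          // When dealing with a per replica collection that did not do any update to the per
          // replica states znodes but did update state.json, we add then remove a dummy node to
          // change the cversion of the parent znode. This is not needed by Solr, there's no code
          // watching the children and not watching the state.json node itself. It would be useful
          // for external code watching the collection's Zookeeper state.json node children but
          // not the node itself.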

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DimensionalRoutedAlias.java
##########
@@ -98,11 +105,15 @@ public boolean updateParsedCollectionAliases(ZkStateReader zkStateReader, boolea
   public String computeInitialCollectionName() {
     StringBuilder sb = new StringBuilder(getAliasName());
     for (RoutedAlias dimension : dimensions) {
-      // N. B. getAliasName is generally safe as a regex because it must conform to collection naming rules
-      // and those rules exclude regex special characters. A malicious request might do something expensive, but
-      // if you have malicious users able to run admin commands and create aliases, it is very likely that you have
+      // N. B. getAliasName is generally safe as a regex because it must conform to collection
+      // naming rules
+      // and those rules exclude regex special characters. A malicious request might do something
+      // expensive, but
+      // if you have malicious users able to run admin commands and create aliases, it is very
+      // likely that you have
       // much bigger problems than an expensive regex.

Review comment:
       Fix this
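       A possible manual reflow (sketch only):

      // N. B. getAliasName is generally safe as a regex because it must conform to collection
      // naming rules and those rules exclude regex special characters. A malicious request might
      // do something expensive, but if you have malicious users able to run admin commands and
      // create aliases, it is very likely that you have much bigger problems than an expensive
      // regex.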

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteSnapshotCmd.java
##########
@@ -149,22 +158,38 @@ public void call(ClusterState state, ZkNodeProps message, NamedList<Object> resu
         }
       }
 
-      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to figure out
+      // Update the ZK meta-data to include only cores with the snapshot. This will enable users to
+      // figure out
       // which cores still contain the named snapshot.

Review comment:
       Fix this
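       A hand-wrapped version might read (sketch):

      // Update the ZK meta-data to include only cores with the snapshot. This will enable users
      // to figure out which cores still contain the named snapshot.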

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -110,132 +109,176 @@ public DistributedCollectionConfigSetCommandRunner(CoreContainer coreContainer)
     this.coreContainer = coreContainer;
 
     if (log.isInfoEnabled()) {
-      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer is started regardless
+      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer
+      // is started regardless
       // of how Collection API is handled, so it doesn't really know...
-      log.info("Creating DistributedCollectionConfigSetCommandRunner. Collection and ConfigSet APIs are running distributed (not Overseer based)");
+      log.info(
+          "Creating DistributedCollectionConfigSetCommandRunner. Collection and ConfigSet APIs are running distributed (not Overseer based)");
     }
 
     // TODO we should look at how everything is getting closed when the node is shutdown.
-    //  But it seems that CollectionsHandler (that creates instances of this class) is not really closed, so maybe it doesn't matter?
-    // With distributed Collection API execution, each node will have such an executor but given how thread pools work,
+    //  But it seems that CollectionsHandler (that creates instances of this class) is not really
+    // closed, so maybe it doesn't matter?
+    // With distributed Collection API execution, each node will have such an executor but given how
+    // thread pools work,
     // threads will only be created if needed (including the corePoolSize threads).

Review comment:
       Fix this
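       One possible manual reflow of the TODO block (sketch; assumes the ~100-column limit):

    // TODO we should look at how everything is getting closed when the node is shutdown.
    //  But it seems that CollectionsHandler (that creates instances of this class) is not really
    //  closed, so maybe it doesn't matter?
    // With distributed Collection API execution, each node will have such an executor but given
    // how thread pools work, threads will only be created if needed (including the corePoolSize
    // threads).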

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -342,31 +397,51 @@ public OverseerSolrResponse call() {
 
       NamedList<Object> results = new NamedList<>();
       try {
-        // Create API lock for executing the command. This call is non blocking (not blocked on waiting for a lock to be acquired anyway,
+        // Create API lock for executing the command. This call is non blocking (not blocked on
+        // waiting for a lock to be acquired anyway,
         // might be blocked on access to ZK etc)
-        // We create a new CollectionApiLockFactory using a new ZkDistributedCollectionLockFactory because earlier (in the constructor of this class)
-        // the ZkStateReader was not yet available, due to how CoreContainer is built in part in the constructor and in part in its
+        // We create a new CollectionApiLockFactory using a new ZkDistributedCollectionLockFactory
+        // because earlier (in the constructor of this class)
+        // the ZkStateReader was not yet available, due to how CoreContainer is built in part in the
+        // constructor and in part in its
         // load() method. And this class is built from the CoreContainer constructor...
-        // The cost of these creations is low, and these classes do not hold state but only serve as an interface to Zookeeper.
+        // The cost of these creations is low, and these classes do not hold state but only serve as
+        // an interface to Zookeeper.
         // Note that after this call, we MUST execute the lock.release(); in the finally below

Review comment:
       Fix this
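       For reference, the comment block could be hand-wrapped roughly as follows (a sketch, not a definitive layout):

        // Create API lock for executing the command. This call is non blocking (not blocked on
        // waiting for a lock to be acquired anyway, might be blocked on access to ZK etc)
        // We create a new CollectionApiLockFactory using a new ZkDistributedCollectionLockFactory
        // because earlier (in the constructor of this class) the ZkStateReader was not yet
        // available, due to how CoreContainer is built in part in the constructor and in part in
        // its load() method. And this class is built from the CoreContainer constructor...
        // The cost of these creations is low, and these classes do not hold state but only serve
        // as an interface to Zookeeper.
        // Note that after this call, we MUST execute the lock.release(); in the finally below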

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -249,35 +292,42 @@ public OverseerSolrResponse runCollectionCommand(ZkNodeProps message, Collection
     try {
       taskFuture = commandsExecutor.submit(commandRunner);
     } catch (RejectedExecutionException ree) {
-      // The command will not run, need to cancel the async ID so it can be reused on a subsequent attempt by the client
+      // The command will not run, need to cancel the async ID so it can be reused on a subsequent
+      // attempt by the client
       asyncTaskTracker.cancelAsyncId(asyncId);
-      throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Too many executing commands", ree);
+      throw new SolrException(
+          SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Too many executing commands", ree);
     }
 
     if (asyncId == null) {
-      // Non async calls wait for a while in case the command completes. If they time out, there's no way to track the
-      // job progress (improvement suggestion: decorrelate having a task ID from the fact of waiting for the job to complete)
+      // Non async calls wait for a while in case the command completes. If they time out, there's
+      // no way to track the
+      // job progress (improvement suggestion: decorrelate having a task ID from the fact of waiting
+      // for the job to complete)

Review comment:
       Fix this
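       A possible manual reflow (sketch):

      // Non async calls wait for a while in case the command completes. If they time out, there's
      // no way to track the job progress (improvement suggestion: decorrelate having a task ID
      // from the fact of waiting for the job to complete)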

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteShardCmd.java
##########
@@ -95,15 +105,25 @@ public void call(ClusterState clusterState, ZkNodeProps message, NamedList<Objec
       propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
       ZkNodeProps m = new ZkNodeProps(propMap);
       if (ccc.getDistributedClusterStateUpdater().isDistributedStateUpdate()) {
-        // In this DeleteShardCmd.call() method there are potentially two cluster state updates. This is the first one.
-        // Even though the code of this method does not wait for it to complete, it does call the Collection API before
-        // it issues the second state change below. The collection API will be doing its own state change(s), and these will
-        // happen after this one (given it's for the same collection). Therefore we persist this state change
+        // In this DeleteShardCmd.call() method there are potentially two cluster state updates.
+        // This is the first one.
+        // Even though the code of this method does not wait for it to complete, it does call the
+        // Collection API before
+        // it issues the second state change below. The collection API will be doing its own state
+        // change(s), and these will
+        // happen after this one (given it's for the same collection). Therefore we persist this
+        // state change
         // immediately and do not group it with the one done further down.

Review comment:
       Fix this
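       One way to reflow this by hand (sketch, same ~100-column assumption):

        // In this DeleteShardCmd.call() method there are potentially two cluster state updates.
        // This is the first one. Even though the code of this method does not wait for it to
        // complete, it does call the Collection API before it issues the second state change
        // below. The collection API will be doing its own state change(s), and these will happen
        // after this one (given it's for the same collection). Therefore we persist this state
        // change immediately and do not group it with the one done further down.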

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -110,132 +109,176 @@ public DistributedCollectionConfigSetCommandRunner(CoreContainer coreContainer)
     this.coreContainer = coreContainer;
 
     if (log.isInfoEnabled()) {
-      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer is started regardless
+      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer
+      // is started regardless
       // of how Collection API is handled, so it doesn't really know...

Review comment:
       Fix this
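       A possible hand-wrapped version (sketch; it also swaps the pre-existing "is it" to "it is" - keeping the original wording is equally valid):

      // Note it is hard to print a log when Collection API is handled by Overseer because
      // Overseer is started regardless of how Collection API is handled, so it doesn't really
      // know...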

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -342,31 +397,51 @@ public OverseerSolrResponse call() {
 
       NamedList<Object> results = new NamedList<>();
       try {
-        // Create API lock for executing the command. This call is non blocking (not blocked on waiting for a lock to be acquired anyway,
+        // Create API lock for executing the command. This call is non blocking (not blocked on
+        // waiting for a lock to be acquired anyway,
         // might be blocked on access to ZK etc)
-        // We create a new CollectionApiLockFactory using a new ZkDistributedCollectionLockFactory because earlier (in the constructor of this class)
-        // the ZkStateReader was not yet available, due to how CoreContainer is built in part in the constructor and in part in its
+        // We create a new CollectionApiLockFactory using a new ZkDistributedCollectionLockFactory
+        // because earlier (in the constructor of this class)
+        // the ZkStateReader was not yet available, due to how CoreContainer is built in part in the
+        // constructor and in part in its
         // load() method. And this class is built from the CoreContainer constructor...
-        // The cost of these creations is low, and these classes do not hold state but only serve as an interface to Zookeeper.
+        // The cost of these creations is low, and these classes do not hold state but only serve as
+        // an interface to Zookeeper.
         // Note that after this call, we MUST execute the lock.release(); in the finally below
-        DistributedMultiLock lock = new CollectionApiLockFactory(new ZkDistributedCollectionLockFactory(ccc.getZkStateReader().getZkClient(), ZK_COLLECTION_LOCKS)).createCollectionApiLock(action.lockLevel, collName, shardId, replicaName);
+        DistributedMultiLock lock =
+            new CollectionApiLockFactory(
+                    new ZkDistributedCollectionLockFactory(
+                        ccc.getZkStateReader().getZkClient(), ZK_COLLECTION_LOCKS))
+                .createCollectionApiLock(action.lockLevel, collName, shardId, replicaName);
 
         try {
-          log.debug("CollectionCommandRunner about to acquire lock for action {} lock level {}. {}/{}/{}",
-              action, action.lockLevel, collName, shardId, replicaName);
+          log.debug(
+              "CollectionCommandRunner about to acquire lock for action {} lock level {}. {}/{}/{}",
+              action,
+              action.lockLevel,
+              collName,
+              shardId,
+              replicaName);
 
           // Block this thread until all required locks are acquired.
           lock.waitUntilAcquired();
 
-          // Got the lock so moving from submitted to running if we run for an async task (if asyncId is null the asyncTaskTracker
+          // Got the lock so moving from submitted to running if we run for an async task (if
+          // asyncId is null the asyncTaskTracker
           // calls do nothing).

Review comment:
       Fix this
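       A possible manual reflow (sketch):

          // Got the lock so moving from submitted to running if we run for an async task (if
          // asyncId is null the asyncTaskTracker calls do nothing).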

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java
##########
@@ -51,107 +50,139 @@
   }
 
   /**
-   * Invokes this command from the client.  If there's a problem it will throw an exception.
-   * Please note that is important to never add async to this invocation. This method must
-   * block (up to the standard OCP timeout) to prevent large batches of add's from sending a message
-   * to the overseer for every document added in RoutedAliasUpdateProcessor.
+   * Invokes this command from the client. If there's a problem it will throw an exception. Please
+   * note that is important to never add async to this invocation. This method must block (up to the
+   * standard OCP timeout) to prevent large batches of add's from sending a message to the overseer
+   * for every document added in RoutedAliasUpdateProcessor.
    */
   static void remoteInvoke(CollectionsHandler collHandler, String aliasName, String targetCol)
       throws Exception {
-    final CollectionParams.CollectionAction maintainroutedalias = CollectionParams.CollectionAction.MAINTAINROUTEDALIAS;
+    final CollectionParams.CollectionAction maintainroutedalias =
+        CollectionParams.CollectionAction.MAINTAINROUTEDALIAS;
     Map<String, Object> msg = new HashMap<>();
     msg.put(Overseer.QUEUE_OPERATION, maintainroutedalias.toLower());
     msg.put(CollectionParams.NAME, aliasName);
     msg.put(MaintainRoutedAliasCmd.ROUTED_ALIAS_TARGET_COL, targetCol);
-    final SolrResponse rsp = collHandler.submitCollectionApiCommand(new ZkNodeProps(msg), maintainroutedalias);
+    final SolrResponse rsp =
+        collHandler.submitCollectionApiCommand(new ZkNodeProps(msg), maintainroutedalias);
     if (rsp.getException() != null) {
       throw rsp.getException();
     }
   }
 
-  void addCollectionToAlias(String aliasName, ZkStateReader.AliasesManager aliasesManager, String createCollName) {
-    aliasesManager.applyModificationAndExportToZk(curAliases -> {
-      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
-      if (curTargetCollections.contains(createCollName)) {
-        return curAliases;
-      } else {
-        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
-        // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the first collection in a list
-        newTargetCollections.add(createCollName);
-        newTargetCollections.addAll(curTargetCollections);
-        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
-      }
-    });
+  void addCollectionToAlias(
+      String aliasName, ZkStateReader.AliasesManager aliasesManager, String createCollName) {
+    aliasesManager.applyModificationAndExportToZk(
+        curAliases -> {
+          final List<String> curTargetCollections =
+              curAliases.getCollectionAliasListMap().get(aliasName);
+          if (curTargetCollections.contains(createCollName)) {
+            return curAliases;
+          } else {
+            List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
+            // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the
+            // first collection in a list
+            newTargetCollections.add(createCollName);
+            newTargetCollections.addAll(curTargetCollections);
+            return curAliases.cloneWithCollectionAlias(
+                aliasName, StrUtils.join(newTargetCollections, ','));
+          }
+        });
   }
 
-  private void removeCollectionFromAlias(String aliasName, ZkStateReader.AliasesManager aliasesManager, String createCollName) {
-    aliasesManager.applyModificationAndExportToZk(curAliases -> {
-      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
-      if (curTargetCollections.contains(createCollName)) {
-        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size());
-        newTargetCollections.addAll(curTargetCollections);
-        newTargetCollections.remove(createCollName);
-        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
-      } else {
-        return curAliases;
-      }
-    });
+  private void removeCollectionFromAlias(
+      String aliasName, ZkStateReader.AliasesManager aliasesManager, String createCollName) {
+    aliasesManager.applyModificationAndExportToZk(
+        curAliases -> {
+          final List<String> curTargetCollections =
+              curAliases.getCollectionAliasListMap().get(aliasName);
+          if (curTargetCollections.contains(createCollName)) {
+            List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size());
+            newTargetCollections.addAll(curTargetCollections);
+            newTargetCollections.remove(createCollName);
+            return curAliases.cloneWithCollectionAlias(
+                aliasName, StrUtils.join(newTargetCollections, ','));
+          } else {
+            return curAliases;
+          }
+        });
   }
 
   @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList<Object> results) throws Exception {
-    //---- PARSE PRIMARY MESSAGE PARAMS
-    // important that we use NAME for the alias as that is what the Overseer will get a lock on before calling us
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList<Object> results)
+      throws Exception {
+    // ---- PARSE PRIMARY MESSAGE PARAMS
+    // important that we use NAME for the alias as that is what the Overseer will get a lock on
+    // before calling us
     final String aliasName = message.getStr(NAME);
     final String routeValue = message.getStr(ROUTED_ALIAS_TARGET_COL);
 
     final ZkStateReader.AliasesManager aliasesManager = ccc.getZkStateReader().aliasesManager;
     final Aliases aliases = aliasesManager.getAliases();
     final Map<String, String> aliasMetadata = aliases.getCollectionAliasProperties(aliasName);
     if (aliasMetadata.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Alias " + aliasName + " does not exist or is not a routed alias."); // if it did exist, we'd have a non-null map
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "Alias "
+              + aliasName
+              + " does not exist or is not a routed alias."); // if it did exist, we'd have a
+      // non-null map

Review comment:
       Fix this: the trailing comment is split, leaving "// non-null map" stranded below the statement at the wrong indentation. Rejoin it or move it onto its own line.
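       One way to avoid the stranded fragment, as a suggestion only, is to hoist the comment above the statement:

      // if it did exist, we'd have a non-null map
      throw new SolrException(
          SolrException.ErrorCode.BAD_REQUEST,
          "Alias " + aliasName + " does not exist or is not a routed alias.");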

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
##########
@@ -163,26 +168,38 @@ private RestoreContext(ZkNodeProps message, CollectionCommandContext ccc) throws
       this.container = ccc.getCoreContainer();
       this.repository = this.container.newBackupRepository(repo);
 
-      this.location = repository.createDirectoryURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
+      this.location =
+          repository.createDirectoryURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
       final URI backupNameUri = repository.resolveDirectory(location, backupName);
       final String[] entries = repository.listAll(backupNameUri);
-      final boolean incremental = ! Arrays.stream(entries).anyMatch(entry -> entry.equals(BackupManager.TRADITIONAL_BACKUP_PROPS_FILE));
-      this.backupPath = (incremental) ?
-              repository.resolveDirectory(backupNameUri, entries[0]) : // incremental backups have an extra path component representing the backed up collection
+      final boolean incremental =
+          !Arrays.stream(entries)
+              .anyMatch(entry -> entry.equals(BackupManager.TRADITIONAL_BACKUP_PROPS_FILE));
+      this.backupPath =
+          (incremental)
+              ? repository.resolveDirectory(backupNameUri, entries[0])
+              : // incremental backups have an extra path component representing the backed up
+              // collection
               backupNameUri;

Review comment:
       Fix this: the comment about the extra path component is wedged into the ternary and split, leaving "// collection" stranded. Move it onto its own line above the assignment.
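       A possible shape, as a suggestion only, hoisting the comment above the assignment:

      // incremental backups have an extra path component representing the backed up collection
      this.backupPath =
          incremental ? repository.resolveDirectory(backupNameUri, entries[0]) : backupNameUri;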

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -219,7 +239,8 @@ private AffinityPlacementPlugin(long minimalFreeDiskGB, long prioritizedFreeDisk
           .requestNodeMetric(NodeMetricImpl.FREE_DISK_GB);
       attributeFetcher.fetchFrom(allNodes);
       final AttributeValues attrValues = attributeFetcher.fetchAttributes();
-      // Get the number of currently existing cores per node, so we can update as we place new cores to not end up
+      // Get the number of currently existing cores per node, so we can update as we place new cores
+      // to not end up
       // always selecting the same node(s). This is used across placement requests

Review comment:
       Fix this: "to not end up" is stranded on its own line in the middle of the sentence. Rewrap the comment so it reads continuously.
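       A possible rewrap, as a suggestion only:

      // Get the number of currently existing cores per node, so we can update as we place new
      // cores to not end up always selecting the same node(s). This is used across placement
      // requests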

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
##########
@@ -274,44 +309,56 @@ private void validate(DocCollection backupCollectionState, int availableNodeCoun
       assert totalReplicasPerShard > 0;
     }
 
-    private void uploadConfig(String configName, String restoreConfigName, BackupManager backupMgr, ConfigSetService configSetService) throws IOException {
+    private void uploadConfig(
+        String configName,
+        String restoreConfigName,
+        BackupManager backupMgr,
+        ConfigSetService configSetService)
+        throws IOException {
       if (configSetService.checkConfigExists(restoreConfigName)) {
         log.info("Using existing config {}", restoreConfigName);
-        //TODO add overwrite option?
+        // TODO add overwrite option?
       } else {
         log.info("Uploading config {}", restoreConfigName);
 
         backupMgr.uploadConfigDir(configName, restoreConfigName, configSetService);
       }
     }
 
-    private void createCoreLessCollection(String restoreCollectionName,
-                                          String restoreConfigName,
-                                          DocCollection backupCollectionState,
-                                          ClusterState clusterState) throws Exception {
+    private void createCoreLessCollection(
+        String restoreCollectionName,
+        String restoreConfigName,
+        DocCollection backupCollectionState,
+        ClusterState clusterState)
+        throws Exception {
       Map<String, Object> propMap = new HashMap<>();
       propMap.put(Overseer.QUEUE_OPERATION, CREATE.toString());
-      propMap.put("fromApi", "true"); // mostly true.  Prevents autoCreated=true in the collection state.
+      propMap.put(
+          "fromApi", "true"); // mostly true.  Prevents autoCreated=true in the collection state.

Review comment:
       Fix this: the put() call is broken across two lines only to make room for the trailing comment. Consider moving the comment to its own line so the statement fits on one line.
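       For example (a suggestion only):

      // mostly true. Prevents autoCreated=true in the collection state.
      propMap.put("fromApi", "true");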

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerStatusCmd.java
##########
@@ -147,10 +162,13 @@ public OverseerStatusCmd(CollectionCommandContext ccc) {
   }
 
   @Override
-  public void call(ClusterState state, ZkNodeProps message, NamedList<Object> results) throws Exception {
-    // If Collection API execution is distributed, we're not running on the Overseer node so can't return any Overseer stats.
+  public void call(ClusterState state, ZkNodeProps message, NamedList<Object> results)
+      throws Exception {
+    // If Collection API execution is distributed, we're not running on the Overseer node so can't
+    // return any Overseer stats.
     if (ccc.getCoreContainer().getDistributedCollectionCommandRunner().isPresent()) {
-      // TODO: introduce a per node status command allowing insight into how Cluster state updates, Collection API and
+      // TODO: introduce a per node status command allowing insight into how Cluster state updates,
+      // Collection API and
       //  config set API execution went on that node...

Review comment:
       Fix this: the TODO is split mid-sentence, leaving "Collection API and" dangling. Rewrap it.
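       A possible rewrap of the TODO, as a suggestion only:

      // TODO: introduce a per node status command allowing insight into how Cluster state
      //  updates, Collection API and config set API execution went on that node...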

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
##########
@@ -321,13 +368,19 @@ private void createCoreLessCollection(String restoreCollectionName,
         propMap.put(CollectionHandlingUtils.SHARDS_PROP, StrUtils.join(sliceNames, ','));
       } else {
         propMap.put(CollectionHandlingUtils.NUM_SLICES, sliceNames.size());
-        // ClusterStateMutator.createCollection detects that "slices" is in fact a slice structure instead of a
+        // ClusterStateMutator.createCollection detects that "slices" is in fact a slice structure
+        // instead of a
         //   list of names, and if so uses this instead of building it.  We clear the replica list.

Review comment:
       Fix this: "instead of a" is stranded on its own line mid-sentence. Rejoin the comment.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/MaintainRoutedAliasCmd.java
##########
@@ -51,107 +50,139 @@
   }
 
   /**
-   * Invokes this command from the client.  If there's a problem it will throw an exception.
-   * Please note that is important to never add async to this invocation. This method must
-   * block (up to the standard OCP timeout) to prevent large batches of add's from sending a message
-   * to the overseer for every document added in RoutedAliasUpdateProcessor.
+   * Invokes this command from the client. If there's a problem it will throw an exception. Please
+   * note that is important to never add async to this invocation. This method must block (up to the
+   * standard OCP timeout) to prevent large batches of add's from sending a message to the overseer
+   * for every document added in RoutedAliasUpdateProcessor.
    */
   static void remoteInvoke(CollectionsHandler collHandler, String aliasName, String targetCol)
       throws Exception {
-    final CollectionParams.CollectionAction maintainroutedalias = CollectionParams.CollectionAction.MAINTAINROUTEDALIAS;
+    final CollectionParams.CollectionAction maintainroutedalias =
+        CollectionParams.CollectionAction.MAINTAINROUTEDALIAS;
     Map<String, Object> msg = new HashMap<>();
     msg.put(Overseer.QUEUE_OPERATION, maintainroutedalias.toLower());
     msg.put(CollectionParams.NAME, aliasName);
     msg.put(MaintainRoutedAliasCmd.ROUTED_ALIAS_TARGET_COL, targetCol);
-    final SolrResponse rsp = collHandler.submitCollectionApiCommand(new ZkNodeProps(msg), maintainroutedalias);
+    final SolrResponse rsp =
+        collHandler.submitCollectionApiCommand(new ZkNodeProps(msg), maintainroutedalias);
     if (rsp.getException() != null) {
       throw rsp.getException();
     }
   }
 
-  void addCollectionToAlias(String aliasName, ZkStateReader.AliasesManager aliasesManager, String createCollName) {
-    aliasesManager.applyModificationAndExportToZk(curAliases -> {
-      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
-      if (curTargetCollections.contains(createCollName)) {
-        return curAliases;
-      } else {
-        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
-        // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the first collection in a list
-        newTargetCollections.add(createCollName);
-        newTargetCollections.addAll(curTargetCollections);
-        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
-      }
-    });
+  void addCollectionToAlias(
+      String aliasName, ZkStateReader.AliasesManager aliasesManager, String createCollName) {
+    aliasesManager.applyModificationAndExportToZk(
+        curAliases -> {
+          final List<String> curTargetCollections =
+              curAliases.getCollectionAliasListMap().get(aliasName);
+          if (curTargetCollections.contains(createCollName)) {
+            return curAliases;
+          } else {
+            List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size() + 1);
+            // prepend it on purpose (thus reverse sorted). Solr alias resolution defaults to the
+            // first collection in a list
+            newTargetCollections.add(createCollName);
+            newTargetCollections.addAll(curTargetCollections);
+            return curAliases.cloneWithCollectionAlias(
+                aliasName, StrUtils.join(newTargetCollections, ','));
+          }
+        });
   }
 
-  private void removeCollectionFromAlias(String aliasName, ZkStateReader.AliasesManager aliasesManager, String createCollName) {
-    aliasesManager.applyModificationAndExportToZk(curAliases -> {
-      final List<String> curTargetCollections = curAliases.getCollectionAliasListMap().get(aliasName);
-      if (curTargetCollections.contains(createCollName)) {
-        List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size());
-        newTargetCollections.addAll(curTargetCollections);
-        newTargetCollections.remove(createCollName);
-        return curAliases.cloneWithCollectionAlias(aliasName, StrUtils.join(newTargetCollections, ','));
-      } else {
-        return curAliases;
-      }
-    });
+  private void removeCollectionFromAlias(
+      String aliasName, ZkStateReader.AliasesManager aliasesManager, String createCollName) {
+    aliasesManager.applyModificationAndExportToZk(
+        curAliases -> {
+          final List<String> curTargetCollections =
+              curAliases.getCollectionAliasListMap().get(aliasName);
+          if (curTargetCollections.contains(createCollName)) {
+            List<String> newTargetCollections = new ArrayList<>(curTargetCollections.size());
+            newTargetCollections.addAll(curTargetCollections);
+            newTargetCollections.remove(createCollName);
+            return curAliases.cloneWithCollectionAlias(
+                aliasName, StrUtils.join(newTargetCollections, ','));
+          } else {
+            return curAliases;
+          }
+        });
   }
 
   @Override
-  public void call(ClusterState clusterState, ZkNodeProps message, NamedList<Object> results) throws Exception {
-    //---- PARSE PRIMARY MESSAGE PARAMS
-    // important that we use NAME for the alias as that is what the Overseer will get a lock on before calling us
+  public void call(ClusterState clusterState, ZkNodeProps message, NamedList<Object> results)
+      throws Exception {
+    // ---- PARSE PRIMARY MESSAGE PARAMS
+    // important that we use NAME for the alias as that is what the Overseer will get a lock on
+    // before calling us
     final String aliasName = message.getStr(NAME);
     final String routeValue = message.getStr(ROUTED_ALIAS_TARGET_COL);
 
     final ZkStateReader.AliasesManager aliasesManager = ccc.getZkStateReader().aliasesManager;
     final Aliases aliases = aliasesManager.getAliases();
     final Map<String, String> aliasMetadata = aliases.getCollectionAliasProperties(aliasName);
     if (aliasMetadata.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Alias " + aliasName + " does not exist or is not a routed alias."); // if it did exist, we'd have a non-null map
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "Alias "
+              + aliasName
+              + " does not exist or is not a routed alias."); // if it did exist, we'd have a
+      // non-null map
     }
     final RoutedAlias ra = RoutedAlias.fromProps(aliasName, aliasMetadata);
     if (ra == null) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "MaintainRoutedAlias called on non-routed alias");
+      throw new SolrException(
+          SolrException.ErrorCode.SERVER_ERROR, "MaintainRoutedAlias called on non-routed alias");
     }
 
     ra.updateParsedCollectionAliases(ccc.getZkStateReader(), true);
     List<RoutedAlias.Action> actions = ra.calculateActions(routeValue);
     for (RoutedAlias.Action action : actions) {
-      boolean exists = ccc.getZkStateReader().getClusterState().getCollectionOrNull(action.targetCollection) != null;
+      boolean exists =
+          ccc.getZkStateReader().getClusterState().getCollectionOrNull(action.targetCollection)
+              != null;
       switch (action.actionType) {
         case ENSURE_REMOVED:
           if (exists) {
-            ccc.getExecutorService().submit(() -> {
-              try {
-                deleteTargetCollection(clusterState, results, aliasName, aliasesManager, action);
-              } catch (Exception e) {
-                log.warn("Deletion of {} by {} {} failed (this might be ok if two clients were"
-                    , action.targetCollection, ra.getAliasName()
-                    , " writing to a routed alias at the same time and both caused a deletion)");
-                log.debug("Exception for last message:", e);
-              }
-            });
+            ccc.getExecutorService()
+                .submit(
+                    () -> {
+                      try {
+                        deleteTargetCollection(
+                            clusterState, results, aliasName, aliasesManager, action);
+                      } catch (Exception e) {
+                        log.warn(
+                            "Deletion of {} by {} {} failed (this might be ok if two clients were",
+                            action.targetCollection,
+                            ra.getAliasName(),
+                            " writing to a routed alias at the same time and both caused a deletion)");
+                        log.debug("Exception for last message:", e);
+                      }
+                    });
           }
           break;
         case ENSURE_EXISTS:
           if (!exists) {
-            addTargetCollection(clusterState, results, aliasName, aliasesManager, aliasMetadata, action);
+            addTargetCollection(
+                clusterState, results, aliasName, aliasesManager, aliasMetadata, action);
           } else {
             // check that the collection is properly integrated into the alias (see
-            // TimeRoutedAliasUpdateProcessorTest.java:141). Presently we need to ensure inclusion in the alias
-            // and the presence of the appropriate collection property. Note that this only works if the collection
-            // happens to fall where we would have created one already. Support for un-even collection sizes will
-            // take additional work (though presently they might work if the below book keeping is done by hand)
+            // TimeRoutedAliasUpdateProcessorTest.java:141). Presently we need to ensure inclusion
+            // in the alias
+            // and the presence of the appropriate collection property. Note that this only works if
+            // the collection
+            // happens to fall where we would have created one already. Support for un-even
+            // collection sizes will
+            // take additional work (though presently they might work if the below book keeping is
+            // done by hand)

Review comment:
       Fix this: the rewrapped comment leaves several dangling fragments ("in the alias", "the collection", "collection sizes will", "done by hand)"). Rewrap the block so lines break at phrase boundaries.
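       A possible rewrap, as a suggestion only (indentation approximate):

            // check that the collection is properly integrated into the alias (see
            // TimeRoutedAliasUpdateProcessorTest.java:141). Presently we need to ensure inclusion
            // in the alias and the presence of the appropriate collection property. Note that this
            // only works if the collection happens to fall where we would have created one
            // already. Support for un-even collection sizes will take additional work (though
            // presently they might work if the below book keeping is done by hand)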

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java
##########
@@ -66,21 +66,37 @@ public String toString() {
   }
 
   /**
-   * Translates a set of {@link ReplicaPlacement} returned by a plugin into a list of {@link ReplicaPosition} expected
-   * by {@link org.apache.solr.cloud.api.collections.Assign.AssignStrategy}
+   * Translates a set of {@link ReplicaPlacement} returned by a plugin into a list of {@link
+   * ReplicaPosition} expected by {@link
+   * org.apache.solr.cloud.api.collections.Assign.AssignStrategy}
    */
-  static List<ReplicaPosition> toReplicaPositions(String collection, Set<ReplicaPlacement> replicaPlacementSet) {
-    // The replica index in ReplicaPosition is not as strict a concept as it might seem. It is used in rules
-    // based placement (for sorting replicas) but its presence in ReplicaPosition is not justified (and when the code
+  static List<ReplicaPosition> toReplicaPositions(
+      String collection, Set<ReplicaPlacement> replicaPlacementSet) {
+    // The replica index in ReplicaPosition is not as strict a concept as it might seem. It is used
+    // in rules
+    // based placement (for sorting replicas) but its presence in ReplicaPosition is not justified
+    // (and when the code
     // is executing here, it means rules based placement is not used).
-    // Looking at ReplicaAssigner.tryAllPermutations, it is well possible to create replicas with same index
-    // living on a given node for the same shard. This likely never happens because of the way replicas are
-    // placed on nodes (never two on the same node for same shard). Adopting the same shortcut/bad design here,
+    // Looking at ReplicaAssigner.tryAllPermutations, it is well possible to create replicas with
+    // same index
+    // living on a given node for the same shard. This likely never happens because of the way
+    // replicas are
+    // placed on nodes (never two on the same node for same shard). Adopting the same shortcut/bad
+    // design here,
     // but index should be removed at some point from ReplicaPosition.

Review comment:
       Fix this: the comment block now has dangling fragments ("in rules", "(and when the code", "same index", "replicas are", "design here,"). Rewrap it so each line is a full phrase.
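       A possible rewrap, as a suggestion only:

    // The replica index in ReplicaPosition is not as strict a concept as it might seem. It is
    // used in rules based placement (for sorting replicas) but its presence in ReplicaPosition is
    // not justified (and when the code is executing here, it means rules based placement is not
    // used).
    // Looking at ReplicaAssigner.tryAllPermutations, it is well possible to create replicas with
    // same index living on a given node for the same shard. This likely never happens because of
    // the way replicas are placed on nodes (never two on the same node for same shard). Adopting
    // the same shortcut/bad design here, but index should be removed at some point from
    // ReplicaPosition.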

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAlias.java
##########
@@ -96,28 +105,33 @@ public static RoutedAlias fromProps(String aliasName, Map<String, String> props)
       String[] types = typeStr.split(",");
       java.util.List<String> fields = new ArrayList<>();
       if (types.length > 2) {
-        throw new SolrException(BAD_REQUEST,"More than 2 dimensions is not supported yet. " +
-            "Please monitor SOLR-13628 for progress");
+        throw new SolrException(
+            BAD_REQUEST,
+            "More than 2 dimensions is not supported yet. "
+                + "Please monitor SOLR-13628 for progress");
       }
       for (int i = 0; i < types.length; i++) {
         String type = types[i];
         addRouterTypeOf(type, routerTypes);
 
-        // v2 api case - the v2 -> v1 mapping mechanisms can't handle this conversion because they expect
+        // v2 api case - the v2 -> v1 mapping mechanisms can't handle this conversion because they
+        // expect
         // strings or arrays of strings, not lists of objects.
         // TODO: The generic types for props are a lie
         if (props.containsKey("router.routerList")) {
           HashMap<String, Object> tmp = new HashMap<>(props);
           @SuppressWarnings({"unchecked"})
-          List<Map<String, Object>> v2RouterList = (List<Map<String, Object>>) tmp.get("router.routerList");
+          List<Map<String, Object>> v2RouterList =
+              (List<Map<String, Object>>) tmp.get("router.routerList");
           Map<String, Object> o = v2RouterList.get(i);
           for (Map.Entry<String, Object> entry : o.entrySet()) {
             props.put(ROUTER_PREFIX + i + "." + entry.getKey(), String.valueOf(entry.getValue()));
           }
         }
         // Here we need to push the type into each dimension's params. We could have eschewed the
         // "Dimensional[dim1,dim2]" style notation, to simplify this case but I think it's nice
-        // to be able to understand the dimensionality at a glance without having to hunt for name properties
+        // to be able to understand the dimensionality at a glance without having to hunt for name
+        // properties
         // in the list of properties for each dimension.

Review comment:
       Fix this: "properties" is stranded on its own line mid-sentence. Rejoin the comment.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAlias.java
##########
@@ -45,44 +48,50 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
-import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
-import static org.apache.solr.common.params.CollectionAdminParams.ROUTER_PREFIX;
-
 public abstract class RoutedAlias {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-
   @SuppressWarnings("WeakerAccess")
   public static final String ROUTER_TYPE_NAME = ROUTER_PREFIX + "name";
+
   @SuppressWarnings("WeakerAccess")
   public static final String ROUTER_FIELD = ROUTER_PREFIX + "field";
+
   public static final String CREATE_COLLECTION_PREFIX = "create-collection.";
+
   @SuppressWarnings("WeakerAccess")
-  public static final Set<String> MINIMAL_REQUIRED_PARAMS = Sets.newHashSet(ROUTER_TYPE_NAME, ROUTER_FIELD);
+  public static final Set<String> MINIMAL_REQUIRED_PARAMS =
+      Sets.newHashSet(ROUTER_TYPE_NAME, ROUTER_FIELD);
+
   public static final String ROUTED_ALIAS_NAME_CORE_PROP = "routedAliasName"; // core prop
   private static final String DIMENSIONAL = "Dimensional[";
 
-  // This class is created once per request and the overseer methods prevent duplicate create requests
-  // from creating extra copies via locking on the alias name. All we need to track here is that we don't
+  // This class is created once per request and the overseer methods prevent duplicate create
+  // requests
+  // from creating extra copies via locking on the alias name. All we need to track here is that we
+  // don't
   // spam preemptive creates to the overseer multiple times from *this* request.

Review comment:
       Fix this: "requests" and "don't" are stranded on their own lines. Rewrap the comment so it reads as complete sentences.
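       A possible rewrap, as a suggestion only:

  // This class is created once per request and the overseer methods prevent duplicate create
  // requests from creating extra copies via locking on the alias name. All we need to track here
  // is that we don't spam preemptive creates to the overseer multiple times from *this* request.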

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java
##########
@@ -66,21 +66,37 @@ public String toString() {
   }
 
   /**
-   * Translates a set of {@link ReplicaPlacement} returned by a plugin into a list of {@link ReplicaPosition} expected
-   * by {@link org.apache.solr.cloud.api.collections.Assign.AssignStrategy}
+   * Translates a set of {@link ReplicaPlacement} returned by a plugin into a list of {@link
+   * ReplicaPosition} expected by {@link
+   * org.apache.solr.cloud.api.collections.Assign.AssignStrategy}
    */
-  static List<ReplicaPosition> toReplicaPositions(String collection, Set<ReplicaPlacement> replicaPlacementSet) {
-    // The replica index in ReplicaPosition is not as strict a concept as it might seem. It is used in rules
-    // based placement (for sorting replicas) but its presence in ReplicaPosition is not justified (and when the code
+  static List<ReplicaPosition> toReplicaPositions(
+      String collection, Set<ReplicaPlacement> replicaPlacementSet) {
+    // The replica index in ReplicaPosition is not as strict a concept as it might seem. It is used
+    // in rules
+    // based placement (for sorting replicas) but its presence in ReplicaPosition is not justified
+    // (and when the code
     // is executing here, it means rules based placement is not used).
-    // Looking at ReplicaAssigner.tryAllPermutations, it is well possible to create replicas with same index
-    // living on a given node for the same shard. This likely never happens because of the way replicas are
-    // placed on nodes (never two on the same node for same shard). Adopting the same shortcut/bad design here,
+    // Looking at ReplicaAssigner.tryAllPermutations, it is well possible to create replicas with
+    // same index
+    // living on a given node for the same shard. This likely never happens because of the way
+    // replicas are
+    // placed on nodes (never two on the same node for same shard). Adopting the same shortcut/bad
+    // design here,
     // but index should be removed at some point from ReplicaPosition.
     List<ReplicaPosition> replicaPositions = new ArrayList<>(replicaPlacementSet.size());
-    int index = 0; // This really an arbitrary value when adding replicas and a possible source of core name collisions
+    int index =
+        0; // This really an arbitrary value when adding replicas and a possible source of core name
+    // collisions

Review comment:
       Fix this: the assignment is split across lines and the trailing comment leaves "// collisions" stranded at the wrong indentation. Move the comment above the assignment.
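       For example (a suggestion only):

    // This really an arbitrary value when adding replicas and a possible source of core name
    // collisions
    int index = 0;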

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAlias.java
##########
@@ -127,8 +141,10 @@ public static RoutedAlias fromProps(String aliasName, Map<String, String> props)
         }
         fields.add(props.get(ROUTER_PREFIX + i + ".field"));
       }
-      // this next remove is checked for key because when we build from aliases.json's data it we get an
-      // immutable map which would cause  UnsupportedOperationException to be thrown. This remove is here
+      // this next remove is checked for key because when we build from aliases.json's data it we
+      // get an
+      // immutable map which would cause  UnsupportedOperationException to be thrown. This remove is
+      // here
       // to prevent this property from making it into aliases.json

Review comment:
       Fix this: "get an" and "here" are stranded mid-sentence. Rewrap the comment.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAlias.java
##########
@@ -96,28 +105,33 @@ public static RoutedAlias fromProps(String aliasName, Map<String, String> props)
       String[] types = typeStr.split(",");
       java.util.List<String> fields = new ArrayList<>();
       if (types.length > 2) {
-        throw new SolrException(BAD_REQUEST,"More than 2 dimensions is not supported yet. " +
-            "Please monitor SOLR-13628 for progress");
+        throw new SolrException(
+            BAD_REQUEST,
+            "More than 2 dimensions is not supported yet. "
+                + "Please monitor SOLR-13628 for progress");
       }
       for (int i = 0; i < types.length; i++) {
         String type = types[i];
         addRouterTypeOf(type, routerTypes);
 
-        // v2 api case - the v2 -> v1 mapping mechanisms can't handle this conversion because they expect
+        // v2 api case - the v2 -> v1 mapping mechanisms can't handle this conversion because they
+        // expect
         // strings or arrays of strings, not lists of objects.

Review comment:
       Fix this: "expect" is stranded on its own line mid-sentence. Rejoin the comment.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAlias.java
##########
@@ -233,33 +267,45 @@ private static RoutedAlias routedAliasForType(String aliasName, Map<String, Stri
   public abstract void validateRouteValue(AddUpdateCommand cmd) throws SolrException;
 
   /**
-   * Create any required collections and return the name of the collection to which the current document should be sent.
+   * Create any required collections and return the name of the collection to which the current
+   * document should be sent.
    *
    * @param cmd The command that might cause collection creation
-   * @return The name of the proper destination collection for the document which may or may not be a
-   * newly created collection
+   * @return The name of the proper destination collection for the document which may or may not be
+   *     a newly created collection
    */
   public String createCollectionsIfRequired(AddUpdateCommand cmd) {
 
     // Even though it is possible that multiple requests hit this code in the 1-2 sec that
-    // it takes to create a collection, it's an established anti-pattern to feed data with a very large number
-    // of client connections. This in mind, we only guard against spamming the overseer within a batch of
-    // updates. We are intentionally tolerating a low level of redundant requests in favor of simpler code. Most
-    // super-sized installations with many update clients will likely be multi-tenant and multiple tenants
-    // probably don't write to the same alias. As such, we have deferred any solution to the "many clients causing
-    // collection creation simultaneously" problem until such time as someone actually has that problem in a
+    // it takes to create a collection, it's an established anti-pattern to feed data with a very
+    // large number
+    // of client connections. This in mind, we only guard against spamming the overseer within a
+    // batch of
+    // updates. We are intentionally tolerating a low level of redundant requests in favor of
+    // simpler code. Most
+    // super-sized installations with many update clients will likely be multi-tenant and multiple
+    // tenants
+    // probably don't write to the same alias. As such, we have deferred any solution to the "many
+    // clients causing
+    // collection creation simultaneously" problem until such time as someone actually has that
+    // problem in a
     // real world use case that isn't just an anti-pattern.

Review comment:
       Fix this: the long comment now has many short dangling fragments ("large number", "batch of", "simpler code. Most", "tenants", "clients causing", "problem in a"). Rewrap the block at phrase boundaries.
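       A possible rewrap, as a suggestion only:

    // Even though it is possible that multiple requests hit this code in the 1-2 sec that
    // it takes to create a collection, it's an established anti-pattern to feed data with a very
    // large number of client connections. This in mind, we only guard against spamming the
    // overseer within a batch of updates. We are intentionally tolerating a low level of
    // redundant requests in favor of simpler code. Most super-sized installations with many
    // update clients will likely be multi-tenant and multiple tenants probably don't write to the
    // same alias. As such, we have deferred any solution to the "many clients causing collection
    // creation simultaneously" problem until such time as someone actually has that problem in a
    // real world use case that isn't just an anti-pattern.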

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAlias.java
##########
@@ -398,10 +470,11 @@ void ensureCollection(String targetCollection, CoreContainer coreContainer) {
       //  of a race and that's okay... we'll ultimately retry any way.
 
       // Ensure our view of the aliases has updated. If we didn't do this, our zkStateReader might
-      //  not yet know about the new alias (thus won't see the newly added collection to it), and we might think
+      //  not yet know about the new alias (thus won't see the newly added collection to it), and we
+      // might think
       //  we failed.

Review comment:
       Fix this: "might think" is stranded between two halves of the sentence. Rewrap the comment.
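       A possible rewrap, as a suggestion only:

      // Ensure our view of the aliases has updated. If we didn't do this, our zkStateReader might
      //  not yet know about the new alias (thus won't see the newly added collection to it), and
      //  we might think we failed.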

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
##########
@@ -430,47 +509,71 @@ public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList<O
       }
 
       t = timings.sub("identifyNodesForReplicas");
-      Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
-          .forCollection(collectionName)
-          .forShard(subSlices)
-          .assignNrtReplicas(numNrt.get())
-          .assignTlogReplicas(numTlog.get())
-          .assignPullReplicas(numPull.get())
-          .onNodes(new ArrayList<>(clusterState.getLiveNodes()))
-          .build();
+      Assign.AssignRequest assignRequest =
+          new Assign.AssignRequestBuilder()
+              .forCollection(collectionName)
+              .forShard(subSlices)
+              .assignNrtReplicas(numNrt.get())
+              .assignTlogReplicas(numTlog.get())
+              .assignPullReplicas(numPull.get())
+              .onNodes(new ArrayList<>(clusterState.getLiveNodes()))
+              .build();
       Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(ccc.getCoreContainer());
-      List<ReplicaPosition> replicaPositions = assignStrategy.assign(ccc.getSolrCloudManager(), assignRequest);
+      List<ReplicaPosition> replicaPositions =
+          assignStrategy.assign(ccc.getSolrCloudManager(), assignRequest);
       t.stop();
 
       t = timings.sub("createReplicaPlaceholders");
       final DistributedClusterStateUpdater.StateChangeRecorder scr;
       boolean hasRecordedDistributedUpdate = false;
       if (ccc.getDistributedClusterStateUpdater().isDistributedStateUpdate()) {
-        scr = ccc.getDistributedClusterStateUpdater().createStateChangeRecorder(collectionName, false);
+        scr =
+            ccc.getDistributedClusterStateUpdater()
+                .createStateChangeRecorder(collectionName, false);
       } else {
         scr = null;
       }
       for (ReplicaPosition replicaPosition : replicaPositions) {
         String sliceName = replicaPosition.shard;
         String subShardNodeName = replicaPosition.node;
-        String solrCoreName = Assign.buildSolrCoreName(collectionName, sliceName, replicaPosition.type, replicaPosition.index);
-
-        log.debug("Creating replica shard {} as part of slice {} of collection {} on {}"
-            , solrCoreName, sliceName, collectionName, subShardNodeName);
-
-        // we first create all replicas in DOWN state without actually creating their cores in order to
-        // avoid a race condition where Overseer may prematurely activate the new sub-slices (and deactivate
-        // the parent slice) before all new replicas are added. This situation may lead to a loss of performance
+        String solrCoreName =
+            Assign.buildSolrCoreName(
+                collectionName, sliceName, replicaPosition.type, replicaPosition.index);
+
+        log.debug(
+            "Creating replica shard {} as part of slice {} of collection {} on {}",
+            solrCoreName,
+            sliceName,
+            collectionName,
+            subShardNodeName);
+
+        // we first create all replicas in DOWN state without actually creating their cores in order
+        // to
+        // avoid a race condition where Overseer may prematurely activate the new sub-slices (and
+        // deactivate
+        // the parent slice) before all new replicas are added. This situation may lead to a loss of
+        // performance
         // because the new shards will be activated with possibly many fewer replicas.

Review comment:
       Fix this: "to", "deactivate", and "performance" are stranded on their own lines. Rewrap the comment.
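       A possible rewrap, as a suggestion only:

        // we first create all replicas in DOWN state without actually creating their cores in
        // order to avoid a race condition where Overseer may prematurely activate the new
        // sub-slices (and deactivate the parent slice) before all new replicas are added. This
        // situation may lead to a loss of performance because the new shards will be activated
        // with possibly many fewer replicas.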

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
##########
@@ -495,25 +598,36 @@ public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList<O
         if (asyncId != null) {
           propMap.put(ASYNC, asyncId);
         }
-        // special flag param to instruct addReplica not to create the replica in cluster state again
+        // special flag param to instruct addReplica not to create the replica in cluster state
+        // again
         propMap.put(CollectionHandlingUtils.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, "true");
 
         propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
 
         replicas.add(propMap);
       }
-      if (hasRecordedDistributedUpdate && ccc.getDistributedClusterStateUpdater().isDistributedStateUpdate()) {
-        // Actually add the replicas to the collection state. Note that when Overseer takes care of the state,
-        // there is no wait here for the state update to be visible, but with distributed state update done synchronously
-        // we wait (we could in theory create a thread and have it do the work if we REALLY needed, but we likely don't).
+      if (hasRecordedDistributedUpdate
+          && ccc.getDistributedClusterStateUpdater().isDistributedStateUpdate()) {
+        // Actually add the replicas to the collection state. Note that when Overseer takes care of
+        // the state,
+        // there is no wait here for the state update to be visible, but with distributed state
+        // update done synchronously
+        // we wait (we could in theory create a thread and have it do the work if we REALLY needed,
+        // but we likely don't).

Review comment:
       Fix this: dangling fragments ("the state,", "update done synchronously", "but we likely don't)."). Rewrap the comment.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
##########
@@ -352,18 +373,28 @@ public void validateRouteValue(AddUpdateCommand cmd) throws SolrException {
 
     // FUTURE: maybe in some cases the user would want to ignore/warn instead?
     if (docTimestamp.isAfter(Instant.now().plusMillis(getMaxFutureMs()))) {
-      throw new SolrException(BAD_REQUEST,
-          "The document's time routed key of " + docTimestamp + " is too far in the future given " +
-              ROUTER_MAX_FUTURE + "=" + getMaxFutureMs());
+      throw new SolrException(
+          BAD_REQUEST,
+          "The document's time routed key of "
+              + docTimestamp
+              + " is too far in the future given "
+              + ROUTER_MAX_FUTURE
+              + "="
+              + getMaxFutureMs());
     }
 
-    // Although this is also checked later, we need to check it here too to handle the case in Dimensional Routed
-    // aliases where one can legally have zero collections for a newly encountered category and thus the loop later
+    // Although this is also checked later, we need to check it here too to handle the case in
+    // Dimensional Routed
+    // aliases where one can legally have zero collections for a newly encountered category and thus
+    // the loop later
     // can't catch this.
 
-    // SOLR-13760 - we need to fix the date math to a specific instant when the first document arrives.
-    // If we don't do this DRA's with a time dimension have variable start times across the other dimensions
-    // and logic gets much to complicated, and depends too much on queries to zookeeper. This keeps life simpler.
+    // SOLR-13760 - we need to fix the date math to a specific instant when the first document
+    // arrives.
+    // If we don't do this DRA's with a time dimension have variable start times across the other
+    // dimensions
+    // and logic gets much to complicated, and depends too much on queries to zookeeper. This keeps
+    // life simpler.
     // I have to admit I'm not terribly fond of the mutation during a validate method however.

Review comment:
       Fix this: both comment blocks have stranded fragments ("Dimensional Routed", "the loop later", "arrives.", "dimensions", "life simpler."). Rewrap them.
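       A possible rewrap of the two comment blocks, as a suggestion only:

    // Although this is also checked later, we need to check it here too to handle the case in
    // Dimensional Routed aliases where one can legally have zero collections for a newly
    // encountered category and thus the loop later can't catch this.

    // SOLR-13760 - we need to fix the date math to a specific instant when the first document
    // arrives. If we don't do this DRA's with a time dimension have variable start times across
    // the other dimensions and logic gets much to complicated, and depends too much on queries to
    // zookeeper. This keeps life simpler.
    // I have to admit I'm not terribly fond of the mutation during a validate method however.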

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/DistributedCollectionConfigSetCommandRunner.java
##########
@@ -110,132 +109,176 @@ public DistributedCollectionConfigSetCommandRunner(CoreContainer coreContainer)
     this.coreContainer = coreContainer;
 
     if (log.isInfoEnabled()) {
-      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer is started regardless
+      // Note is it hard to print a log when Collection API is handled by Overseer because Overseer
+      // is started regardless
       // of how Collection API is handled, so it doesn't really know...
-      log.info("Creating DistributedCollectionConfigSetCommandRunner. Collection and ConfigSet APIs are running distributed (not Overseer based)");
+      log.info(
+          "Creating DistributedCollectionConfigSetCommandRunner. Collection and ConfigSet APIs are running distributed (not Overseer based)");
     }
 
     // TODO we should look at how everything is getting closed when the node is shutdown.
-    //  But it seems that CollectionsHandler (that creates instances of this class) is not really closed, so maybe it doesn't matter?
-    // With distributed Collection API execution, each node will have such an executor but given how thread pools work,
+    //  But it seems that CollectionsHandler (that creates instances of this class) is not really
+    // closed, so maybe it doesn't matter?
+    // With distributed Collection API execution, each node will have such an executor but given how
+    // thread pools work,
     // threads will only be created if needed (including the corePoolSize threads).
-    distributedCollectionApiExecutorService = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 10, 0L, TimeUnit.MILLISECONDS,
-        new SynchronousQueue<>(),
-        new SolrNamedThreadFactory("DistributedCollectionApiExecutorService"));
-
-    commandsExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, 20, 0L, TimeUnit.MILLISECONDS,
-        new SynchronousQueue<>(),
-        new SolrNamedThreadFactory("DistributedCollectionApiCommandExecutor"));
-
-    ccc = new DistributedCollectionCommandContext(this.coreContainer, this.distributedCollectionApiExecutorService);
+    distributedCollectionApiExecutorService =
+        new ExecutorUtil.MDCAwareThreadPoolExecutor(
+            5,
+            10,
+            0L,
+            TimeUnit.MILLISECONDS,
+            new SynchronousQueue<>(),
+            new SolrNamedThreadFactory("DistributedCollectionApiExecutorService"));
+
+    commandsExecutor =
+        new ExecutorUtil.MDCAwareThreadPoolExecutor(
+            5,
+            20,
+            0L,
+            TimeUnit.MILLISECONDS,
+            new SynchronousQueue<>(),
+            new SolrNamedThreadFactory("DistributedCollectionApiCommandExecutor"));
+
+    ccc =
+        new DistributedCollectionCommandContext(
+            this.coreContainer, this.distributedCollectionApiExecutorService);
     commandMapper = new CollApiCmds.CommandMap(ccc);
-    asyncTaskTracker = new DistributedApiAsyncTracker(ccc.getZkStateReader().getZkClient(), ZK_ASYNC_ROOT);
+    asyncTaskTracker =
+        new DistributedApiAsyncTracker(ccc.getZkStateReader().getZkClient(), ZK_ASYNC_ROOT);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#getAsyncTaskRequestStatus(String)}
-   */
-  public Pair<RequestStatusState, OverseerSolrResponse> getAsyncTaskRequestStatus(String asyncId) throws Exception {
+  /** See {@link DistributedApiAsyncTracker#getAsyncTaskRequestStatus(String)} */
+  public Pair<RequestStatusState, OverseerSolrResponse> getAsyncTaskRequestStatus(String asyncId)
+      throws Exception {
     return asyncTaskTracker.getAsyncTaskRequestStatus(asyncId);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#deleteSingleAsyncId(String)}
-   */
+  /** See {@link DistributedApiAsyncTracker#deleteSingleAsyncId(String)} */
   public boolean deleteSingleAsyncId(String asyncId) throws Exception {
     return asyncTaskTracker.deleteSingleAsyncId(asyncId);
   }
 
-  /**
-   * See {@link DistributedApiAsyncTracker#deleteAllAsyncIds()}
-   */
+  /** See {@link DistributedApiAsyncTracker#deleteAllAsyncIds()} */
   public void deleteAllAsyncIds() throws Exception {
     asyncTaskTracker.deleteAllAsyncIds();
   }
 
-
   /**
-   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to overseer queue and
-   * instead calls this method, this method is expected to do the equivalent of what Overseer does in
-   * {@link org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage}.
-   * <p>
-   * The steps leading to that call in the Overseer execution path are (and the equivalent is done here):
+   * When {@link org.apache.solr.handler.admin.CollectionsHandler#invokeAction} does not enqueue to
+   * overseer queue and instead calls this method, this method is expected to do the equivalent of
+   * what Overseer does in {@link
+   * org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage}.
+   *
+   * <p>The steps leading to that call in the Overseer execution path are (and the equivalent is
+   * done here):
+   *
    * <ul>
-   * <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK queue, grabs the
-   * corresponding locks (write lock on the config set target of the API command and a read lock on the base config set
-   * if any - the case for config set creation) then executes the command using an executor service (it also checks the
-   * asyncId if any is specified but async calls are not supported for Config Set API calls).</li>
-   * <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an executor thread) a call is made to
-   * {@link org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage} which does a few checks and calls the
-   * appropriate Config Set method.
+   *   <li>{@link org.apache.solr.cloud.OverseerTaskProcessor#run()} gets the message from the ZK
+   *       queue, grabs the corresponding locks (write lock on the config set target of the API
+   *       command and a read lock on the base config set if any - the case for config set creation)
+   *       then executes the command using an executor service (it also checks the asyncId if any is
+   *       specified but async calls are not supported for Config Set API calls).
+   *   <li>In {@link org.apache.solr.cloud.OverseerTaskProcessor}.{@code Runner.run()} (run on an
+   *       executor thread) a call is made to {@link
+   *       org.apache.solr.cloud.OverseerConfigSetMessageHandler#processMessage} which does a few
+   *       checks and calls the appropriate Config Set method.
    * </ul>
    */
-  public void runConfigSetCommand(SolrQueryResponse rsp, ConfigSetsHandler.ConfigSetOperation operation, Map<String, Object> result, long timeoutMs) throws Exception {
-    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it through this method earlier).
+  public void runConfigSetCommand(
+      SolrQueryResponse rsp,
+      ConfigSetsHandler.ConfigSetOperation operation,
+      Map<String, Object> result,
+      long timeoutMs)
+      throws Exception {
+    // We refuse new tasks, but will wait for already submitted ones (i.e. those that made it
+    // through this method earlier).
     // See stopAndWaitForPendingTasksToComplete() below

Review comment:
       Fix this: the parenthetical in this comment is split across lines. Consider rewrapping it so the comment reads cleanly.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/RoutedAlias.java
##########
@@ -233,33 +267,45 @@ private static RoutedAlias routedAliasForType(String aliasName, Map<String, Stri
   public abstract void validateRouteValue(AddUpdateCommand cmd) throws SolrException;
 
   /**
-   * Create any required collections and return the name of the collection to which the current document should be sent.
+   * Create any required collections and return the name of the collection to which the current
+   * document should be sent.
    *
    * @param cmd The command that might cause collection creation
-   * @return The name of the proper destination collection for the document which may or may not be a
-   * newly created collection
+   * @return The name of the proper destination collection for the document which may or may not be
+   *     a newly created collection
    */
   public String createCollectionsIfRequired(AddUpdateCommand cmd) {
 
     // Even though it is possible that multiple requests hit this code in the 1-2 sec that
-    // it takes to create a collection, it's an established anti-pattern to feed data with a very large number
-    // of client connections. This in mind, we only guard against spamming the overseer within a batch of
-    // updates. We are intentionally tolerating a low level of redundant requests in favor of simpler code. Most
-    // super-sized installations with many update clients will likely be multi-tenant and multiple tenants
-    // probably don't write to the same alias. As such, we have deferred any solution to the "many clients causing
-    // collection creation simultaneously" problem until such time as someone actually has that problem in a
+    // it takes to create a collection, it's an established anti-pattern to feed data with a very
+    // large number
+    // of client connections. This in mind, we only guard against spamming the overseer within a
+    // batch of
+    // updates. We are intentionally tolerating a low level of redundant requests in favor of
+    // simpler code. Most
+    // super-sized installations with many update clients will likely be multi-tenant and multiple
+    // tenants
+    // probably don't write to the same alias. As such, we have deferred any solution to the "many
+    // clients causing
+    // collection creation simultaneously" problem until such time as someone actually has that
+    // problem in a
     // real world use case that isn't just an anti-pattern.
     CandidateCollection candidateCollectionDesc = findCandidateGivenValue(cmd);
 
     try {
       // It's important not to add code between here and the prior call to findCandidateGivenValue()
-      // in processAdd() that invokes updateParsedCollectionAliases(). Doing so would update parsedCollectionsDesc
-      // and create a race condition. When Routed aliases have an implicit sort for their collections we
-      // are relying on the fact that collectionList.get(0) is returning the head of the parsed collections that
-      // existed when the collection list was consulted for the candidate value. If this class updates it's notion
-      // of the list of collections since candidateCollectionDesc was chosen, we could create collection n+2
+      // in processAdd() that invokes updateParsedCollectionAliases(). Doing so would update
+      // parsedCollectionsDesc
+      // and create a race condition. When Routed aliases have an implicit sort for their
+      // collections we
+      // are relying on the fact that collectionList.get(0) is returning the head of the parsed
+      // collections that
+      // existed when the collection list was consulted for the candidate value. If this class
+      // updates it's notion
+      // of the list of collections since candidateCollectionDesc was chosen, we could create
+      // collection n+2

Review comment:
       Fix this: the comment block has stranded fragments ("parsedCollectionsDesc", "collections we", "collections that", "collection n+2"). Rewrap it at phrase boundaries.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
##########
@@ -352,18 +373,28 @@ public void validateRouteValue(AddUpdateCommand cmd) throws SolrException {
 
     // FUTURE: maybe in some cases the user would want to ignore/warn instead?
     if (docTimestamp.isAfter(Instant.now().plusMillis(getMaxFutureMs()))) {
-      throw new SolrException(BAD_REQUEST,
-          "The document's time routed key of " + docTimestamp + " is too far in the future given " +
-              ROUTER_MAX_FUTURE + "=" + getMaxFutureMs());
+      throw new SolrException(
+          BAD_REQUEST,
+          "The document's time routed key of "
+              + docTimestamp
+              + " is too far in the future given "
+              + ROUTER_MAX_FUTURE
+              + "="
+              + getMaxFutureMs());
     }
 
-    // Although this is also checked later, we need to check it here too to handle the case in Dimensional Routed
-    // aliases where one can legally have zero collections for a newly encountered category and thus the loop later
+    // Although this is also checked later, we need to check it here too to handle the case in
+    // Dimensional Routed
+    // aliases where one can legally have zero collections for a newly encountered category and thus
+    // the loop later
     // can't catch this.

Review comment:
       Fix this: "Dimensional Routed" and "the loop later" are stranded mid-sentence. Rejoin the comment.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
##########
@@ -378,20 +409,26 @@ public void validateRouteValue(AddUpdateCommand cmd) throws SolrException {
       props.put(ROUTER_START, start);
 
       // This could race, but it only occurs when the alias is first used and the values produced
-      // should all be identical and who wins won't matter (baring cases of Date Math involving seconds,
+      // should all be identical and who wins won't matter (baring cases of Date Math involving
+      // seconds,
       // which is pretty far fetched). Putting this in a separate thread to ensure that any failed
       // races don't cause documents to get rejected.

Review comment:
      Fix this comment wrapping - `// seconds,` is stranded on its own line. Rewrap the sentence
      (and `baring` should be `barring`).
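      For example (a sketch only):

      // This could race, but it only occurs when the alias is first used and the values produced
      // should all be identical and who wins won't matter (barring cases of Date Math involving
      // seconds, which is pretty far fetched). Putting this in a separate thread to ensure that
      // any failed races don't cause documents to get rejected.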

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
##########
@@ -78,43 +77,53 @@
 
   // These two fields may be updated within the calling thread during processing but should
   // never be updated by any async creation thread.
-  private List<Map.Entry<Instant, String>> parsedCollectionsDesc; // k=timestamp (start), v=collection.  Sorted descending
-  private Aliases parsedCollectionsAliases; // a cached reference to the source of what we parse into parsedCollectionsDesc
+  private List<Map.Entry<Instant, String>>
+      parsedCollectionsDesc; // k=timestamp (start), v=collection.  Sorted descending
+  private Aliases
+      parsedCollectionsAliases; // a cached reference to the source of what we parse into
+  // parsedCollectionsDesc

Review comment:
      These field declarations got split just to keep the trailing comments, leaving
      `// parsedCollectionsDesc` floating on its own line. Consider moving the comments above the
      fields instead.
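      One option (sketch):

  // k=timestamp (start), v=collection.  Sorted descending
  private List<Map.Entry<Instant, String>> parsedCollectionsDesc;

  // a cached reference to the source of what we parse into parsedCollectionsDesc
  private Aliases parsedCollectionsAliases;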

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
##########
@@ -455,14 +495,17 @@ public CandidateCollection findCandidateGivenValue(AddUpdateCommand cmd) {
 
     final Instant docTimestamp = parseRouteKey(value);
 
-    // reparse explicitly such that if we are a dimension in a DRA, the list gets culled by our context
-    // This does not normally happen with the above updateParsedCollectionAliases, because at that point the aliases
+    // reparse explicitly such that if we are a dimension in a DRA, the list gets culled by our
+    // context
+    // This does not normally happen with the above updateParsedCollectionAliases, because at that
+    // point the aliases
     // should be up to date and updateParsedCollectionAliases will short circuit

Review comment:
      Fix this comment wrapping - `// context` and `// point the aliases` dangle; rewrap the
      sentences at natural phrase boundaries.

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
##########
@@ -527,33 +580,43 @@ private CandidateCollection calcCandidateCollection(Instant docTimestamp) {
   }
 
   /**
-   * Deletes some of the oldest collection(s) based on {@link TimeRoutedAlias#getAutoDeleteAgeMath()}. If
-   * getAutoDelteAgemath is not present then this method does nothing. Per documentation is relative to a
-   * collection being created. Therefore if nothing is being created, nothing is deleted.
-   * @param actions The previously calculated add action(s). This collection should not be modified within
-   *                this method.
+   * Deletes some of the oldest collection(s) based on {@link
+   * TimeRoutedAlias#getAutoDeleteAgeMath()}. If getAutoDelteAgemath is not present then this method
+   * does nothing. Per documentation is relative to a collection being created. Therefore if nothing
+   * is being created, nothing is deleted.
+   *
+   * @param actions The previously calculated add action(s). This collection should not be modified
+   *     within this method.
    */
   private List<Action> calcDeletes(List<Action> actions) {
     final String autoDeleteAgeMathStr = this.getAutoDeleteAgeMath();
-    if (autoDeleteAgeMathStr == null || actions .size() == 0) {
+    if (autoDeleteAgeMathStr == null || actions.size() == 0) {
       return Collections.emptyList();
     }
     if (actions.size() > 1) {
-      throw new IllegalStateException("We are not supposed to be creating more than one collection at a time");
+      throw new IllegalStateException(
+          "We are not supposed to be creating more than one collection at a time");
     }
 
     String deletionReferenceCollection = actions.get(0).targetCollection;
-    Instant deletionReferenceInstant = parseInstantFromCollectionName(getAliasName(), deletionReferenceCollection);
+    Instant deletionReferenceInstant =
+        parseInstantFromCollectionName(getAliasName(), deletionReferenceCollection);
     final Instant delBefore;
     try {
-      delBefore = new DateMathParser(Date.from(computeNextCollTimestamp(deletionReferenceInstant)), this.getTimeZone()).parseMath(autoDeleteAgeMathStr).toInstant();
+      delBefore =
+          new DateMathParser(
+                  Date.from(computeNextCollTimestamp(deletionReferenceInstant)), this.getTimeZone())
+              .parseMath(autoDeleteAgeMathStr)
+              .toInstant();
     } catch (ParseException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e); // note: should not happen by this point
+      throw new SolrException(
+          SolrException.ErrorCode.SERVER_ERROR, e); // note: should not happen by this point
     }
 
     List<Action> collectionsToDelete = new ArrayList<>();
 
-    //iterating from newest to oldest, find the first collection that has a time <= "before".  We keep this collection
+    // iterating from newest to oldest, find the first collection that has a time <= "before".  We
+    // keep this collection
     // (and all newer to left) but we delete older collections, which are the ones that follow.

Review comment:
      Fix this comment wrapping - `// keep this collection` is stranded between the two halves of
      the sentence. Rewrap so the "keep newest / delete older" explanation reads straight through.
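      Something like (sketch):

    // Iterating from newest to oldest, find the first collection that has a time <= "before".
    // We keep this collection (and all newer to left) but we delete older collections, which
    // are the ones that follow.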

##########
File path: solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
##########
@@ -409,65 +482,84 @@ private DocCollection checkAndCompleteShardSplit(ClusterState prevState, DocColl
         }
         if (allActive) {
           if (log.isInfoEnabled()) {
-            log.info("Shard: {} - all {} replicas are active. Finding status of fellow sub-shards", sliceName, slice.getReplicasMap().size());
+            log.info(
+                "Shard: {} - all {} replicas are active. Finding status of fellow sub-shards",
+                sliceName,
+                slice.getReplicasMap().size());
           }
           // find out about other sub shards
           Map<String, Slice> allSlicesCopy = new HashMap<>(collection.getSlicesMap());
           List<Slice> subShardSlices = new ArrayList<>();
           outer:
           for (Map.Entry<String, Slice> entry : allSlicesCopy.entrySet()) {
-            if (sliceName.equals(entry.getKey()))
-              continue;
+            if (sliceName.equals(entry.getKey())) continue;
             Slice otherSlice = entry.getValue();
             if (otherSlice.getState() == Slice.State.RECOVERY) {
               if (slice.getParent() != null && slice.getParent().equals(otherSlice.getParent())) {
                 if (log.isInfoEnabled()) {
-                  log.info("Shard: {} - Fellow sub-shard: {} found", sliceName, otherSlice.getName());
+                  log.info(
+                      "Shard: {} - Fellow sub-shard: {} found", sliceName, otherSlice.getName());
                 }
                 // this is a fellow sub shard so check if all replicas are active
-                for (Map.Entry<String, Replica> sliceEntry : otherSlice.getReplicasMap().entrySet()) {
+                for (Map.Entry<String, Replica> sliceEntry :
+                    otherSlice.getReplicasMap().entrySet()) {
                   if (sliceEntry.getValue().getState() != Replica.State.ACTIVE) {
                     allActive = false;
                     break outer;
                   }
                 }
                 if (log.isInfoEnabled()) {
-                  log.info("Shard: {} - Fellow sub-shard: {} has all {} replicas active", sliceName, otherSlice.getName(), otherSlice.getReplicasMap().size());
+                  log.info(
+                      "Shard: {} - Fellow sub-shard: {} has all {} replicas active",
+                      sliceName,
+                      otherSlice.getName(),
+                      otherSlice.getReplicasMap().size());
                 }
                 subShardSlices.add(otherSlice);
               }
             }
           }
           if (allActive) {
             // hurray, all sub shard replicas are active
-            log.info("Shard: {} - All replicas across all fellow sub-shards are now ACTIVE.", sliceName);
+            log.info(
+                "Shard: {} - All replicas across all fellow sub-shards are now ACTIVE.", sliceName);
             String parentSliceName = (String) sliceProps.remove(Slice.PARENT);
-            // now lets see if the parent leader is still the same or else there's a chance of data loss
+            // now lets see if the parent leader is still the same or else there's a chance of data
+            // loss
             // see SOLR-9438 for details

Review comment:
      Fix this comment wrapping - `// loss` is orphaned at the end of the sentence about data
      loss. Rewrap before the SOLR-9438 reference.
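      For example (sketch):

            // now let's see if the parent leader is still the same, or else there's a chance of
            // data loss. See SOLR-9438 for details.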

##########
File path: solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java
##########
@@ -63,14 +62,16 @@ public ZkWriteCommand createCollection(ClusterState clusterState, ZkNodeProps me
     }
 
     Map<String, Object> routerSpec = DocRouter.getRouterSpec(message);
-    String routerName = routerSpec.get(NAME) == null ? DocRouter.DEFAULT_NAME : (String) routerSpec.get(NAME);
+    String routerName =
+        routerSpec.get(NAME) == null ? DocRouter.DEFAULT_NAME : (String) routerSpec.get(NAME);
     DocRouter router = DocRouter.getDocRouter(routerName);
 
     Object messageShardsObj = message.get("shards");
 
     Map<String, Slice> slices;
-    if (messageShardsObj instanceof Map) { // we are being explicitly told the slice data (e.g. coll restore)
-      slices = Slice.loadAllFromMap(cName, (Map<String, Object>)messageShardsObj);
+    if (messageShardsObj
+        instanceof Map) { // we are being explicitly told the slice data (e.g. coll restore)

Review comment:
      Splitting `messageShardsObj` from `instanceof Map` just to keep the trailing comment is hard
      to read. Move the comment above the `if` so the condition stays on one line.
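      For example (sketch):

    // we are being explicitly told the slice data (e.g. coll restore)
    if (messageShardsObj instanceof Map) {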

##########
File path: solr/core/src/java/org/apache/solr/cloud/api/collections/TimeRoutedAlias.java
##########
@@ -484,37 +527,47 @@ private CandidateCollection calcCandidateCollection(Instant docTimestamp) {
         if (i == 0) {
           next = computeNextCollTimestamp(colStartTime);
         }
-        if (!docTimestamp.isBefore(colStartTime)) {  // (inclusive lower bound)
+        if (!docTimestamp.isBefore(colStartTime)) { // (inclusive lower bound)
           CandidateCollection candidate;
           if (i == 0) {
-            if (docTimestamp.isBefore(next)) {       // (exclusive upper bound)
-              candidate = new CandidateCollection(NONE, entry.getValue()); //found it
+            if (docTimestamp.isBefore(next)) { // (exclusive upper bound)
+              candidate = new CandidateCollection(NONE, entry.getValue()); // found it
               // simply goes to head collection no action required
             } else {
-              // Although we create collections one at a time, this calculation of the ultimate destination is
+              // Although we create collections one at a time, this calculation of the ultimate
+              // destination is
               // useful for contextualizing TRA's used as dimensions in DRA's

Review comment:
      Fix this comment wrapping - `// destination is` dangles; rewrap the sentence about the
      ultimate destination calculation.

##########
File path: solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java
##########
@@ -109,27 +113,30 @@ public ZkWriteCommand modifyCollection(final ClusterState clusterState, ZkNodePr
         if (val == null) continue;
         boolean enable = Boolean.parseBoolean(val);
         if (enable == coll.isPerReplicaState()) {
-          //already enabled
+          // already enabled
           log.error("trying to set perReplicaState to {} from {}", val, coll.isPerReplicaState());
           continue;
         }
-        replicaOps = PerReplicaStatesOps.modifyCollection(coll, enable, PerReplicaStates.fetch(coll.getZNode(), zkClient, null));
+        replicaOps =
+            PerReplicaStatesOps.modifyCollection(
+                coll, enable, PerReplicaStates.fetch(coll.getZNode(), zkClient, null));
       }
 
-
       if (message.containsKey(prop)) {
         hasAnyOps = true;
-        if (message.get(prop) == null)  {
+        if (message.get(prop) == null) {
           props.remove(prop);
-        } else  {
+        } else {
           // rename key from collection.configName to configName
           if (prop.equals(COLL_CONF)) {
             props.put(CONFIGNAME_PROP, message.get(prop));
           } else {
             props.put(prop, message.get(prop));
           }
         }
-        if (prop == REPLICATION_FACTOR) { //SOLR-11676 : keep NRT_REPLICAS and REPLICATION_FACTOR in sync
+        if (prop
+            == REPLICATION_FACTOR) { // SOLR-11676 : keep NRT_REPLICAS and REPLICATION_FACTOR in
+          // sync

Review comment:
      Breaking the condition between `prop` and `== REPLICATION_FACTOR`, with `// sync` left on
      its own line, is hard to read. Move the SOLR-11676 comment above the `if` so the condition
      stays on one line.
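      For example (sketch):

        // SOLR-11676: keep NRT_REPLICAS and REPLICATION_FACTOR in sync
        if (prop == REPLICATION_FACTOR) {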

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/impl/AttributeFetcherImpl.java
##########
@@ -92,41 +92,60 @@ public AttributeValues fetchAttributes() {
     Map<NodeMetric<?>, Map<Node, Object>> metricSnitchToNodeToValue = new HashMap<>();
     Map<String, CollectionMetricsBuilder> collectionMetricsBuilders = new HashMap<>();
     Map<Node, Set<String>> nodeToReplicaInternalTags = new HashMap<>();
-    Map<String, Set<ReplicaMetric<?>>> requestedCollectionNamesMetrics = requestedCollectionMetrics.entrySet().stream()
-        .collect(Collectors.toMap(e -> e.getKey().getName(), e -> e.getValue()));
-
-    // In order to match the returned values for the various snitches, we need to keep track of where each
-    // received value goes. Given the target maps are of different types (the maps from Node to whatever defined
-    // above) we instead pass a function taking two arguments, the node and the (non null) returned value,
-    // that will cast the value into the appropriate type for the snitch tag and insert it into the appropriate map
+    Map<String, Set<ReplicaMetric<?>>> requestedCollectionNamesMetrics =
+        requestedCollectionMetrics.entrySet().stream()
+            .collect(Collectors.toMap(e -> e.getKey().getName(), e -> e.getValue()));
+
+    // In order to match the returned values for the various snitches, we need to keep track of
+    // where each
+    // received value goes. Given the target maps are of different types (the maps from Node to
+    // whatever defined
+    // above) we instead pass a function taking two arguments, the node and the (non null) returned
+    // value,
+    // that will cast the value into the appropriate type for the snitch tag and insert it into the
+    // appropriate map
     // with the node as the key.

Review comment:
      Fix this comment wrapping - `// where each`, `// whatever defined` and `// value,` are
      dangling fragments. Rewrap the paragraph so each line ends at a phrase boundary.

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/impl/DelegatingPlacementPluginFactory.java
##########
@@ -55,7 +53,8 @@ public void setDelegate(PlacementPluginFactory<? extends PlacementPluginConfig>
     Phaser localPhaser = phaser; // volatile read
     if (localPhaser != null) {
       assert localPhaser.getRegisteredParties() == 1;
-      localPhaser.arrive(); // we should be the only ones registered, so this will advance phase each time
+      localPhaser
+          .arrive(); // we should be the only ones registered, so this will advance phase each time

Review comment:
      Splitting `localPhaser` from `.arrive()` just to keep the trailing comment is awkward. Move
      the comment above the call so the statement stays on one line.
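      For example (sketch):

      // we should be the only ones registered, so this will advance phase each time
      localPhaser.arrive();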

##########
File path: solr/core/src/java/org/apache/solr/cluster/events/impl/DelegatingClusterEventProducer.java
##########
@@ -104,27 +106,33 @@ public void setDelegate(ClusterEventProducer newDelegate) {
     Phaser localPhaser = phaser; // volatile read
     if (localPhaser != null) {
       assert localPhaser.getRegisteredParties() == 1;
-      localPhaser.arrive(); // we should be the only ones registered, so this will advance phase each time
+      localPhaser
+          .arrive(); // we should be the only ones registered, so this will advance phase each time

Review comment:
      Same issue as in DelegatingPlacementPluginFactory - move the comment above
      `localPhaser.arrive()` rather than splitting the method call.

##########
File path: solr/core/src/java/org/apache/solr/cloud/overseer/ZkWriteCommand.java
##########
@@ -33,12 +31,19 @@
   public final boolean persistJsonState;
   public final PerReplicaStatesOps ops;
 
-  public ZkWriteCommand(String name, DocCollection collection, PerReplicaStatesOps replicaOps, boolean persistJsonState) {
+  public ZkWriteCommand(
+      String name,
+      DocCollection collection,
+      PerReplicaStatesOps replicaOps,
+      boolean persistJsonState) {
     isPerReplicaStateCollection = collection != null && collection.isPerReplicaState();
     this.name = name;
     this.collection = collection;
     this.ops = replicaOps;
-    this.persistJsonState = persistJsonState || !isPerReplicaStateCollection; // Always persist for non "per replica state" collections
+    this.persistJsonState =
+        persistJsonState
+            || !isPerReplicaStateCollection; // Always persist for non "per replica state"
+    // collections

Review comment:
      The trailing comment got split across the assignment, leaving `// collections` dangling at a
      different indentation. Move the whole comment above the assignment.
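      For example (sketch):

    // Always persist for non "per replica state" collections
    this.persistJsonState = persistJsonState || !isPerReplicaStateCollection;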

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -230,61 +251,90 @@ private AffinityPlacementPlugin(long minimalFreeDiskGB, long prioritizedFreeDisk
         SolrCollection solrCollection = request.getCollection();
 
         // filter out nodes that don't meet the `withCollection` constraint
-        nodes = filterNodesWithCollection(placementContext.getCluster(), request, attrValues, nodes);
+        nodes =
+            filterNodesWithCollection(placementContext.getCluster(), request, attrValues, nodes);
         // filter out nodes that don't match the "node types" specified in the collection props
         nodes = filterNodesByNodeType(placementContext.getCluster(), request, attrValues, nodes);
 
-        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap if nodes accept multiple replica types)
-        // These subsets sets are actually maps, because we capture the number of cores (of any replica type) present on each node.
-        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes = getAvailableNodesForReplicaTypes(nodes, attrValues);
-
-        // All available zones of live nodes. Due to some nodes not being candidates for placement, and some existing replicas
-        // being one availability zones that might be offline (i.e. their nodes are not live), this set might contain zones
+        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap
+        // if nodes accept multiple replica types)
+        // These subsets sets are actually maps, because we capture the number of cores (of any
+        // replica type) present on each node.

Review comment:
      Fix this comment wrapping - `// if nodes accept multiple replica types)` and
      `// replica type) present on each node.` read as fragments. Rewrap the sentences (and
      `subsets sets` should just be `subsets`).
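      A possible rewrap (sketch):

        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can
        // overlap if nodes accept multiple replica types). These subsets are actually maps,
        // because we capture the number of cores (of any replica type) present on each node.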

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -230,61 +251,90 @@ private AffinityPlacementPlugin(long minimalFreeDiskGB, long prioritizedFreeDisk
         SolrCollection solrCollection = request.getCollection();
 
         // filter out nodes that don't meet the `withCollection` constraint
-        nodes = filterNodesWithCollection(placementContext.getCluster(), request, attrValues, nodes);
+        nodes =
+            filterNodesWithCollection(placementContext.getCluster(), request, attrValues, nodes);
         // filter out nodes that don't match the "node types" specified in the collection props
         nodes = filterNodesByNodeType(placementContext.getCluster(), request, attrValues, nodes);
 
-        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap if nodes accept multiple replica types)
-        // These subsets sets are actually maps, because we capture the number of cores (of any replica type) present on each node.
-        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes = getAvailableNodesForReplicaTypes(nodes, attrValues);
-
-        // All available zones of live nodes. Due to some nodes not being candidates for placement, and some existing replicas
-        // being one availability zones that might be offline (i.e. their nodes are not live), this set might contain zones
+        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap
+        // if nodes accept multiple replica types)
+        // These subsets sets are actually maps, because we capture the number of cores (of any
+        // replica type) present on each node.
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes =
+            getAvailableNodesForReplicaTypes(nodes, attrValues);
+
+        // All available zones of live nodes. Due to some nodes not being candidates for placement,
+        // and some existing replicas
+        // being one availability zones that might be offline (i.e. their nodes are not live), this
+        // set might contain zones
         // on which it is impossible to place replicas. That's ok.
         Set<String> availabilityZones = getZonesFromNodes(nodes, attrValues);
 
         // Build the replica placement decisions here
         Set<ReplicaPlacement> replicaPlacements = new HashSet<>();
 
-        // Let's now iterate on all shards to create replicas for and start finding home sweet homes for the replicas
+        // Let's now iterate on all shards to create replicas for and start finding home sweet homes
+        // for the replicas
         for (String shardName : request.getShardNames()) {
-          // Inventory nodes (if any) that already have a replica of any type for the shard, because we can't be placing
-          // additional replicas on these. This data structure is updated after each replica to node assign and is used to
-          // make sure different replica types are not allocated to the same nodes (protecting same node assignments within
+          // Inventory nodes (if any) that already have a replica of any type for the shard, because
+          // we can't be placing
+          // additional replicas on these. This data structure is updated after each replica to node
+          // assign and is used to
+          // make sure different replica types are not allocated to the same nodes (protecting same
+          // node assignments within
           // a given replica type is done "by construction" in makePlacementDecisions()).

Review comment:
      Fix this comment wrapping too - `// we can't be placing`, `// assign and is used to` and
      `// node assignments within` are dangling fragments; rewrap the sentences.

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -230,61 +251,90 @@ private AffinityPlacementPlugin(long minimalFreeDiskGB, long prioritizedFreeDisk
         SolrCollection solrCollection = request.getCollection();
 
         // filter out nodes that don't meet the `withCollection` constraint
-        nodes = filterNodesWithCollection(placementContext.getCluster(), request, attrValues, nodes);
+        nodes =
+            filterNodesWithCollection(placementContext.getCluster(), request, attrValues, nodes);
         // filter out nodes that don't match the "node types" specified in the collection props
         nodes = filterNodesByNodeType(placementContext.getCluster(), request, attrValues, nodes);
 
-        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap if nodes accept multiple replica types)
-        // These subsets sets are actually maps, because we capture the number of cores (of any replica type) present on each node.
-        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes = getAvailableNodesForReplicaTypes(nodes, attrValues);
-
-        // All available zones of live nodes. Due to some nodes not being candidates for placement, and some existing replicas
-        // being one availability zones that might be offline (i.e. their nodes are not live), this set might contain zones
+        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap
+        // if nodes accept multiple replica types)
+        // These subsets sets are actually maps, because we capture the number of cores (of any
+        // replica type) present on each node.
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes =
+            getAvailableNodesForReplicaTypes(nodes, attrValues);
+
+        // All available zones of live nodes. Due to some nodes not being candidates for placement,
+        // and some existing replicas
+        // being one availability zones that might be offline (i.e. their nodes are not live), this
+        // set might contain zones
         // on which it is impossible to place replicas. That's ok.

Review comment:
      Fix this comment wrapping - `// and some existing replicas` and `// set might contain zones`
      dangle; rewrap (the original `being one availability zones` also reads oddly, probably
      `being in availability zones`).

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -402,44 +484,52 @@ private String getNodeAZ(Node n, final AttributeValues attrValues) {
       AzWithNodes(String azName, List<Node> availableNodesForPlacement) {
         this.azName = azName;
         this.availableNodesForPlacement = availableNodesForPlacement;
-        // Once the list is sorted to an order we're happy with, this flag is set to true to avoid sorting multiple times
+        // Once the list is sorted to an order we're happy with, this flag is set to true to avoid
+        // sorting multiple times
         // unnecessarily.

Review comment:
      Fix this comment wrapping - `// sorting multiple times` got separated from
      `// unnecessarily.`; rewrap into one cleanly wrapped comment.
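      For example (sketch):

        // Once the list is sorted to an order we're happy with, this flag is set to true to
        // avoid sorting multiple times unnecessarily.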

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -488,96 +600,139 @@ private String getNodeAZ(Node n, final AttributeValues attrValues) {
     }
 
     /**
-     * <p>Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
+     * Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
      *
      * <p>The criteria used in this method are, in this order:
+     *
      * <ol>
-     *     <li>No more than one replica of a given shard on a given node (strictly enforced)</li>
-     *     <li>Balance as much as possible replicas of a given {@link org.apache.solr.cluster.Replica.ReplicaType} over available AZ's.
-     *     This balancing takes into account existing replicas <b>of the corresponding replica type</b>, if any.</li>
-     *     <li>Place replicas if possible on nodes having more than a certain amount of free disk space (note that nodes with a too small
-     *     amount of free disk space were eliminated as placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set, AttributeValues)}). There's
-     *     a threshold here rather than sorting on the amount of free disk space, because sorting on that value would in
-     *     practice lead to never considering the number of cores on a node.</li>
-     *     <li>Place replicas on nodes having a smaller number of cores (the number of cores considered
-     *     for this decision includes previous placement decisions made during the processing of the placement request)</li>
+     *   <li>No more than one replica of a given shard on a given node (strictly enforced)
+     *   <li>Balance as much as possible replicas of a given {@link
+     *       org.apache.solr.cluster.Replica.ReplicaType} over available AZ's. This balancing takes
+     *       into account existing replicas <b>of the corresponding replica type</b>, if any.
+     *   <li>Place replicas if possible on nodes having more than a certain amount of free disk
+     *       space (note that nodes with a too small amount of free disk space were eliminated as
+     *       placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set,
+     *       AttributeValues)}). There's a threshold here rather than sorting on the amount of free
+     *       disk space, because sorting on that value would in practice lead to never considering
+     *       the number of cores on a node.
+     *   <li>Place replicas on nodes having a smaller number of cores (the number of cores
+     *       considered for this decision includes previous placement decisions made during the
+     *       processing of the placement request)
      * </ol>
      */
-    @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
-    private void makePlacementDecisions(SolrCollection solrCollection, String shardName, Set<String> availabilityZones,
-                                        Replica.ReplicaType replicaType, int numReplicas, final AttributeValues attrValues,
-                                        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes, Set<Node> nodesWithReplicas,
-                                        Map<Node, Integer> coresOnNodes, PlacementPlanFactory placementPlanFactory,
-                                        Set<ReplicaPlacement> replicaPlacements) throws PlacementException {
-      // Count existing replicas per AZ. We count only instances of the type of replica for which we need to do placement.
-      // If we ever want to balance replicas of any type across AZ's (and not each replica type balanced independently),
-      // we'd have to move this data structure to the caller of this method so it can be reused across different replica
-      // type placements for a given shard. Note then that this change would be risky. For example all NRT's and PULL
-      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up in the same AZ...
+    @SuppressForbidden(
+        reason =
+            "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+    private void makePlacementDecisions(
+        SolrCollection solrCollection,
+        String shardName,
+        Set<String> availabilityZones,
+        Replica.ReplicaType replicaType,
+        int numReplicas,
+        final AttributeValues attrValues,
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes,
+        Set<Node> nodesWithReplicas,
+        Map<Node, Integer> coresOnNodes,
+        PlacementPlanFactory placementPlanFactory,
+        Set<ReplicaPlacement> replicaPlacements)
+        throws PlacementException {
+      // Count existing replicas per AZ. We count only instances of the type of replica for which we
+      // need to do placement.
+      // If we ever want to balance replicas of any type across AZ's (and not each replica type
+      // balanced independently),
+      // we'd have to move this data structure to the caller of this method so it can be reused
+      // across different replica
+      // type placements for a given shard. Note then that this change would be risky. For example
+      // all NRT's and PULL
+      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up
+      // in the same AZ...

Review comment:
      Fix this comment wrapping - this paragraph is full of orphan fragments
      (`// need to do placement.`, `// balanced independently),`, `// across different replica`,
      etc.). Rewrap it, and `my be` should be `may be`.
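      A possible rewrap (sketch only, widths approximate):

      // Count existing replicas per AZ. We count only instances of the type of replica for
      // which we need to do placement. If we ever want to balance replicas of any type across
      // AZ's (and not each replica type balanced independently), we'd have to move this data
      // structure to the caller of this method so it can be reused across different replica
      // type placements for a given shard. Note then that this change would be risky. For
      // example all NRT's and PULL replicas for a shard may be correctly balanced over three
      // AZ's, but then all NRT can end up in the same AZ...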

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -230,61 +251,90 @@ private AffinityPlacementPlugin(long minimalFreeDiskGB, long prioritizedFreeDisk
         SolrCollection solrCollection = request.getCollection();
 
         // filter out nodes that don't meet the `withCollection` constraint
-        nodes = filterNodesWithCollection(placementContext.getCluster(), request, attrValues, nodes);
+        nodes =
+            filterNodesWithCollection(placementContext.getCluster(), request, attrValues, nodes);
         // filter out nodes that don't match the "node types" specified in the collection props
         nodes = filterNodesByNodeType(placementContext.getCluster(), request, attrValues, nodes);
 
-        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap if nodes accept multiple replica types)
-        // These subsets sets are actually maps, because we capture the number of cores (of any replica type) present on each node.
-        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes = getAvailableNodesForReplicaTypes(nodes, attrValues);
-
-        // All available zones of live nodes. Due to some nodes not being candidates for placement, and some existing replicas
-        // being one availability zones that might be offline (i.e. their nodes are not live), this set might contain zones
+        // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap
+        // if nodes accept multiple replica types)
+        // These subsets sets are actually maps, because we capture the number of cores (of any
+        // replica type) present on each node.
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes =
+            getAvailableNodesForReplicaTypes(nodes, attrValues);
+
+        // All available zones of live nodes. Due to some nodes not being candidates for placement,
+        // and some existing replicas
+        // being one availability zones that might be offline (i.e. their nodes are not live), this
+        // set might contain zones
         // on which it is impossible to place replicas. That's ok.
         Set<String> availabilityZones = getZonesFromNodes(nodes, attrValues);
 
         // Build the replica placement decisions here
         Set<ReplicaPlacement> replicaPlacements = new HashSet<>();
 
-        // Let's now iterate on all shards to create replicas for and start finding home sweet homes for the replicas
+        // Let's now iterate on all shards to create replicas for and start finding home sweet homes
+        // for the replicas
         for (String shardName : request.getShardNames()) {
-          // Inventory nodes (if any) that already have a replica of any type for the shard, because we can't be placing
-          // additional replicas on these. This data structure is updated after each replica to node assign and is used to
-          // make sure different replica types are not allocated to the same nodes (protecting same node assignments within
+          // Inventory nodes (if any) that already have a replica of any type for the shard, because
+          // we can't be placing
+          // additional replicas on these. This data structure is updated after each replica to node
+          // assign and is used to
+          // make sure different replica types are not allocated to the same nodes (protecting same
+          // node assignments within
           // a given replica type is done "by construction" in makePlacementDecisions()).
           Set<Node> nodesWithReplicas =
               allNodesWithReplicas
                   .computeIfAbsent(solrCollection.getName(), col -> new HashMap<>())
-                  .computeIfAbsent(shardName, s -> {
-                    Set<Node> newNodeSet = new HashSet<>();
-                    Shard shard = solrCollection.getShard(s);
-                    if (shard != null) {
-                      // Prefill the set with the existing replicas
-                      for (Replica r : shard.replicas()) {
-                        newNodeSet.add(r.getNode());
-                      }
-                    }
-                    return newNodeSet;
-                  });
-
+                  .computeIfAbsent(
+                      shardName,
+                      s -> {
+                        Set<Node> newNodeSet = new HashSet<>();
+                        Shard shard = solrCollection.getShard(s);
+                        if (shard != null) {
+                          // Prefill the set with the existing replicas
+                          for (Replica r : shard.replicas()) {
+                            newNodeSet.add(r.getNode());
+                          }
+                        }
+                        return newNodeSet;
+                      });
 
           // Iterate on the replica types in the enum order. We place more strategic replicas first
-          // (NRT is more strategic than TLOG more strategic than PULL). This is in case we eventually decide that less
-          // strategic replica placement impossibility is not a problem that should lead to replica placement computation
-          // failure. Current code does fail if placement is impossible (constraint is at most one replica of a shard on any node).
+          // (NRT is more strategic than TLOG more strategic than PULL). This is in case we
+          // eventually decide that less
+          // strategic replica placement impossibility is not a problem that should lead to replica
+          // placement computation
+          // failure. Current code does fail if placement is impossible (constraint is at most one
+          // replica of a shard on any node).

Review comment:
      Fix this comment wrapping - `// eventually decide that less` and `// placement computation`
      dangle mid-sentence; rewrap the explanation of the replica-type ordering.

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -593,44 +748,62 @@ private void makePlacementDecisions(SolrCollection solrCollection, String shardN
               break;
             }
             candidateAzEntries.add(entry);
-            // We remove all entries that are candidates: the "winner" will be modified, all entries might also be sorted,
+            // We remove all entries that are candidates: the "winner" will be modified, all entries
+            // might also be sorted,
             // so we'll insert back the updated versions later.

Review comment:
      Fix this comment wrapping - `// might also be sorted,` dangles before
      `// so we'll insert back the updated versions later.`; rewrap the sentence.

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -488,96 +600,139 @@ private String getNodeAZ(Node n, final AttributeValues attrValues) {
     }
 
     /**
-     * <p>Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
+     * Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
      *
      * <p>The criteria used in this method are, in this order:
+     *
      * <ol>
-     *     <li>No more than one replica of a given shard on a given node (strictly enforced)</li>
-     *     <li>Balance as much as possible replicas of a given {@link org.apache.solr.cluster.Replica.ReplicaType} over available AZ's.
-     *     This balancing takes into account existing replicas <b>of the corresponding replica type</b>, if any.</li>
-     *     <li>Place replicas if possible on nodes having more than a certain amount of free disk space (note that nodes with a too small
-     *     amount of free disk space were eliminated as placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set, AttributeValues)}). There's
-     *     a threshold here rather than sorting on the amount of free disk space, because sorting on that value would in
-     *     practice lead to never considering the number of cores on a node.</li>
-     *     <li>Place replicas on nodes having a smaller number of cores (the number of cores considered
-     *     for this decision includes previous placement decisions made during the processing of the placement request)</li>
+     *   <li>No more than one replica of a given shard on a given node (strictly enforced)
+     *   <li>Balance as much as possible replicas of a given {@link
+     *       org.apache.solr.cluster.Replica.ReplicaType} over available AZ's. This balancing takes
+     *       into account existing replicas <b>of the corresponding replica type</b>, if any.
+     *   <li>Place replicas if possible on nodes having more than a certain amount of free disk
+     *       space (note that nodes with a too small amount of free disk space were eliminated as
+     *       placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set,
+     *       AttributeValues)}). There's a threshold here rather than sorting on the amount of free
+     *       disk space, because sorting on that value would in practice lead to never considering
+     *       the number of cores on a node.
+     *   <li>Place replicas on nodes having a smaller number of cores (the number of cores
+     *       considered for this decision includes previous placement decisions made during the
+     *       processing of the placement request)
      * </ol>
      */
-    @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
-    private void makePlacementDecisions(SolrCollection solrCollection, String shardName, Set<String> availabilityZones,
-                                        Replica.ReplicaType replicaType, int numReplicas, final AttributeValues attrValues,
-                                        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes, Set<Node> nodesWithReplicas,
-                                        Map<Node, Integer> coresOnNodes, PlacementPlanFactory placementPlanFactory,
-                                        Set<ReplicaPlacement> replicaPlacements) throws PlacementException {
-      // Count existing replicas per AZ. We count only instances of the type of replica for which we need to do placement.
-      // If we ever want to balance replicas of any type across AZ's (and not each replica type balanced independently),
-      // we'd have to move this data structure to the caller of this method so it can be reused across different replica
-      // type placements for a given shard. Note then that this change would be risky. For example all NRT's and PULL
-      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up in the same AZ...
+    @SuppressForbidden(
+        reason =
+            "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+    private void makePlacementDecisions(
+        SolrCollection solrCollection,
+        String shardName,
+        Set<String> availabilityZones,
+        Replica.ReplicaType replicaType,
+        int numReplicas,
+        final AttributeValues attrValues,
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes,
+        Set<Node> nodesWithReplicas,
+        Map<Node, Integer> coresOnNodes,
+        PlacementPlanFactory placementPlanFactory,
+        Set<ReplicaPlacement> replicaPlacements)
+        throws PlacementException {
+      // Count existing replicas per AZ. We count only instances of the type of replica for which we
+      // need to do placement.
+      // If we ever want to balance replicas of any type across AZ's (and not each replica type
+      // balanced independently),
+      // we'd have to move this data structure to the caller of this method so it can be reused
+      // across different replica
+      // type placements for a given shard. Note then that this change would be risky. For example
+      // all NRT's and PULL
+      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up
+      // in the same AZ...
       Map<String, Integer> azToNumReplicas = new HashMap<>();
       for (String az : availabilityZones) {
         azToNumReplicas.put(az, 0);
       }
 
-      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica type
+      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica
+      // type
       Set<Node> candidateNodes = new HashSet<>(replicaTypeToNodes.get(replicaType));
-      // Remove nodes that already have a replica for the shard (no two replicas of same shard can be put on same node)
+      // Remove nodes that already have a replica for the shard (no two replicas of same shard can
+      // be put on same node)
       candidateNodes.removeAll(nodesWithReplicas);
 
       Shard shard = solrCollection.getShard(shardName);
       if (shard != null) {
         // shard is non null if we're adding replicas to an already existing collection.
         // If we're creating the collection, the shards do not exist yet.
         for (Replica replica : shard.replicas()) {
-          // The node's AZ is counted as having a replica if it has a replica of the same type as the one we need
+          // The node's AZ is counted as having a replica if it has a replica of the same type as
+          // the one we need
           // to place here.
           if (replica.getType() == replicaType) {
             final String az = getNodeAZ(replica.getNode(), attrValues);
             if (azToNumReplicas.containsKey(az)) {
-              // We do not count replicas on AZ's for which we don't have any node to place on because it would not help
-              // the placement decision. If we did want to do that, note the dereferencing below can't be assumed as the
+              // We do not count replicas on AZ's for which we don't have any node to place on
+              // because it would not help
+              // the placement decision. If we did want to do that, note the dereferencing below
+              // can't be assumed as the
               // entry will not exist in the map.
               azToNumReplicas.put(az, azToNumReplicas.get(az) + 1);
             }
           }
         }
       }
 
-      // We now have the set of real candidate nodes, we've enforced "No more than one replica of a given shard on a given node".
-      // We also counted for the shard and replica type under consideration how many replicas were per AZ, so we can place
+      // We now have the set of real candidate nodes, we've enforced "No more than one replica of a
+      // given shard on a given node".
+      // We also counted for the shard and replica type under consideration how many replicas were
+      // per AZ, so we can place
       // (or try to place) replicas on AZ's that have fewer replicas
 
-      // Get the candidate nodes per AZ in order to build (further down) a mapping of AZ to placement candidates.
+      // Get the candidate nodes per AZ in order to build (further down) a mapping of AZ to
+      // placement candidates.
       Map<String, List<Node>> nodesPerAz = new HashMap<>();
       for (Node node : candidateNodes) {
         String nodeAz = getNodeAZ(node, attrValues);
         List<Node> nodesForAz = nodesPerAz.computeIfAbsent(nodeAz, k -> new ArrayList<>());
         nodesForAz.add(node);
       }
 
-      // Build a treeMap sorted by the number of replicas per AZ and including candidates nodes suitable for placement on the
-      // AZ, so we can easily select the next AZ to get a replica assignment and quickly (constant time) decide if placement
+      // Build a treeMap sorted by the number of replicas per AZ and including candidates nodes
+      // suitable for placement on the
+      // AZ, so we can easily select the next AZ to get a replica assignment and quickly (constant
+      // time) decide if placement
       // on this AZ is possible or not.

Review comment:
      Fix this comment wrapping - `// suitable for placement on the` and
      `// time) decide if placement` are dangling fragments; rewrap (also `candidates nodes`
      should be `candidate nodes`).
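      A possible rewrap (sketch):

      // Build a treeMap sorted by the number of replicas per AZ and including candidate nodes
      // suitable for placement on the AZ, so we can easily select the next AZ to get a replica
      // assignment and quickly (constant time) decide if placement on this AZ is possible or not.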

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -488,96 +600,139 @@ private String getNodeAZ(Node n, final AttributeValues attrValues) {
     }
 
     /**
-     * <p>Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
+     * Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
      *
      * <p>The criteria used in this method are, in this order:
+     *
      * <ol>
-     *     <li>No more than one replica of a given shard on a given node (strictly enforced)</li>
-     *     <li>Balance as much as possible replicas of a given {@link org.apache.solr.cluster.Replica.ReplicaType} over available AZ's.
-     *     This balancing takes into account existing replicas <b>of the corresponding replica type</b>, if any.</li>
-     *     <li>Place replicas if possible on nodes having more than a certain amount of free disk space (note that nodes with a too small
-     *     amount of free disk space were eliminated as placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set, AttributeValues)}). There's
-     *     a threshold here rather than sorting on the amount of free disk space, because sorting on that value would in
-     *     practice lead to never considering the number of cores on a node.</li>
-     *     <li>Place replicas on nodes having a smaller number of cores (the number of cores considered
-     *     for this decision includes previous placement decisions made during the processing of the placement request)</li>
+     *   <li>No more than one replica of a given shard on a given node (strictly enforced)
+     *   <li>Balance as much as possible replicas of a given {@link
+     *       org.apache.solr.cluster.Replica.ReplicaType} over available AZ's. This balancing takes
+     *       into account existing replicas <b>of the corresponding replica type</b>, if any.
+     *   <li>Place replicas if possible on nodes having more than a certain amount of free disk
+     *       space (note that nodes with a too small amount of free disk space were eliminated as
+     *       placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set,
+     *       AttributeValues)}). There's a threshold here rather than sorting on the amount of free
+     *       disk space, because sorting on that value would in practice lead to never considering
+     *       the number of cores on a node.
+     *   <li>Place replicas on nodes having a smaller number of cores (the number of cores
+     *       considered for this decision includes previous placement decisions made during the
+     *       processing of the placement request)
      * </ol>
      */
-    @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
-    private void makePlacementDecisions(SolrCollection solrCollection, String shardName, Set<String> availabilityZones,
-                                        Replica.ReplicaType replicaType, int numReplicas, final AttributeValues attrValues,
-                                        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes, Set<Node> nodesWithReplicas,
-                                        Map<Node, Integer> coresOnNodes, PlacementPlanFactory placementPlanFactory,
-                                        Set<ReplicaPlacement> replicaPlacements) throws PlacementException {
-      // Count existing replicas per AZ. We count only instances of the type of replica for which we need to do placement.
-      // If we ever want to balance replicas of any type across AZ's (and not each replica type balanced independently),
-      // we'd have to move this data structure to the caller of this method so it can be reused across different replica
-      // type placements for a given shard. Note then that this change would be risky. For example all NRT's and PULL
-      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up in the same AZ...
+    @SuppressForbidden(
+        reason =
+            "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+    private void makePlacementDecisions(
+        SolrCollection solrCollection,
+        String shardName,
+        Set<String> availabilityZones,
+        Replica.ReplicaType replicaType,
+        int numReplicas,
+        final AttributeValues attrValues,
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes,
+        Set<Node> nodesWithReplicas,
+        Map<Node, Integer> coresOnNodes,
+        PlacementPlanFactory placementPlanFactory,
+        Set<ReplicaPlacement> replicaPlacements)
+        throws PlacementException {
+      // Count existing replicas per AZ. We count only instances of the type of replica for which we
+      // need to do placement.
+      // If we ever want to balance replicas of any type across AZ's (and not each replica type
+      // balanced independently),
+      // we'd have to move this data structure to the caller of this method so it can be reused
+      // across different replica
+      // type placements for a given shard. Note then that this change would be risky. For example
+      // all NRT's and PULL
+      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up
+      // in the same AZ...
       Map<String, Integer> azToNumReplicas = new HashMap<>();
       for (String az : availabilityZones) {
         azToNumReplicas.put(az, 0);
       }
 
-      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica type
+      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica
+      // type
       Set<Node> candidateNodes = new HashSet<>(replicaTypeToNodes.get(replicaType));
-      // Remove nodes that already have a replica for the shard (no two replicas of same shard can be put on same node)
+      // Remove nodes that already have a replica for the shard (no two replicas of same shard can
+      // be put on same node)
       candidateNodes.removeAll(nodesWithReplicas);
 
       Shard shard = solrCollection.getShard(shardName);
       if (shard != null) {
         // shard is non null if we're adding replicas to an already existing collection.
         // If we're creating the collection, the shards do not exist yet.
         for (Replica replica : shard.replicas()) {
-          // The node's AZ is counted as having a replica if it has a replica of the same type as the one we need
+          // The node's AZ is counted as having a replica if it has a replica of the same type as
+          // the one we need
           // to place here.
           if (replica.getType() == replicaType) {
             final String az = getNodeAZ(replica.getNode(), attrValues);
             if (azToNumReplicas.containsKey(az)) {
-              // We do not count replicas on AZ's for which we don't have any node to place on because it would not help
-              // the placement decision. If we did want to do that, note the dereferencing below can't be assumed as the
+              // We do not count replicas on AZ's for which we don't have any node to place on
+              // because it would not help
+              // the placement decision. If we did want to do that, note the dereferencing below
+              // can't be assumed as the
               // entry will not exist in the map.
               azToNumReplicas.put(az, azToNumReplicas.get(az) + 1);
             }
           }
         }
       }
 
-      // We now have the set of real candidate nodes, we've enforced "No more than one replica of a given shard on a given node".
-      // We also counted for the shard and replica type under consideration how many replicas were per AZ, so we can place
+      // We now have the set of real candidate nodes, we've enforced "No more than one replica of a
+      // given shard on a given node".
+      // We also counted for the shard and replica type under consideration how many replicas were
+      // per AZ, so we can place
       // (or try to place) replicas on AZ's that have fewer replicas

Review comment:
      Fix this comment wrapping - the quoted "No more than one replica..." sentence and the
      "per AZ" sentence are both split mid-phrase; rewrap them.

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -488,96 +600,139 @@ private String getNodeAZ(Node n, final AttributeValues attrValues) {
     }
 
     /**
-     * <p>Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
+     * Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
      *
      * <p>The criteria used in this method are, in this order:
+     *
      * <ol>
-     *     <li>No more than one replica of a given shard on a given node (strictly enforced)</li>
-     *     <li>Balance as much as possible replicas of a given {@link org.apache.solr.cluster.Replica.ReplicaType} over available AZ's.
-     *     This balancing takes into account existing replicas <b>of the corresponding replica type</b>, if any.</li>
-     *     <li>Place replicas if possible on nodes having more than a certain amount of free disk space (note that nodes with a too small
-     *     amount of free disk space were eliminated as placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set, AttributeValues)}). There's
-     *     a threshold here rather than sorting on the amount of free disk space, because sorting on that value would in
-     *     practice lead to never considering the number of cores on a node.</li>
-     *     <li>Place replicas on nodes having a smaller number of cores (the number of cores considered
-     *     for this decision includes previous placement decisions made during the processing of the placement request)</li>
+     *   <li>No more than one replica of a given shard on a given node (strictly enforced)
+     *   <li>Balance as much as possible replicas of a given {@link
+     *       org.apache.solr.cluster.Replica.ReplicaType} over available AZ's. This balancing takes
+     *       into account existing replicas <b>of the corresponding replica type</b>, if any.
+     *   <li>Place replicas if possible on nodes having more than a certain amount of free disk
+     *       space (note that nodes with a too small amount of free disk space were eliminated as
+     *       placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set,
+     *       AttributeValues)}). There's a threshold here rather than sorting on the amount of free
+     *       disk space, because sorting on that value would in practice lead to never considering
+     *       the number of cores on a node.
+     *   <li>Place replicas on nodes having a smaller number of cores (the number of cores
+     *       considered for this decision includes previous placement decisions made during the
+     *       processing of the placement request)
      * </ol>
      */
-    @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
-    private void makePlacementDecisions(SolrCollection solrCollection, String shardName, Set<String> availabilityZones,
-                                        Replica.ReplicaType replicaType, int numReplicas, final AttributeValues attrValues,
-                                        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes, Set<Node> nodesWithReplicas,
-                                        Map<Node, Integer> coresOnNodes, PlacementPlanFactory placementPlanFactory,
-                                        Set<ReplicaPlacement> replicaPlacements) throws PlacementException {
-      // Count existing replicas per AZ. We count only instances of the type of replica for which we need to do placement.
-      // If we ever want to balance replicas of any type across AZ's (and not each replica type balanced independently),
-      // we'd have to move this data structure to the caller of this method so it can be reused across different replica
-      // type placements for a given shard. Note then that this change would be risky. For example all NRT's and PULL
-      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up in the same AZ...
+    @SuppressForbidden(
+        reason =
+            "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+    private void makePlacementDecisions(
+        SolrCollection solrCollection,
+        String shardName,
+        Set<String> availabilityZones,
+        Replica.ReplicaType replicaType,
+        int numReplicas,
+        final AttributeValues attrValues,
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes,
+        Set<Node> nodesWithReplicas,
+        Map<Node, Integer> coresOnNodes,
+        PlacementPlanFactory placementPlanFactory,
+        Set<ReplicaPlacement> replicaPlacements)
+        throws PlacementException {
+      // Count existing replicas per AZ. We count only instances of the type of replica for which we
+      // need to do placement.
+      // If we ever want to balance replicas of any type across AZ's (and not each replica type
+      // balanced independently),
+      // we'd have to move this data structure to the caller of this method so it can be reused
+      // across different replica
+      // type placements for a given shard. Note then that this change would be risky. For example
+      // all NRT's and PULL
+      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up
+      // in the same AZ...
       Map<String, Integer> azToNumReplicas = new HashMap<>();
       for (String az : availabilityZones) {
         azToNumReplicas.put(az, 0);
       }
 
-      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica type
+      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica
+      // type
       Set<Node> candidateNodes = new HashSet<>(replicaTypeToNodes.get(replicaType));
-      // Remove nodes that already have a replica for the shard (no two replicas of same shard can be put on same node)
+      // Remove nodes that already have a replica for the shard (no two replicas of same shard can
+      // be put on same node)
       candidateNodes.removeAll(nodesWithReplicas);
 
       Shard shard = solrCollection.getShard(shardName);
       if (shard != null) {
         // shard is non null if we're adding replicas to an already existing collection.
         // If we're creating the collection, the shards do not exist yet.
         for (Replica replica : shard.replicas()) {
-          // The node's AZ is counted as having a replica if it has a replica of the same type as the one we need
+          // The node's AZ is counted as having a replica if it has a replica of the same type as
+          // the one we need
           // to place here.
           if (replica.getType() == replicaType) {
             final String az = getNodeAZ(replica.getNode(), attrValues);
             if (azToNumReplicas.containsKey(az)) {
-              // We do not count replicas on AZ's for which we don't have any node to place on because it would not help
-              // the placement decision. If we did want to do that, note the dereferencing below can't be assumed as the
+              // We do not count replicas on AZ's for which we don't have any node to place on
+              // because it would not help
+              // the placement decision. If we did want to do that, note the dereferencing below
+              // can't be assumed as the
               // entry will not exist in the map.
               azToNumReplicas.put(az, azToNumReplicas.get(az) + 1);
             }
           }
         }
       }
 
-      // We now have the set of real candidate nodes, we've enforced "No more than one replica of a given shard on a given node".
-      // We also counted for the shard and replica type under consideration how many replicas were per AZ, so we can place
+      // We now have the set of real candidate nodes, we've enforced "No more than one replica of a
+      // given shard on a given node".
+      // We also counted for the shard and replica type under consideration how many replicas were
+      // per AZ, so we can place
       // (or try to place) replicas on AZ's that have fewer replicas
 
-      // Get the candidate nodes per AZ in order to build (further down) a mapping of AZ to placement candidates.
+      // Get the candidate nodes per AZ in order to build (further down) a mapping of AZ to
+      // placement candidates.
       Map<String, List<Node>> nodesPerAz = new HashMap<>();
       for (Node node : candidateNodes) {
         String nodeAz = getNodeAZ(node, attrValues);
         List<Node> nodesForAz = nodesPerAz.computeIfAbsent(nodeAz, k -> new ArrayList<>());
         nodesForAz.add(node);
       }
 
-      // Build a treeMap sorted by the number of replicas per AZ and including candidates nodes suitable for placement on the
-      // AZ, so we can easily select the next AZ to get a replica assignment and quickly (constant time) decide if placement
+      // Build a treeMap sorted by the number of replicas per AZ and including candidates nodes
+      // suitable for placement on the
+      // AZ, so we can easily select the next AZ to get a replica assignment and quickly (constant
+      // time) decide if placement
       // on this AZ is possible or not.
-      TreeMultimap<Integer, AzWithNodes> azByExistingReplicas = TreeMultimap.create(Comparator.naturalOrder(), Ordering.arbitrary());
+      TreeMultimap<Integer, AzWithNodes> azByExistingReplicas =
+          TreeMultimap.create(Comparator.naturalOrder(), Ordering.arbitrary());
       for (Map.Entry<String, List<Node>> e : nodesPerAz.entrySet()) {
-        azByExistingReplicas.put(azToNumReplicas.get(e.getKey()), new AzWithNodes(e.getKey(), e.getValue()));
+        azByExistingReplicas.put(
+            azToNumReplicas.get(e.getKey()), new AzWithNodes(e.getKey(), e.getValue()));
       }
 
-      CoresAndDiskComparator coresAndDiskComparator = new CoresAndDiskComparator(attrValues, coresOnNodes, prioritizedFreeDiskGB);
+      CoresAndDiskComparator coresAndDiskComparator =
+          new CoresAndDiskComparator(attrValues, coresOnNodes, prioritizedFreeDiskGB);
 
       for (int i = 0; i < numReplicas; i++) {
-        // We have for each AZ on which we might have a chance of placing a replica, the list of candidate nodes for replicas
-        // (candidate: does not already have a replica of this shard and is in the corresponding AZ).
-        // Among the AZ's with the minimal number of replicas of the given replica type for the shard, we must pick the AZ that
-        // offers the best placement (based on number of cores and free disk space). In order to do so, for these "minimal" AZ's
-        // we sort the nodes from best to worst placement candidate (based on the number of cores and free disk space) then pick
-        // the AZ that has the best best node. We don't sort all AZ's because that will not necessarily be needed.
+        // We have for each AZ on which we might have a chance of placing a replica, the list of
+        // candidate nodes for replicas
+        // (candidate: does not already have a replica of this shard and is in the corresponding
+        // AZ).
+        // Among the AZ's with the minimal number of replicas of the given replica type for the
+        // shard, we must pick the AZ that
+        // offers the best placement (based on number of cores and free disk space). In order to do
+        // so, for these "minimal" AZ's
+        // we sort the nodes from best to worst placement candidate (based on the number of cores
+        // and free disk space) then pick
+        // the AZ that has the best best node. We don't sort all AZ's because that will not
+        // necessarily be needed.
         int minNumberOfReplicasPerAz = 0; // This value never observed but compiler can't tell
         Set<Map.Entry<Integer, AzWithNodes>> candidateAzEntries = null;
-        // Iterate over AZ's (in the order of increasing number of replicas on that AZ) and do two things: 1. remove those AZ's that
-        // have no nodes, no use iterating over these again and again (as we compute placement for more replicas), and 2. collect
+        // Iterate over AZ's (in the order of increasing number of replicas on that AZ) and do two
+        // things: 1. remove those AZ's that
+        // have no nodes, no use iterating over these again and again (as we compute placement for
+        // more replicas), and 2. collect
         // all those AZ with a minimal number of replicas.

Review comment:
       Fix this wrapping as well - "do two / things: 1. remove those AZ's that / have no nodes" splits the enumeration awkwardly; reflow the whole comment.
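
       A hedged sketch of the TreeMultimap idea the comment describes, with String AZ names standing
       in for the AzWithNodes holder (assumes Guava on the classpath; not this patch's code):

         import com.google.common.collect.Ordering;
         import com.google.common.collect.TreeMultimap;
         import java.util.Comparator;
         import java.util.Map;

         public class AzOrderingSketch {
           public static void main(String[] args) {
             Map<String, Integer> azToNumReplicas = Map.of("az1", 2, "az2", 0, "az3", 1);
             // Keys are existing replica counts, so iteration visits the least-loaded AZs first.
             TreeMultimap<Integer, String> azByExistingReplicas =
                 TreeMultimap.create(Comparator.naturalOrder(), Ordering.arbitrary());
             for (Map.Entry<String, Integer> e : azToNumReplicas.entrySet()) {
               azByExistingReplicas.put(e.getValue(), e.getKey());
             }
             for (Map.Entry<Integer, String> e : azByExistingReplicas.entries()) {
               System.out.println(e.getKey() + " -> " + e.getValue()); // 0 -> az2, 1 -> az3, 2 -> az1
             }
           }
         }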

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -593,44 +748,62 @@ private void makePlacementDecisions(SolrCollection solrCollection, String shardN
               break;
             }
             candidateAzEntries.add(entry);
-            // We remove all entries that are candidates: the "winner" will be modified, all entries might also be sorted,
+            // We remove all entries that are candidates: the "winner" will be modified, all entries
+            // might also be sorted,
             // so we'll insert back the updated versions later.
             it.remove();
           }
         }
 
         if (candidateAzEntries == null) {
-          // This can happen because not enough nodes for the placement request or already too many nodes with replicas of
-          // the shard that can't accept new replicas or not enough nodes with enough free disk space.
-          throw new PlacementException("Not enough eligible nodes to place " + numReplicas + " replica(s) of type " + replicaType +
-              " for shard " + shardName + " of collection " + solrCollection.getName());
+          // This can happen because not enough nodes for the placement request or already too many
+          // nodes with replicas of
+          // the shard that can't accept new replicas or not enough nodes with enough free disk
+          // space.

Review comment:
       Fix this comment wrapping - the explanation now ends lines on "too many", "replicas of" and "free disk"; reflow it into full sentences.

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -593,44 +748,62 @@ private void makePlacementDecisions(SolrCollection solrCollection, String shardN
               break;
             }
             candidateAzEntries.add(entry);
-            // We remove all entries that are candidates: the "winner" will be modified, all entries might also be sorted,
+            // We remove all entries that are candidates: the "winner" will be modified, all entries
+            // might also be sorted,
             // so we'll insert back the updated versions later.
             it.remove();
           }
         }
 
         if (candidateAzEntries == null) {
-          // This can happen because not enough nodes for the placement request or already too many nodes with replicas of
-          // the shard that can't accept new replicas or not enough nodes with enough free disk space.
-          throw new PlacementException("Not enough eligible nodes to place " + numReplicas + " replica(s) of type " + replicaType +
-              " for shard " + shardName + " of collection " + solrCollection.getName());
+          // This can happen because not enough nodes for the placement request or already too many
+          // nodes with replicas of
+          // the shard that can't accept new replicas or not enough nodes with enough free disk
+          // space.
+          throw new PlacementException(
+              "Not enough eligible nodes to place "
+                  + numReplicas
+                  + " replica(s) of type "
+                  + replicaType
+                  + " for shard "
+                  + shardName
+                  + " of collection "
+                  + solrCollection.getName());
         }
 
-        // Iterate over all candidate AZ's, sort them if needed and find the best one to use for this placement
+        // Iterate over all candidate AZ's, sort them if needed and find the best one to use for
+        // this placement
         Map.Entry<Integer, AzWithNodes> selectedAz = null;
         Node selectedAzBestNode = null;
         for (Map.Entry<Integer, AzWithNodes> candidateAzEntry : candidateAzEntries) {
           AzWithNodes azWithNodes = candidateAzEntry.getValue();
           List<Node> nodes = azWithNodes.availableNodesForPlacement;
 
           if (!azWithNodes.hasBeenSorted) {
-            // Make sure we do not tend to use always the same nodes (within an AZ) if all conditions are identical (well, this
-            // likely is not the case since after having added a replica to a node its number of cores increases for the next
-            // placement decision, but let's be defensive here, given that multiple concurrent placement decisions might see
-            // the same initial cluster state, and we want placement to be reasonable even in that case without creating an
+            // Make sure we do not tend to use always the same nodes (within an AZ) if all
+            // conditions are identical (well, this
+            // likely is not the case since after having added a replica to a node its number of
+            // cores increases for the next
+            // placement decision, but let's be defensive here, given that multiple concurrent
+            // placement decisions might see
+            // the same initial cluster state, and we want placement to be reasonable even in that
+            // case without creating an
             // unnecessary imbalance).

Review comment:
       Fix this one too - the defensive-shuffle explanation is split into dangling fragments ("(well, this", "number of", "even in that"); reflow the comment so it reads naturally.
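
       The defensiveness that comment talks about is usually achieved by randomizing ties before
       sorting. A hedged sketch of that general pattern, with simplified node names and a plain
       cores map (not necessarily how this class does it):

         import java.util.ArrayList;
         import java.util.Collections;
         import java.util.Comparator;
         import java.util.List;
         import java.util.Map;
         import java.util.Random;

         public class TieBreakSketch {
           // Shuffle first, then sort: List.sort is stable, so nodes that compare as equal keep
           // their shuffled (random) relative order instead of always landing in the same spot.
           static List<String> orderCandidates(List<String> nodes, Map<String, Integer> cores, Random random) {
             List<String> copy = new ArrayList<>(nodes);
             Collections.shuffle(copy, random);
             copy.sort(Comparator.comparingInt((String n) -> cores.get(n)));
             return copy;
           }
         }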

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -488,96 +600,139 @@ private String getNodeAZ(Node n, final AttributeValues attrValues) {
     }
 
     /**
-     * <p>Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
+     * Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
      *
      * <p>The criteria used in this method are, in this order:
+     *
      * <ol>
-     *     <li>No more than one replica of a given shard on a given node (strictly enforced)</li>
-     *     <li>Balance as much as possible replicas of a given {@link org.apache.solr.cluster.Replica.ReplicaType} over available AZ's.
-     *     This balancing takes into account existing replicas <b>of the corresponding replica type</b>, if any.</li>
-     *     <li>Place replicas if possible on nodes having more than a certain amount of free disk space (note that nodes with a too small
-     *     amount of free disk space were eliminated as placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set, AttributeValues)}). There's
-     *     a threshold here rather than sorting on the amount of free disk space, because sorting on that value would in
-     *     practice lead to never considering the number of cores on a node.</li>
-     *     <li>Place replicas on nodes having a smaller number of cores (the number of cores considered
-     *     for this decision includes previous placement decisions made during the processing of the placement request)</li>
+     *   <li>No more than one replica of a given shard on a given node (strictly enforced)
+     *   <li>Balance as much as possible replicas of a given {@link
+     *       org.apache.solr.cluster.Replica.ReplicaType} over available AZ's. This balancing takes
+     *       into account existing replicas <b>of the corresponding replica type</b>, if any.
+     *   <li>Place replicas if possible on nodes having more than a certain amount of free disk
+     *       space (note that nodes with a too small amount of free disk space were eliminated as
+     *       placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set,
+     *       AttributeValues)}). There's a threshold here rather than sorting on the amount of free
+     *       disk space, because sorting on that value would in practice lead to never considering
+     *       the number of cores on a node.
+     *   <li>Place replicas on nodes having a smaller number of cores (the number of cores
+     *       considered for this decision includes previous placement decisions made during the
+     *       processing of the placement request)
      * </ol>
      */
-    @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
-    private void makePlacementDecisions(SolrCollection solrCollection, String shardName, Set<String> availabilityZones,
-                                        Replica.ReplicaType replicaType, int numReplicas, final AttributeValues attrValues,
-                                        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes, Set<Node> nodesWithReplicas,
-                                        Map<Node, Integer> coresOnNodes, PlacementPlanFactory placementPlanFactory,
-                                        Set<ReplicaPlacement> replicaPlacements) throws PlacementException {
-      // Count existing replicas per AZ. We count only instances of the type of replica for which we need to do placement.
-      // If we ever want to balance replicas of any type across AZ's (and not each replica type balanced independently),
-      // we'd have to move this data structure to the caller of this method so it can be reused across different replica
-      // type placements for a given shard. Note then that this change would be risky. For example all NRT's and PULL
-      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up in the same AZ...
+    @SuppressForbidden(
+        reason =
+            "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+    private void makePlacementDecisions(
+        SolrCollection solrCollection,
+        String shardName,
+        Set<String> availabilityZones,
+        Replica.ReplicaType replicaType,
+        int numReplicas,
+        final AttributeValues attrValues,
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes,
+        Set<Node> nodesWithReplicas,
+        Map<Node, Integer> coresOnNodes,
+        PlacementPlanFactory placementPlanFactory,
+        Set<ReplicaPlacement> replicaPlacements)
+        throws PlacementException {
+      // Count existing replicas per AZ. We count only instances of the type of replica for which we
+      // need to do placement.
+      // If we ever want to balance replicas of any type across AZ's (and not each replica type
+      // balanced independently),
+      // we'd have to move this data structure to the caller of this method so it can be reused
+      // across different replica
+      // type placements for a given shard. Note then that this change would be risky. For example
+      // all NRT's and PULL
+      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up
+      // in the same AZ...
       Map<String, Integer> azToNumReplicas = new HashMap<>();
       for (String az : availabilityZones) {
         azToNumReplicas.put(az, 0);
       }
 
-      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica type
+      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica
+      // type
       Set<Node> candidateNodes = new HashSet<>(replicaTypeToNodes.get(replicaType));
-      // Remove nodes that already have a replica for the shard (no two replicas of same shard can be put on same node)
+      // Remove nodes that already have a replica for the shard (no two replicas of same shard can
+      // be put on same node)
       candidateNodes.removeAll(nodesWithReplicas);
 
       Shard shard = solrCollection.getShard(shardName);
       if (shard != null) {
         // shard is non null if we're adding replicas to an already existing collection.
         // If we're creating the collection, the shards do not exist yet.
         for (Replica replica : shard.replicas()) {
-          // The node's AZ is counted as having a replica if it has a replica of the same type as the one we need
+          // The node's AZ is counted as having a replica if it has a replica of the same type as
+          // the one we need
           // to place here.
           if (replica.getType() == replicaType) {
             final String az = getNodeAZ(replica.getNode(), attrValues);
             if (azToNumReplicas.containsKey(az)) {
-              // We do not count replicas on AZ's for which we don't have any node to place on because it would not help
-              // the placement decision. If we did want to do that, note the dereferencing below can't be assumed as the
+              // We do not count replicas on AZ's for which we don't have any node to place on
+              // because it would not help
+              // the placement decision. If we did want to do that, note the dereferencing below
+              // can't be assumed as the
               // entry will not exist in the map.
               azToNumReplicas.put(az, azToNumReplicas.get(az) + 1);
             }
           }
         }
       }
 
-      // We now have the set of real candidate nodes, we've enforced "No more than one replica of a given shard on a given node".
-      // We also counted for the shard and replica type under consideration how many replicas were per AZ, so we can place
+      // We now have the set of real candidate nodes, we've enforced "No more than one replica of a
+      // given shard on a given node".
+      // We also counted for the shard and replica type under consideration how many replicas were
+      // per AZ, so we can place
       // (or try to place) replicas on AZ's that have fewer replicas
 
-      // Get the candidate nodes per AZ in order to build (further down) a mapping of AZ to placement candidates.
+      // Get the candidate nodes per AZ in order to build (further down) a mapping of AZ to
+      // placement candidates.
       Map<String, List<Node>> nodesPerAz = new HashMap<>();
       for (Node node : candidateNodes) {
         String nodeAz = getNodeAZ(node, attrValues);
         List<Node> nodesForAz = nodesPerAz.computeIfAbsent(nodeAz, k -> new ArrayList<>());
         nodesForAz.add(node);
       }
 
-      // Build a treeMap sorted by the number of replicas per AZ and including candidates nodes suitable for placement on the
-      // AZ, so we can easily select the next AZ to get a replica assignment and quickly (constant time) decide if placement
+      // Build a treeMap sorted by the number of replicas per AZ and including candidates nodes
+      // suitable for placement on the
+      // AZ, so we can easily select the next AZ to get a replica assignment and quickly (constant
+      // time) decide if placement
       // on this AZ is possible or not.
-      TreeMultimap<Integer, AzWithNodes> azByExistingReplicas = TreeMultimap.create(Comparator.naturalOrder(), Ordering.arbitrary());
+      TreeMultimap<Integer, AzWithNodes> azByExistingReplicas =
+          TreeMultimap.create(Comparator.naturalOrder(), Ordering.arbitrary());
       for (Map.Entry<String, List<Node>> e : nodesPerAz.entrySet()) {
-        azByExistingReplicas.put(azToNumReplicas.get(e.getKey()), new AzWithNodes(e.getKey(), e.getValue()));
+        azByExistingReplicas.put(
+            azToNumReplicas.get(e.getKey()), new AzWithNodes(e.getKey(), e.getValue()));
       }
 
-      CoresAndDiskComparator coresAndDiskComparator = new CoresAndDiskComparator(attrValues, coresOnNodes, prioritizedFreeDiskGB);
+      CoresAndDiskComparator coresAndDiskComparator =
+          new CoresAndDiskComparator(attrValues, coresOnNodes, prioritizedFreeDiskGB);
 
       for (int i = 0; i < numReplicas; i++) {
-        // We have for each AZ on which we might have a chance of placing a replica, the list of candidate nodes for replicas
-        // (candidate: does not already have a replica of this shard and is in the corresponding AZ).
-        // Among the AZ's with the minimal number of replicas of the given replica type for the shard, we must pick the AZ that
-        // offers the best placement (based on number of cores and free disk space). In order to do so, for these "minimal" AZ's
-        // we sort the nodes from best to worst placement candidate (based on the number of cores and free disk space) then pick
-        // the AZ that has the best best node. We don't sort all AZ's because that will not necessarily be needed.
+        // We have for each AZ on which we might have a chance of placing a replica, the list of
+        // candidate nodes for replicas
+        // (candidate: does not already have a replica of this shard and is in the corresponding
+        // AZ).
+        // Among the AZ's with the minimal number of replicas of the given replica type for the
+        // shard, we must pick the AZ that
+        // offers the best placement (based on number of cores and free disk space). In order to do
+        // so, for these "minimal" AZ's
+        // we sort the nodes from best to worst placement candidate (based on the number of cores
+        // and free disk space) then pick
+        // the AZ that has the best best node. We don't sort all AZ's because that will not
+        // necessarily be needed.

Review comment:
       Same wrapping problem in this block - "In order to do / so, for these "minimal" AZ's" and "that will not / necessarily be needed" break mid-phrase; rewrap into full clauses.

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactory.java
##########
@@ -94,23 +101,31 @@ public PlacementPlugin createPluginInstance() {
           nodesByCores.put(coresPerNodeTotal.get(node.getName()), node);
         }
 
-        Set<ReplicaPlacement> replicaPlacements = new HashSet<>(totalReplicasPerShard * request.getShardNames().size());
+        Set<ReplicaPlacement> replicaPlacements =
+            new HashSet<>(totalReplicasPerShard * request.getShardNames().size());
 
-        // Now place all replicas of all shards on nodes, by placing on nodes with the smallest number of cores and taking
-        // into account replicas placed during this computation. Note that for each shard we must place replicas on different
-        // nodes, when moving to the next shard we use the nodes sorted by their updated number of cores (due to replica
+        // Now place all replicas of all shards on nodes, by placing on nodes with the smallest
+        // number of cores and taking
+        // into account replicas placed during this computation. Note that for each shard we must
+        // place replicas on different
+        // nodes, when moving to the next shard we use the nodes sorted by their updated number of
+        // cores (due to replica
         // placements for previous shards).
         for (String shardName : request.getShardNames()) {
-          // Assign replicas based on the sort order of the nodesByCores tree multimap to put replicas on nodes with less
-          // cores first. We only need totalReplicasPerShard nodes given that's the number of replicas to place.
+          // Assign replicas based on the sort order of the nodesByCores tree multimap to put
+          // replicas on nodes with less
+          // cores first. We only need totalReplicasPerShard nodes given that's the number of
+          // replicas to place.
           // We assign based on the passed nodeEntriesToAssign list so the right nodes get replicas.

Review comment:
       Fix the wrapping of these comments - "to put / replicas on nodes with less / cores first" breaks mid-phrase; reflow so each line is a full clause.
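
       For reference, the selection this comment describes boils down to taking the first N entries
       of a multimap keyed by core count. A hedged sketch with String node names (Guava assumed;
       illustrative only):

         import com.google.common.collect.TreeMultimap;
         import java.util.ArrayList;
         import java.util.Iterator;
         import java.util.List;
         import java.util.Map;

         public class LeastLoadedSketch {
           static List<String> pickLeastLoaded(Map<String, Integer> coresPerNode, int howMany) {
             // Natural ordering on keys: entries come back with the smallest core counts first.
             TreeMultimap<Integer, String> nodesByCores = TreeMultimap.create();
             for (Map.Entry<String, Integer> e : coresPerNode.entrySet()) {
               nodesByCores.put(e.getValue(), e.getKey());
             }
             List<String> picked = new ArrayList<>(howMany);
             Iterator<Map.Entry<Integer, String>> it = nodesByCores.entries().iterator();
             for (int i = 0; i < howMany && it.hasNext(); i++) {
               picked.add(it.next().getValue());
             }
             return picked;
           }
         }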

##########
File path: solr/core/src/java/org/apache/solr/core/BlobRepository.java
##########
@@ -52,22 +54,21 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
-import static org.apache.solr.common.SolrException.ErrorCode.SERVICE_UNAVAILABLE;
-
 /**
- * The purpose of this class is to store the Jars loaded in memory and to keep only one copy of the Jar in a single node.
+ * The purpose of this class is to store the Jars loaded in memory and to keep only one copy of the
+ * Jar in a single node.
  */
 public class BlobRepository {
 
-  private static final long MAX_JAR_SIZE = Long.parseLong(
-      System.getProperty("runtime.lib.size", String.valueOf(5 * 1024 * 1024)));
+  private static final long MAX_JAR_SIZE =
+      Long.parseLong(System.getProperty("runtime.lib.size", String.valueOf(5 * 1024 * 1024)));
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   public static final Random RANDOM;
   static final Pattern BLOB_KEY_PATTERN_CHECKER = Pattern.compile(".*/\\d+");
 
   static {
-    // We try to make things reproducible in the context of our tests by initializing the random instance
+    // We try to make things reproducible in the context of our tests by initializing the random
+    // instance
     // based on the current seed

Review comment:
       Fix this wrapping - "initializing the random / instance" strands "instance" on its own line before the continuation; reflow into one or two full lines.
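
       Side note while reflowing: the reproducibility trick the comment refers to is just seeding the
       shared Random from a test seed property when one is present. A hedged sketch (the "tests.seed"
       property name is illustrative, not necessarily what BlobRepository actually reads):

         import java.util.Random;

         public class SeededRandomSketch {
           public static final Random RANDOM;

           static {
             String seed = System.getProperty("tests.seed");
             // With no seed set (i.e. outside the test framework), fall back to a non-deterministic Random.
             RANDOM = (seed == null) ? new Random() : new Random(seed.hashCode());
           }
         }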

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactory.java
##########
@@ -94,23 +101,31 @@ public PlacementPlugin createPluginInstance() {
           nodesByCores.put(coresPerNodeTotal.get(node.getName()), node);
         }
 
-        Set<ReplicaPlacement> replicaPlacements = new HashSet<>(totalReplicasPerShard * request.getShardNames().size());
+        Set<ReplicaPlacement> replicaPlacements =
+            new HashSet<>(totalReplicasPerShard * request.getShardNames().size());
 
-        // Now place all replicas of all shards on nodes, by placing on nodes with the smallest number of cores and taking
-        // into account replicas placed during this computation. Note that for each shard we must place replicas on different
-        // nodes, when moving to the next shard we use the nodes sorted by their updated number of cores (due to replica
+        // Now place all replicas of all shards on nodes, by placing on nodes with the smallest
+        // number of cores and taking
+        // into account replicas placed during this computation. Note that for each shard we must
+        // place replicas on different
+        // nodes, when moving to the next shard we use the nodes sorted by their updated number of
+        // cores (due to replica
         // placements for previous shards).

Review comment:
       Same comment block as above - rewrap it here too so lines don't end on "smallest", "must" and "number of".

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactory.java
##########
@@ -94,23 +101,31 @@ public PlacementPlugin createPluginInstance() {
           nodesByCores.put(coresPerNodeTotal.get(node.getName()), node);
         }
 
-        Set<ReplicaPlacement> replicaPlacements = new HashSet<>(totalReplicasPerShard * request.getShardNames().size());
+        Set<ReplicaPlacement> replicaPlacements =
+            new HashSet<>(totalReplicasPerShard * request.getShardNames().size());
 
-        // Now place all replicas of all shards on nodes, by placing on nodes with the smallest number of cores and taking
-        // into account replicas placed during this computation. Note that for each shard we must place replicas on different
-        // nodes, when moving to the next shard we use the nodes sorted by their updated number of cores (due to replica
+        // Now place all replicas of all shards on nodes, by placing on nodes with the smallest
+        // number of cores and taking
+        // into account replicas placed during this computation. Note that for each shard we must
+        // place replicas on different
+        // nodes, when moving to the next shard we use the nodes sorted by their updated number of
+        // cores (due to replica
         // placements for previous shards).
         for (String shardName : request.getShardNames()) {
-          // Assign replicas based on the sort order of the nodesByCores tree multimap to put replicas on nodes with less
-          // cores first. We only need totalReplicasPerShard nodes given that's the number of replicas to place.
+          // Assign replicas based on the sort order of the nodesByCores tree multimap to put
+          // replicas on nodes with less
+          // cores first. We only need totalReplicasPerShard nodes given that's the number of
+          // replicas to place.
           // We assign based on the passed nodeEntriesToAssign list so the right nodes get replicas.
-          ArrayList<Map.Entry<Integer, Node>> nodeEntriesToAssign = new ArrayList<>(totalReplicasPerShard);
+          ArrayList<Map.Entry<Integer, Node>> nodeEntriesToAssign =
+              new ArrayList<>(totalReplicasPerShard);
           Iterator<Map.Entry<Integer, Node>> treeIterator = nodesByCores.entries().iterator();
           for (int i = 0; i < totalReplicasPerShard; i++) {
             nodeEntriesToAssign.add(treeIterator.next());
           }
 
-          // Update the number of cores each node will have once the assignments below got executed so the next shard picks the
+          // Update the number of cores each node will have once the assignments below got executed
+          // so the next shard picks the
           // lowest loaded nodes for its replicas.

Review comment:
       Fix this wrapping - "picks the / lowest loaded nodes" breaks mid-phrase; fold the comment back into complete lines.
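
       The bookkeeping this comment explains is: once a node receives a replica, re-key it under its
       incremented core count so the next shard sorts against the updated load. A hedged sketch with
       String node names (Guava assumed; not this patch's types):

         import com.google.common.collect.TreeMultimap;
         import java.util.List;
         import java.util.Map;

         public class UpdateLoadSketch {
           static void recordAssignments(
               TreeMultimap<Integer, String> nodesByCores, List<Map.Entry<Integer, String>> assigned) {
             for (Map.Entry<Integer, String> entry : assigned) {
               // Remove the old (count, node) entry and re-insert it under count + 1
               // so the next shard's iteration sees the updated core count.
               nodesByCores.remove(entry.getKey(), entry.getValue());
               nodesByCores.put(entry.getKey() + 1, entry.getValue());
             }
           }
         }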

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -516,56 +538,62 @@ private synchronized void initializeAuthenticationPlugin(Map<String, Object> aut
     } catch (Exception e) {
       log.error("Exception while attempting to close old authentication plugin", e);
     }
-
   }
 
   private void setupHttpClientForAuthPlugin(Object authcPlugin) {
     if (authcPlugin instanceof HttpClientBuilderPlugin) {
       // Setup HttpClient for internode communication
       HttpClientBuilderPlugin builderPlugin = ((HttpClientBuilderPlugin) authcPlugin);
-      SolrHttpClientBuilder builder = builderPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
+      SolrHttpClientBuilder builder =
+          builderPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
 
-
-      // this caused plugins like KerberosPlugin to register it's intercepts, but this intercept logic is also
-      // handled by the pki authentication code when it decideds to let the plugin handle auth via it's intercept
+      // this caused plugins like KerberosPlugin to register it's intercepts, but this intercept
+      // logic is also
+      // handled by the pki authentication code when it decideds to let the plugin handle auth via
+      // it's intercept
       // - so you would end up with two intercepts

Review comment:
       Fix this wrapping - "this intercept / logic is also" and "via / it's intercept" break mid-phrase; reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -516,56 +538,62 @@ private synchronized void initializeAuthenticationPlugin(Map<String, Object> aut
     } catch (Exception e) {
       log.error("Exception while attempting to close old authentication plugin", e);
     }
-
   }
 
   private void setupHttpClientForAuthPlugin(Object authcPlugin) {
     if (authcPlugin instanceof HttpClientBuilderPlugin) {
       // Setup HttpClient for internode communication
       HttpClientBuilderPlugin builderPlugin = ((HttpClientBuilderPlugin) authcPlugin);
-      SolrHttpClientBuilder builder = builderPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
+      SolrHttpClientBuilder builder =
+          builderPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
 
-
-      // this caused plugins like KerberosPlugin to register it's intercepts, but this intercept logic is also
-      // handled by the pki authentication code when it decideds to let the plugin handle auth via it's intercept
+      // this caused plugins like KerberosPlugin to register it's intercepts, but this intercept
+      // logic is also
+      // handled by the pki authentication code when it decideds to let the plugin handle auth via
+      // it's intercept
       // - so you would end up with two intercepts
       // -->
       //  shardHandlerFactory.setSecurityBuilder(builderPlugin); // calls setup for the authcPlugin
       //  updateShardHandler.setSecurityBuilder(builderPlugin);
       // <--
 
-      // This should not happen here at all - it's only currently required due to its affect on http1 clients
+      // This should not happen here at all - it's only currently required due to its affect on
+      // http1 clients
       // in a test or two incorrectly counting on it for their configuration.

Review comment:
       Fix this wrapping too - "due to its affect on / http1 clients" splits the phrase; reflow it.

##########
File path: solr/core/src/java/org/apache/solr/core/ConfigSetService.java
##########
@@ -275,22 +292,27 @@ public ConfigSetService(SolrResourceLoader loader, boolean shareSchema) {
    * @param isTrusted is the configset trusted?
    * @return a SolrConfig object
    */
-  protected SolrConfig createSolrConfig(CoreDescriptor cd, SolrResourceLoader loader, boolean isTrusted) {
-    return SolrConfig.readFromResourceLoader(loader, cd.getConfigName(), isTrusted, cd.getSubstitutableProperties());
+  protected SolrConfig createSolrConfig(
+      CoreDescriptor cd, SolrResourceLoader loader, boolean isTrusted) {
+    return SolrConfig.readFromResourceLoader(
+        loader, cd.getConfigName(), isTrusted, cd.getSubstitutableProperties());
   }
 
   /**
-   * Create an IndexSchema object for a core.  It might be a cached lookup.
+   * Create an IndexSchema object for a core. It might be a cached lookup.
    *
    * @param cd the core's CoreDescriptor
    * @param solrConfig the core's SolrConfig
    * @return an IndexSchema
    */
-  protected IndexSchema createIndexSchema(CoreDescriptor cd, SolrConfig solrConfig, boolean forceFetch) throws IOException {
-    // This is the schema name from the core descriptor.  Sometimes users specify a custom schema file.
+  protected IndexSchema createIndexSchema(
+      CoreDescriptor cd, SolrConfig solrConfig, boolean forceFetch) throws IOException {
+    // This is the schema name from the core descriptor.  Sometimes users specify a custom schema
+    // file.
     //   Important:  indexSchemaFactory.create wants this!
     String cdSchemaName = cd.getSchemaName();
-    // This is the schema name that we think will actually be used.  In the case of a managed schema,
+    // This is the schema name that we think will actually be used.  In the case of a managed
+    // schema,
     //  we don't know for sure without examining what files exists in the configSet, and we don't
     //  want to pay the overhead of that at this juncture.  If we guess wrong, no schema sharing.
     //  The fix is usually to name your schema managed-schema.xml instead of schema.xml.

Review comment:
       Fix this wrapping - "In the case of a managed / schema," strands the continuation; reflow so the sentence stays together.

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -488,96 +600,139 @@ private String getNodeAZ(Node n, final AttributeValues attrValues) {
     }
 
     /**
-     * <p>Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
+     * Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
      *
      * <p>The criteria used in this method are, in this order:
+     *
      * <ol>
-     *     <li>No more than one replica of a given shard on a given node (strictly enforced)</li>
-     *     <li>Balance as much as possible replicas of a given {@link org.apache.solr.cluster.Replica.ReplicaType} over available AZ's.
-     *     This balancing takes into account existing replicas <b>of the corresponding replica type</b>, if any.</li>
-     *     <li>Place replicas if possible on nodes having more than a certain amount of free disk space (note that nodes with a too small
-     *     amount of free disk space were eliminated as placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set, AttributeValues)}). There's
-     *     a threshold here rather than sorting on the amount of free disk space, because sorting on that value would in
-     *     practice lead to never considering the number of cores on a node.</li>
-     *     <li>Place replicas on nodes having a smaller number of cores (the number of cores considered
-     *     for this decision includes previous placement decisions made during the processing of the placement request)</li>
+     *   <li>No more than one replica of a given shard on a given node (strictly enforced)
+     *   <li>Balance as much as possible replicas of a given {@link
+     *       org.apache.solr.cluster.Replica.ReplicaType} over available AZ's. This balancing takes
+     *       into account existing replicas <b>of the corresponding replica type</b>, if any.
+     *   <li>Place replicas if possible on nodes having more than a certain amount of free disk
+     *       space (note that nodes with a too small amount of free disk space were eliminated as
+     *       placement targets earlier, in {@link #getAvailableNodesForReplicaTypes(Set,
+     *       AttributeValues)}). There's a threshold here rather than sorting on the amount of free
+     *       disk space, because sorting on that value would in practice lead to never considering
+     *       the number of cores on a node.
+     *   <li>Place replicas on nodes having a smaller number of cores (the number of cores
+     *       considered for this decision includes previous placement decisions made during the
+     *       processing of the placement request)
      * </ol>
      */
-    @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
-    private void makePlacementDecisions(SolrCollection solrCollection, String shardName, Set<String> availabilityZones,
-                                        Replica.ReplicaType replicaType, int numReplicas, final AttributeValues attrValues,
-                                        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes, Set<Node> nodesWithReplicas,
-                                        Map<Node, Integer> coresOnNodes, PlacementPlanFactory placementPlanFactory,
-                                        Set<ReplicaPlacement> replicaPlacements) throws PlacementException {
-      // Count existing replicas per AZ. We count only instances of the type of replica for which we need to do placement.
-      // If we ever want to balance replicas of any type across AZ's (and not each replica type balanced independently),
-      // we'd have to move this data structure to the caller of this method so it can be reused across different replica
-      // type placements for a given shard. Note then that this change would be risky. For example all NRT's and PULL
-      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up in the same AZ...
+    @SuppressForbidden(
+        reason =
+            "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+    private void makePlacementDecisions(
+        SolrCollection solrCollection,
+        String shardName,
+        Set<String> availabilityZones,
+        Replica.ReplicaType replicaType,
+        int numReplicas,
+        final AttributeValues attrValues,
+        EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes,
+        Set<Node> nodesWithReplicas,
+        Map<Node, Integer> coresOnNodes,
+        PlacementPlanFactory placementPlanFactory,
+        Set<ReplicaPlacement> replicaPlacements)
+        throws PlacementException {
+      // Count existing replicas per AZ. We count only instances of the type of replica for which we
+      // need to do placement.
+      // If we ever want to balance replicas of any type across AZ's (and not each replica type
+      // balanced independently),
+      // we'd have to move this data structure to the caller of this method so it can be reused
+      // across different replica
+      // type placements for a given shard. Note then that this change would be risky. For example
+      // all NRT's and PULL
+      // replicas for a shard my be correctly balanced over three AZ's, but then all NRT can end up
+      // in the same AZ...
       Map<String, Integer> azToNumReplicas = new HashMap<>();
       for (String az : availabilityZones) {
         azToNumReplicas.put(az, 0);
       }
 
-      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica type
+      // Build the set of candidate nodes for the placement, i.e. nodes that can accept the replica
+      // type
       Set<Node> candidateNodes = new HashSet<>(replicaTypeToNodes.get(replicaType));
-      // Remove nodes that already have a replica for the shard (no two replicas of same shard can be put on same node)
+      // Remove nodes that already have a replica for the shard (no two replicas of same shard can
+      // be put on same node)
       candidateNodes.removeAll(nodesWithReplicas);
 
       Shard shard = solrCollection.getShard(shardName);
       if (shard != null) {
         // shard is non null if we're adding replicas to an already existing collection.
         // If we're creating the collection, the shards do not exist yet.
         for (Replica replica : shard.replicas()) {
-          // The node's AZ is counted as having a replica if it has a replica of the same type as the one we need
+          // The node's AZ is counted as having a replica if it has a replica of the same type as
+          // the one we need
           // to place here.
           if (replica.getType() == replicaType) {
             final String az = getNodeAZ(replica.getNode(), attrValues);
             if (azToNumReplicas.containsKey(az)) {
-              // We do not count replicas on AZ's for which we don't have any node to place on because it would not help
-              // the placement decision. If we did want to do that, note the dereferencing below can't be assumed as the
+              // We do not count replicas on AZ's for which we don't have any node to place on
+              // because it would not help
+              // the placement decision. If we did want to do that, note the dereferencing below
+              // can't be assumed as the
               // entry will not exist in the map.

Review comment:
       Same wrapping issue flagged earlier in this hunk - rewrap the whole comment instead of leaving the dangling "because it would not help" and "can't be assumed as the" fragments.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1278,27 +1418,33 @@ public SolrCore create(String coreName, Path instancePath, Map<String, String> p
       try {
         if (getZkController() != null) {
           if (cd.getCloudDescriptor().getCoreNodeName() == null) {
-            throw new SolrException(ErrorCode.SERVER_ERROR, "coreNodeName missing " + parameters.toString());
+            throw new SolrException(
+                ErrorCode.SERVER_ERROR, "coreNodeName missing " + parameters.toString());
           }
           preExisitingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
         }
 
-        // Much of the logic in core handling pre-supposes that the core.properties file already exists, so create it
+        // Much of the logic in core handling pre-supposes that the core.properties file already
+        // exists, so create it
         // first and clean it up if there's an error.
         coresLocator.create(this, cd);
 
         SolrCore core;
         try {
           solrCores.waitAddPendingCoreOps(cd.getName());
           core = createFromDescriptor(cd, true, newCollection);
-          coresLocator.persist(this, cd); // Write out the current core properties in case anything changed when the core was created
+          coresLocator.persist(
+              this,
+              cd); // Write out the current core properties in case anything changed when the core
+          // was created

Review comment:
       Fix this - the trailing comment got split across the persist() call and a follow-on line; move it onto its own line above the statement.
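
       An illustration of that relocation, with hypothetical stand-in types rather than the real
       CoresLocator/CoreDescriptor:

         public class PersistCommentSketch {
           interface Locator {
             void persist(Object container, Object descriptor);
           }

           void createCore(Locator coresLocator, Object cd) {
             // Write out the current core properties in case anything changed when the core was created.
             coresLocator.persist(this, cd);
           }
         }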

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -516,56 +538,62 @@ private synchronized void initializeAuthenticationPlugin(Map<String, Object> aut
     } catch (Exception e) {
       log.error("Exception while attempting to close old authentication plugin", e);
     }
-
   }
 
   private void setupHttpClientForAuthPlugin(Object authcPlugin) {
     if (authcPlugin instanceof HttpClientBuilderPlugin) {
       // Setup HttpClient for internode communication
       HttpClientBuilderPlugin builderPlugin = ((HttpClientBuilderPlugin) authcPlugin);
-      SolrHttpClientBuilder builder = builderPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
+      SolrHttpClientBuilder builder =
+          builderPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
 
-
-      // this caused plugins like KerberosPlugin to register it's intercepts, but this intercept logic is also
-      // handled by the pki authentication code when it decideds to let the plugin handle auth via it's intercept
+      // this caused plugins like KerberosPlugin to register it's intercepts, but this intercept
+      // logic is also
+      // handled by the pki authentication code when it decideds to let the plugin handle auth via
+      // it's intercept
       // - so you would end up with two intercepts
       // -->
       //  shardHandlerFactory.setSecurityBuilder(builderPlugin); // calls setup for the authcPlugin
       //  updateShardHandler.setSecurityBuilder(builderPlugin);
       // <--
 
-      // This should not happen here at all - it's only currently required due to its affect on http1 clients
+      // This should not happen here at all - it's only currently required due to its affect on
+      // http1 clients
       // in a test or two incorrectly counting on it for their configuration.
       // -->
 
       SolrHttpClientContextBuilder httpClientBuilder = new SolrHttpClientContextBuilder();
       if (builder.getCredentialsProviderProvider() != null) {
-        httpClientBuilder.setDefaultCredentialsProvider(new CredentialsProviderProvider() {
+        httpClientBuilder.setDefaultCredentialsProvider(
+            new CredentialsProviderProvider() {
 
-          @Override
-          public CredentialsProvider getCredentialsProvider() {
-            return builder.getCredentialsProviderProvider().getCredentialsProvider();
-          }
-        });
+              @Override
+              public CredentialsProvider getCredentialsProvider() {
+                return builder.getCredentialsProviderProvider().getCredentialsProvider();
+              }
+            });
       }
       if (builder.getAuthSchemeRegistryProvider() != null) {
-        httpClientBuilder.setAuthSchemeRegistryProvider(new AuthSchemeRegistryProvider() {
+        httpClientBuilder.setAuthSchemeRegistryProvider(
+            new AuthSchemeRegistryProvider() {
 
-          @Override
-          public Lookup<AuthSchemeProvider> getAuthSchemeRegistry() {
-            return builder.getAuthSchemeRegistryProvider().getAuthSchemeRegistry();
-          }
-        });
+              @Override
+              public Lookup<AuthSchemeProvider> getAuthSchemeRegistry() {
+                return builder.getAuthSchemeRegistryProvider().getAuthSchemeRegistry();
+              }
+            });
       }
 
       HttpClientUtil.setHttpClientRequestContextBuilder(httpClientBuilder);
 
       // <--
     }
 
-    // Always register PKI auth interceptor, which will then delegate the decision of who should secure
+    // Always register PKI auth interceptor, which will then delegate the decision of who should
+    // secure
     // each request to the configured authentication plugin.

Review comment:
       Fix this wrapping - "who should / secure" leaves a one-word continuation line; reflow the comment.
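
       Hedged side note on the anonymous provider classes reformatted earlier in this hunk: if those
       provider interfaces are (or could be made) single-method, lambdas or method references would
       avoid the deep indentation entirely. Sketch with hypothetical stand-in interfaces, not the
       real Solr types:

         public class LambdaProviderSketch {
           interface CredentialsProviderProvider {
             Object getCredentialsProvider();
           }

           interface ContextBuilder {
             void setDefaultCredentialsProvider(CredentialsProviderProvider provider);
           }

           static void wire(ContextBuilder httpClientBuilder, CredentialsProviderProvider upstream) {
             // A method reference replaces the multi-line anonymous class from the diff.
             httpClientBuilder.setDefaultCredentialsProvider(upstream::getCredentialsProvider);
           }
         }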

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1049,39 +1167,46 @@ public void shutdown() {
 
       objectCache.clear();
 
-      // It's still possible that one of the pending dynamic load operation is waiting, so wake it up if so.
+      // It's still possible that one of the pending dynamic load operation is waiting, so wake it
+      // up if so.
       // Since all the pending operations queues have been drained, there should be nothing to do.

Review comment:
       Fix this wrapping too - "so wake it / up if so" breaks mid-phrase; reflow.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1278,27 +1418,33 @@ public SolrCore create(String coreName, Path instancePath, Map<String, String> p
       try {
         if (getZkController() != null) {
           if (cd.getCloudDescriptor().getCoreNodeName() == null) {
-            throw new SolrException(ErrorCode.SERVER_ERROR, "coreNodeName missing " + parameters.toString());
+            throw new SolrException(
+                ErrorCode.SERVER_ERROR, "coreNodeName missing " + parameters.toString());
           }
           preExisitingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
         }
 
-        // Much of the logic in core handling pre-supposes that the core.properties file already exists, so create it
+        // Much of the logic in core handling pre-supposes that the core.properties file already
+        // exists, so create it
         // first and clean it up if there's an error.
         coresLocator.create(this, cd);
 
         SolrCore core;
         try {
           solrCores.waitAddPendingCoreOps(cd.getName());
           core = createFromDescriptor(cd, true, newCollection);
-          coresLocator.persist(this, cd); // Write out the current core properties in case anything changed when the core was created
+          coresLocator.persist(
+              this,
+              cd); // Write out the current core properties in case anything changed when the core
+          // was created
         } finally {
           solrCores.removeFromPendingOps(cd.getName());
         }
 
         return core;
       } catch (Exception ex) {
-        // First clean up any core descriptor, there should never be an existing core.properties file for any core that
+        // First clean up any core descriptor, there should never be an existing core.properties
+        // file for any core that
         // failed to be created on-the-fly.

Review comment:
       Fix this wrapping - "an existing core.properties / file for any core that" breaks mid-phrase; reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/core/BlobRepository.java
##########
@@ -289,14 +311,16 @@ public void decrementBlobRefCount(BlobContentRef<?> ref) {
 
   public static class BlobContent<T> {
     public final String key;
-    private final T content; // holds byte buffer or cached object, holding both is a waste of memory
+    private final T
+        content; // holds byte buffer or cached object, holding both is a waste of memory
     // ref counting mechanism

Review comment:
       Fix this - the long trailing comment pushes "content;" onto its own line; move the comment above the field declaration instead.
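
       A small illustration of that suggestion, with a hypothetical stand-in class:

         public class BlobContentSketch<T> {
           // Holds the byte buffer or the cached object; holding both would waste memory.
           private final T content;

           BlobContentSketch(T content) {
             this.content = content;
           }
         }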

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1278,27 +1418,33 @@ public SolrCore create(String coreName, Path instancePath, Map<String, String> p
       try {
         if (getZkController() != null) {
           if (cd.getCloudDescriptor().getCoreNodeName() == null) {
-            throw new SolrException(ErrorCode.SERVER_ERROR, "coreNodeName missing " + parameters.toString());
+            throw new SolrException(
+                ErrorCode.SERVER_ERROR, "coreNodeName missing " + parameters.toString());
           }
           preExisitingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
         }
 
-        // Much of the logic in core handling pre-supposes that the core.properties file already exists, so create it
+        // Much of the logic in core handling pre-supposes that the core.properties file already
+        // exists, so create it
         // first and clean it up if there's an error.

Review comment:
       Fix this: rejoin 'core.properties file already / exists, so create it / first and
       clean it up...' so the comment reads as one properly wrapped sentence.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -731,31 +766,44 @@ public void load() {
 
     MDCLoggingContext.setNode(this);
 
-    securityConfHandler = isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
+    securityConfHandler =
+        isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
     reloadSecurityProperties();
     warnUsersOfInsecureSettings();
     this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());
     coreConfigService = ConfigSetService.createConfigSetService(this);
     createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
-    createHandler(ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
-
-    // CoreContainer is initialized enough at this stage so we can set distributedCollectionCommandRunner (the
-    // construction of DistributedCollectionConfigSetCommandRunner uses Zookeeper so can't be done from the CoreContainer constructor
-    // because there Zookeeper is not yet ready). Given this is used in the CollectionsHandler created next line, this is
-    // the latest point where distributedCollectionCommandRunner can be initialized without refactoring this method...
-    // TODO: manage to completely build CoreContainer in the constructor and not in the load() method... Requires some test refactoring.
-    this.distributedCollectionCommandRunner = isZooKeeperAware() && cfg.getCloudConfig().getDistributedCollectionConfigSetExecution() ?
-        Optional.of(new DistributedCollectionConfigSetCommandRunner(this)) : Optional.empty();
-
-    collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
+    createHandler(
+        ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
+
+    // CoreContainer is initialized enough at this stage so we can set
+    // distributedCollectionCommandRunner (the
+    // construction of DistributedCollectionConfigSetCommandRunner uses Zookeeper so can't be done
+    // from the CoreContainer constructor
+    // because there Zookeeper is not yet ready). Given this is used in the CollectionsHandler
+    // created next line, this is
+    // the latest point where distributedCollectionCommandRunner can be initialized without
+    // refactoring this method...
+    // TODO: manage to completely build CoreContainer in the constructor and not in the load()
+    // method... Requires some test refactoring.

Review comment:
       Fix this: this whole comment block needs to be rewrapped by hand; lines such as
       'so we can set / distributedCollectionCommandRunner (the' and 'CollectionsHandler
       / created next line, this is' end mid-sentence.
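       For reference, the block could be rewrapped by hand roughly like this
       (illustrative only, lightly reworded, not part of this diff):

           // CoreContainer is initialized enough at this stage so we can set
           // distributedCollectionCommandRunner (the construction of
           // DistributedCollectionConfigSetCommandRunner uses Zookeeper, so it can't be
           // done from the CoreContainer constructor because Zookeeper is not yet ready
           // there). Given this is used in the CollectionsHandler created on the next
           // line, this is the latest point where distributedCollectionCommandRunner can
           // be initialized without refactoring this method...
           // TODO: manage to completely build CoreContainer in the constructor and not
           // in the load() method... Requires some test refactoring.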

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1625,20 +1803,27 @@ private CoreDescriptor reloadCoreDescriptor(CoreDescriptor oldDesc) {
     }
 
     CorePropertiesLocator cpl = new CorePropertiesLocator(null);
-    CoreDescriptor ret = cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
+    CoreDescriptor ret =
+        cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
 
-    // Ok, this little jewel is all because we still create core descriptors on the fly from lists of properties
-    // in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in the new world
+    // Ok, this little jewel is all because we still create core descriptors on the fly from lists
+    // of properties
+    // in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in
+    // the new world
     // of core discovery without writing the core.properties file out first.
     //
-    // TODO: remove core.properties from the conf directory in test files, it's in a bad place there anyway.
+    // TODO: remove core.properties from the conf directory in test files, it's in a bad place there
+    // anyway.
     if (ret == null) {
-      oldDesc.loadExtraProperties(); // there may be changes to extra properties that we need to pick up.
+      oldDesc
+          .loadExtraProperties(); // there may be changes to extra properties that we need to pick
+      // up.
       return oldDesc;

Review comment:
       Fix this: keep oldDesc.loadExtraProperties() on one line and move its comment
       above the call instead of leaving the dangling '// up.' continuation.
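       For reference, moving the comment above the call avoids the dangling '// up.'
       line (illustrative only, not part of this diff):

           if (ret == null) {
             // there may be changes to extra properties that we need to pick up.
             oldDesc.loadExtraProperties();
             return oldDesc;
           }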

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1672,7 +1853,8 @@ public void reload(String name, UUID coreId) {
     SolrCore newCore = null;
     SolrCore core = solrCores.getCoreFromAnyList(name, false, coreId);
     if (core != null) {
-      // The underlying core properties files may have changed, we don't really know. So we have a (perhaps) stale
+      // The underlying core properties files may have changed, we don't really know. So we have a
+      // (perhaps) stale
       // CoreDescriptor and we need to reload it from the disk files

Review comment:
       Fix this: rejoin the comment so '(perhaps) stale / CoreDescriptor' doesn't break
       mid-phrase.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1625,20 +1803,27 @@ private CoreDescriptor reloadCoreDescriptor(CoreDescriptor oldDesc) {
     }
 
     CorePropertiesLocator cpl = new CorePropertiesLocator(null);
-    CoreDescriptor ret = cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
+    CoreDescriptor ret =
+        cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
 
-    // Ok, this little jewel is all because we still create core descriptors on the fly from lists of properties
-    // in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in the new world
+    // Ok, this little jewel is all because we still create core descriptors on the fly from lists
+    // of properties
+    // in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in
+    // the new world
     // of core discovery without writing the core.properties file out first.

Review comment:
       Fix this: reflow this comment; 'from lists / of properties / in tests
       particularly' is split mid-phrase.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1932,18 +2121,24 @@ public SolrCore getCore(String name, UUID id) {
     if (null != loadFailure) {
       throw new SolrCoreInitializationException(name, loadFailure.exception);
     }
-    // This is a bit of awkwardness where SolrCloud and transient cores don't play nice together. For transient cores,
-    // we have to allow them to be created at any time there hasn't been a core load failure (use reload to cure that).
-    // But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to load the core if
-    // the core is null and there was an error. If you change this, be sure to run both TestConfiSetsAPI and
+    // This is a bit of awkwardness where SolrCloud and transient cores don't play nice together.
+    // For transient cores,
+    // we have to allow them to be created at any time there hasn't been a core load failure (use
+    // reload to cure that).
+    // But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to
+    // load the core if
+    // the core is null and there was an error. If you change this, be sure to run both
+    // TestConfiSetsAPI and
     // TestLazyCores
     if (desc == null || zkSys.getZkController() != null) return null;
 
     // This will put an entry in pending core ops if the core isn't loaded. Here's where moving the
     // waitAddPendingCoreOps to createFromDescriptor would introduce a race condition.
     core = solrCores.waitAddPendingCoreOps(name);
 
-    if (isShutDown) return null; // We're quitting, so stop. This needs to be after the wait above since we may come off
+    if (isShutDown)
+      return null; // We're quitting, so stop. This needs to be after the wait above since we may
+    // come off
     // the wait as a consequence of shutting down.

Review comment:
       Fix this: the explanation is now split across a trailing comment and a dangling
       '// come off' line. Move the comment above the isShutDown check so it reads as
       one piece.
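       For reference, one readable alternative is to brace the if and hoist the comment
       (illustrative only, not part of this diff):

           if (isShutDown) {
             // We're quitting, so stop. This needs to be after the wait above since we
             // may come off the wait as a consequence of shutting down.
             return null;
           }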

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1932,18 +2121,24 @@ public SolrCore getCore(String name, UUID id) {
     if (null != loadFailure) {
       throw new SolrCoreInitializationException(name, loadFailure.exception);
     }
-    // This is a bit of awkwardness where SolrCloud and transient cores don't play nice together. For transient cores,
-    // we have to allow them to be created at any time there hasn't been a core load failure (use reload to cure that).
-    // But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to load the core if
-    // the core is null and there was an error. If you change this, be sure to run both TestConfiSetsAPI and
+    // This is a bit of awkwardness where SolrCloud and transient cores don't play nice together.
+    // For transient cores,
+    // we have to allow them to be created at any time there hasn't been a core load failure (use
+    // reload to cure that).
+    // But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to
+    // load the core if
+    // the core is null and there was an error. If you change this, be sure to run both
+    // TestConfiSetsAPI and
     // TestLazyCores

Review comment:
       Fix this: rewrap this comment block by hand; 'together. / For transient cores,'
       and '(use / reload to cure that)' break mid-sentence.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -2228,7 +2426,8 @@ public void run() {
         try {
           solrCores.getModifyLock().wait();
         } catch (InterruptedException e) {
-          // Well, if we've been told to stop, we will. Otherwise, continue on and check to see if there are
+          // Well, if we've been told to stop, we will. Otherwise, continue on and check to see if
+          // there are
           // any cores to close.

Review comment:
       Fix this: rejoin 'check to see if / there are / any cores to close.' into
       properly wrapped lines.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -1625,20 +1803,27 @@ private CoreDescriptor reloadCoreDescriptor(CoreDescriptor oldDesc) {
     }
 
     CorePropertiesLocator cpl = new CorePropertiesLocator(null);
-    CoreDescriptor ret = cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
+    CoreDescriptor ret =
+        cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
 
-    // Ok, this little jewel is all because we still create core descriptors on the fly from lists of properties
-    // in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in the new world
+    // Ok, this little jewel is all because we still create core descriptors on the fly from lists
+    // of properties
+    // in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in
+    // the new world
     // of core discovery without writing the core.properties file out first.
     //
-    // TODO: remove core.properties from the conf directory in test files, it's in a bad place there anyway.
+    // TODO: remove core.properties from the conf directory in test files, it's in a bad place there
+    // anyway.
     if (ret == null) {
-      oldDesc.loadExtraProperties(); // there may be changes to extra properties that we need to pick up.
+      oldDesc
+          .loadExtraProperties(); // there may be changes to extra properties that we need to pick
+      // up.
       return oldDesc;
-
     }
-    // The CloudDescriptor bit here is created in a very convoluted way, requiring access to private methods
-    // in ZkController. When reloading, this behavior is identical to what used to happen where a copy of the old
+    // The CloudDescriptor bit here is created in a very convoluted way, requiring access to private
+    // methods
+    // in ZkController. When reloading, this behavior is identical to what used to happen where a
+    // copy of the old
     // CoreDescriptor was just re-used.

Review comment:
       Fix this: reflow; 'access to private / methods / in ZkController' and 'where a /
       copy of the old / CoreDescriptor' break mid-phrase.

##########
File path: solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
##########
@@ -172,42 +162,47 @@ public synchronized IndexCommit getAndSaveCommitPoint(Long generation) {
       throw new NullPointerException("generation to get and save must not be null");
     }
     final IndexCommit commit = knownCommits.get(generation);
-    if ( (null != commit && false != commit.isDeleted())
-         || (null == commit && null != latestCommit && generation < latestCommit.getGeneration()) ) {
-      throw new IllegalStateException
-        ("Specified index generation is too old to be saved: " + generation);
+    if ((null != commit && false != commit.isDeleted())
+        || (null == commit && null != latestCommit && generation < latestCommit.getGeneration())) {
+      throw new IllegalStateException(
+          "Specified index generation is too old to be saved: " + generation);
     }
-    final AtomicInteger refCount
-      = savedCommits.computeIfAbsent(generation, s -> { return new AtomicInteger(); });
+    final AtomicInteger refCount =
+        savedCommits.computeIfAbsent(
+            generation,
+            s -> {
+              return new AtomicInteger();
+            });
     final int currentCount = refCount.incrementAndGet();
     log.debug("Saving generation={}, refCount={}", generation, currentCount);
     return commit;
   }
-  
+
   public IndexDeletionPolicy getWrappedDeletionPolicy() {
     return deletionPolicy;
   }
 
   /**
    * Set the duration for which commit point is to be reserved by the deletion policy.
-   * <p>
-   * <b>NOTE:</b> This method does not make any garuntees that the specified index generation exists, 
-   * or that the specified generation has not already ben deleted.  The only garuntee is that 
-   * <em>if</em> the specified generation exists now, or is created at some point in the future, then 
-   * it will be resered for <em>at least</em> the specified <code>reserveTime</code>.
-   * </p>
+   *
+   * <p><b>NOTE:</b> This method does not make any garuntees that the specified index generation
+   * exists, or that the specified generation has not already ben deleted. The only garuntee is that
+   * <em>if</em> the specified generation exists now, or is created at some point in the future,
+   * then it will be resered for <em>at least</em> the specified <code>reserveTime</code>.
    *
    * @param indexGen gen of the commit point to be reserved
-   * @param reserveTime durration in milliseconds (relative to 'now') for which the commit point is to be reserved
+   * @param reserveTime durration in milliseconds (relative to 'now') for which the commit point is
+   *     to be reserved
    */
   public void setReserveDuration(Long indexGen, long reserveTime) {
-    // since 'reserves' is a concurrent HashMap, we don't need to synchronize this method as long as all
+    // since 'reserves' is a concurrent HashMap, we don't need to synchronize this method as long as
+    // all
     // operations on 'reserves' are done atomically.

Review comment:
       Fix this: rejoin 'as long as / all / operations on 'reserves'...'; the
       single-word 'all' continuation line reads badly.

##########
File path: solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
##########
@@ -450,42 +444,41 @@ private synchronized void updateLatestCommit(final List<IndexCommitWrapper> list
     // The upside of this current approach, and not completley synchornizing onInit/onCommit
     // is that we have no control over what delegate is used, or how long those calls might take.
     //
-    // If the hypotehtical situation above ever becomes problematic, then an alternative approach might be
-    // to *add* to the Set/Map of all known commits *before* delegating, then *remove* everything except
+    // If the hypotehtical situation above ever becomes problematic, then an alternative approach
+    // might be
+    // to *add* to the Set/Map of all known commits *before* delegating, then *remove* everything
+    // except
     // the new (non-deleted) commits *after* delegating.

Review comment:
       Fix this: reflow; 'an alternative approach / might be' and '*remove* everything /
       except' break mid-sentence.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrConfig.java
##########
@@ -817,14 +944,16 @@ public PluginInfo getPluginInfo(String type) {
       return result.get(0);
     }
 
-    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-        "Multiple plugins configured for type: " + type);
+    throw new SolrException(
+        SolrException.ErrorCode.SERVER_ERROR, "Multiple plugins configured for type: " + type);
   }
 
   private void initLibs(SolrResourceLoader loader, boolean isConfigsetTrusted) {
     // TODO Want to remove SolrResourceLoader.getInstancePath; it can be on a Standalone subclass.
-    //  For Zk subclass, it's needed for the time being as well.  We could remove that one if we remove two things
-    //  in SolrCloud: (1) instancePath/lib  and (2) solrconfig lib directives with relative paths.  Can wait till 9.0.
+    //  For Zk subclass, it's needed for the time being as well.  We could remove that one if we
+    // remove two things
+    //  in SolrCloud: (1) instancePath/lib  and (2) solrconfig lib directives with relative paths.
+    // Can wait till 9.0.

Review comment:
       Fix this: reflow the TODO; 'if we / remove two things' breaks mid-sentence and
       the continuation lines have lost their original indentation.

##########
File path: solr/core/src/java/org/apache/solr/core/NodeConfig.java
##########
@@ -107,24 +105,47 @@
 
   private final PluginInfo tracerConfig;
 
-  // Track if this config was loaded from zookeeper so that we can skip validating the zookeeper connection later
-  // If it becomes necessary to track multiple potential sources in the future, replace this with an Enum
+  // Track if this config was loaded from zookeeper so that we can skip validating the zookeeper
+  // connection later
+  // If it becomes necessary to track multiple potential sources in the future, replace this with an
+  // Enum

Review comment:
       Fix this: rejoin 'replace this with an / Enum' and the preceding sentence so the
       comment wraps cleanly.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -16,38 +16,42 @@
  */
 package org.apache.solr.core;
 
+import java.lang.invoke.MethodHandles;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.SolrNamedThreadFactory;
 import org.apache.solr.logging.MDCLoggingContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.lang.invoke.MethodHandles;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
 class SolrCores {
 
-  private static final Object modifyLock = new Object(); // for locking around manipulating any of the core maps.
+  private static final Object modifyLock =
+      new Object(); // for locking around manipulating any of the core maps.
   private final Map<String, SolrCore> cores = new LinkedHashMap<>(); // For "permanent" cores
 
   // These descriptors, once loaded, will _not_ be unloaded, i.e. they are not "transient".
   private final Map<String, CoreDescriptor> residentDescriptors = new LinkedHashMap<>();
 
   private final CoreContainer container;
-  
-  private Set<String> currentlyLoadingCores = Collections.newSetFromMap(new ConcurrentHashMap<String,Boolean>());
+
+  private Set<String> currentlyLoadingCores =
+      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  // This map will hold objects that are being currently operated on. The core (value) may be null in the case of
-  // initial load. The rule is, never to any operation on a core that is currently being operated upon.
+  // This map will hold objects that are being currently operated on. The core (value) may be null
+  // in the case of
+  // initial load. The rule is, never to any operation on a core that is currently being operated
+  // upon.

Review comment:
       Fix this: reflow; 'may be null / in the case of / initial load' and 'being
       operated / upon' break mid-phrase. ('never to any operation' also looks like it
       should be 'never do any operation' -- worth fixing while here.)

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -16,38 +16,42 @@
  */
 package org.apache.solr.core;
 
+import java.lang.invoke.MethodHandles;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.SolrNamedThreadFactory;
 import org.apache.solr.logging.MDCLoggingContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.lang.invoke.MethodHandles;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
 class SolrCores {
 
-  private static final Object modifyLock = new Object(); // for locking around manipulating any of the core maps.
+  private static final Object modifyLock =
+      new Object(); // for locking around manipulating any of the core maps.
   private final Map<String, SolrCore> cores = new LinkedHashMap<>(); // For "permanent" cores
 
   // These descriptors, once loaded, will _not_ be unloaded, i.e. they are not "transient".
   private final Map<String, CoreDescriptor> residentDescriptors = new LinkedHashMap<>();
 
   private final CoreContainer container;
-  
-  private Set<String> currentlyLoadingCores = Collections.newSetFromMap(new ConcurrentHashMap<String,Boolean>());
+
+  private Set<String> currentlyLoadingCores =
+      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  // This map will hold objects that are being currently operated on. The core (value) may be null in the case of
-  // initial load. The rule is, never to any operation on a core that is currently being operated upon.
+  // This map will hold objects that are being currently operated on. The core (value) may be null
+  // in the case of
+  // initial load. The rule is, never to any operation on a core that is currently being operated
+  // upon.
   private static final Set<String> pendingCoreOps = new HashSet<>();
 
-  // Due to the fact that closes happen potentially whenever anything is _added_ to the transient core list, we need
+  // Due to the fact that closes happen potentially whenever anything is _added_ to the transient
+  // core list, we need
   // to essentially queue them up to be handled via pendingCoreOps.

Review comment:
       Fix this: rejoin 'added to the transient / core list, we need / to essentially
       queue them up'.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -404,21 +421,24 @@ protected SolrCore waitAddPendingCoreOps(String name) {
         }
       } while (pending);
       // We _really_ need to do this within the synchronized block!
-      if (! container.isShutDown()) {
-        if (! pendingCoreOps.add(name)) {
+      if (!container.isShutDown()) {
+        if (!pendingCoreOps.add(name)) {
           log.warn("Replaced an entry in pendingCoreOps {}, we should not be doing this", name);
         }
-        return getCoreFromAnyList(name, false); // we might have been _unloading_ the core, so return the core if it was loaded.
+        return getCoreFromAnyList(
+            name,
+            false); // we might have been _unloading_ the core, so return the core if it was loaded.

Review comment:
       Fix this: move the comment above the call so getCoreFromAnyList(name, false)
       stays on one line instead of being exploded one argument per line just to fit the
       trailing comment.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -328,8 +343,10 @@ SolrCore  getCoreFromAnyList(String name, boolean incRefCount, UUID coreId) {
     }
   }
 
-  // See SOLR-5366 for why the UNLOAD command needs to know whether a core is actually loaded or not, it might have
-  // to close the core. However, there's a race condition. If the core happens to be in the pending "to close" queue,
+  // See SOLR-5366 for why the UNLOAD command needs to know whether a core is actually loaded or
+  // not, it might have
+  // to close the core. However, there's a race condition. If the core happens to be in the pending
+  // "to close" queue,

Review comment:
       Fix this: reflow; 'actually loaded or / not, it might have' and 'the pending /
       "to close" queue' break mid-phrase.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -277,48 +288,52 @@ protected void swap(String n0, String n1) {
           throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + n1);
         }
       }
-      // When we swap the cores, we also need to swap the associated core descriptors. Note, this changes the 
+      // When we swap the cores, we also need to swap the associated core descriptors. Note, this
+      // changes the
       // name of the coreDescriptor by virtue of the c-tor

Review comment:
       Fix this: rejoin 'Note, this / changes the / name of the coreDescriptor'.

##########
File path: solr/core/src/java/org/apache/solr/core/CoreContainer.java
##########
@@ -2218,8 +2414,10 @@ public void runAsync(Runnable r) {
   }
 
   // It's important that this be the _only_ thread removing things from pendingDynamicCloses!
-  // This is single-threaded, but I tried a multi-threaded approach and didn't see any performance gains, so
-  // there's no good justification for the complexity. I suspect that the locking on things like DefaultSolrCoreState
+  // This is single-threaded, but I tried a multi-threaded approach and didn't see any performance
+  // gains, so
+  // there's no good justification for the complexity. I suspect that the locking on things like
+  // DefaultSolrCoreState
   // essentially create a single-threaded process anyway.

Review comment:
       Fix this: reflow; 'performance / gains, so' and 'things like /
       DefaultSolrCoreState' break mid-sentence.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -83,10 +87,11 @@ public void load(SolrResourceLoader loader) {
     }
   }
 
-  // We are shutting down. You can't hold the lock on the various lists of cores while they shut down, so we need to
+  // We are shutting down. You can't hold the lock on the various lists of cores while they shut
+  // down, so we need to
   // make a temporary copy of the names and shut them down outside the lock.

Review comment:
       Fix this: rejoin 'while they shut / down, so we need to make a temporary
       copy...'.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -96,13 +101,16 @@ protected void close() {
       }
     }
 
-    // It might be possible for one of the cores to move from one list to another while we're closing them. So
-    // loop through the lists until they're all empty. In particular, the core could have moved from the transient
+    // It might be possible for one of the cores to move from one list to another while we're
+    // closing them. So
+    // loop through the lists until they're all empty. In particular, the core could have moved from
+    // the transient
     // list to the pendingCloses list.

Review comment:
       Fix this: reflow; 'while we're / closing them. So' and 'moved from / the
       transient' break mid-sentence.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -429,14 +449,17 @@ protected Object getModifyLock() {
     return modifyLock;
   }
 
-  // Be a little careful. We don't want to either open or close a core unless it's _not_ being opened or closed by
-  // another thread. So within this lock we'll walk along the list of pending closes until we find something NOT in
-  // the list of threads currently being loaded or reloaded. The "usual" case will probably return the very first
+  // Be a little careful. We don't want to either open or close a core unless it's _not_ being
+  // opened or closed by
+  // another thread. So within this lock we'll walk along the list of pending closes until we find
+  // something NOT in
+  // the list of threads currently being loaded or reloaded. The "usual" case will probably return
+  // the very first
   // one anyway..

Review comment:
       Fix this: rewrap this comment by hand; nearly every line ('being / opened or
       closed by', 'until we find / something NOT in', 'probably return / the very
       first') ends mid-phrase.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
##########
@@ -80,44 +79,71 @@
 /**
  * @since solr 1.3
  */
-public class SolrResourceLoader implements ResourceLoader, Closeable, SolrClassLoader, SolrCoreAware  {
+public class SolrResourceLoader
+    implements ResourceLoader, Closeable, SolrClassLoader, SolrCoreAware {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private static final String base = "org.apache.solr";
   private static final String[] packages = {
-      "", "analysis.", "schema.", "handler.", "handler.tagger.", "search.", "update.", "core.", "response.", "request.",
-      "update.processor.", "util.", "spelling.", "handler.component.",
-      "spelling.suggest.", "spelling.suggest.fst.", "rest.schema.analysis.", "security.", "handler.admin.",
-      "security.jwt.", "security.hadoop.", "handler.sql.", "hdfs.", "hdfs.update."
+    "",
+    "analysis.",
+    "schema.",
+    "handler.",
+    "handler.tagger.",
+    "search.",
+    "update.",
+    "core.",
+    "response.",
+    "request.",
+    "update.processor.",
+    "util.",
+    "spelling.",
+    "handler.component.",
+    "spelling.suggest.",
+    "spelling.suggest.fst.",
+    "rest.schema.analysis.",
+    "security.",
+    "handler.admin.",
+    "security.jwt.",
+    "security.hadoop.",
+    "handler.sql.",
+    "hdfs.",
+    "hdfs.update."
   };
   private static final Charset UTF_8 = StandardCharsets.UTF_8;
-  public static final String SOLR_ALLOW_UNSAFE_RESOURCELOADING_PARAM = "solr.allow.unsafe.resourceloading";
+  public static final String SOLR_ALLOW_UNSAFE_RESOURCELOADING_PARAM =
+      "solr.allow.unsafe.resourceloading";
   private final boolean allowUnsafeResourceloading;
 
-
   private String name = "";
   protected URLClassLoader classLoader;
   private final Path instanceDir;
   private String coreName;
   private UUID coreId;
   private SolrConfig config;
   private CoreContainer coreContainer;
-  private PackageListeningClassLoader schemaLoader ;
+  private PackageListeningClassLoader schemaLoader;
 
-  private PackageListeningClassLoader coreReloadingClassLoader ;
-  private final List<SolrCoreAware> waitingForCore = Collections.synchronizedList(new ArrayList<>());
+  private PackageListeningClassLoader coreReloadingClassLoader;
+  private final List<SolrCoreAware> waitingForCore =
+      Collections.synchronizedList(new ArrayList<>());
   private final List<SolrInfoBean> infoMBeans = Collections.synchronizedList(new ArrayList<>());
-  private final List<ResourceLoaderAware> waitingForResources = Collections.synchronizedList(new ArrayList<>());
+  private final List<ResourceLoaderAware> waitingForResources =
+      Collections.synchronizedList(new ArrayList<>());
 
   private volatile boolean live;
 
-  // Provide a registry so that managed resources can register themselves while the XML configuration
+  // Provide a registry so that managed resources can register themselves while the XML
+  // configuration
   // documents are being parsed ... after all are registered, they are asked by the RestManager to
-  // initialize themselves. This two-step process is required because not all resources are available
+  // initialize themselves. This two-step process is required because not all resources are
+  // available
   // (such as the SolrZkClient) when XML docs are being parsed.

Review comment:
       Fix this: reflow; 'the XML / configuration' and 'resources are / available' break
       mid-phrase.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCores.java
##########
@@ -404,21 +421,24 @@ protected SolrCore waitAddPendingCoreOps(String name) {
         }
       } while (pending);
       // We _really_ need to do this within the synchronized block!
-      if (! container.isShutDown()) {
-        if (! pendingCoreOps.add(name)) {
+      if (!container.isShutDown()) {
+        if (!pendingCoreOps.add(name)) {
           log.warn("Replaced an entry in pendingCoreOps {}, we should not be doing this", name);
         }
-        return getCoreFromAnyList(name, false); // we might have been _unloading_ the core, so return the core if it was loaded.
+        return getCoreFromAnyList(
+            name,
+            false); // we might have been _unloading_ the core, so return the core if it was loaded.
       }
     }
     return null;
   }
 
-  // We should always be removing the first thing in the list with our name! The idea here is to NOT do anything n
+  // We should always be removing the first thing in the list with our name! The idea here is to NOT
+  // do anything n
   // any core while some other operation is working on that core.

Review comment:
       Fix this: rejoin 'to NOT / do anything n / any core'; 'n' also looks like a typo
       for 'on'.

##########
File path: solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheFactory.java
##########
@@ -63,20 +66,32 @@ public static TransientSolrCoreCacheFactory newInstance(SolrResourceLoader loade
     } catch (Exception e) {
       // Many things could cause this, bad solrconfig, mis-typed class name, whatever.
       // Throw an exception to stop loading here; never return null.
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error instantiating "
-              + TransientSolrCoreCacheFactory.class.getName() + " class [" + info.className + "]", e);
+      throw new SolrException(
+          SolrException.ErrorCode.SERVER_ERROR,
+          "Error instantiating "
+              + TransientSolrCoreCacheFactory.class.getName()
+              + " class ["
+              + info.className
+              + "]",
+          e);
     }
   }
 
   public static final PluginInfo DEFAULT_TRANSIENT_SOLR_CACHE_INFO =
-      new PluginInfo("transientSolrCoreCacheFactory",
-          ImmutableMap.of("class", TransientSolrCoreCacheFactoryDefault.class.getName(), 
-              "name", TransientSolrCoreCacheFactory.class.getName()),
-          null, Collections.<PluginInfo>emptyList());
-
+      new PluginInfo(
+          "transientSolrCoreCacheFactory",
+          ImmutableMap.of(
+              "class",
+              TransientSolrCoreCacheFactoryDefault.class.getName(),
+              "name",
+              TransientSolrCoreCacheFactory.class.getName()),
+          null,
+          Collections.<PluginInfo>emptyList());
 
-  // Need this because the plugin framework doesn't require a PluginINfo in the init method, don't see a way to
-  // pass additional parameters and we need this when we create the transient core cache, it's _really_ important.
+  // Need this because the plugin framework doesn't require a PluginINfo in the init method, don't
+  // see a way to
+  // pass additional parameters and we need this when we create the transient core cache, it's
+  // _really_ important.

Review comment:
       Fix this: reflow; 'don't / see a way to' and 'it's / _really_ important' break
       mid-sentence.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
##########
@@ -36,124 +58,117 @@
 import org.w3c.dom.NodeList;
 import org.xml.sax.InputSource;
 
-import javax.management.MBeanServer;
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-import java.io.ByteArrayInputStream;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-/**
- * Loads {@code solr.xml}.
- */
+/** Loads {@code solr.xml}. */
 public class SolrXmlConfig {
 
-  // TODO should these from* methods return a NodeConfigBuilder so that the caller (a test) can make further
+  // TODO should these from* methods return a NodeConfigBuilder so that the caller (a test) can make
+  // further
   //  manipulations like add properties and set the CorePropertiesLocator and "async" mode?
 
-  public final static String ZK_HOST = "zkHost";
-  public final static String SOLR_XML_FILE = "solr.xml";
-  public final static String SOLR_DATA_HOME = "solr.data.home";
+  public static final String ZK_HOST = "zkHost";
+  public static final String SOLR_XML_FILE = "solr.xml";
+  public static final String SOLR_DATA_HOME = "solr.data.home";
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private static final Pattern COMMA_SEPARATED_PATTERN = Pattern.compile("\\s*,\\s*");
 
   /**
-   * Given some node Properties, checks if non-null and a 'zkHost' is alread included.  If so, the Properties are
-   * returned as is.  If not, then the returned value will be a new Properties, wrapping the original Properties, 
-   * with the 'zkHost' value set based on the value of the corispond System property (if set)
+   * Given some node Properties, checks if non-null and a 'zkHost' is alread included. If so, the
+   * Properties are returned as is. If not, then the returned value will be a new Properties,
+   * wrapping the original Properties, with the 'zkHost' value set based on the value of the
+   * corispond System property (if set)
    *
-   * In theory we only need this logic once, ideally in SolrDispatchFilter, but we put it here to re-use 
-   * redundently because of how much surface area our API has for various tests to poke at us.
+   * <p>In theory we only need this logic once, ideally in SolrDispatchFilter, but we put it here to
+   * re-use redundently because of how much surface area our API has for various tests to poke at
+   * us.
    */
   public static Properties wrapAndSetZkHostFromSysPropIfNeeded(final Properties props) {
-    if (null != props && ! StringUtils.isEmpty(props.getProperty(ZK_HOST))) {
+    if (null != props && !StringUtils.isEmpty(props.getProperty(ZK_HOST))) {
       // nothing to do...
       return props;
     }
     // we always wrap if we might set a property -- never mutate the original props
     final Properties results = (null == props ? new Properties() : new Properties(props));
     final String sysprop = System.getProperty(ZK_HOST);
-    if (! StringUtils.isEmpty(sysprop)) {
+    if (!StringUtils.isEmpty(sysprop)) {
       results.setProperty(ZK_HOST, sysprop);
     }
     return results;
   }
 
-  
   public static NodeConfig fromConfig(Path solrHome, XmlConfigFile config, boolean fromZookeeper) {
 
     checkForIllegalConfig(config);
 
-    // sanity check: if our config came from zookeeper, then there *MUST* be Node Properties that tell us
-    // what zkHost was used to read it (either via webapp context attribute, or that SolrDispatchFilter
+    // sanity check: if our config came from zookeeper, then there *MUST* be Node Properties that
+    // tell us
+    // what zkHost was used to read it (either via webapp context attribute, or that
+    // SolrDispatchFilter
     // filled in for us from system properties)

Review comment:
       Fix this: reflow the sanity-check comment; 'Node Properties that / tell us' and
       'or that / SolrDispatchFilter' break mid-phrase.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
##########
@@ -36,124 +58,117 @@
 import org.w3c.dom.NodeList;
 import org.xml.sax.InputSource;
 
-import javax.management.MBeanServer;
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-import java.io.ByteArrayInputStream;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-/**
- * Loads {@code solr.xml}.
- */
+/** Loads {@code solr.xml}. */
 public class SolrXmlConfig {
 
-  // TODO should these from* methods return a NodeConfigBuilder so that the caller (a test) can make further
+  // TODO should these from* methods return a NodeConfigBuilder so that the caller (a test) can make
+  // further
   //  manipulations like add properties and set the CorePropertiesLocator and "async" mode?
 
-  public final static String ZK_HOST = "zkHost";
-  public final static String SOLR_XML_FILE = "solr.xml";
-  public final static String SOLR_DATA_HOME = "solr.data.home";
+  public static final String ZK_HOST = "zkHost";
+  public static final String SOLR_XML_FILE = "solr.xml";
+  public static final String SOLR_DATA_HOME = "solr.data.home";
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private static final Pattern COMMA_SEPARATED_PATTERN = Pattern.compile("\\s*,\\s*");
 
   /**
-   * Given some node Properties, checks if non-null and a 'zkHost' is alread included.  If so, the Properties are
-   * returned as is.  If not, then the returned value will be a new Properties, wrapping the original Properties, 
-   * with the 'zkHost' value set based on the value of the corispond System property (if set)
+   * Given some node Properties, checks if non-null and a 'zkHost' is alread included. If so, the
+   * Properties are returned as is. If not, then the returned value will be a new Properties,
+   * wrapping the original Properties, with the 'zkHost' value set based on the value of the
+   * corispond System property (if set)
    *
-   * In theory we only need this logic once, ideally in SolrDispatchFilter, but we put it here to re-use 
-   * redundently because of how much surface area our API has for various tests to poke at us.
+   * <p>In theory we only need this logic once, ideally in SolrDispatchFilter, but we put it here to
+   * re-use redundently because of how much surface area our API has for various tests to poke at
+   * us.
    */
   public static Properties wrapAndSetZkHostFromSysPropIfNeeded(final Properties props) {
-    if (null != props && ! StringUtils.isEmpty(props.getProperty(ZK_HOST))) {
+    if (null != props && !StringUtils.isEmpty(props.getProperty(ZK_HOST))) {
       // nothing to do...
       return props;
     }
     // we always wrap if we might set a property -- never mutate the original props
     final Properties results = (null == props ? new Properties() : new Properties(props));
     final String sysprop = System.getProperty(ZK_HOST);
-    if (! StringUtils.isEmpty(sysprop)) {
+    if (!StringUtils.isEmpty(sysprop)) {
       results.setProperty(ZK_HOST, sysprop);
     }
     return results;
   }
 
-  
   public static NodeConfig fromConfig(Path solrHome, XmlConfigFile config, boolean fromZookeeper) {
 
     checkForIllegalConfig(config);
 
-    // sanity check: if our config came from zookeeper, then there *MUST* be Node Properties that tell us
-    // what zkHost was used to read it (either via webapp context attribute, or that SolrDispatchFilter
+    // sanity check: if our config came from zookeeper, then there *MUST* be Node Properties that
+    // tell us
+    // what zkHost was used to read it (either via webapp context attribute, or that
+    // SolrDispatchFilter
     // filled in for us from system properties)
-    assert ( (! fromZookeeper) || (null != config.getSubstituteProperties()
-                                   && null != config.getSubstituteProperties().getProperty(ZK_HOST)));
-    
-    // Regardless of where/how we this XmlConfigFile was loaded from, if it contains a zkHost property,
-    // we're going to use that as our "default" and only *directly* check the system property if it's not specified.
+    assert ((!fromZookeeper)
+        || (null != config.getSubstituteProperties()
+            && null != config.getSubstituteProperties().getProperty(ZK_HOST)));
+
+    // Regardless of where/how we this XmlConfigFile was loaded from, if it contains a zkHost
+    // property,
+    // we're going to use that as our "default" and only *directly* check the system property if
+    // it's not specified.
     //
-    // (checking the sys prop here is really just for tests that by-pass SolrDispatchFilter. In non-test situations,
-    // SolrDispatchFilter will check the system property if needed in order to try and load solr.xml from ZK, and
+    // (checking the sys prop here is really just for tests that by-pass SolrDispatchFilter. In
+    // non-test situations,
+    // SolrDispatchFilter will check the system property if needed in order to try and load solr.xml
+    // from ZK, and
     // should have put the sys prop value in the node properties for us)

Review comment:
       Fix this: rewrap this whole comment block by hand; several lines end mid-phrase
       ('a zkHost / property,', 'In / non-test situations,', 'load solr.xml / from ZK,
       and').

##########
File path: solr/core/src/java/org/apache/solr/core/TransientSolrCoreCacheDefault.java
##########
@@ -196,8 +204,10 @@ public CoreDescriptor removeTransientDescriptor(String name) {
   }
 
   @Override
-  public int getStatus(String coreName) { return 0; } //no_op for default handler.
+  public int getStatus(String coreName) {
+    return 0;
+  } // no_op for default handler.

Review comment:
       Fix this: the 'no_op for default handler' comment now dangles after the closing
       brace; move it inside the method or above the declaration.
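       For reference (illustrative only, not part of this diff):

           @Override
           public int getStatus(String coreName) {
             return 0; // no_op for default handler.
           }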

##########
File path: solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
##########
@@ -36,124 +58,117 @@
 import org.w3c.dom.NodeList;
 import org.xml.sax.InputSource;
 
-import javax.management.MBeanServer;
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-import java.io.ByteArrayInputStream;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import static org.apache.solr.common.params.CommonParams.NAME;
-
-
-/**
- * Loads {@code solr.xml}.
- */
+/** Loads {@code solr.xml}. */
 public class SolrXmlConfig {
 
-  // TODO should these from* methods return a NodeConfigBuilder so that the caller (a test) can make further
+  // TODO should these from* methods return a NodeConfigBuilder so that the caller (a test) can make
+  // further
   //  manipulations like add properties and set the CorePropertiesLocator and "async" mode?

Review comment:
       Fix this: rejoin 'can make / further / manipulations' in the TODO.

##########
File path: solr/core/src/java/org/apache/solr/core/backup/repository/BackupRepositoryFactory.java
##########
@@ -59,21 +60,25 @@ public BackupRepositoryFactory(PluginInfo[] backupRepoPlugins) {
       }
 
       if (this.defaultBackupRepoPlugin != null) {
-        log.info("Default configuration for backup repository is with configuration params {}",
-                defaultBackupRepoPlugin);
+        log.info(
+            "Default configuration for backup repository is with configuration params {}",
+            defaultBackupRepoPlugin);
       }
     }
   }
 
   public BackupRepository newInstance(SolrResourceLoader loader, String name) {
     Objects.requireNonNull(loader);
     Objects.requireNonNull(name);
-    PluginInfo repo = Objects.requireNonNull(backupRepoPluginByName.get(name),
+    PluginInfo repo =
+        Objects.requireNonNull(
+            backupRepoPluginByName.get(name),
             "Could not find a backup repository with name " + name);
 
     BackupRepository result = loader.newInstance(repo.className, BackupRepository.class);
     if ("trackingBackupRepository".equals(name)) {
-      // newInstance can be called by multiple threads, synchronization prevents simultaneous multi-threaded 'adds' from
+      // newInstance can be called by multiple threads, synchronization prevents simultaneous
+      // multi-threaded 'adds' from
       // corrupting the namedlist

Review comment:
       Fix this: reflow; 'simultaneous / multi-threaded 'adds' from / corrupting the
       namedlist' breaks mid-sentence.

##########
File path: solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
##########
@@ -289,63 +294,62 @@ private int arrayCompare(int[] a, int[] b) {
         tokenNamedList.add("match", true);
       }
 
-      token.reflectWith(new AttributeReflector() {
-        @Override
-        public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
-          // leave out position and bytes term
-          if (TermToBytesRefAttribute.class.isAssignableFrom(attClass))
-            return;
-          if (CharTermAttribute.class.isAssignableFrom(attClass))
-            return;
-          if (PositionIncrementAttribute.class.isAssignableFrom(attClass))
-            return;
-          
-          String k = attClass.getName() + '#' + key;
-          
-          // map keys for "standard attributes":
-          if (ATTRIBUTE_MAPPING.containsKey(k)) {
-            k = ATTRIBUTE_MAPPING.get(k);
-          }
-          
-          if (value instanceof BytesRef) {
-            final BytesRef p = (BytesRef) value;
-            value = p.toString();
-          }
+      token.reflectWith(
+          new AttributeReflector() {
+            @Override
+            public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
+              // leave out position and bytes term
+              if (TermToBytesRefAttribute.class.isAssignableFrom(attClass)) return;
+              if (CharTermAttribute.class.isAssignableFrom(attClass)) return;
+              if (PositionIncrementAttribute.class.isAssignableFrom(attClass)) return;
 
-          tokenNamedList.add(k, value);
-        }
-      });
+              String k = attClass.getName() + '#' + key;
+
+              // map keys for "standard attributes":
+              if (ATTRIBUTE_MAPPING.containsKey(k)) {
+                k = ATTRIBUTE_MAPPING.get(k);
+              }
+
+              if (value instanceof BytesRef) {
+                final BytesRef p = (BytesRef) value;
+                value = p.toString();
+              }
+
+              tokenNamedList.add(k, value);
+            }
+          });
 
       tokensNamedLists.add(tokenNamedList);
     }
 
     return tokensNamedLists;
   }
-  
-  private String writeCharStream(NamedList<Object> out, Reader input ){
+
+  private String writeCharStream(NamedList<Object> out, Reader input) {
     final int BUFFER_SIZE = 1024;
     char[] buf = new char[BUFFER_SIZE];
     int len = 0;
     StringBuilder sb = new StringBuilder();
     do {
       try {
-        len = input.read( buf, 0, BUFFER_SIZE );
+        len = input.read(buf, 0, BUFFER_SIZE);
       } catch (IOException e) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
       }
-      if( len > 0 )
-        sb.append(buf, 0, len);
-    } while( len == BUFFER_SIZE );
-    out.add( input.getClass().getName(), sb.toString());
+      if (len > 0) sb.append(buf, 0, len);
+    } while (len == BUFFER_SIZE);
+    out.add(input.getClass().getName(), sb.toString());
     return sb.toString();
   }
 
-  // ================================================= Inner classes =================================================
+  // ================================================= Inner classes
+  // =================================================

Review comment:
       Fix this: the '===== Inner classes =====' banner got split across two lines;
       shorten it so it fits on one line (or drop the banner).
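       For reference, a shorter banner fits on one line (illustrative only, not part of
       this diff):

           // ========================= Inner classes =========================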

##########
File path: solr/core/src/java/org/apache/solr/core/backup/BackupFilePaths.java
##########
@@ -29,148 +30,160 @@
 import java.util.Optional;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-
-import static org.apache.solr.core.backup.BackupId.TRADITIONAL_BACKUP;
-import static org.apache.solr.core.backup.BackupManager.ZK_STATE_DIR;
+import org.apache.solr.core.backup.repository.BackupRepository;
 
 /**
  * Utility class for getting paths related to backups, or parsing information out of those paths.
  */
 public class BackupFilePaths {
 
-    private static final Pattern BACKUP_PROPS_ID_PTN = Pattern.compile("backup_([0-9]+).properties");
-    private BackupRepository repository;
-    private URI backupLoc;
-
-    /**
-     * Create a BackupFilePaths object.
-     *
-     * @param repository the repository; used primarily to resolve URIs.
-     * @param backupLoc the root location for a named backup.  For traditional backups this is expected to take the form
-     *                  baseLocation/backupName.  For incremental backups this is expected to be of the form
-     *                  baseLocation/backupName/collectionName.
-     */
-    public BackupFilePaths(BackupRepository repository, URI backupLoc) {
-        this.repository = repository;
-        this.backupLoc = backupLoc;
-    }
-
-    /**
-     * Return a URI for the 'index' location, responsible for holding index files for all backups at this location.
-     *
-     * Only valid for incremental backups.
-     */
-    public URI getIndexDir() {
-        return repository.resolveDirectory(backupLoc, "index");
-    }
-
-    /**
-     * Return a URI for the 'shard_backup_metadata' location, which contains metadata files about each shard backup.
-     *
-     * Only valid for incremental backups.
-     */
-    public URI getShardBackupMetadataDir() {
-        return repository.resolveDirectory(backupLoc, "shard_backup_metadata");
+  private static final Pattern BACKUP_PROPS_ID_PTN = Pattern.compile("backup_([0-9]+).properties");
+  private BackupRepository repository;
+  private URI backupLoc;
+
+  /**
+   * Create a BackupFilePaths object.
+   *
+   * @param repository the repository; used primarily to resolve URIs.
+   * @param backupLoc the root location for a named backup. For traditional backups this is expected
+   *     to take the form baseLocation/backupName. For incremental backups this is expected to be of
+   *     the form baseLocation/backupName/collectionName.
+   */
+  public BackupFilePaths(BackupRepository repository, URI backupLoc) {
+    this.repository = repository;
+    this.backupLoc = backupLoc;
+  }
+
+  /**
+   * Return a URI for the 'index' location, responsible for holding index files for all backups at
+   * this location.
+   *
+   * <p>Only valid for incremental backups.
+   */
+  public URI getIndexDir() {
+    return repository.resolveDirectory(backupLoc, "index");
+  }
+
+  /**
+   * Return a URI for the 'shard_backup_metadata' location, which contains metadata files about each
+   * shard backup.
+   *
+   * <p>Only valid for incremental backups.
+   */
+  public URI getShardBackupMetadataDir() {
+    return repository.resolveDirectory(backupLoc, "shard_backup_metadata");
+  }
+
+  public URI getBackupLocation() {
+    return backupLoc;
+  }
+
+  /**
+   * Create all locations required to store an incremental backup.
+   *
+   * @throws IOException for issues encountered using repository to create directories
+   */
+  public void createIncrementalBackupFolders() throws IOException {
+    repository.createDirectory(backupLoc);
+    repository.createDirectory(getIndexDir());
+    repository.createDirectory(getShardBackupMetadataDir());
+  }
+
+  /**
+   * Get the directory name used to hold backed up ZK state
+   *
+   * <p>Valid for both incremental and traditional backups.
+   *
+   * @param id the ID of the backup in question
+   */
+  public static String getZkStateDir(BackupId id) {
+    if (id.id == TRADITIONAL_BACKUP) {
+      return ZK_STATE_DIR;
     }
-
-    public URI getBackupLocation() {
-        return backupLoc;
-    }
-
-    /**
-     * Create all locations required to store an incremental backup.
-     *
-     * @throws IOException for issues encountered using repository to create directories
-     */
-    public void createIncrementalBackupFolders() throws IOException {
-        repository.createDirectory(backupLoc);
-        repository.createDirectory(getIndexDir());
-        repository.createDirectory(getShardBackupMetadataDir());
+    return String.format(Locale.ROOT, "%s_%d", ZK_STATE_DIR, id.id);
+  }
+
+  /**
+   * Get the filename of the top-level backup properties file
+   *
+   * <p>Valid for both incremental and traditional backups.
+   *
+   * @param id the ID of the backup in question
+   */
+  public static String getBackupPropsName(BackupId id) {
+    if (id.id == TRADITIONAL_BACKUP) {
+      return BackupManager.TRADITIONAL_BACKUP_PROPS_FILE;
     }
-
-    /**
-     * Get the directory name used to hold backed up ZK state
-     *
-     * Valid for both incremental and traditional backups.
-     *
-     * @param id the ID of the backup in question
-     */
-    public static String getZkStateDir(BackupId id) {
-        if (id.id == TRADITIONAL_BACKUP) {
-            return ZK_STATE_DIR;
-        }
-        return String.format(Locale.ROOT, "%s_%d", ZK_STATE_DIR, id.id);
-    }
-
-    /**
-     * Get the filename of the top-level backup properties file
-     *
-     * Valid for both incremental and traditional backups.
-     *
-     * @param id the ID of the backup in question
-     */
-    public static String getBackupPropsName(BackupId id) {
-        if (id.id == TRADITIONAL_BACKUP) {
-            return BackupManager.TRADITIONAL_BACKUP_PROPS_FILE;
-        }
-        return getBackupPropsName(id.id);
+    return getBackupPropsName(id.id);
+  }
+
+  /**
+   * Identify all strings which appear to be the filename of a top-level backup properties file.
+   *
+   * <p>Only valid for incremental backups.
+   *
+   * @param listFiles a list of strings, filenames which may or may not correspond to backup
+   *     properties files
+   */
+  public static List<BackupId> findAllBackupIdsFromFileListing(String[] listFiles) {
+    List<BackupId> result = new ArrayList<>();
+    for (String file : listFiles) {
+      Matcher m = BACKUP_PROPS_ID_PTN.matcher(file);
+      if (m.find()) {
+        result.add(new BackupId(Integer.parseInt(m.group(1))));
+      }
     }
 
-    /**
-     * Identify all strings which appear to be the filename of a top-level backup properties file.
-     *
-     * Only valid for incremental backups.
-     *
-     * @param listFiles a list of strings, filenames which may or may not correspond to backup properties files
-     */
-    public static List<BackupId> findAllBackupIdsFromFileListing(String[] listFiles) {
-        List<BackupId> result = new ArrayList<>();
-        for (String file: listFiles) {
-            Matcher m = BACKUP_PROPS_ID_PTN.matcher(file);
-            if (m.find()) {
-                result.add(new BackupId(Integer.parseInt(m.group(1))));
-            }
-        }
-
-        return result;
+    return result;
+  }
+
+  /**
+   * Identify the string from an array of filenames which represents the most recent top-level
+   * backup properties file.
+   *
+   * <p>Only valid for incremental backups.
+   *
+   * @param listFiles a list of strings, filenames which may or may not correspond to backup
+   *     properties files.
+   */
+  public static Optional<BackupId> findMostRecentBackupIdFromFileListing(String[] listFiles) {
+    return findAllBackupIdsFromFileListing(listFiles).stream()
+        .max(Comparator.comparingInt(o -> o.id));
+  }
+
+  /**
+   * Builds the URI for the backup location given the user-provided 'location' and backup 'name'.
+   *
+   * @param repository the backup repository, used to list files and resolve URI's.
+   * @param location a URI representing the repository location holding each backup name
+   * @param backupName the specific backup name to create a URI for
+   */
+  public static URI buildExistingBackupLocationURI(
+      BackupRepository repository, URI location, String backupName) throws IOException {
+    final URI backupNameUri = repository.resolveDirectory(location, backupName);
+    final String[] entries = repository.listAll(backupNameUri);
+    final boolean incremental =
+        !Arrays.stream(entries)
+            .anyMatch(entry -> entry.equals(BackupManager.TRADITIONAL_BACKUP_PROPS_FILE));
+    if (incremental) {
+      // Incremental backups have an additional URI path component representing the collection that
+      // was backed up.
+      // This collection directory is the path assumed by other backup code.

Review comment:
       Fix this
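
This presumably points at the comment that spotless re-wrapped so that "was backed up." sits on its own line. One possible reflow, shown as a sketch only (it also swaps the negated anyMatch for the equivalent Stream.noneMatch, which is purely an aside and not necessarily what the reviewer wants):

    final boolean incremental =
        Arrays.stream(entries)
            .noneMatch(entry -> entry.equals(BackupManager.TRADITIONAL_BACKUP_PROPS_FILE));
    if (incremental) {
      // Incremental backups have an additional URI path component representing the collection
      // that was backed up; this collection directory is the path assumed by other backup code.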

##########
File path: solr/core/src/java/org/apache/solr/core/backup/BackupManager.java
##########
@@ -312,26 +345,31 @@ private void downloadConfigToRepo(ConfigSetService configSetService, String conf
     }
   }
 
-  private void uploadConfigToSolrCloud(ConfigSetService configSetService, URI sourceDir, String configName, String filePrefix) throws IOException {
+  private void uploadConfigToSolrCloud(
+      ConfigSetService configSetService, URI sourceDir, String configName, String filePrefix)
+      throws IOException {
     for (String file : repository.listAll(sourceDir)) {
       String filePath = filePrefix + file;
       URI path = repository.resolve(sourceDir, file);
       BackupRepository.PathType t = repository.getPathType(path);
       switch (t) {
-        case FILE: {
-          try (IndexInput is = repository.openInput(sourceDir, file, IOContext.DEFAULT)) {
-            byte[] arr = new byte[(int) is.length()]; // probably ok since the config file should be small.
-            is.readBytes(arr, 0, (int) is.length());
-            configSetService.uploadFileToConfig(configName, filePath, arr, false);
+        case FILE:
+          {
+            try (IndexInput is = repository.openInput(sourceDir, file, IOContext.DEFAULT)) {
+              byte[] arr =
+                  new byte[(int) is.length()]; // probably ok since the config file should be small.

Review comment:
       Fix this
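
The wrap here is forced by the trailing note on the array allocation; hoisting the note above the statement keeps the allocation on one line. A sketch of that shape, not a definitive change:

            try (IndexInput is = repository.openInput(sourceDir, file, IOContext.DEFAULT)) {
              // probably ok since the config file should be small
              byte[] arr = new byte[(int) is.length()];
              is.readBytes(arr, 0, (int) is.length());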

##########
File path: solr/core/src/java/org/apache/solr/filestore/DistribPackageStore.java
##########
@@ -486,16 +501,20 @@ public void delete(String path) {
     List<String> nodes = coreContainer.getPackageStoreAPI().shuffledNodes();
     HttpClient client = coreContainer.getUpdateShardHandler().getDefaultHttpClient();
     for (String node : nodes) {
-      String baseUrl = coreContainer.getZkController().getZkStateReader().getBaseUrlForNodeName(node);
+      String baseUrl =
+          coreContainer.getZkController().getZkStateReader().getBaseUrlForNodeName(node);
       String url = baseUrl.replace("/solr", "/api") + "/node/files" + path;
       HttpDelete del = new HttpDelete(url);
-      coreContainer.runAsync(() -> Utils.executeHttpMethod(client, url, null, del));//invoke delete command on all nodes asynchronously
+      coreContainer.runAsync(
+          () ->
+              Utils.executeHttpMethod(
+                  client, url, null, del)); // invoke delete command on all nodes asynchronously

Review comment:
       Fix this
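
Moving the trailing note above the call would let the lambda collapse back onto a single line. A possible shape (sketch):

      HttpDelete del = new HttpDelete(url);
      // invoke delete command on all nodes asynchronously
      coreContainer.runAsync(() -> Utils.executeHttpMethod(client, url, null, del));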

##########
File path: solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
##########
@@ -648,23 +821,34 @@ private void makePlacementDecisions(SolrCollection solrCollection, String shardN
           }
         }
 
-        // Insert back a corrected entry for the winning AZ: one more replica living there and one less node that can accept new replicas
-        // (the remaining candidate node list might be empty, in which case it will be cleaned up on the next iteration).
+        // Insert back a corrected entry for the winning AZ: one more replica living there and one
+        // less node that can accept new replicas
+        // (the remaining candidate node list might be empty, in which case it will be cleaned up on
+        // the next iteration).

Review comment:
       Fix this
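
The same comment, simply re-wrapped so each line breaks at a clause boundary instead of mid-phrase (sketch, wording unchanged):

        // Insert back a corrected entry for the winning AZ: one more replica living there and
        // one less node that can accept new replicas (the remaining candidate node list might be
        // empty, in which case it will be cleaned up on the next iteration).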

##########
File path: solr/core/src/java/org/apache/solr/core/snapshots/SolrSnapshotManager.java
##########
@@ -250,57 +266,66 @@ public void onCommit(List<? extends IndexCommit> commits)
    * @param snapshots The snapshots to be preserved.
    * @throws IOException in case of I/O errors.
    */
-  public static void deleteNonSnapshotIndexFiles(SolrCore core, Directory dir, Collection<SnapshotMetaData> snapshots) throws IOException {
+  public static void deleteNonSnapshotIndexFiles(
+      SolrCore core, Directory dir, Collection<SnapshotMetaData> snapshots) throws IOException {
     final Set<Long> genNumbers = new HashSet<>();
     for (SnapshotMetaData m : snapshots) {
       genNumbers.add(m.getGenerationNumber());
     }
 
-    deleteSnapshotIndexFiles(core, dir, new IndexDeletionPolicy() {
-      @Override
-      public void onInit(List<? extends IndexCommit> commits) throws IOException {
-        for (IndexCommit ic : commits) {
-          if (!genNumbers.contains(ic.getGeneration())) {
-            if (log.isInfoEnabled()) {
-              log.info("Deleting non-snapshotted index commit with generation {}", ic.getGeneration());
+    deleteSnapshotIndexFiles(
+        core,
+        dir,
+        new IndexDeletionPolicy() {
+          @Override
+          public void onInit(List<? extends IndexCommit> commits) throws IOException {
+            for (IndexCommit ic : commits) {
+              if (!genNumbers.contains(ic.getGeneration())) {
+                if (log.isInfoEnabled()) {
+                  log.info(
+                      "Deleting non-snapshotted index commit with generation {}",
+                      ic.getGeneration());
+                }
+                ic.delete();
+              }
             }
-            ic.delete();
           }
-        }
-      }
 
-      @Override
-      public void onCommit(List<? extends IndexCommit> commits)
-          throws IOException {}
-    });
+          @Override
+          public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
+        });
   }
 
   /**
-   * This method deletes index files of the {@linkplain IndexCommit} for the specified generation number.
+   * This method deletes index files of the {@linkplain IndexCommit} for the specified generation
+   * number.
    *
    * @param core The Solr core
    * @param dir The index directory storing the snapshot.
    * @throws IOException in case of I/O errors.
    */
-
   @SuppressWarnings({"try", "unused"})
-  private static void deleteSnapshotIndexFiles(SolrCore core, Directory dir, IndexDeletionPolicy delPolicy) throws IOException {
+  private static void deleteSnapshotIndexFiles(
+      SolrCore core, Directory dir, IndexDeletionPolicy delPolicy) throws IOException {
     IndexWriterConfig conf = core.getSolrConfig().indexConfig.toIndexWriterConfig(core);
     conf.setOpenMode(OpenMode.APPEND);
-    conf.setMergePolicy(NoMergePolicy.INSTANCE);//Don't want to merge any commits here!
+    conf.setMergePolicy(NoMergePolicy.INSTANCE); // Don't want to merge any commits here!
     conf.setIndexDeletionPolicy(delPolicy);
     conf.setCodec(core.getCodec());
     try (SolrIndexWriter iw = new SolrIndexWriter("SolrSnapshotCleaner", dir, conf)) {
-      // Do nothing. The only purpose of opening index writer is to invoke the Lucene IndexDeletionPolicy#onInit
+      // Do nothing. The only purpose of opening index writer is to invoke the Lucene
+      // IndexDeletionPolicy#onInit
       // method so that we can cleanup the files associated with specified index commit.

Review comment:
       Fix this
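
A possible re-wrap of the "Do nothing" note so that IndexDeletionPolicy#onInit no longer sits alone on its own line (sketch, same content):

    try (SolrIndexWriter iw = new SolrIndexWriter("SolrSnapshotCleaner", dir, conf)) {
      // Do nothing. The only purpose of opening the index writer is to invoke the Lucene
      // IndexDeletionPolicy#onInit method so that we can clean up the files associated with
      // the specified index commit.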

##########
File path: solr/core/src/java/org/apache/solr/core/ZkContainer.java
##########
@@ -47,38 +46,38 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * Used by {@link CoreContainer} to hold ZooKeeper / SolrCloud info, especially {@link ZkController}.
- * Mainly it does some ZK initialization, and ensures a loading core registers in ZK.
- * Even when in standalone mode, perhaps surprisingly, an instance of this class exists.
- * If {@link #getZkController()} returns null then we're in standalone mode.
+ * Used by {@link CoreContainer} to hold ZooKeeper / SolrCloud info, especially {@link
+ * ZkController}. Mainly it does some ZK initialization, and ensures a loading core registers in ZK.
+ * Even when in standalone mode, perhaps surprisingly, an instance of this class exists. If {@link
+ * #getZkController()} returns null then we're in standalone mode.
  */
 public class ZkContainer {
-  // NOTE DWS: It's debatable if this in-between class is needed instead of folding it all into ZkController.
+  // NOTE DWS: It's debatable if this in-between class is needed instead of folding it all into
+  // ZkController.
   //  ZKC is huge though.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/CatStream.java
##########
@@ -229,12 +236,17 @@ private void findReadableFiles(CrawlFile seed, List<CrawlFile> foundFiles) {
       foundFiles.add(seed);
     } else if (Files.isDirectory(entry)) {
       try (Stream<Path> directoryContents = Files.list(entry)) {
-        directoryContents.sorted().forEach(iPath -> {
-          // debatable: should the separator be OS/file-system specific, or perhaps always "/" ?
-          final String displayPathSeparator = iPath.getFileSystem().getSeparator();
-          final String itemDisplayPath = seed.displayPath + displayPathSeparator + iPath.getFileName();
-          findReadableFiles(new CrawlFile(itemDisplayPath, iPath), foundFiles);
-        });
+        directoryContents
+            .sorted()
+            .forEach(
+                iPath -> {
+                  // debatable: should the separator be OS/file-system specific, or perhaps always
+                  // "/" ?

Review comment:
       Fix this
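
One way to keep the quoted separator together with its question (sketch, same wording):

                iPath -> {
                  // debatable: should the separator be OS/file-system specific,
                  // or perhaps always "/"?
                  final String displayPathSeparator = iPath.getFileSystem().getSeparator();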

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -253,8 +280,9 @@ public IndexFetcher(final NamedList<?> initArgs, final ReplicationHandler handle
     useInternalCompression = INTERNAL.equals(compress);
     useExternalCompression = EXTERNAL.equals(compress);
     connTimeout = getParameter(initArgs, HttpClientUtil.PROP_CONNECTION_TIMEOUT, 30000, null);
-    
-    // allow a leader override for tests - you specify this in /replication follower section of solrconfig and some
+
+    // allow a leader override for tests - you specify this in /replication follower section of
+    // solrconfig and some
     // test don't want to define this

Review comment:
       Fix this
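
Re-wrapped so "solrconfig and some" is no longer stranded; the small grammar slip ("some test") is smoothed as well, which goes slightly beyond the original wording (sketch):

    // Allow a leader override for tests - you specify this in the /replication follower section
    // of solrconfig, and some tests don't want to define this.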

##########
File path: solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
##########
@@ -383,33 +387,42 @@ public boolean incrementToken() {
       }
     }
 
-
     protected void addAttributes(AttributeSource attributeSource) {
-      // note: ideally we wouldn't call addAttributeImpl which is marked internal. But nonetheless it's possible
-      //  this method is used by some custom attributes, especially since Solr doesn't provide a way to customize the
-      //  AttributeFactory which is the recommended way to choose which classes implement which attributes.
+      // note: ideally we wouldn't call addAttributeImpl which is marked internal. But nonetheless
+      // it's possible
+      //  this method is used by some custom attributes, especially since Solr doesn't provide a way
+      // to customize the
+      //  AttributeFactory which is the recommended way to choose which classes implement which
+      // attributes.

Review comment:
       Fix this
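
The mix of one- and two-space continuation lines is what makes this hard to read; one possible re-wrap of the same note (sketch):

      // Note: ideally we wouldn't call addAttributeImpl, which is marked internal. Nonetheless
      // it's possible this method is used by some custom attributes, especially since Solr
      // doesn't provide a way to customize the AttributeFactory, which is the recommended way
      // to choose which classes implement which attributes.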

##########
File path: solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
##########
@@ -121,20 +121,19 @@ public String getDescription() {
     return "Provides a breakdown of the analysis process of provided documents";
   }
 
-
-  //================================================ Helper Methods ==================================================
+  // ================================================ Helper Methods
+  // ==================================================

Review comment:
       Fix this
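
Since the banner is purely decorative, simply shortening it so it fits on one line seems like the natural fix (sketch):

  // ================================ Helper Methods ================================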

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -1005,36 +1133,52 @@ private long downloadIndexFiles(boolean downloadCompleteIndex, Directory indexDi
     }
 
     if (log.isInfoEnabled()) {
-      log.info("tmpIndexDir_type  : {} , {}", tmpIndexDir.getClass(), FilterDirectory.unwrap(tmpIndexDir));
+      log.info(
+          "tmpIndexDir_type  : {} , {}",
+          tmpIndexDir.getClass(),
+          FilterDirectory.unwrap(tmpIndexDir));
     }
     long usableSpace = usableDiskSpaceProvider.apply(tmpIndexDirPath);
     if (getApproxTotalSpaceReqd(totalSpaceRequired) > usableSpace) {
       deleteFilesInAdvance(indexDir, indexDirPath, totalSpaceRequired, usableSpace);
     }
 
-    for (Map<String,Object> file : filesToDownload) {
+    for (Map<String, Object> file : filesToDownload) {
       String filename = (String) file.get(NAME);
       long size = (Long) file.get(SIZE);
-      CompareResult compareResult = compareFile(indexDir, filename, size, (Long) file.get(CHECKSUM));
+      CompareResult compareResult =
+          compareFile(indexDir, filename, size, (Long) file.get(CHECKSUM));
       boolean alwaysDownload = filesToAlwaysDownloadIfNoChecksums(filename, size, compareResult);
       if (log.isDebugEnabled()) {
-        log.debug("Downloading file={} size={} checksum={} alwaysDownload={}", filename, size, file.get(CHECKSUM), alwaysDownload);
+        log.debug(
+            "Downloading file={} size={} checksum={} alwaysDownload={}",
+            filename,
+            size,
+            file.get(CHECKSUM),
+            alwaysDownload);
       }
       if (!compareResult.equal || downloadCompleteIndex || alwaysDownload) {
         File localFile = new File(indexDirPath, filename);
-        if (downloadCompleteIndex && doDifferentialCopy && compareResult.equal && compareResult.checkSummed
+        if (downloadCompleteIndex
+            && doDifferentialCopy
+            && compareResult.equal
+            && compareResult.checkSummed
             && localFile.exists()) {
           if (log.isInfoEnabled()) {
-            log.info("Don't need to download this file. Local file's path is: {}, checksum is: {}",
-                localFile.getAbsolutePath(), file.get(CHECKSUM));
+            log.info(
+                "Don't need to download this file. Local file's path is: {}, checksum is: {}",
+                localFile.getAbsolutePath(),
+                file.get(CHECKSUM));
           }
-          // A hard link here should survive the eventual directory move, and should be more space efficient as
+          // A hard link here should survive the eventual directory move, and should be more space
+          // efficient as
           // compared to a file copy. TODO: Maybe we could do a move safely here?

Review comment:
       Fix this
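
Re-wrapped so "efficient as" is not orphaned (sketch; "as compared to" is tightened to "compared to", a minor wording change):

          // A hard link here should survive the eventual directory move, and should be more
          // space efficient compared to a file copy. TODO: Maybe we could do a move safely here?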

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -900,7 +1015,8 @@ private StringBuilder readToStringBuilder(long replicationTime, String str) {
   private void openNewSearcherAndUpdateCommitPoint() throws IOException {
     RefCounted<SolrIndexSearcher> searcher = null;
     IndexCommit commitPoint;
-    // must get the latest solrCore object because the one we have might be closed because of a reload
+    // must get the latest solrCore object because the one we have might be closed because of a
+    // reload
     // todo stop keeping solrCore around

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -612,9 +707,9 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel
               }
 
               log.info("Configuration files are modified, core will be reloaded");
-              logReplicationTimeAndConfFiles(modifiedConfFiles,
-                  successfulInstall);// write to a file time of replication and
-                                     // conf files.
+              logReplicationTimeAndConfFiles(
+                  modifiedConfFiles, successfulInstall); // write to a file time of replication and
+              // conf files.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -507,29 +570,43 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel
       }
 
       // Create the sync service
-      fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrNamedThreadFactory("fsyncService"));
+      fsyncService =
+          ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrNamedThreadFactory("fsyncService"));
       // use a synchronized list because the list is read by other threads (to show details)
       filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
-      // if the generation of leader is older than that of the follower , it means they are not compatible to be copied
+      // if the generation of leader is older than that of the follower , it means they are not
+      // compatible to be copied
       // then a new index directory to be created and all the files need to be copied
-      boolean isFullCopyNeeded = IndexDeletionPolicyWrapper
-          .getCommitTimestamp(commit) >= latestVersion
-          || commit.getGeneration() >= latestGeneration || forceReplication;
+      boolean isFullCopyNeeded =
+          IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion
+              || commit.getGeneration() >= latestGeneration
+              || forceReplication;
 
       String timestamp = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
       String tmpIdxDirName = "index." + timestamp;
       tmpIndexDirPath = solrCore.getDataDir() + tmpIdxDirName;
 
-      tmpIndexDir = solrCore.getDirectoryFactory().get(tmpIndexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
+      tmpIndexDir =
+          solrCore
+              .getDirectoryFactory()
+              .get(
+                  tmpIndexDirPath,
+                  DirContext.DEFAULT,
+                  solrCore.getSolrConfig().indexConfig.lockType);
 
       // cindex dir...
       indexDirPath = solrCore.getIndexDir();
-      indexDir = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
+      indexDir =
+          solrCore
+              .getDirectoryFactory()
+              .get(indexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
 
       try {
 
-        // We will compare all the index files from the leader vs the index files on disk to see if there is a mismatch
-        // in the metadata. If there is a mismatch for the same index file then we download the entire index
+        // We will compare all the index files from the leader vs the index files on disk to see if
+        // there is a mismatch
+        // in the metadata. If there is a mismatch for the same index file then we download the
+        // entire index
         // (except when differential copy is applicable) again.

Review comment:
       Fix this
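
A re-wrap of the two sentences so neither "there is a mismatch" nor "entire index" dangles (sketch, same content):

        // We will compare all the index files from the leader vs the index files on disk to see
        // if there is a mismatch in the metadata. If there is a mismatch for the same index file
        // then we download the entire index (except when differential copy is applicable) again.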

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -507,29 +570,43 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreRel
       }
 
       // Create the sync service
-      fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrNamedThreadFactory("fsyncService"));
+      fsyncService =
+          ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrNamedThreadFactory("fsyncService"));
       // use a synchronized list because the list is read by other threads (to show details)
       filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
-      // if the generation of leader is older than that of the follower , it means they are not compatible to be copied
+      // if the generation of leader is older than that of the follower , it means they are not
+      // compatible to be copied
       // then a new index directory to be created and all the files need to be copied

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -1238,15 +1404,16 @@ private boolean isIndexStale(Directory dir) throws IOException {
 
   /**
    * Copy a file by the File#renameTo() method. If it fails, it is considered a failure
-   * <p/>
+   *
+   * <p>

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -1073,25 +1220,27 @@ private static Long getUsableSpace(String dir) {
     }
   }
 
-
-
   private long getApproxTotalSpaceReqd(long totalSpaceRequired) {
-    long approxTotalSpaceReqd = (long) (totalSpaceRequired * 1.05);// add 5% extra for safety
-    approxTotalSpaceReqd += (100 * 1024 * 1024); //we should have an extra of 100MB free after everything is downloaded
+    long approxTotalSpaceReqd = (long) (totalSpaceRequired * 1.05); // add 5% extra for safety
+    approxTotalSpaceReqd +=
+        (100 * 1024 * 1024); // we should have an extra of 100MB free after everything is downloaded

Review comment:
       Fix this
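
Putting both notes on their own lines avoids the trailing-comment wraps entirely (a sketch, assuming the usual 100-column limit):

    // add 5% extra for safety
    long approxTotalSpaceReqd = (long) (totalSpaceRequired * 1.05);
    // we should have an extra 100MB free after everything is downloaded
    approxTotalSpaceReqd += (100 * 1024 * 1024);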

##########
File path: solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
##########
@@ -321,53 +312,66 @@ public MoreLikeThisHelper( SolrParams params, SolrIndexSearcher searcher )
         }
       }
       String[] fields = list.toArray(new String[list.size()]);
-      if( fields.length < 1 ) {
-        throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, 
-            "MoreLikeThis requires at least one similarity field: "+MoreLikeThisParams.SIMILARITY_FIELDS );
+      if (fields.length < 1) {
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "MoreLikeThis requires at least one similarity field: "
+                + MoreLikeThisParams.SIMILARITY_FIELDS);
       }
-      
-      this.mlt = new MoreLikeThis( reader ); // TODO -- after LUCENE-896, we can use , searcher.getSimilarity() );
+
+      this.mlt =
+          new MoreLikeThis(
+              reader); // TODO -- after LUCENE-896, we can use , searcher.getSimilarity() );

Review comment:
       Fix this
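
With the TODO hoisted above the assignment, the constructor call fits on one line again; the TODO's wording is lightly smoothed here, which is an editorial guess rather than the reviewer's stated preference (sketch):

      // TODO -- after LUCENE-896 we could also pass searcher.getSimilarity()
      this.mlt = new MoreLikeThis(reader);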

##########
File path: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
##########
@@ -267,12 +279,14 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
         // in a catastrophic failure, but will result in the client getting an empty file list for
         // the CMD_GET_FILE_LIST command.
         //
-        core.getDeletionPolicy().setReserveDuration(commitPoint.getGeneration(), reserveCommitDuration);
+        core.getDeletionPolicy()
+            .setReserveDuration(commitPoint.getGeneration(), reserveCommitDuration);
         rsp.add(CMD_INDEX_VERSION, IndexDeletionPolicyWrapper.getCommitTimestamp(commitPoint));
         rsp.add(GENERATION, commitPoint.getGeneration());
         rsp.add(STATUS, OK_STATUS);
       } else {
-        // This happens when replication is not configured to happen after startup and no commit/optimize
+        // This happens when replication is not configured to happen after startup and no
+        // commit/optimize
         // has happened yet.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
##########
@@ -1373,45 +1468,45 @@ public void inform(SolrCore core) {
   }
 
   // check leader or follower is enabled
-  private boolean isEnabled(NamedList<?> params ){
-    if( params == null ) return false;
-    Object enable = params.get( "enable" );
-    if( enable == null ) return true;
-    if( enable instanceof String )
-      return StrUtils.parseBool( (String)enable );
-    return Boolean.TRUE.equals( enable );
+  private boolean isEnabled(NamedList<?> params) {
+    if (params == null) return false;
+    Object enable = params.get("enable");
+    if (enable == null) return true;
+    if (enable instanceof String) return StrUtils.parseBool((String) enable);
+    return Boolean.TRUE.equals(enable);
   }
 
-  private final CloseHook startShutdownHook = new CloseHook() {
-    @Override
-    public void preClose(SolrCore core) {
-      if (executorService != null)
-        executorService.shutdown(); // we don't wait for shutdown - this can deadlock core reload
-    }
+  private final CloseHook startShutdownHook =
+      new CloseHook() {
+        @Override
+        public void preClose(SolrCore core) {
+          if (executorService != null)
+            executorService
+                .shutdown(); // we don't wait for shutdown - this can deadlock core reload

Review comment:
       Fix this
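
The dangling ".shutdown()" split is easiest to avoid by putting the note above the statement (sketch, same wording):

        @Override
        public void preClose(SolrCore core) {
          // we don't wait for shutdown - this can deadlock core reload
          if (executorService != null) executorService.shutdown();
        }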

##########
File path: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
##########
@@ -508,8 +536,10 @@ private void restore(SolrParams params, SolrQueryResponse rsp, SolrQueryRequest
 
     URI locationUri = repo.createDirectoryURI(location);
 
-    //If name is not provided then look for the last unnamed( the ones with the snapshot.timestamp format)
-    //snapshot folder since we allow snapshots to be taken without providing a name. Pick the latest timestamp.
+    // If name is not provided then look for the last unnamed( the ones with the snapshot.timestamp
+    // format)
+    // snapshot folder since we allow snapshots to be taken without providing a name. Pick the
+    // latest timestamp.

Review comment:
       Fix this
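
Re-wrapped so "format)" and "latest timestamp." are no longer stranded (sketch, same content):

    // If name is not provided then look for the last unnamed (the ones with the
    // snapshot.timestamp format) snapshot folder since we allow snapshots to be taken without
    // providing a name. Pick the latest timestamp.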

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -1046,23 +1190,26 @@ private long downloadIndexFiles(boolean downloadCompleteIndex, Directory indexDi
         }
       }
     }
-    log.info("Bytes downloaded: {}, Bytes skipped downloading: {}", bytesDownloaded, bytesSkippedCopying);
+    log.info(
+        "Bytes downloaded: {}, Bytes skipped downloading: {}",
+        bytesDownloaded,
+        bytesSkippedCopying);
     return bytesDownloaded;
   }
 
-  //only for testing purposes. do not use this anywhere else
-  //-----------START----------------------
+  // only for testing purposes. do not use this anywhere else
+  // -----------START----------------------
   static BooleanSupplier testWait = () -> true;
   static Function<String, Long> usableDiskSpaceProvider = dir -> getUsableSpace(dir);
-  //------------ END---------------------
-
+  // ------------ END---------------------
 
   private static Long getUsableSpace(String dir) {
     try {
       File file = new File(dir);
       if (!file.exists()) {
         file = file.getParentFile();
-        if (!file.exists()) {//this is not a disk directory . so just pretend that there is enough space
+        if (!file.exists()) { // this is not a disk directory . so just pretend that there is enough
+          // space

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
##########
@@ -1617,15 +1715,16 @@ public void write(OutputStream out) throws IOException {
           fos.flush();
           log.debug("Wrote {} bytes for file {}", offset + read, fileName); // nowarn
 
-          //Pause if necessary
+          // Pause if necessary
           maxBytesBeforePause += read;
           if (maxBytesBeforePause >= rateLimiter.getMinPauseCheckBytes()) {
             rateLimiter.pause(maxBytesBeforePause);
             maxBytesBeforePause = 0;
           }
           if (read != buf.length) {
             writeNothingAndFlush();
-            fos.close(); // we close because DeflaterOutputStream requires a close call, but but the request outputstream is protected
+            fos.close(); // we close because DeflaterOutputStream requires a close call, but
+            // the request outputstream is protected

Review comment:
       Fix this
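
One way to avoid the awkward wrap is to move the note above the close call (sketch):

          if (read != buf.length) {
            writeNothingAndFlush();
            // we close because DeflaterOutputStream requires a close call, but the request
            // outputstream is protected
            fos.close();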

##########
File path: solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
##########
@@ -1635,156 +1818,176 @@ private int fetchPackets(FastInputStream fis) throws Exception {
           }
           long checkSumServer = -1;
           fis.readFully(intbytes);
-          //read the size of the packet
+          // read the size of the packet
           int packetSize = readInt(intbytes);
           if (packetSize <= 0) {
             log.warn("No content received for file: {}", fileName);
             return NO_CONTENT;
           }
-          //TODO consider recoding the remaining logic to not use/need buf[]; instead use the internal buffer of fis
+          // TODO consider recoding the remaining logic to not use/need buf[]; instead use the
+          // internal buffer of fis
           if (buf.length < packetSize) {
-            //This shouldn't happen since sender should use PACKET_SZ and we init the buf based on that too
+            // This shouldn't happen since sender should use PACKET_SZ and we init the buf based on
+            // that too
             buf = new byte[packetSize];
           }
           if (checksum != null) {
-            //read the checksum
+            // read the checksum
             fis.readFully(longbytes);
             checkSumServer = readLong(longbytes);
           }
-          //then read the packet of bytes
+          // then read the packet of bytes
           fis.readFully(buf, 0, packetSize);
-          //compare the checksum as sent from the leader
+          // compare the checksum as sent from the leader
           if (includeChecksum) {
             checksum.reset();
             checksum.update(buf, 0, packetSize);
             long checkSumClient = checksum.getValue();
             if (checkSumClient != checkSumServer) {
               log.error("Checksum not matched between client and server for file: {}", fileName);
-              //if checksum is wrong it is a problem return (there doesn't seem to be a retry in this case.)
+              // if checksum is wrong it is a problem return (there doesn't seem to be a retry in
+              // this case.)
               return 1;
             }
           }
-          //if everything is fine, write down the packet to the file
+          // if everything is fine, write down the packet to the file
           file.write(buf, packetSize);
           bytesDownloaded += packetSize;
           log.debug("Fetched and wrote {} bytes of file: {}", bytesDownloaded, fileName);
-          //errorCount is always set to zero after a successful packet
+          // errorCount is always set to zero after a successful packet
           errorCount = 0;
-          if (bytesDownloaded >= size)
-            return 0;
+          if (bytesDownloaded >= size) return 0;
         }
       } catch (ReplicationHandlerException e) {
         throw e;
       } catch (Exception e) {
-        log.warn("Error in fetching file: {} (downloaded {} of {} bytes)",
-            fileName, bytesDownloaded, size, e);
-        //for any failure, increment the error count
+        log.warn(
+            "Error in fetching file: {} (downloaded {} of {} bytes)",
+            fileName,
+            bytesDownloaded,
+            size,
+            e);
+        // for any failure, increment the error count
         errorCount++;
-        //if it fails for the same packet for MAX_RETRIES fail and come out
+        // if it fails for the same packet for MAX_RETRIES fail and come out
         if (errorCount > MAX_RETRIES) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Failed to fetch file: " + fileName +
-                  " (downloaded " + bytesDownloaded + " of " + size + " bytes" +
-                  ", error count: " + errorCount + " > " + MAX_RETRIES + ")", e);
+          throw new SolrException(
+              SolrException.ErrorCode.SERVER_ERROR,
+              "Failed to fetch file: "
+                  + fileName
+                  + " (downloaded "
+                  + bytesDownloaded
+                  + " of "
+                  + size
+                  + " bytes"
+                  + ", error count: "
+                  + errorCount
+                  + " > "
+                  + MAX_RETRIES
+                  + ")",
+              e);
         }
         return ERR;
       }
     }
 
     /**
-     * The webcontainer flushes the data only after it fills the buffer size. So, all data has to be read as readFully()
-     * other wise it fails. So read everything as bytes and then extract an integer out of it
+     * The webcontainer flushes the data only after it fills the buffer size. So, all data has to be
+     * read as readFully() otherwise it fails. So read everything as bytes and then extract an
+     * integer out of it
      */
     private int readInt(byte[] b) {
-      return (((b[0] & 0xff) << 24) | ((b[1] & 0xff) << 16)
-          | ((b[2] & 0xff) << 8) | (b[3] & 0xff));
-
+      return (((b[0] & 0xff) << 24) | ((b[1] & 0xff) << 16) | ((b[2] & 0xff) << 8) | (b[3] & 0xff));
     }
 
-    /**
-     * Same as above but to read longs from a byte array
-     */
+    /** Same as above but to read longs from a byte array */
     private long readLong(byte[] b) {
-      return (((long) (b[0] & 0xff)) << 56) | (((long) (b[1] & 0xff)) << 48)
-          | (((long) (b[2] & 0xff)) << 40) | (((long) (b[3] & 0xff)) << 32)
-          | (((long) (b[4] & 0xff)) << 24) | ((b[5] & 0xff) << 16)
-          | ((b[6] & 0xff) << 8) | ((b[7] & 0xff));
-
+      return (((long) (b[0] & 0xff)) << 56)
+          | (((long) (b[1] & 0xff)) << 48)
+          | (((long) (b[2] & 0xff)) << 40)
+          | (((long) (b[3] & 0xff)) << 32)
+          | (((long) (b[4] & 0xff)) << 24)
+          | ((b[5] & 0xff) << 16)
+          | ((b[6] & 0xff) << 8)
+          | ((b[7] & 0xff));
     }
 
-    /**
-     * cleanup everything
-     */
+    /** cleanup everything */
     private void cleanup() {
       try {
         file.close();
-      } catch (Exception e) {/* no-op */
+      } catch (Exception e) {
+        /* no-op */
         log.error("Error closing file: {}", this.saveAs, e);
       }
       if (bytesDownloaded != size) {
-        //if the download is not complete then
-        //delete the file being downloaded
+        // if the download is not complete then
+        // delete the file being downloaded
         try {
           file.delete();
         } catch (Exception e) {
           log.error("Error deleting file: {}", this.saveAs, e);
         }
-        //if the failure is due to a user abort it is returned normally else an exception is thrown
+        // if the failure is due to a user abort it is returned normally else an exception is thrown
         if (!aborted)
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-              "Unable to download " + fileName + " completely. Downloaded "
-                  + bytesDownloaded + "!=" + size);
+          throw new SolrException(
+              SolrException.ErrorCode.SERVER_ERROR,
+              "Unable to download "
+                  + fileName
+                  + " completely. Downloaded "
+                  + bytesDownloaded
+                  + "!="
+                  + size);
       }
     }
 
-    /**
-     * Open a new stream using HttpClient
-     */
+    /** Open a new stream using HttpClient */
     private FastInputStream getStream() throws IOException {
 
       ModifiableSolrParams params = new ModifiableSolrParams();
 
-//    //the method is command=filecontent
+      //    //the method is command=filecontent
       params.set(COMMAND, CMD_GET_FILE);
       params.set(GENERATION, Long.toString(indexGen));
       params.set(CommonParams.QT, ReplicationHandler.PATH);
-      //add the version to download. This is used to reserve the download
+      // add the version to download. This is used to reserve the download
       params.set(solrParamOutput, fileName);
       if (useInternalCompression) {
         params.set(COMPRESSION, "true");
       }
-      //use checksum
+      // use checksum
       if (this.includeChecksum) {
         params.set(CHECKSUM, true);
       }
-      //wt=filestream this is a custom protocol
+      // wt=filestream this is a custom protocol
       params.set(CommonParams.WT, FILE_STREAM);
-      // This happen if there is a failure there is a retry. the offset=<sizedownloaded> ensures that
+      // This happen if there is a failure there is a retry. the offset=<sizedownloaded> ensures
+      // that
       // the server starts from the offset

Review comment:
       Fix this
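
The sentence is hard to follow once "that" lands on its own line; a possible rewording and re-wrap (sketch; the grammar is smoothed, so this goes slightly beyond a pure reflow):

      // This happens if there is a failure and there is a retry: the offset=<sizedownloaded>
      // ensures that the server starts from the offset.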

##########
File path: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
##########
@@ -1560,14 +1654,17 @@ protected void initWrite() throws IOException {
       }
       buf = new byte[(len == -1 || len > PACKET_SZ) ? PACKET_SZ : len];
 
-      //reserve commit point till write is complete
-      if(indexGen != null) {
+      // reserve commit point till write is complete
+      if (indexGen != null) {
         delPolicy.saveCommitPoint(indexGen);
       }
     }
 
     protected void createOutputStream(OutputStream out) {
-      out = new CloseShieldOutputStream(out); // DeflaterOutputStream requires a close call, but don't close the request outputstream
+      out =
+          new CloseShieldOutputStream(
+              out); // DeflaterOutputStream requires a close call, but don't close the request
+      // outputstream

Review comment:
       Fix this
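
Moving the note above the assignment keeps the wrapper construction on one line (sketch, same wording):

    protected void createOutputStream(OutputStream out) {
      // DeflaterOutputStream requires a close call, but don't close the request outputstream
      out = new CloseShieldOutputStream(out);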

##########
File path: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
##########
@@ -1576,17 +1673,18 @@ protected void createOutputStream(OutputStream out) {
     }
 
     protected void extendReserveAndReleaseCommitPoint() {
-      if(indexGen != null) {
-        //Reserve the commit point for another 10s for the next file to be to fetched.
-        //We need to keep extending the commit reservation between requests so that the replica can fetch
-        //all the files correctly.
+      if (indexGen != null) {
+        // Reserve the commit point for another 10s for the next file to be to fetched.
+        // We need to keep extending the commit reservation between requests so that the replica can
+        // fetch
+        // all the files correctly.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/StreamHandler.java
##########
@@ -172,37 +181,41 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
     TupleStream tupleStream;
 
     try {
-      StreamExpression streamExpression = StreamExpressionParser.parse(params.get(StreamParams.EXPR));
+      StreamExpression streamExpression =
+          StreamExpressionParser.parse(params.get(StreamParams.EXPR));
       if (this.streamFactory.isEvaluator(streamExpression)) {
         StreamExpression tupleExpression = new StreamExpression(StreamParams.TUPLE);
-        tupleExpression.addParameter(new StreamExpressionNamedParameter(StreamParams.RETURN_VALUE, streamExpression));
+        tupleExpression.addParameter(
+            new StreamExpressionNamedParameter(StreamParams.RETURN_VALUE, streamExpression));
         tupleStream = this.streamFactory.constructStream(tupleExpression);
       } else {
         tupleStream = this.streamFactory.constructStream(streamExpression);
       }
     } catch (Exception e) {
-      // Catch exceptions that occur while the stream is being created. This will include streaming expression parse
+      // Catch exceptions that occur while the stream is being created. This will include streaming
+      // expression parse
       // rules.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/BackupCoreOp.java
##########
@@ -63,30 +64,43 @@ public void execute(CoreAdminHandler.CallInfo it) throws Exception {
         if ("file".equals(locationUri.getScheme())) {
           core.getCoreContainer().assertPathAllowed(Paths.get(locationUri));
         }
-        final ShardBackupId prevShardBackupId = prevShardBackupIdStr != null ? ShardBackupId.from(prevShardBackupIdStr) : null;
+        final ShardBackupId prevShardBackupId =
+            prevShardBackupIdStr != null ? ShardBackupId.from(prevShardBackupIdStr) : null;
         BackupFilePaths incBackupFiles = new BackupFilePaths(repository, locationUri);
-        IncrementalShardBackup incSnapShooter = new IncrementalShardBackup(repository, core, incBackupFiles,
-                prevShardBackupId, shardBackupId, Optional.ofNullable(commitName));
+        IncrementalShardBackup incSnapShooter =
+            new IncrementalShardBackup(
+                repository,
+                core,
+                incBackupFiles,
+                prevShardBackupId,
+                shardBackupId,
+                Optional.ofNullable(commitName));
         NamedList<Object> rsp = incSnapShooter.backup();
         it.rsp.addResponse(rsp);
       } else {
         SnapShooter snapShooter = new SnapShooter(repository, core, locationUri, name, commitName);
         // validateCreateSnapshot will create parent dirs instead of throw; that choice is dubious.
         //  But we want to throw. One reason is that
-        //  this dir really should, in fact must, already exist here if triggered via a collection backup on a shared
+        //  this dir really should, in fact must, already exist here if triggered via a collection
+        // backup on a shared
         //  file system. Otherwise, perhaps the FS location isn't shared -- we want an error.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
##########
@@ -1673,17 +1768,17 @@ public void write(OutputStream out) throws IOException {
 
         if (Files.isReadable(file)) {
           try (SeekableByteChannel channel = Files.newByteChannel(file)) {
-            //if offset is mentioned move the pointer to that point
-            if (offset != -1)
-              channel.position(offset);
+            // if offset is mentioned move the pointer to that point
+            if (offset != -1) channel.position(offset);
             ByteBuffer bb = ByteBuffer.wrap(buf);
 
             while (true) {
               bb.clear();
               long bytesRead = channel.read(bb);
               if (bytesRead <= 0) {
                 writeNothingAndFlush();
-                fos.close(); // we close because DeflaterOutputStream requires a close call, but the request outputstream is protected
+                fos.close(); // we close because DeflaterOutputStream requires a close call, but
+                // the request outputstream is protected

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/BaseHandlerApiSupport.java
##########
@@ -112,30 +110,36 @@ public void call(SolrQueryRequest req, SolrQueryResponse rsp) {
                 }
               }
             }
-            wrapParams(req, new CommandOperation("", Collections.emptyMap()), commands.get(0), true);
+            wrapParams(
+                req, new CommandOperation("", Collections.emptyMap()), commands.get(0), true);
             commands.get(0).invoke(req, rsp, apiHandler);
           }
 
         } catch (SolrException e) {
           throw e;
         } catch (Exception e) {
-          throw new SolrException(BAD_REQUEST, e); //TODO BAD_REQUEST is a wild guess; should we flip the default?  fail here to investigate how this happens in tests
+          throw new SolrException(
+              BAD_REQUEST,
+              e); // TODO BAD_REQUEST is a wild guess; should we flip the default?  fail here to
+          // investigate how this happens in tests

Review comment:
       Fix this
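
Hoisting the TODO above the throw avoids the split (sketch):

        } catch (Exception e) {
          // TODO BAD_REQUEST is a wild guess; should we flip the default?
          //  Fail here to investigate how this happens in tests.
          throw new SolrException(BAD_REQUEST, e);
        }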

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
##########
@@ -558,18 +608,18 @@ private static void populateFieldInfo(IndexSchema schema,
     field.add("copyDests", toListOfStringDests(schema.getCopyFieldsList(f.getName())));
     field.add("copySources", schema.getCopySources(f.getName()));
 
+    fields.put(f.getName(), field);
 
-    fields.put( f.getName(), field );
-
-    List<String> v = typeusemap.get( ft.getTypeName() );
-    if( v == null ) {
+    List<String> v = typeusemap.get(ft.getTypeName());
+    if (v == null) {
       v = new ArrayList<>();
     }
-    v.add( f.getName() );
-    typeusemap.put( ft.getTypeName(), v );
+    v.add(f.getName());
+    typeusemap.put(ft.getTypeName(), v);
   }
 
-  // This method just gets the top-most level of information. This was conflated with getting detailed info
+  // This method just gets the top-most level of information. This was conflated with getting
+  // detailed info
   // for *all* the fields, called from CoreAdminHandler etc.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
##########
@@ -344,44 +385,49 @@ private static String getFieldFlags( SchemaField f )
     Set<String> fields = null;
     String fl = params.get(CommonParams.FL);
     if (fl != null) {
-      fields = new TreeSet<>(Arrays.asList(fl.split( "[,\\s]+" )));
+      fields = new TreeSet<>(Arrays.asList(fl.split("[,\\s]+")));
     }
 
     LeafReader reader = searcher.getSlowAtomicReader();
     IndexSchema schema = searcher.getSchema();
 
-    // Don't be tempted to put this in the loop below, the whole point here is to alphabetize the fields!
+    // Don't be tempted to put this in the loop below, the whole point here is to alphabetize the
+    // fields!
     Set<String> fieldNames = new TreeSet<>();
-    for(FieldInfo fieldInfo : reader.getFieldInfos()) {
+    for (FieldInfo fieldInfo : reader.getFieldInfos()) {
       fieldNames.add(fieldInfo.name);
     }
 
     // Walk the term enum and keep a priority queue for each map in our set
     SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
 
     for (String fieldName : fieldNames) {
-      if (fields != null && ! fields.contains(fieldName) && ! fields.contains("*")) {
-        continue; //we're not interested in this field Still an issue here
+      if (fields != null && !fields.contains(fieldName) && !fields.contains("*")) {
+        continue; // we're not interested in this field Still an issue here
       }
 
       SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>();
 
-      SchemaField sfield = schema.getFieldOrNull( fieldName );
-      FieldType ftype = (sfield==null)?null:sfield.getType();
+      SchemaField sfield = schema.getFieldOrNull(fieldName);
+      FieldType ftype = (sfield == null) ? null : sfield.getType();
 
-      fieldMap.add( "type", (ftype==null)?null:ftype.getTypeName() );
+      fieldMap.add("type", (ftype == null) ? null : ftype.getTypeName());
       fieldMap.add("schema", getFieldFlags(sfield));
-      if (sfield != null && schema.isDynamicField(sfield.getName()) && schema.getDynamicPattern(sfield.getName()) != null) {
+      if (sfield != null
+          && schema.isDynamicField(sfield.getName())
+          && schema.getDynamicPattern(sfield.getName()) != null) {
         fieldMap.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
       }
       Terms terms = reader.terms(fieldName);
-      if (terms == null) { // Not indexed, so we need to report what we can (it made it through the fl param if specified)
-        finfo.add( fieldName, fieldMap );
+      if (terms
+          == null) { // Not indexed, so we need to report what we can (it made it through the fl
+        // param if specified)

Review comment:
       Fix this
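
The condition was only split to make room for the trailing note; putting the note inside the block restores "if (terms == null)" to a single line (sketch):

      Terms terms = reader.terms(fieldName);
      if (terms == null) {
        // Not indexed, so we need to report what we can (it made it through the fl param
        // if specified)
        finfo.add(fieldName, fieldMap);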

##########
File path: solr/core/src/java/org/apache/solr/handler/StreamHandler.java
##########
@@ -259,34 +280,47 @@ private void handleAdmin(SolrQueryRequest req, SolrQueryResponse rsp, SolrParams
     String id = params.get(ID);
     DaemonStream d = daemons.get(id);
     if (d == null) {
-      rsp.add(StreamParams.RESULT_SET, new DaemonResponseStream("Daemon:" + id + " not found on " + coreName));
+      rsp.add(
+          StreamParams.RESULT_SET,
+          new DaemonResponseStream("Daemon:" + id + " not found on " + coreName));
       return;
     }
 
     switch (action) {
       case "stop":
         d.close();
-        rsp.add(StreamParams.RESULT_SET, new DaemonResponseStream("Daemon:" + id + " stopped on " + coreName));
+        rsp.add(
+            StreamParams.RESULT_SET,
+            new DaemonResponseStream("Daemon:" + id + " stopped on " + coreName));
         break;
 
       case "start":
         try {
           d.open();
         } catch (IOException e) {
-          rsp.add(StreamParams.RESULT_SET, new DaemonResponseStream("Daemon: " + id + " error: " + e.getMessage()));
+          rsp.add(
+              StreamParams.RESULT_SET,
+              new DaemonResponseStream("Daemon: " + id + " error: " + e.getMessage()));
         }
-        rsp.add(StreamParams.RESULT_SET, new DaemonResponseStream("Daemon:" + id + " started on " + coreName));
+        rsp.add(
+            StreamParams.RESULT_SET,
+            new DaemonResponseStream("Daemon:" + id + " started on " + coreName));
         break;
 
       case "kill":
         daemons.remove(id);
-        d.close(); // we already found it in the daemons list, so we don't need to verify we removed it.
-        rsp.add(StreamParams.RESULT_SET, new DaemonResponseStream("Daemon:" + id + " killed on " + coreName));
+        d.close(); // we already found it in the daemons list, so we don't need to verify we
+        // removed it.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/SnapShooter.java
##########
@@ -69,21 +67,34 @@
   public SnapShooter(SolrCore core, String location, String snapshotName) {
     String snapDirStr = null;
     // Note - This logic is only applicable to the usecase where a shared file-system is exposed via
-    // local file-system interface (primarily for backwards compatibility). For other use-cases, users
+    // local file-system interface (primarily for backwards compatibility). For other use-cases,
+    // users
     // will be required to specify "location" where the backup should be stored.

Review comment:
       Fix this
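
Re-wrapped so "users" does not dangle at the end of a line (sketch, same content):

    // Note - This logic is only applicable to the usecase where a shared file-system is exposed
    // via local file-system interface (primarily for backwards compatibility). For other
    // use-cases, users will be required to specify "location" where the backup should be stored.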

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
##########
@@ -417,10 +463,13 @@ private static Document getFirstLiveDoc(Terms terms, LeafReader reader) throws I
     PostingsEnum postingsEnum = null;
     TermsEnum termsEnum = terms.iterator();
     BytesRef text;
-    // Deal with the chance that the first bunch of terms are in deleted documents. Is there a better way?
+    // Deal with the chance that the first bunch of terms are in deleted documents. Is there a
+    // better way?
     for (int idx = 0; idx < 1000 && postingsEnum == null; ++idx) {
       text = termsEnum.next();
-      if (text == null) { // Ran off the end of the terms enum without finding any live docs with that field in them.
+      if (text
+          == null) { // Ran off the end of the terms enum without finding any live docs with that
+        // field in them.

Review comment:
       Fix this

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java
##########
@@ -63,105 +61,151 @@ public void execute(CallInfo it) throws Exception {
     int conflictWaitMs = coreContainer.getZkController().getLeaderConflictResolveWait();
     log.info(
         "Going to wait for coreNodeName: {}, state: {}, checkLive: {}, onlyIfLeader: {}, onlyIfLeaderActive: {}",
-        coreNodeName, waitForState, checkLive, onlyIfLeader, onlyIfLeaderActive);
+        coreNodeName,
+        waitForState,
+        checkLive,
+        onlyIfLeader,
+        onlyIfLeaderActive);
 
     String collectionName;
     CloudDescriptor cloudDescriptor;
     try (SolrCore core = coreContainer.getCore(cname)) {
-      if (core == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
+      if (core == null)
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
       collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-      cloudDescriptor = core.getCoreDescriptor()
-          .getCloudDescriptor();
+      cloudDescriptor = core.getCoreDescriptor().getCloudDescriptor();
     }
     AtomicReference<String> errorMessage = new AtomicReference<>();
     try {
-      coreContainer.getZkController().getZkStateReader().waitForState(collectionName, conflictWaitMs, TimeUnit.MILLISECONDS, (n, c) -> {
-        if (c == null)
-          return false;
-
-        try (SolrCore core = coreContainer.getCore(cname)) {
-          if (core == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
-          if (onlyIfLeader != null && onlyIfLeader) {
-            if (!core.getCoreDescriptor().getCloudDescriptor().isLeader()) {
-              throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "We are not the leader");
-            }
-          }
-        }
-
-        // wait until we are sure the recovering node is ready
-        // to accept updates
-        Replica.State state = null;
-        boolean live = false;
-        Slice slice = c.getSlice(cloudDescriptor.getShardId());
-        if (slice != null) {
-          final Replica replica = slice.getReplicasMap().get(coreNodeName);
-          if (replica != null) {
-            state = replica.getState();
-            live = n.contains(nodeName);
-
-            final Replica.State localState = cloudDescriptor.getLastPublished();
-
-            // TODO: This is funky but I've seen this in testing where the replica asks the
-            // leader to be in recovery? Need to track down how that happens ... in the meantime,
-            // this is a safeguard
-            boolean leaderDoesNotNeedRecovery = (onlyIfLeader != null &&
-                onlyIfLeader &&
-                cname.equals(replica.getStr("core")) &&
-                waitForState == Replica.State.RECOVERING &&
-                localState == Replica.State.ACTIVE &&
-                state == Replica.State.ACTIVE);
-
-            if (leaderDoesNotNeedRecovery) {
-              log.warn("Leader {} ignoring request to be in the recovering state because it is live and active.", cname);
-            }
-
-            ZkShardTerms shardTerms = coreContainer.getZkController().getShardTerms(collectionName, slice.getName());
-            // if the replica is waiting for leader to see recovery state, the leader should refresh its terms
-            if (waitForState == Replica.State.RECOVERING && shardTerms.registered(coreNodeName)
-                && shardTerms.skipSendingUpdatesTo(coreNodeName)) {
-              // The replica changed it term, then published itself as RECOVERING.
-              // This core already see replica as RECOVERING
-              // so it is guarantees that a live-fetch will be enough for this core to see max term published
-              shardTerms.refreshTerms();
-            }
-
-            boolean onlyIfActiveCheckResult = onlyIfLeaderActive != null && onlyIfLeaderActive
-                && localState != Replica.State.ACTIVE;
-            if (log.isInfoEnabled()) {
-              log.info(
-                  "In WaitForState(" + waitForState + "): collection=" + collectionName + ", shard=" + slice.getName() +
-                      ", thisCore=" + cname + ", leaderDoesNotNeedRecovery=" + leaderDoesNotNeedRecovery +
-                      ", isLeader? " + cloudDescriptor.isLeader() +
-                      ", live=" + live + ", checkLive=" + checkLive + ", currentState=" + state
-                      + ", localState=" + localState + ", nodeName=" + nodeName +
-                      ", coreNodeName=" + coreNodeName + ", onlyIfActiveCheckResult=" + onlyIfActiveCheckResult
-                      + ", nodeProps: " + replica); //nowarn
-            }
-            if (!onlyIfActiveCheckResult && replica != null && (state == waitForState || leaderDoesNotNeedRecovery)) {
-              if (checkLive == null) {
-                return true;
-              } else if (checkLive && live) {
-                return true;
-              } else if (!checkLive && !live) {
-                return true;
-              }
-            }
-          }
-        }
-
-        if (coreContainer.isShutDown()) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Solr is shutting down");
-        }
-
-        return false;
-      });
+      coreContainer
+          .getZkController()
+          .getZkStateReader()
+          .waitForState(
+              collectionName,
+              conflictWaitMs,
+              TimeUnit.MILLISECONDS,
+              (n, c) -> {
+                if (c == null) return false;
+
+                try (SolrCore core = coreContainer.getCore(cname)) {
+                  if (core == null)
+                    throw new SolrException(
+                        SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
+                  if (onlyIfLeader != null && onlyIfLeader) {
+                    if (!core.getCoreDescriptor().getCloudDescriptor().isLeader()) {
+                      throw new SolrException(
+                          SolrException.ErrorCode.BAD_REQUEST, "We are not the leader");
+                    }
+                  }
+                }
+
+                // wait until we are sure the recovering node is ready
+                // to accept updates
+                Replica.State state = null;
+                boolean live = false;
+                Slice slice = c.getSlice(cloudDescriptor.getShardId());
+                if (slice != null) {
+                  final Replica replica = slice.getReplicasMap().get(coreNodeName);
+                  if (replica != null) {
+                    state = replica.getState();
+                    live = n.contains(nodeName);
+
+                    final Replica.State localState = cloudDescriptor.getLastPublished();
+
+                    // TODO: This is funky but I've seen this in testing where the replica asks the
+                    // leader to be in recovery? Need to track down how that happens ... in the
+                    // meantime,
+                    // this is a safeguard

Review comment:
       Fix this: the rewrapped TODO comment now strands "meantime," on its own line. Reflow the comment so the sentence reads naturally.
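       One possible reflow (a sketch; exact wrapping just has to fit the formatter's line-length limit):

           // TODO: This is funky but I've seen this in testing where the replica asks
           // the leader to be in recovery? Need to track down how that happens ...
           // in the meantime, this is a safeguard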

##########
File path: solr/core/src/java/org/apache/solr/handler/SnapShooter.java
##########
@@ -208,82 +227,103 @@ public static IndexCommit getAndSaveNamedIndexCommit(SolrCore solrCore, String c
       if (namedCommit.isPresent()) {
         final IndexCommit commit = namedCommit.get();
         if (log.isDebugEnabled()) {
-          log.debug("Using named commit: name={}, generation={}", commitName, commit.getGeneration());
+          log.debug(
+              "Using named commit: name={}, generation={}", commitName, commit.getGeneration());
         }
         delPolicy.saveCommitPoint(commit.getGeneration());
         return commit;
       }
     } // else...
-    throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to find an index commit with name " +
-            commitName + " for core " + solrCore.getName());
+    throw new SolrException(
+        ErrorCode.BAD_REQUEST,
+        "Unable to find an index commit with name "
+            + commitName
+            + " for core "
+            + solrCore.getName());
   }
 
-  public void createSnapAsync(final int numberToKeep, Consumer<NamedList<?>> result) throws IOException {
-    //TODO should use Solr's ExecutorUtil
-    new Thread(() -> {
-      NamedList<Object> snapShootDetails;
-      try {
-        snapShootDetails = createSnapshot();
-      } catch (Exception e) {
-        log.error("Exception while creating snapshot", e);
-        snapShootDetails = new NamedList<>();
-        snapShootDetails.add("exception", e.getMessage());
-      }
-      if (snapshotName == null) {
-        try {
-          deleteOldBackups(numberToKeep);
-        } catch (IOException e) {
-          log.warn("Unable to delete old snapshots ", e);
-        }
-      }
-      if (null != snapShootDetails) result.accept(snapShootDetails);
-    }, "CreateSnapshot").start();
-
+  public void createSnapAsync(final int numberToKeep, Consumer<NamedList<?>> result)
+      throws IOException {
+    // TODO should use Solr's ExecutorUtil
+    new Thread(
+            () -> {
+              NamedList<Object> snapShootDetails;
+              try {
+                snapShootDetails = createSnapshot();
+              } catch (Exception e) {
+                log.error("Exception while creating snapshot", e);
+                snapShootDetails = new NamedList<>();
+                snapShootDetails.add("exception", e.getMessage());
+              }
+              if (snapshotName == null) {
+                try {
+                  deleteOldBackups(numberToKeep);
+                } catch (IOException e) {
+                  log.warn("Unable to delete old snapshots ", e);
+                }
+              }
+              if (null != snapShootDetails) result.accept(snapShootDetails);
+            },
+            "CreateSnapshot")
+        .start();
   }
 
   /**
    * Handles the logic of creating a snapshot
-   * <p>
-   * <b>NOTE:</b> The caller <em>MUST</em> ensure that the {@link IndexCommit} is saved prior to 
-   * calling this method, and released after calling this method, or there is no no garuntee that the 
-   * method will function correctly.
-   * </p>
+   *
+   * <p><b>NOTE:</b> The caller <em>MUST</em> ensure that the {@link IndexCommit} is saved prior to
+   * calling this method, and released after calling this method, or there is no no garuntee that
+   * the method will function correctly.
    *
    * @see IndexDeletionPolicyWrapper#saveCommitPoint
    * @see IndexDeletionPolicyWrapper#releaseCommitPoint
    */
   protected NamedList<Object> createSnapshot(final IndexCommit indexCommit) throws Exception {
     assert indexCommit != null;
     if (log.isInfoEnabled()) {
-      log.info("Creating backup snapshot {} at {}", (snapshotName == null ? "<not named>" : snapshotName), baseSnapDirPath);
+      log.info(
+          "Creating backup snapshot {} at {}",
+          (snapshotName == null ? "<not named>" : snapshotName),
+          baseSnapDirPath);
     }
     boolean success = false;
     try {
       NamedList<Object> details = new SimpleOrderedMap<>();
       details.add("startTime", Instant.now().toString());
 
       Collection<String> files = indexCommit.getFileNames();
-      Directory dir = solrCore.getDirectoryFactory().get(solrCore.getIndexDir(), DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
+      Directory dir =
+          solrCore
+              .getDirectoryFactory()
+              .get(
+                  solrCore.getIndexDir(),
+                  DirContext.DEFAULT,
+                  solrCore.getSolrConfig().indexConfig.lockType);
       try {
-        for(String fileName : files) {
-          log.debug("Copying fileName={} from dir={} to snapshot={}", fileName, dir, snapshotDirPath);
+        for (String fileName : files) {
+          log.debug(
+              "Copying fileName={} from dir={} to snapshot={}", fileName, dir, snapshotDirPath);
           backupRepo.copyFileFrom(dir, fileName, snapshotDirPath);
         }
       } finally {
         solrCore.getDirectoryFactory().release(dir);
       }
 
       String endTime = Instant.now().toString();
-      
-      details.add("fileCount", files.size()); // DEPRECATED: for removal, replaced with indexFileCount
+
+      details.add(
+          "fileCount", files.size()); // DEPRECATED: for removal, replaced with indexFileCount

Review comment:
       Fix this: the statement got split onto two lines just to keep the trailing DEPRECATED comment. Move the comment above details.add(...) instead.
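       For example (a sketch):

           // DEPRECATED: for removal, replaced with indexFileCount
           details.add("fileCount", files.size());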

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
##########
@@ -601,62 +653,67 @@ private static void populateFieldInfo(IndexSchema schema,
   interface IOSupplier {
     boolean get() throws IOException;
   }
-  
+
   private static Object closeSafe(IOSupplier isCurrent) {
     try {
       return isCurrent.get();
-    }catch(AlreadyClosedException | IOException exception) {
+    } catch (AlreadyClosedException | IOException exception) {
     }
     return false;
   }
 
-
   /**
-   * <p>A helper method that attempts to determine the file length of the the segments file for the 
+   * A helper method that attempts to determine the file length of the the segments file for the
    * specified IndexCommit from it's Directory.
-   * </p>
-   * <p>
-   * If any sort of {@link IOException} occurs, this method will return "-1" and swallow the exception since 
-   * this may be normal if the IndexCommit is no longer "on disk".  The specific type of the Exception will 
-   * affect how severely it is logged: {@link NoSuchFileException} is considered more "acceptible" then other 
-   * types of IOException which may indicate an actual problem with the Directory.
+   *
+   * <p>If any sort of {@link IOException} occurs, this method will return "-1" and swallow the
+   * exception since this may be normal if the IndexCommit is no longer "on disk". The specific type
+   * of the Exception will affect how severely it is logged: {@link NoSuchFileException} is
+   * considered more "acceptible" then other types of IOException which may indicate an actual
+   * problem with the Directory.
    */
   private static long getSegmentsFileLength(IndexCommit commit) {
     try {
       return commit.getDirectory().fileLength(commit.getSegmentsFileName());
     } catch (NoSuchFileException okException) {
-      log.debug("Unable to determine the (optional) fileSize for the current IndexReader's segments file because it is "
-          + "no longer in the Directory, this can happen if there are new commits since the Reader was opened"
-          , okException);
+      log.debug(
+          "Unable to determine the (optional) fileSize for the current IndexReader's segments file because it is "
+              + "no longer in the Directory, this can happen if there are new commits since the Reader was opened",
+          okException);
     } catch (IOException strangeException) {
-      log.warn("Ignoring IOException wile attempting to determine the (optional) fileSize stat for the current IndexReader's segments file",
-               strangeException);
+      log.warn(
+          "Ignoring IOException wile attempting to determine the (optional) fileSize stat for the current IndexReader's segments file",
+          strangeException);
     }
     return -1;
   }
 
-  // Get terribly detailed information about a particular field. This is a very expensive call, use it with caution
+  // Get terribly detailed information about a particular field. This is a very expensive call, use
+  // it with caution
   // especially on large indexes!

Review comment:
       Fix this: the rewrap leaves "it with caution" dangling before the separate "especially on large indexes!" line. Reflow the comment into complete lines.
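       One possible reflow (a sketch):

           // Get terribly detailed information about a particular field. This is a very expensive
           // call, use it with caution especially on large indexes!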

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
##########
@@ -601,62 +653,67 @@ private static void populateFieldInfo(IndexSchema schema,
   interface IOSupplier {
     boolean get() throws IOException;
   }
-  
+
   private static Object closeSafe(IOSupplier isCurrent) {
     try {
       return isCurrent.get();
-    }catch(AlreadyClosedException | IOException exception) {
+    } catch (AlreadyClosedException | IOException exception) {
     }
     return false;
   }
 
-
   /**
-   * <p>A helper method that attempts to determine the file length of the the segments file for the 
+   * A helper method that attempts to determine the file length of the the segments file for the
    * specified IndexCommit from it's Directory.
-   * </p>
-   * <p>
-   * If any sort of {@link IOException} occurs, this method will return "-1" and swallow the exception since 
-   * this may be normal if the IndexCommit is no longer "on disk".  The specific type of the Exception will 
-   * affect how severely it is logged: {@link NoSuchFileException} is considered more "acceptible" then other 
-   * types of IOException which may indicate an actual problem with the Directory.
+   *
+   * <p>If any sort of {@link IOException} occurs, this method will return "-1" and swallow the
+   * exception since this may be normal if the IndexCommit is no longer "on disk". The specific type
+   * of the Exception will affect how severely it is logged: {@link NoSuchFileException} is
+   * considered more "acceptible" then other types of IOException which may indicate an actual
+   * problem with the Directory.
    */
   private static long getSegmentsFileLength(IndexCommit commit) {
     try {
       return commit.getDirectory().fileLength(commit.getSegmentsFileName());
     } catch (NoSuchFileException okException) {
-      log.debug("Unable to determine the (optional) fileSize for the current IndexReader's segments file because it is "
-          + "no longer in the Directory, this can happen if there are new commits since the Reader was opened"
-          , okException);
+      log.debug(
+          "Unable to determine the (optional) fileSize for the current IndexReader's segments file because it is "
+              + "no longer in the Directory, this can happen if there are new commits since the Reader was opened",
+          okException);
     } catch (IOException strangeException) {
-      log.warn("Ignoring IOException wile attempting to determine the (optional) fileSize stat for the current IndexReader's segments file",
-               strangeException);
+      log.warn(
+          "Ignoring IOException wile attempting to determine the (optional) fileSize stat for the current IndexReader's segments file",
+          strangeException);
     }
     return -1;
   }
 
-  // Get terribly detailed information about a particular field. This is a very expensive call, use it with caution
+  // Get terribly detailed information about a particular field. This is a very expensive call, use
+  // it with caution
   // especially on large indexes!
-  private static void getDetailedFieldInfo(SolrQueryRequest req, String field, SimpleOrderedMap<Object> fieldMap)
-      throws IOException {
+  private static void getDetailedFieldInfo(
+      SolrQueryRequest req, String field, SimpleOrderedMap<Object> fieldMap) throws IOException {
 
     SolrParams params = req.getParams();
-    final int numTerms = params.getInt( NUMTERMS, DEFAULT_COUNT );
+    final int numTerms = params.getInt(NUMTERMS, DEFAULT_COUNT);
 
-    TopTermQueue tiq = new TopTermQueue(numTerms + 1);  // Something to collect the top N terms in.
+    TopTermQueue tiq = new TopTermQueue(numTerms + 1); // Something to collect the top N terms in.
 
     final CharsRefBuilder spare = new CharsRefBuilder();
 
     Terms terms = MultiTerms.getTerms(req.getSearcher().getIndexReader(), field);
-    if (terms == null) {  // field does not exist
+    if (terms == null) { // field does not exist
       return;
     }
     TermsEnum termsEnum = terms.iterator();
     BytesRef text;
     int[] buckets = new int[HIST_ARRAY_SIZE];
     while ((text = termsEnum.next()) != null) {
       ++tiq.distinctTerms;
-      int freq = termsEnum.docFreq();  // This calculation seems odd, but it gives the same results as it used to.
+      int freq =
+          termsEnum
+              .docFreq(); // This calculation seems odd, but it gives the same results as it used
+      // to.

Review comment:
       Fix this: the trailing comment pushed .docFreq() onto its own line and left "to." orphaned below. Move the comment above the assignment.
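       For example (a sketch):

           // This calculation seems odd, but it gives the same results as it used to.
           int freq = termsEnum.docFreq();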

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -126,14 +128,14 @@
   void execute() throws KeeperException, InterruptedException {
     DocCollection dc = checkParams();
 
-
     int max = req.getParams().getInt(MAX_AT_ONCE_PROP, Integer.MAX_VALUE);
     if (max <= 0) max = Integer.MAX_VALUE;
     int maxWaitSecs = req.getParams().getInt(MAX_WAIT_SECONDS_PROP, 60);
 
-
-    // If there are a maximum number of simultaneous requests specified, we have to pause when we have that many
-    // outstanding requests and wait for at least one to finish before going on the the next rebalance.
+    // If there are a maximum number of simultaneous requests specified, we have to pause when we
+    // have that many
+    // outstanding requests and wait for at least one to finish before going on the the next
+    // rebalance.

Review comment:
       Fix this: the rewrap strands "have that many" mid-sentence. Reflow the comment.
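       One possible reflow (a sketch; it also reads the doubled "the" as a typo for "to the", which is an assumption about the intended wording):

           // If there are a maximum number of simultaneous requests specified, we have to pause
           // when we have that many outstanding requests and wait for at least one to finish
           // before going on to the next rebalance.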

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
##########
@@ -579,16 +629,18 @@ private static void populateFieldInfo(IndexSchema schema,
     indexInfo.add("numDocs", reader.numDocs());
     indexInfo.add("maxDoc", reader.maxDoc());
     indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
-    indexInfo.add("version", reader.getVersion());  // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
+    indexInfo.add(
+        "version",
+        reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?

Review comment:
       Fix this: the long trailing TODO forced the statement onto three lines. Move the TODO comment above indexInfo.add(...).
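       For example (a sketch):

           // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
           indexInfo.add("version", reader.getVersion());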

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
##########
@@ -414,21 +452,28 @@ private MetricFilter parseMustMatchFilter(SolrParams params) {
   private List<MetricType> parseMetricTypes(SolrParams params) {
     String[] typeStr = params.getParams(TYPE_PARAM);
     List<String> types = Collections.emptyList();
-    if (typeStr != null && typeStr.length > 0)  {
+    if (typeStr != null && typeStr.length > 0) {
       types = new ArrayList<>();
       for (String type : typeStr) {
         types.addAll(StrUtils.splitSmart(type, ','));
       }
     }
 
-    List<MetricType> metricTypes = Collections.singletonList(MetricType.all); // include all metrics by default
+    List<MetricType> metricTypes =
+        Collections.singletonList(MetricType.all); // include all metrics by default

Review comment:
       Fix this: the assignment was split only to keep the trailing comment on the line. Move the comment above the declaration.
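       For example (a sketch):

           // include all metrics by default
           List<MetricType> metricTypes = Collections.singletonList(MetricType.all);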

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -215,19 +227,23 @@ private void checkLeaderStatus() throws InterruptedException, KeeperException {
   // The process is:
   // if the replica with preferredLeader is already the leader, do nothing
   // Otherwise:
-  // > if two nodes have the same sequence number and both point to the current leader, we presume that we've just
+  // > if two nodes have the same sequence number and both point to the current leader, we presume
+  // that we've just
   //   moved it, move the one that does _not_ have the preferredLeader to the end of the list.
-  // > move the current leader to the end of the list. This _should_ mean that the current ephemeral node in the
-  //   leader election queue is removed and the only remaining node watching it is triggered to become leader.
+  // > move the current leader to the end of the list. This _should_ mean that the current ephemeral
+  // node in the
+  //   leader election queue is removed and the only remaining node watching it is triggered to
+  // become leader.

Review comment:
       Fix this: the rewrap strands "that we've just" and "node in the" on their own lines inside the bullet comments. Reflow each bullet so its lines read as complete chunks.
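       One possible reflow (a sketch; wording unchanged, only the wrapping differs):

           // > if two nodes have the same sequence number and both point to the current leader,
           //   we presume that we've just moved it, move the one that does _not_ have the
           //   preferredLeader to the end of the list.
           // > move the current leader to the end of the list. This _should_ mean that the current
           //   ephemeral node in the leader election queue is removed and the only remaining node
           //   watching it is triggered to become leader.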

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -250,42 +268,53 @@ private void ensurePreferredIsLeader(Slice slice) throws KeeperException, Interr
       // 1> if the preferred leader isn't first in line, tell it to re-queue itself.
       // 2> tell the actual leader to re-queue itself.
 
-      // Ok, the sorting for election nodes is a bit strange. If the sequence numbers are the same, then the whole
-      // string is used, but that sorts nodes with the same sequence number by their session IDs from ZK.
-      // While this is determinate, it's not quite what we need, so re-queue nodes that aren't us and are
+      // Ok, the sorting for election nodes is a bit strange. If the sequence numbers are the same,
+      // then the whole
+      // string is used, but that sorts nodes with the same sequence number by their session IDs
+      // from ZK.
+      // While this is determinate, it's not quite what we need, so re-queue nodes that aren't us
+      // and are
       // watching the leader node..

Review comment:
       Fix this: the rewrap leaves "then the whole", "from ZK." and "and are" as orphaned fragments. Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -172,20 +177,26 @@ private DocCollection checkParams() throws KeeperException, InterruptedException
 
     collectionName = req.getParams().get(COLLECTION_PROP);
     if (StringUtils.isBlank(collectionName)) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          String.format(Locale.ROOT, "The " + COLLECTION_PROP + " is required for the Rebalance Leaders command."));
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          String.format(
+              Locale.ROOT,
+              "The " + COLLECTION_PROP + " is required for the Rebalance Leaders command."));
     }
     coreContainer.getZkController().getZkStateReader().forceUpdateCollection(collectionName);
     ClusterState clusterState = coreContainer.getZkController().getClusterState();
 
     DocCollection dc = clusterState.getCollection(collectionName);
     if (dc == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
+      throw new SolrException(
+          SolrException.ErrorCode.BAD_REQUEST,
+          "Collection '" + collectionName + "' does not exist, no action taken.");
     }
     return dc;
   }
 
-  // Once we've done all the fiddling with the queues, check on the way out to see if all the active preferred
+  // Once we've done all the fiddling with the queues, check on the way out to see if all the active
+  // preferred
   // leaders that we intended to change are in fact the leaders.

Review comment:
       Fix this: "preferred" is stranded on its own line after the rewrap. Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/PrepRecoveryOp.java
##########
@@ -63,105 +61,151 @@ public void execute(CallInfo it) throws Exception {
     int conflictWaitMs = coreContainer.getZkController().getLeaderConflictResolveWait();
     log.info(
         "Going to wait for coreNodeName: {}, state: {}, checkLive: {}, onlyIfLeader: {}, onlyIfLeaderActive: {}",
-        coreNodeName, waitForState, checkLive, onlyIfLeader, onlyIfLeaderActive);
+        coreNodeName,
+        waitForState,
+        checkLive,
+        onlyIfLeader,
+        onlyIfLeaderActive);
 
     String collectionName;
     CloudDescriptor cloudDescriptor;
     try (SolrCore core = coreContainer.getCore(cname)) {
-      if (core == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
+      if (core == null)
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
       collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-      cloudDescriptor = core.getCoreDescriptor()
-          .getCloudDescriptor();
+      cloudDescriptor = core.getCoreDescriptor().getCloudDescriptor();
     }
     AtomicReference<String> errorMessage = new AtomicReference<>();
     try {
-      coreContainer.getZkController().getZkStateReader().waitForState(collectionName, conflictWaitMs, TimeUnit.MILLISECONDS, (n, c) -> {
-        if (c == null)
-          return false;
-
-        try (SolrCore core = coreContainer.getCore(cname)) {
-          if (core == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
-          if (onlyIfLeader != null && onlyIfLeader) {
-            if (!core.getCoreDescriptor().getCloudDescriptor().isLeader()) {
-              throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "We are not the leader");
-            }
-          }
-        }
-
-        // wait until we are sure the recovering node is ready
-        // to accept updates
-        Replica.State state = null;
-        boolean live = false;
-        Slice slice = c.getSlice(cloudDescriptor.getShardId());
-        if (slice != null) {
-          final Replica replica = slice.getReplicasMap().get(coreNodeName);
-          if (replica != null) {
-            state = replica.getState();
-            live = n.contains(nodeName);
-
-            final Replica.State localState = cloudDescriptor.getLastPublished();
-
-            // TODO: This is funky but I've seen this in testing where the replica asks the
-            // leader to be in recovery? Need to track down how that happens ... in the meantime,
-            // this is a safeguard
-            boolean leaderDoesNotNeedRecovery = (onlyIfLeader != null &&
-                onlyIfLeader &&
-                cname.equals(replica.getStr("core")) &&
-                waitForState == Replica.State.RECOVERING &&
-                localState == Replica.State.ACTIVE &&
-                state == Replica.State.ACTIVE);
-
-            if (leaderDoesNotNeedRecovery) {
-              log.warn("Leader {} ignoring request to be in the recovering state because it is live and active.", cname);
-            }
-
-            ZkShardTerms shardTerms = coreContainer.getZkController().getShardTerms(collectionName, slice.getName());
-            // if the replica is waiting for leader to see recovery state, the leader should refresh its terms
-            if (waitForState == Replica.State.RECOVERING && shardTerms.registered(coreNodeName)
-                && shardTerms.skipSendingUpdatesTo(coreNodeName)) {
-              // The replica changed it term, then published itself as RECOVERING.
-              // This core already see replica as RECOVERING
-              // so it is guarantees that a live-fetch will be enough for this core to see max term published
-              shardTerms.refreshTerms();
-            }
-
-            boolean onlyIfActiveCheckResult = onlyIfLeaderActive != null && onlyIfLeaderActive
-                && localState != Replica.State.ACTIVE;
-            if (log.isInfoEnabled()) {
-              log.info(
-                  "In WaitForState(" + waitForState + "): collection=" + collectionName + ", shard=" + slice.getName() +
-                      ", thisCore=" + cname + ", leaderDoesNotNeedRecovery=" + leaderDoesNotNeedRecovery +
-                      ", isLeader? " + cloudDescriptor.isLeader() +
-                      ", live=" + live + ", checkLive=" + checkLive + ", currentState=" + state
-                      + ", localState=" + localState + ", nodeName=" + nodeName +
-                      ", coreNodeName=" + coreNodeName + ", onlyIfActiveCheckResult=" + onlyIfActiveCheckResult
-                      + ", nodeProps: " + replica); //nowarn
-            }
-            if (!onlyIfActiveCheckResult && replica != null && (state == waitForState || leaderDoesNotNeedRecovery)) {
-              if (checkLive == null) {
-                return true;
-              } else if (checkLive && live) {
-                return true;
-              } else if (!checkLive && !live) {
-                return true;
-              }
-            }
-          }
-        }
-
-        if (coreContainer.isShutDown()) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Solr is shutting down");
-        }
-
-        return false;
-      });
+      coreContainer
+          .getZkController()
+          .getZkStateReader()
+          .waitForState(
+              collectionName,
+              conflictWaitMs,
+              TimeUnit.MILLISECONDS,
+              (n, c) -> {
+                if (c == null) return false;
+
+                try (SolrCore core = coreContainer.getCore(cname)) {
+                  if (core == null)
+                    throw new SolrException(
+                        SolrException.ErrorCode.BAD_REQUEST, "core not found:" + cname);
+                  if (onlyIfLeader != null && onlyIfLeader) {
+                    if (!core.getCoreDescriptor().getCloudDescriptor().isLeader()) {
+                      throw new SolrException(
+                          SolrException.ErrorCode.BAD_REQUEST, "We are not the leader");
+                    }
+                  }
+                }
+
+                // wait until we are sure the recovering node is ready
+                // to accept updates
+                Replica.State state = null;
+                boolean live = false;
+                Slice slice = c.getSlice(cloudDescriptor.getShardId());
+                if (slice != null) {
+                  final Replica replica = slice.getReplicasMap().get(coreNodeName);
+                  if (replica != null) {
+                    state = replica.getState();
+                    live = n.contains(nodeName);
+
+                    final Replica.State localState = cloudDescriptor.getLastPublished();
+
+                    // TODO: This is funky but I've seen this in testing where the replica asks the
+                    // leader to be in recovery? Need to track down how that happens ... in the
+                    // meantime,
+                    // this is a safeguard
+                    boolean leaderDoesNotNeedRecovery =
+                        (onlyIfLeader != null
+                            && onlyIfLeader
+                            && cname.equals(replica.getStr("core"))
+                            && waitForState == Replica.State.RECOVERING
+                            && localState == Replica.State.ACTIVE
+                            && state == Replica.State.ACTIVE);
+
+                    if (leaderDoesNotNeedRecovery) {
+                      log.warn(
+                          "Leader {} ignoring request to be in the recovering state because it is live and active.",
+                          cname);
+                    }
+
+                    ZkShardTerms shardTerms =
+                        coreContainer
+                            .getZkController()
+                            .getShardTerms(collectionName, slice.getName());
+                    // if the replica is waiting for leader to see recovery state, the leader should
+                    // refresh its terms
+                    if (waitForState == Replica.State.RECOVERING
+                        && shardTerms.registered(coreNodeName)
+                        && shardTerms.skipSendingUpdatesTo(coreNodeName)) {
+                      // The replica changed it term, then published itself as RECOVERING.
+                      // This core already see replica as RECOVERING
+                      // so it is guarantees that a live-fetch will be enough for this core to see
+                      // max term published
+                      shardTerms.refreshTerms();

Review comment:
       Fix this: several comments inside the lambda were rewrapped mid-sentence ("meantime,", "refresh its terms", "max term published" all end up as stranded fragments). Reflow them so each comment reads as complete lines.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -295,11 +324,18 @@ private void addInactiveToResults(Slice slice, Replica replica) {
     }
     SimpleOrderedMap<String> res = new SimpleOrderedMap<>();
     res.add("status", "skipped");
-    res.add("msg", "Replica " + replica.getName() + " is a referredLeader for shard " + slice.getName() + ", but is inactive. No change necessary");
+    res.add(
+        "msg",
+        "Replica "
+            + replica.getName()
+            + " is a referredLeader for shard "
+            + slice.getName()
+            + ", but is inactive. No change necessary");
     inactives.add(replica.getName(), res);
   }
 
-  // Provide some feedback to the user about what actually happened, or in this case where no action was
+  // Provide some feedback to the user about what actually happened, or in this case where no action
+  // was
   // necesary since this preferred replica was already the leader

Review comment:
       Fix this: "was" is stranded on its own comment line before "necesary since...". Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -250,42 +268,53 @@ private void ensurePreferredIsLeader(Slice slice) throws KeeperException, Interr
       // 1> if the preferred leader isn't first in line, tell it to re-queue itself.
       // 2> tell the actual leader to re-queue itself.
 
-      // Ok, the sorting for election nodes is a bit strange. If the sequence numbers are the same, then the whole
-      // string is used, but that sorts nodes with the same sequence number by their session IDs from ZK.
-      // While this is determinate, it's not quite what we need, so re-queue nodes that aren't us and are
+      // Ok, the sorting for election nodes is a bit strange. If the sequence numbers are the same,
+      // then the whole
+      // string is used, but that sorts nodes with the same sequence number by their session IDs
+      // from ZK.
+      // While this is determinate, it's not quite what we need, so re-queue nodes that aren't us
+      // and are
       // watching the leader node..
 
-
       String firstWatcher = electionNodes.get(1);
 
       if (LeaderElector.getNodeName(firstWatcher).equals(replica.getName()) == false) {
         makeReplicaFirstWatcher(slice, replica);
       }
 
-      // This replica should be the leader at the end of the day, so let's record that information to check at the end
+      // This replica should be the leader at the end of the day, so let's record that information
+      // to check at the end
       pendingOps.put(slice.getName(), replica.getName());
       String leaderElectionNode = electionNodes.get(0);
-      String coreName = slice.getReplica(LeaderElector.getNodeName(leaderElectionNode)).getStr(CORE_NAME_PROP);
+      String coreName =
+          slice.getReplica(LeaderElector.getNodeName(leaderElectionNode)).getStr(CORE_NAME_PROP);
       rejoinElectionQueue(slice, leaderElectionNode, coreName, false);
       waitForNodeChange(slice, leaderElectionNode);
 
       return; // Done with this slice, skip the rest of the replicas.
     }
   }
 
-  // Check that the election queue has some members! There really should be two or more for this to make any sense,
+  // Check that the election queue has some members! There really should be two or more for this to
+  // make any sense,
   // if there's only one we can't change anything.

Review comment:
       Fix this: "make any sense," got split onto its own line. Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -250,42 +268,53 @@ private void ensurePreferredIsLeader(Slice slice) throws KeeperException, Interr
       // 1> if the preferred leader isn't first in line, tell it to re-queue itself.
       // 2> tell the actual leader to re-queue itself.
 
-      // Ok, the sorting for election nodes is a bit strange. If the sequence numbers are the same, then the whole
-      // string is used, but that sorts nodes with the same sequence number by their session IDs from ZK.
-      // While this is determinate, it's not quite what we need, so re-queue nodes that aren't us and are
+      // Ok, the sorting for election nodes is a bit strange. If the sequence numbers are the same,
+      // then the whole
+      // string is used, but that sorts nodes with the same sequence number by their session IDs
+      // from ZK.
+      // While this is determinate, it's not quite what we need, so re-queue nodes that aren't us
+      // and are
       // watching the leader node..
 
-
       String firstWatcher = electionNodes.get(1);
 
       if (LeaderElector.getNodeName(firstWatcher).equals(replica.getName()) == false) {
         makeReplicaFirstWatcher(slice, replica);
       }
 
-      // This replica should be the leader at the end of the day, so let's record that information to check at the end
+      // This replica should be the leader at the end of the day, so let's record that information
+      // to check at the end
       pendingOps.put(slice.getName(), replica.getName());
       String leaderElectionNode = electionNodes.get(0);
-      String coreName = slice.getReplica(LeaderElector.getNodeName(leaderElectionNode)).getStr(CORE_NAME_PROP);
+      String coreName =
+          slice.getReplica(LeaderElector.getNodeName(leaderElectionNode)).getStr(CORE_NAME_PROP);
       rejoinElectionQueue(slice, leaderElectionNode, coreName, false);
       waitForNodeChange(slice, leaderElectionNode);
 
       return; // Done with this slice, skip the rest of the replicas.
     }
   }
 
-  // Check that the election queue has some members! There really should be two or more for this to make any sense,
+  // Check that the election queue has some members! There really should be two or more for this to
+  // make any sense,
   // if there's only one we can't change anything.
-  private boolean electionQueueInBadState(List<String> electionNodes, Slice slice, Replica replica) {
-    if (electionNodes.size() < 2) { // if there's only one node in the queue, should already be leader and we shouldn't be here anyway.
-      log.warn("Rebalancing leaders and slice {} has less than two elements in the leader election queue, but replica {} doesn't think it's the leader."
-          , slice.getName(), replica.getName());
+  private boolean electionQueueInBadState(
+      List<String> electionNodes, Slice slice, Replica replica) {
+    if (electionNodes.size()
+        < 2) { // if there's only one node in the queue, should already be leader and we shouldn't
+      // be here anyway.
+      log.warn(
+          "Rebalancing leaders and slice {} has less than two elements in the leader election queue, but replica {} doesn't think it's the leader.",
+          slice.getName(),
+          replica.getName());
       return true;
     }
 
     return false;
   }
 
-  // Provide some feedback to the user about what actually happened, or in this case where no action was
+  // Provide some feedback to the user about what actually happened, or in this case where no action
+  // was
   // possible

Review comment:
       Fix this: same rewrap issue as the previous comment, "was" is stranded on its own line before "possible". Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -309,20 +345,31 @@ private void addAlreadyLeaderToResults(Slice slice, Replica replica) {
     }
     SimpleOrderedMap<String> res = new SimpleOrderedMap<>();
     res.add("status", "skipped");
-    res.add("msg", "Replica " + replica.getName() + " is already the leader for shard " + slice.getName() + ". No change necessary");
+    res.add(
+        "msg",
+        "Replica "
+            + replica.getName()
+            + " is already the leader for shard "
+            + slice.getName()
+            + ". No change necessary");
     alreadyLeaders.add(replica.getName(), res);
   }
 
-  // Put the replica in at the head of the queue and send all nodes with the same sequence number to the back of the list
-  // There can be "ties", i.e. replicas in the queue with the same sequence number. Sorting doesn't necessarily sort
-  // the one we most care about first. So put the node we _don't care about at the end of the election queue_
+  // Put the replica in at the head of the queue and send all nodes with the same sequence number to
+  // the back of the list
+  // There can be "ties", i.e. replicas in the queue with the same sequence number. Sorting doesn't
+  // necessarily sort
+  // the one we most care about first. So put the node we _don't care about at the end of the
+  // election queue_

Review comment:
       Fix this: the rewrap strands "the back of the list", "necessarily sort" and "election queue_" on their own lines. Reflow the comment so each line is a readable chunk.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -367,25 +418,31 @@ void makeReplicaFirstWatcher(Slice slice, Replica replica)
       }
       // We won't get here for the preferredLeader node
       if (LeaderElector.getSeq(thisNode) == newSeq) {
-        String coreName = slice.getReplica(LeaderElector.getNodeName(thisNode)).getStr(CORE_NAME_PROP);
+        String coreName =
+            slice.getReplica(LeaderElector.getNodeName(thisNode)).getStr(CORE_NAME_PROP);
         rejoinElectionQueue(slice, thisNode, coreName, false);
         waitForNodeChange(slice, thisNode);
       }
     }
   }
 
-  // We're just waiting for the electionNode to rejoin the queue with a _different_ node, indicating that any
+  // We're just waiting for the electionNode to rejoin the queue with a _different_ node, indicating
+  // that any
   // requeueing we've done has happened.

Review comment:
       Fix this: "that any" is stranded on its own line after the rewrap. Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -468,16 +538,23 @@ private void addToSuccesses(Slice slice, Replica replica) {
       results.add("successes", successes);
     }
     if (log.isInfoEnabled()) {
-      log.info("Successfully changed leader of shard {} to replica {}", slice.getName(), replica.getName());
+      log.info(
+          "Successfully changed leader of shard {} to replica {}",
+          slice.getName(),
+          replica.getName());
     }
     SimpleOrderedMap<String> res = new SimpleOrderedMap<>();
     res.add("status", "success");
-    res.add("msg", "Successfully changed leader of slice " + slice.getName() + " to " + replica.getName());
+    res.add(
+        "msg",
+        "Successfully changed leader of slice " + slice.getName() + " to " + replica.getName());
     successes.add(slice.getName(), res);
   }
 
-  // If for any reason we were supposed to change leadership, that should be recorded in changingLeaders. Any
-  // time we verified that the change actually occurred, that entry should have been removed. So report anything
+  // If for any reason we were supposed to change leadership, that should be recorded in
+  // changingLeaders. Any
+  // time we verified that the change actually occurred, that entry should have been removed. So
+  // report anything
   // left over as a failure.

Review comment:
       Fix this: the rewrap leaves "changingLeaders. Any" and "report anything" as awkward standalone lines. Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
##########
@@ -269,44 +277,54 @@ public String toString() {
       synchronized (this) {
         if (apis == null) {
           Collection<Api> apis = new ArrayList<>();
-          final SpecProvider authcCommands = Utils.getSpec("cluster.security.authentication.Commands");
-          final SpecProvider authzCommands = Utils.getSpec("cluster.security.authorization.Commands");
+          final SpecProvider authcCommands =
+              Utils.getSpec("cluster.security.authentication.Commands");
+          final SpecProvider authzCommands =
+              Utils.getSpec("cluster.security.authorization.Commands");
           apis.add(new ReqHandlerToApi(this, Utils.getSpec("cluster.security.authentication")));
           apis.add(new ReqHandlerToApi(this, Utils.getSpec("cluster.security.authorization")));
-          SpecProvider authcSpecProvider = () -> {
-            AuthenticationPlugin authcPlugin = cores.getAuthenticationPlugin();
-            return authcPlugin != null && authcPlugin instanceof SpecProvider ?
-                ((SpecProvider) authcPlugin).getSpec() :
-                authcCommands.getSpec();
-          };
-
-          apis.add(new ReqHandlerToApi(this, authcSpecProvider) {
-            @Override
-            public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
-              //it is possible that the Authentication plugin is modified since the last call. invalidate the
-              // the cached commandSchema
-              if(SecurityConfHandler.this.authcPlugin != cores.getAuthenticationPlugin()) commandSchema = null;
-              SecurityConfHandler.this.authcPlugin = cores.getAuthenticationPlugin();
-              return super.getCommandSchema();
-            }
-          });
-
-          SpecProvider authzSpecProvider = () -> {
-            AuthorizationPlugin authzPlugin = cores.getAuthorizationPlugin();
-            return authzPlugin != null && authzPlugin instanceof SpecProvider ?
-                ((SpecProvider) authzPlugin).getSpec() :
-                authzCommands.getSpec();
-          };
-          apis.add(new ApiBag.ReqHandlerToApi(this, authzSpecProvider) {
-            @Override
-            public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
-              //it is possible that the Authorization plugin is modified since the last call. invalidate the
-              // the cached commandSchema
-              if(SecurityConfHandler.this.authzPlugin != cores.getAuthorizationPlugin()) commandSchema = null;
-              SecurityConfHandler.this.authzPlugin = cores.getAuthorizationPlugin();
-              return super.getCommandSchema();
-            }
-          });
+          SpecProvider authcSpecProvider =
+              () -> {
+                AuthenticationPlugin authcPlugin = cores.getAuthenticationPlugin();
+                return authcPlugin != null && authcPlugin instanceof SpecProvider
+                    ? ((SpecProvider) authcPlugin).getSpec()
+                    : authcCommands.getSpec();
+              };
+
+          apis.add(
+              new ReqHandlerToApi(this, authcSpecProvider) {
+                @Override
+                public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
+                  // it is possible that the Authentication plugin is modified since the last call.
+                  // invalidate the
+                  // the cached commandSchema

Review comment:
       Fix this: the rewrap leaves "invalidate the" dangling and keeps the doubled "the" ("invalidate the / the cached commandSchema"). Reflow the comment and drop the extra "the".
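       One possible reflow (a sketch, with the doubled "the" dropped):

           // it is possible that the Authentication plugin is modified since the last call.
           // invalidate the cached commandSchema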

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -395,9 +452,11 @@ int waitForNodeChange(Slice slice, String electionNode) throws InterruptedExcept
     return -1;
   }
 
-  // Move an election node to some other place in the queue. If rejoinAtHead==false, then at the end, otherwise
+  // Move an election node to some other place in the queue. If rejoinAtHead==false, then at the
+  // end, otherwise
   // the new node should point at the leader.

Review comment:
       Fix this: "end, otherwise" got split onto its own line. Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -447,9 +514,12 @@ private boolean waitAsyncRequests(final int maxWaitSecs, Boolean waitForAll)
         }
       }
       // We're done if we're processing a few at a time or all requests are processed.
-      // We don't want to change, say, 100s of leaders simultaneously. So if the request specifies some limit,
-      // and we're at that limit, we want to return to the caller so it can immediately add another request.
-      // That's the purpose of the first clause here. Otherwise, of course, just return if all requests are
+      // We don't want to change, say, 100s of leaders simultaneously. So if the request specifies
+      // some limit,
+      // and we're at that limit, we want to return to the caller so it can immediately add another
+      // request.
+      // That's the purpose of the first clause here. Otherwise, of course, just return if all
+      // requests are
       // processed.

Review comment:
       Fix this: the rewrap strands "some limit,", "request." and "requests are" on their own lines. Reflow the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
##########
@@ -250,42 +268,53 @@ private void ensurePreferredIsLeader(Slice slice) throws KeeperException, Interr
       // 1> if the preferred leader isn't first in line, tell it to re-queue itself.
       // 2> tell the actual leader to re-queue itself.
 
-      // Ok, the sorting for election nodes is a bit strange. If the sequence numbers are the same, then the whole
-      // string is used, but that sorts nodes with the same sequence number by their session IDs from ZK.
-      // While this is determinate, it's not quite what we need, so re-queue nodes that aren't us and are
+      // Ok, the sorting for election nodes is a bit strange. If the sequence numbers are the same,
+      // then the whole
+      // string is used, but that sorts nodes with the same sequence number by their session IDs
+      // from ZK.
+      // While this is determinate, it's not quite what we need, so re-queue nodes that aren't us
+      // and are
       // watching the leader node..
 
-
       String firstWatcher = electionNodes.get(1);
 
       if (LeaderElector.getNodeName(firstWatcher).equals(replica.getName()) == false) {
         makeReplicaFirstWatcher(slice, replica);
       }
 
-      // This replica should be the leader at the end of the day, so let's record that information to check at the end
+      // This replica should be the leader at the end of the day, so let's record that information
+      // to check at the end
       pendingOps.put(slice.getName(), replica.getName());
       String leaderElectionNode = electionNodes.get(0);
-      String coreName = slice.getReplica(LeaderElector.getNodeName(leaderElectionNode)).getStr(CORE_NAME_PROP);
+      String coreName =
+          slice.getReplica(LeaderElector.getNodeName(leaderElectionNode)).getStr(CORE_NAME_PROP);
       rejoinElectionQueue(slice, leaderElectionNode, coreName, false);
       waitForNodeChange(slice, leaderElectionNode);
 
       return; // Done with this slice, skip the rest of the replicas.
     }
   }
 
-  // Check that the election queue has some members! There really should be two or more for this to make any sense,
+  // Check that the election queue has some members! There really should be two or more for this to
+  // make any sense,
   // if there's only one we can't change anything.
-  private boolean electionQueueInBadState(List<String> electionNodes, Slice slice, Replica replica) {
-    if (electionNodes.size() < 2) { // if there's only one node in the queue, should already be leader and we shouldn't be here anyway.
-      log.warn("Rebalancing leaders and slice {} has less than two elements in the leader election queue, but replica {} doesn't think it's the leader."
-          , slice.getName(), replica.getName());
+  private boolean electionQueueInBadState(
+      List<String> electionNodes, Slice slice, Replica replica) {
+    if (electionNodes.size()
+        < 2) { // if there's only one node in the queue, should already be leader and we shouldn't
+      // be here anyway.

Review comment:
       Fix this: the trailing comment forced electionNodes.size() < 2 across two lines and left "be here anyway." orphaned. Move the comment above the if so the condition stays on one line.
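       For example (a sketch; wording of the comment unchanged):

           // if there's only one node in the queue, should already be leader and we shouldn't
           // be here anyway.
           if (electionNodes.size() < 2) {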

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
##########
@@ -269,44 +277,54 @@ public String toString() {
       synchronized (this) {
         if (apis == null) {
           Collection<Api> apis = new ArrayList<>();
-          final SpecProvider authcCommands = Utils.getSpec("cluster.security.authentication.Commands");
-          final SpecProvider authzCommands = Utils.getSpec("cluster.security.authorization.Commands");
+          final SpecProvider authcCommands =
+              Utils.getSpec("cluster.security.authentication.Commands");
+          final SpecProvider authzCommands =
+              Utils.getSpec("cluster.security.authorization.Commands");
           apis.add(new ReqHandlerToApi(this, Utils.getSpec("cluster.security.authentication")));
           apis.add(new ReqHandlerToApi(this, Utils.getSpec("cluster.security.authorization")));
-          SpecProvider authcSpecProvider = () -> {
-            AuthenticationPlugin authcPlugin = cores.getAuthenticationPlugin();
-            return authcPlugin != null && authcPlugin instanceof SpecProvider ?
-                ((SpecProvider) authcPlugin).getSpec() :
-                authcCommands.getSpec();
-          };
-
-          apis.add(new ReqHandlerToApi(this, authcSpecProvider) {
-            @Override
-            public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
-              //it is possible that the Authentication plugin is modified since the last call. invalidate the
-              // the cached commandSchema
-              if(SecurityConfHandler.this.authcPlugin != cores.getAuthenticationPlugin()) commandSchema = null;
-              SecurityConfHandler.this.authcPlugin = cores.getAuthenticationPlugin();
-              return super.getCommandSchema();
-            }
-          });
-
-          SpecProvider authzSpecProvider = () -> {
-            AuthorizationPlugin authzPlugin = cores.getAuthorizationPlugin();
-            return authzPlugin != null && authzPlugin instanceof SpecProvider ?
-                ((SpecProvider) authzPlugin).getSpec() :
-                authzCommands.getSpec();
-          };
-          apis.add(new ApiBag.ReqHandlerToApi(this, authzSpecProvider) {
-            @Override
-            public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
-              //it is possible that the Authorization plugin is modified since the last call. invalidate the
-              // the cached commandSchema
-              if(SecurityConfHandler.this.authzPlugin != cores.getAuthorizationPlugin()) commandSchema = null;
-              SecurityConfHandler.this.authzPlugin = cores.getAuthorizationPlugin();
-              return super.getCommandSchema();
-            }
-          });
+          SpecProvider authcSpecProvider =
+              () -> {
+                AuthenticationPlugin authcPlugin = cores.getAuthenticationPlugin();
+                return authcPlugin != null && authcPlugin instanceof SpecProvider
+                    ? ((SpecProvider) authcPlugin).getSpec()
+                    : authcCommands.getSpec();
+              };
+
+          apis.add(
+              new ReqHandlerToApi(this, authcSpecProvider) {
+                @Override
+                public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
+                  // it is possible that the Authentication plugin is modified since the last call.
+                  // invalidate the
+                  // the cached commandSchema
+                  if (SecurityConfHandler.this.authcPlugin != cores.getAuthenticationPlugin())
+                    commandSchema = null;
+                  SecurityConfHandler.this.authcPlugin = cores.getAuthenticationPlugin();
+                  return super.getCommandSchema();
+                }
+              });
+
+          SpecProvider authzSpecProvider =
+              () -> {
+                AuthorizationPlugin authzPlugin = cores.getAuthorizationPlugin();
+                return authzPlugin != null && authzPlugin instanceof SpecProvider
+                    ? ((SpecProvider) authzPlugin).getSpec()
+                    : authzCommands.getSpec();
+              };
+          apis.add(
+              new ApiBag.ReqHandlerToApi(this, authzSpecProvider) {
+                @Override
+                public synchronized Map<String, JsonSchemaValidator> getCommandSchema() {
+                  // it is possible that the Authorization plugin is modified since the last call.
+                  // invalidate the
+                  // the cached commandSchema

Review comment:
       Fix this: same issue as the authentication block above, "invalidate the / the cached commandSchema" is split with a doubled "the". Reflow the comment the same way.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
##########
@@ -329,11 +358,12 @@ static String toSplitString(Collection<DocRouter.Range> splits) throws Exception
       String termStr = term.utf8ToString();
       int firstSep = termStr.indexOf(CompositeIdRouter.SEPARATOR);
       // truncate to first separator since we don't support multiple levels currently
-      // NOTE: this does not currently work for tri-level composite ids since the number of bits allocated to the first ID is 16 for a 2 part id
+      // NOTE: this does not currently work for tri-level composite ids since the number of bits
+      // allocated to the first ID is 16 for a 2 part id
       // and 8 for a 3 part id!

Review comment:
       Fix this - the NOTE comment now breaks mid-sentence after "the number of bits"; rewrap it so the sentence reads through.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
##########
@@ -341,19 +345,20 @@ public static String getAdminFileFromZooKeeper(SolrQueryRequest req, SolrQueryRe
     // Make sure the file exists, is readable and is not a hidden file
     if (!zkClient.exists(adminFile, true)) {
       log.error("Can not find: {}", adminFile);
-      rsp.setException(new SolrException(SolrException.ErrorCode.NOT_FOUND, "Can not find: "
-          + adminFile));
+      rsp.setException(
+          new SolrException(SolrException.ErrorCode.NOT_FOUND, "Can not find: " + adminFile));
       return null;
     }
 
     return adminFile;
   }
 
-
-  // Find the file indicated by the "file=XXX" parameter or the root of the conf directory on the local
-  // file system. Respects all the "interesting" stuff around what the resource loader does to find files.
-  public static Path getAdminFileFromFileSystem(SolrQueryRequest req, SolrQueryResponse rsp,
-                                                Set<String> hiddenFiles) {
+  // Find the file indicated by the "file=XXX" parameter or the root of the conf directory on the
+  // local
+  // file system. Respects all the "interesting" stuff around what the resource loader does to find
+  // files.

Review comment:
       Fix this - "local" and "files." are stranded on their own comment lines; rejoin the two sentences.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
##########
@@ -286,23 +284,30 @@ public static String getSafeContentType(String contentType) {
 
   //////////////////////// Static methods //////////////////////////////
 
-  public static boolean isHiddenFile(SolrQueryRequest req, SolrQueryResponse rsp, String fnameIn, boolean reportError,
-                                     Set<String> hiddenFiles) {
+  public static boolean isHiddenFile(
+      SolrQueryRequest req,
+      SolrQueryResponse rsp,
+      String fnameIn,
+      boolean reportError,
+      Set<String> hiddenFiles) {
     String fname = fnameIn.toUpperCase(Locale.ROOT);
     if (hiddenFiles.contains(fname) || hiddenFiles.contains("*")) {
       if (reportError) {
         log.error("Cannot access {}", fname);
-        rsp.setException(new SolrException(SolrException.ErrorCode.FORBIDDEN, "Can not access: " + fnameIn));
+        rsp.setException(
+            new SolrException(SolrException.ErrorCode.FORBIDDEN, "Can not access: " + fnameIn));
       }
       return true;
     }
 
-    // This is slightly off, a valid path is something like ./schema.xml. I don't think it's worth the effort though
+    // This is slightly off, a valid path is something like ./schema.xml. I don't think it's worth
+    // the effort though
     // to fix it to handle all possibilities though.

Review comment:
       Fix this - the rewrap strands "the effort though" mid-sentence (and makes the doubled "though" obvious); rejoin the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
##########
@@ -420,13 +460,15 @@ static String toSplitString(Collection<DocRouter.Range> splits) throws Exception
 
       // resize if needed
       if (currPrefix.length < term.length) {
-        currPrefix.bytes = new byte[term.length+10];
+        currPrefix.bytes = new byte[term.length + 10];
       }
 
-      // Copy the bytes up to and including the separator, and set the length if the separator is found.
-      // If there was no separator, then length remains 0 and it's the indicator that we have no prefix bucket
+      // Copy the bytes up to and including the separator, and set the length if the separator is
+      // found.
+      // If there was no separator, then length remains 0 and it's the indicator that we have no
+      // prefix bucket

Review comment:
       Fix this - "found." and "prefix bucket" are stranded on their own comment lines; rewrap both sentences.

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
##########
@@ -448,12 +496,15 @@ static String toSplitString(Collection<DocRouter.Range> splits) throws Exception
   /*
    * Returns the list of recommended splits, or null if there is not enough information
    */
-  static Collection<DocRouter.Range> getSplits(Collection<RangeCount> rawCounts, DocRouter.Range currentRange) throws Exception {
+  static Collection<DocRouter.Range> getSplits(
+      Collection<RangeCount> rawCounts, DocRouter.Range currentRange) throws Exception {
     int totalCount = 0;
-    RangeCount biggest = null; // keep track of the largest in case we need to split it out into it's own shard
-    RangeCount last = null;  // keep track of what the last range is
+    RangeCount biggest =
+        null; // keep track of the largest in case we need to split it out into it's own shard

Review comment:
       Fix this - the trailing comment forces "null;" onto its own line; move the comment above the declaration instead.
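       For example (placement and the "it's" -> "its" fix are only suggestions):

           // keep track of the largest in case we need to split it out into its own shard
           RangeCount biggest = null;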

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java
##########
@@ -404,44 +391,45 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
     String filterType = params.get("filterType");
     if (filterType != null) {
       filterType = filterType.trim().toLowerCase(Locale.ROOT);
-      if (filterType.length() == 0)
-        filterType = null;
+      if (filterType.length() == 0) filterType = null;
     }
     FilterType type = (filterType != null) ? FilterType.valueOf(filterType) : FilterType.none;
 
     String filter = (type != FilterType.none) ? params.get("filter") : null;
     if (filter != null) {
       filter = filter.trim();
-      if (filter.length() == 0)
-        filter = null;
+      if (filter.length() == 0) filter = null;
     }
 
     ZKPrinter printer = new ZKPrinter(cores.getZkController());
     printer.detail = detail;
     printer.dump = dump;
     boolean isGraphView = "graph".equals(params.get("view"));
-    // There is no znode /clusterstate.json (removed in Solr 9), but we do as if there's one and return collection listing
-    // Need to change services.js if cleaning up here, collection list is used from Admin UI Cloud - Graph
+    // There is no znode /clusterstate.json (removed in Solr 9), but we do as if there's one and
+    // return collection listing
+    // Need to change services.js if cleaning up here, collection list is used from Admin UI Cloud -
+    // Graph

Review comment:
       Fix this - "return collection listing" and "Graph" are stranded; rewrap the two comment sentences.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/CloudReplicaSource.java
##########
@@ -170,7 +197,8 @@ private void addSlices(Map<String, Slice> target, ClusterState state, SolrParams
   @Override
   public List<String> getSliceNames() {
     // This is maybe a bug?
-    return Collections.unmodifiableList(Arrays.asList(slices)); // Do not use List.of because slices could have null
+    return Collections.unmodifiableList(
+        Arrays.asList(slices)); // Do not use List.of because slices could have null

Review comment:
       Fix this - move the comment above the return so the unmodifiableList(Arrays.asList(slices)) call stays on one line.
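       e.g. (placement only a suggestion, comment text unchanged):

           // Do not use List.of because slices could have null
           return Collections.unmodifiableList(Arrays.asList(slices));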

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
##########
@@ -472,27 +523,30 @@ static String toSplitString(Collection<DocRouter.Range> splits) throws Exception
       return null;
     }
 
-
     List<DocRouter.Range> targetRanges = new ArrayList<>();
 
     if (counts.size() == 1) {
       // We have a single range, so we should split it.
-      // Currently, we only split a prefix/bucket when we have just one, but this could be changed/controlled
-      // in the future via a allowedSizeDifference parameter (i.e. if just separating prefix buckets results in
+      // Currently, we only split a prefix/bucket when we have just one, but this could be
+      // changed/controlled
+      // in the future via a allowedSizeDifference parameter (i.e. if just separating prefix buckets
+      // results in
       // too large of an imbalanced, allow splitting within a prefix)

Review comment:
       Fix this - "changed/controlled" and "results in" are stranded mid-sentence; rewrap the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/ActiveTasksListComponent.java
##########
@@ -16,102 +16,114 @@
  */
 package org.apache.solr.handler.component;
 
-import org.apache.solr.common.MapWriter;
-import org.apache.solr.common.util.NamedList;
-
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.Map;
+import org.apache.solr.common.MapWriter;
+import org.apache.solr.common.util.NamedList;
 
 /** List the active tasks that can be cancelled */
 public class ActiveTasksListComponent extends SearchComponent {
-    public static final String COMPONENT_NAME = "activetaskslist";
+  public static final String COMPONENT_NAME = "activetaskslist";
 
-    private boolean shouldProcess;
+  private boolean shouldProcess;
 
-    @Override
-    public void prepare(ResponseBuilder rb) throws IOException {
-        if (rb.isTaskListRequest()) {
-            shouldProcess = true;
-        }
+  @Override
+  public void prepare(ResponseBuilder rb) throws IOException {
+    if (rb.isTaskListRequest()) {
+      shouldProcess = true;
     }
+  }
 
-    @Override
-    public void process(ResponseBuilder rb) {
-        if (!shouldProcess) {
-            return;
-        }
+  @Override
+  public void process(ResponseBuilder rb) {
+    if (!shouldProcess) {
+      return;
+    }
 
-        if (rb.getTaskStatusCheckUUID() != null) {
-            boolean isActiveOnThisShard = rb.req.getCore().getCancellableQueryTracker().isQueryIdActive(rb.getTaskStatusCheckUUID());
+    if (rb.getTaskStatusCheckUUID() != null) {
+      boolean isActiveOnThisShard =
+          rb.req
+              .getCore()
+              .getCancellableQueryTracker()
+              .isQueryIdActive(rb.getTaskStatusCheckUUID());
 
-            rb.rsp.add("taskStatus", isActiveOnThisShard);
-            return;
-        }
+      rb.rsp.add("taskStatus", isActiveOnThisShard);
+      return;
+    }
 
-        rb.rsp.add("taskList", (MapWriter) ew -> {
-            Iterator<Map.Entry<String, String>> iterator = rb.req.getCore().getCancellableQueryTracker().getActiveQueriesGenerated();
+    rb.rsp.add(
+        "taskList",
+        (MapWriter)
+            ew -> {
+              Iterator<Map.Entry<String, String>> iterator =
+                  rb.req.getCore().getCancellableQueryTracker().getActiveQueriesGenerated();
 
-            while (iterator.hasNext()) {
+              while (iterator.hasNext()) {
                 Map.Entry<String, String> entry = iterator.next();
                 ew.put(entry.getKey(), entry.getValue());
-            }
-        });
+              }
+            });
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
+    if (!shouldProcess) {
+      return;
     }
 
-    @Override
-    @SuppressWarnings("unchecked")
-    public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-        if (!shouldProcess) {
-            return;
-        }
+    NamedList<String> resultList = new NamedList<>();
 
-        NamedList<String> resultList = new NamedList<>();
+    for (ShardResponse r : sreq.responses) {
 
-        for (ShardResponse r : sreq.responses) {
+      if (rb.getTaskStatusCheckUUID() != null) {
+        boolean isTaskActiveOnShard = r.getSolrResponse().getResponse().getBooleanArg("taskStatus");
 
-            if (rb.getTaskStatusCheckUUID() != null) {
-                boolean isTaskActiveOnShard = r.getSolrResponse().getResponse().getBooleanArg("taskStatus");
-
-                if (isTaskActiveOnShard) {
-                    rb.rsp.getValues().add("taskStatus", "id:" + rb.getTaskStatusCheckUUID() + ", status: active");
-                    return;
-                } else {
-                    continue;
-                }
-            }
-
-            LinkedHashMap<String, String> result = (LinkedHashMap<String, String>) r.getSolrResponse()
-                    .getResponse().get("taskList");
-
-            Iterator<Map.Entry<String, String>> iterator = result.entrySet().iterator();
+        if (isTaskActiveOnShard) {
+          rb.rsp
+              .getValues()
+              .add("taskStatus", "id:" + rb.getTaskStatusCheckUUID() + ", status: active");
+          return;
+        } else {
+          continue;
+        }
+      }
 
-            while (iterator.hasNext()) {
-                Map.Entry<String, String> entry = iterator.next();
+      LinkedHashMap<String, String> result =
+          (LinkedHashMap<String, String>) r.getSolrResponse().getResponse().get("taskList");
 
-                resultList.add(entry.getKey(), entry.getValue());
-            }
-        }
+      Iterator<Map.Entry<String, String>> iterator = result.entrySet().iterator();
 
-        if (rb.getTaskStatusCheckUUID() != null) {
-            // We got here with the specific taskID check being specified -- this means that the taskID was not
-            // found in active tasks on any shard
-            rb.rsp.getValues().add("taskStatus", "id:" + rb.getTaskStatusCheckUUID() + ", status: inactive");
-            return;
-        }
+      while (iterator.hasNext()) {
+        Map.Entry<String, String> entry = iterator.next();
 
-        rb.rsp.getValues().add("taskList", resultList);
+        resultList.add(entry.getKey(), entry.getValue());
+      }
     }
 
-    @Override
-    public String getDescription() {
-        return "Responsible for listing all active cancellable tasks and also supports checking the status of " +
-                "a particular task";
+    if (rb.getTaskStatusCheckUUID() != null) {
+      // We got here with the specific taskID check being specified -- this means that the taskID
+      // was not
+      // found in active tasks on any shard

Review comment:
       Fix this - "was not" is stranded on its own comment line; rejoin the sentence.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
##########
@@ -353,15 +367,17 @@ public void process(ResponseBuilder rb) throws IOException {
     }
 
     final boolean expandNullGroup =
-      params.getBool(ExpandParams.EXPAND_NULL, false) &&
-      // Our GroupCollector can typically ignore nulls (and the user's nullGroup param) unless the
-      // current page had any - but if expand.q was specified, current page doesn't mater: We
-      // need look for nulls if the user asked us to because we don't know what the expand.q will match
-      (nullGroupOnCurrentPage || (null != query));
+        params.getBool(ExpandParams.EXPAND_NULL, false)
+            &&
+            // Our GroupCollector can typically ignore nulls (and the user's nullGroup param) unless
+            // the
+            // current page had any - but if expand.q was specified, current page doesn't mater: We

Review comment:
       Fix this - the wrap leaves a lone "&&" and an orphaned "the"; consider moving the whole comment above the expression.
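       One option (a sketch only; the names all come from the existing code):

           // Our GroupCollector can typically ignore nulls (and the user's nullGroup param)
           // unless the current page had any - but if expand.q was specified, the current page
           // doesn't matter: we need to look for nulls if the user asked us to, because we don't
           // know what the expand.q will match.
           final boolean expandNullGroup =
               params.getBool(ExpandParams.EXPAND_NULL, false)
                   && (nullGroupOnCurrentPage || (null != query));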

##########
File path: solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
##########
@@ -342,20 +366,34 @@ private static String createSliceShardsStr(final List<String> shardUrls) {
     return sliceShardsStr.toString();
   }
 
-  private boolean canShortCircuit(String[] slices, boolean onlyNrtReplicas, SolrParams params, CloudDescriptor cloudDescriptor) {
-    // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
+  private boolean canShortCircuit(
+      String[] slices,
+      boolean onlyNrtReplicas,
+      SolrParams params,
+      CloudDescriptor cloudDescriptor) {
+    // Are we hosting the shard that this request is for, and are we active? If so, then handle it
+    // ourselves
     // and make it a non-distributed request.
     String ourSlice = cloudDescriptor.getShardId();
     String ourCollection = cloudDescriptor.getCollectionName();
     // Some requests may only be fulfilled by replicas of type Replica.Type.NRT
-    if (slices.length == 1 && slices[0] != null
-        && (slices[0].equals(ourSlice) || slices[0].equals(ourCollection + "_" + ourSlice))  // handle the <collection>_<slice> format
+    if (slices.length == 1
+        && slices[0] != null
+        && (slices[0].equals(ourSlice)
+            || slices[0].equals(
+                ourCollection + "_" + ourSlice)) // handle the <collection>_<slice> format
         && cloudDescriptor.getLastPublished() == Replica.State.ACTIVE
         && (!onlyNrtReplicas || cloudDescriptor.getReplicaType() == Replica.Type.NRT)) {
-      boolean shortCircuit = params.getBool("shortCircuit", true);       // currently just a debugging parameter to check distrib search on a single node
+      boolean shortCircuit =
+          params.getBool(
+              "shortCircuit",
+              true); // currently just a debugging parameter to check distrib search on a single
+      // node
 
       String targetHandler = params.get(ShardParams.SHARDS_QT);
-      shortCircuit = shortCircuit && targetHandler == null;             // if a different handler is specified, don't short-circuit
+      shortCircuit =
+          shortCircuit
+              && targetHandler == null; // if a different handler is specified, don't short-circuit

Review comment:
       Fix this - the trailing comment forces a three-line assignment; move it above the statement.
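       e.g. (placement only a suggestion, comment text unchanged):

           // if a different handler is specified, don't short-circuit
           shortCircuit = shortCircuit && targetHandler == null;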

##########
File path: solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
##########
@@ -342,20 +366,34 @@ private static String createSliceShardsStr(final List<String> shardUrls) {
     return sliceShardsStr.toString();
   }
 
-  private boolean canShortCircuit(String[] slices, boolean onlyNrtReplicas, SolrParams params, CloudDescriptor cloudDescriptor) {
-    // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
+  private boolean canShortCircuit(
+      String[] slices,
+      boolean onlyNrtReplicas,
+      SolrParams params,
+      CloudDescriptor cloudDescriptor) {
+    // Are we hosting the shard that this request is for, and are we active? If so, then handle it
+    // ourselves
     // and make it a non-distributed request.
     String ourSlice = cloudDescriptor.getShardId();
     String ourCollection = cloudDescriptor.getCollectionName();
     // Some requests may only be fulfilled by replicas of type Replica.Type.NRT
-    if (slices.length == 1 && slices[0] != null
-        && (slices[0].equals(ourSlice) || slices[0].equals(ourCollection + "_" + ourSlice))  // handle the <collection>_<slice> format
+    if (slices.length == 1
+        && slices[0] != null
+        && (slices[0].equals(ourSlice)
+            || slices[0].equals(
+                ourCollection + "_" + ourSlice)) // handle the <collection>_<slice> format
         && cloudDescriptor.getLastPublished() == Replica.State.ACTIVE
         && (!onlyNrtReplicas || cloudDescriptor.getReplicaType() == Replica.Type.NRT)) {
-      boolean shortCircuit = params.getBool("shortCircuit", true);       // currently just a debugging parameter to check distrib search on a single node
+      boolean shortCircuit =
+          params.getBool(
+              "shortCircuit",
+              true); // currently just a debugging parameter to check distrib search on a single
+      // node

Review comment:
       Fix this - the trailing comment is split so "// node" floats after the statement; move the comment above params.getBool(...).
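       e.g. (placement only a suggestion, comment text unchanged):

           // currently just a debugging parameter to check distrib search on a single node
           boolean shortCircuit = params.getBool("shortCircuit", true);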

##########
File path: solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
##########
@@ -718,30 +784,34 @@ private DocSlice collectorToDocSlice(Collector groupCollector, SolrIndexSearcher
       }
       return null;
     }
-    
-    private void addGroupSliceToOutputMap(NamedList<DocSlice> outMap, CharsRefBuilder charsRef,
-                                          long groupValue, DocSlice slice) {
-      if(fieldType instanceof StrField) {
-        final BytesRef bytesRef = ordBytes.get((int)groupValue);
+
+    private void addGroupSliceToOutputMap(
+        NamedList<DocSlice> outMap, CharsRefBuilder charsRef, long groupValue, DocSlice slice) {
+      if (fieldType instanceof StrField) {
+        final BytesRef bytesRef = ordBytes.get((int) groupValue);
         fieldType.indexedToReadable(bytesRef, charsRef);
         String group = charsRef.toString();
         outMap.add(group, slice);
       } else {
         outMap.add(numericToString(fieldType, groupValue), slice);
       }
     }
-    
+
     @Override
     public ScoreMode scoreMode() {
       final LongObjectMap<Collector> groups = getGroups();
       if (groups.isEmpty()) {
         return ScoreMode.COMPLETE; // doesn't matter?
       } else {
-        return groups.iterator().next().value.scoreMode(); // we assume all the collectors should have the same nature
+        return groups
+            .iterator()
+            .next()
+            .value
+            .scoreMode(); // we assume all the collectors should have the same nature

Review comment:
       Fix this - the trailing comment forces the groups.iterator().next() chain onto five lines; move the comment above the return.
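       e.g. (placement only a suggestion, comment text unchanged):

           // we assume all the collectors should have the same nature
           return groups.iterator().next().value.scoreMode();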

##########
File path: solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
##########
@@ -342,20 +366,34 @@ private static String createSliceShardsStr(final List<String> shardUrls) {
     return sliceShardsStr.toString();
   }
 
-  private boolean canShortCircuit(String[] slices, boolean onlyNrtReplicas, SolrParams params, CloudDescriptor cloudDescriptor) {
-    // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
+  private boolean canShortCircuit(
+      String[] slices,
+      boolean onlyNrtReplicas,
+      SolrParams params,
+      CloudDescriptor cloudDescriptor) {
+    // Are we hosting the shard that this request is for, and are we active? If so, then handle it
+    // ourselves
     // and make it a non-distributed request.

Review comment:
       Fix this - "ourselves" is stranded on its own comment line; rejoin the sentence.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
##########
@@ -342,20 +366,34 @@ private static String createSliceShardsStr(final List<String> shardUrls) {
     return sliceShardsStr.toString();
   }
 
-  private boolean canShortCircuit(String[] slices, boolean onlyNrtReplicas, SolrParams params, CloudDescriptor cloudDescriptor) {
-    // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
+  private boolean canShortCircuit(
+      String[] slices,
+      boolean onlyNrtReplicas,
+      SolrParams params,
+      CloudDescriptor cloudDescriptor) {
+    // Are we hosting the shard that this request is for, and are we active? If so, then handle it
+    // ourselves
     // and make it a non-distributed request.
     String ourSlice = cloudDescriptor.getShardId();
     String ourCollection = cloudDescriptor.getCollectionName();
     // Some requests may only be fulfilled by replicas of type Replica.Type.NRT
-    if (slices.length == 1 && slices[0] != null
-        && (slices[0].equals(ourSlice) || slices[0].equals(ourCollection + "_" + ourSlice))  // handle the <collection>_<slice> format
+    if (slices.length == 1
+        && slices[0] != null
+        && (slices[0].equals(ourSlice)
+            || slices[0].equals(
+                ourCollection + "_" + ourSlice)) // handle the <collection>_<slice> format

Review comment:
       Fix this - the trailing comment forces the equals() call to wrap; give the comment its own line above the condition.
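       One option (a sketch only; comment text unchanged):

           // handle the <collection>_<slice> format
           && (slices[0].equals(ourSlice) || slices[0].equals(ourCollection + "_" + ourSlice))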

##########
File path: solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java
##########
@@ -232,23 +234,25 @@ public void finishStage(ResponseBuilder rb) {
     scoreAndAddResultsToResponse(rb, contextData);
   }
 
-  
   @Override
   public void process(ResponseBuilder rb) throws IOException {
-    final PhrasesContextData contextData = (PhrasesContextData) rb.req.getContext().get(this.getClass());
+    final PhrasesContextData contextData =
+        (PhrasesContextData) rb.req.getContext().get(this.getClass());
     if (null == contextData) {
       // if prepare didn't give us anything to work with, then we should do nothing
       return;
     }
 
     // regardless of single node / shard, we need local stats...
-    Phrase.populateStats(contextData.allPhrases, contextData.fieldWeights.keySet(), rb.req.getSearcher());
+    Phrase.populateStats(
+        contextData.allPhrases, contextData.fieldWeights.keySet(), rb.req.getSearcher());
 
-    if ( rb.req.getParams().getBool(ShardParams.IS_SHARD, false) ) {
+    if (rb.req.getParams().getBool(ShardParams.IS_SHARD, false)) {
       // shard request, return stats for all phrases (in original order)
       SimpleOrderedMap<Object> output = new SimpleOrderedMap<>();
       output.add("_all", Phrase.formatShardResponse(contextData.allPhrases));
-      // TODO: might want to add numDocs() & getSumTotalTermFreq(f)/getDocCount(f) stats from each field...
+      // TODO: might want to add numDocs() & getSumTotalTermFreq(f)/getDocCount(f) stats from each
+      // field...
       // so that we can sum/merge them for use in scoring?

Review comment:
       Fix this - "field..." is stranded on its own line; rejoin the TODO comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryCancellationComponent.java
##########
@@ -16,89 +16,95 @@
  */
 package org.apache.solr.handler.component;
 
-import org.apache.solr.client.solrj.util.Cancellable;
-
 import java.io.IOException;
+import org.apache.solr.client.solrj.util.Cancellable;
 
 /** Responsible for handling query cancellation requests */
 public class QueryCancellationComponent extends SearchComponent {
-    public static final String COMPONENT_NAME = "querycancellation";
+  public static final String COMPONENT_NAME = "querycancellation";
 
-    private boolean shouldProcess;
+  private boolean shouldProcess;
 
-    @Override
-    public void prepare(ResponseBuilder rb) throws IOException
-    {
-        if (rb.isCancellation()) {
-            shouldProcess = true;
-        }
+  @Override
+  public void prepare(ResponseBuilder rb) throws IOException {
+    if (rb.isCancellation()) {
+      shouldProcess = true;
     }
+  }
 
-    @Override
-    public void process(ResponseBuilder rb) {
-        if (!shouldProcess) {
-            return;
-        }
-
-        String cancellationUUID = rb.getCancellationUUID();
-
-        if (cancellationUUID == null) {
-            throw new RuntimeException("Null query UUID seen");
-        }
+  @Override
+  public void process(ResponseBuilder rb) {
+    if (!shouldProcess) {
+      return;
+    }
 
-        Cancellable cancellableTask = rb.req.getCore().getCancellableQueryTracker().getCancellableTask(cancellationUUID);
+    String cancellationUUID = rb.getCancellationUUID();
 
-        if (cancellableTask != null) {
-            cancellableTask.cancel();
-            rb.rsp.add("cancellationResult", "success");
-        } else {
-            rb.rsp.add("cancellationResult", "not found");
-        }
+    if (cancellationUUID == null) {
+      throw new RuntimeException("Null query UUID seen");
     }
 
-    @Override
-    @SuppressWarnings("unchecked")
-    public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
-        if (!shouldProcess) {
-            return;
-        }
+    Cancellable cancellableTask =
+        rb.req.getCore().getCancellableQueryTracker().getCancellableTask(cancellationUUID);
 
-        boolean queryFound = false;
+    if (cancellableTask != null) {
+      cancellableTask.cancel();
+      rb.rsp.add("cancellationResult", "success");
+    } else {
+      rb.rsp.add("cancellationResult", "not found");
+    }
+  }
 
-        for (ShardResponse r : sreq.responses) {
+  @Override
+  @SuppressWarnings("unchecked")
+  public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
+    if (!shouldProcess) {
+      return;
+    }
 
-            String cancellationResult = (String) r.getSolrResponse()
-                    .getResponse().get("cancellationResult");
+    boolean queryFound = false;
 
-            if (cancellationResult.equalsIgnoreCase("success")) {
-                queryFound = true;
+    for (ShardResponse r : sreq.responses) {
 
-                break;
-            }
-        }
+      String cancellationResult =
+          (String) r.getSolrResponse().getResponse().get("cancellationResult");
 
-        // If any shard sees the query as present, then we mark the query as successfully cancelled. If no shard found
-        // the query, then that can denote that the query was not found. This is important since the query cancellation
-        // request is broadcast to all shards, and the query might have completed on some shards but not on others
+      if (cancellationResult.equalsIgnoreCase("success")) {
+        queryFound = true;
 
-        if(queryFound) {
-            rb.rsp.getValues().add("status", "Query with queryID " + rb.getCancellationUUID() +
-                    " cancelled successfully");
-            rb.rsp.getValues().add("responseCode", 200 /* HTTP OK */);
-        } else {
-            rb.rsp.getValues().add("status", "Query with queryID " + rb.getCancellationUUID() +
-                    " not found");
-            rb.rsp.getValues().add("responseCode", 404 /* HTTP NOT FOUND */);
-        }
+        break;
+      }
     }
 
-    @Override
-    public String getDescription() {
-        return "Supports cancellation of queries which are cancellable";
+    // If any shard sees the query as present, then we mark the query as successfully cancelled. If
+    // no shard found
+    // the query, then that can denote that the query was not found. This is important since the
+    // query cancellation
+    // request is broadcast to all shards, and the query might have completed on some shards but not
+    // on others

Review comment:
       Fix this - the comment is broken mid-clause in three places ("If / no shard found", "the / query cancellation", "not / on others"); rewrap it.
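       A possible rewrap, keeping the original wording:

           // If any shard sees the query as present, then we mark the query as successfully
           // cancelled. If no shard found the query, then that can denote that the query was not
           // found. This is important since the query cancellation request is broadcast to all
           // shards, and the query might have completed on some shards but not on others.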

##########
File path: solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
##########
@@ -233,59 +246,73 @@ public void init(PluginInfo info) {
       r.setSeed(Long.parseLong(v));
     }
 
-    BlockingQueue<Runnable> blockingQueue = (this.queueSize == -1) ?
-        new SynchronousQueue<Runnable>(this.accessPolicy) :
-        new ArrayBlockingQueue<Runnable>(this.queueSize, this.accessPolicy);
-
-    this.commExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(
-        this.corePoolSize,
-        this.maximumPoolSize,
-        this.keepAliveTime, TimeUnit.SECONDS,
-        blockingQueue,
-        new SolrNamedThreadFactory("httpShardExecutor"),
-        // the Runnable added to this executor handles all exceptions so we disable stack trace collection as an optimization
-        // see SOLR-11880 for more details
-        false
-    );
+    BlockingQueue<Runnable> blockingQueue =
+        (this.queueSize == -1)
+            ? new SynchronousQueue<Runnable>(this.accessPolicy)
+            : new ArrayBlockingQueue<Runnable>(this.queueSize, this.accessPolicy);
+
+    this.commExecutor =
+        new ExecutorUtil.MDCAwareThreadPoolExecutor(
+            this.corePoolSize,
+            this.maximumPoolSize,
+            this.keepAliveTime,
+            TimeUnit.SECONDS,
+            blockingQueue,
+            new SolrNamedThreadFactory("httpShardExecutor"),
+            // the Runnable added to this executor handles all exceptions so we disable stack trace
+            // collection as an optimization
+            // see SOLR-11880 for more details

Review comment:
       Fix this - "collection as an optimization" is stranded; rejoin the comment (keeping the SOLR-11880 reference).

##########
File path: solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java
##########
@@ -175,41 +173,44 @@ public int distributedProcess(ResponseBuilder rb) {
       sreq.params.remove(ShardParams.SHARDS);
       rb.addRequest(this, sreq);
       return ResponseBuilder.STAGE_GET_FIELDS;
-      
+
     } else if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
       // NOTE: we don't do any actual work in this stage, but we need to ensure that even if
-      // we are being used in isolation w/o QueryComponent that SearchHandler "tracks" a STAGE_GET_FIELDS
+      // we are being used in isolation w/o QueryComponent that SearchHandler "tracks" a
+      // STAGE_GET_FIELDS
       // so that finishStage(STAGE_GET_FIELDS) is called on us and we can add our merged results

Review comment:
       Fix this - "STAGE_GET_FIELDS" is stranded mid-sentence; rejoin the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java
##########
@@ -470,48 +508,57 @@ public String summarize(final List<Phrase> results) {
       return out.toString();
     }
   }
-      
-  
-  /** 
-   * Model the data known about a single (candidate) Phrase -- which may or may not be indexed 
+
+  /**
+   * Model the data known about a single (candidate) Phrase -- which may or may not be indexed
+   *
    * @lucene.internal
    */
   public static final class Phrase {
 
     /**
-     * Factory method for constructing a list of Phrases given the specified input and using the analyzer
-     * for the specified field.  The <code>maxIndexedPositionLength</code> and 
-     * <code>maxQueryPositionLength</code> provided *must* match the effective values used by 
-     * respective analyzers.
+     * Factory method for constructing a list of Phrases given the specified input and using the
+     * analyzer for the specified field. The <code>maxIndexedPositionLength</code> and <code>
+     * maxQueryPositionLength</code> provided *must* match the effective values used by respective
+     * analyzers.
      */
-    public static List<Phrase> extractPhrases(final String input, final SchemaField analysisField,
-                                              final int maxIndexedPositionLength,
-                                              final int maxQueryPositionLength) {
+    public static List<Phrase> extractPhrases(
+        final String input,
+        final SchemaField analysisField,
+        final int maxIndexedPositionLength,
+        final int maxQueryPositionLength) {
 
-      // TODO: rather then requiring the query analyzer to produce the Phrases for us (assuming Shingles)
-      // we could potentially just require that it produces unigrams compatible with the unigrams in the
-      // indexed fields, and then build our own Phrases at query time -- making the maxQueryPositionLength
+      // TODO: rather then requiring the query analyzer to produce the Phrases for us (assuming
+      // Shingles)
+      // we could potentially just require that it produces unigrams compatible with the unigrams in
+      // the
+      // indexed fields, and then build our own Phrases at query time -- making the
+      // maxQueryPositionLength
       // a 100% run time configuration option.

Review comment:
       Fix this - the TODO is broken at "(assuming / Shingles)", "in / the" and "the / maxQueryPositionLength"; rewrap it.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
##########
@@ -399,7 +403,8 @@ public void process(ResponseBuilder rb) throws IOException
     //
     GroupingSpecification groupingSpec = rb.getGroupingSpec();
     if (groupingSpec != null) {
-      cmd.setSegmentTerminateEarly(false); // not supported, silently ignore any segmentTerminateEarly flag
+      cmd.setSegmentTerminateEarly(
+          false); // not supported, silently ignore any segmentTerminateEarly flag

Review comment:
       Fix this - the trailing comment forces setSegmentTerminateEarly(false) to wrap; move the comment above the call.
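       e.g. (placement only a suggestion, comment text unchanged):

           // not supported, silently ignore any segmentTerminateEarly flag
           cmd.setSegmentTerminateEarly(false);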

##########
File path: solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java
##########
@@ -1028,78 +1112,93 @@ private static double computeFieldScore(final Phrase input,
         // sub-phrase of every other phrase (or at least: all siblings of diff sizes that add up to
         // an existing phrase).  As well as require us to give up on a predictible "range" of
         // legal values for scores (IIUC from the LLR docs)
-        
+
         final long phrase_ttf = words.getTTF(field);
         final long phrase_df = words.getDocFreq(field);
         final long words_conj_count = words.getConjunctionDocCount(field);
         max_sub_conj_count = Math.max(words_conj_count, max_sub_conj_count);
-        
-        final double max_wrapper_phrase_probability = 
-          words.getIndexedSuperPhrases().stream()
-          .mapToDouble(p -> p.getConjunctionDocCount(field) <= 0 ?
-                       // special case check -- we already know *our* conj count > 0,
-                       // but we need a similar check for wrapper phrases: if <= 0, their probability is 0
-                       0.0D : ((double)p.getDocFreq(field) / p.getConjunctionDocCount(field))).max().orElse(0.0D);
-        
-        final LongSummaryStatistics words_ttfs = 
-          words.getIndividualIndexedTerms().stream()
-          .collect(Collectors.summarizingLong(t -> t.getTTF(field)));
-        
-        final double words_phrase_prob = (phrase_ttf / (double)words_ttfs.getMin());
-        final double words_not_phrase_prob = (phrase_ttf / (double)words_ttfs.getMax());
-        
-        final double phrase_prob = (phrase_conj_count / (double)words_conj_count);
-        
-          
-        final double phrase_score = words_phrase_prob * (phrase_prob - max_wrapper_phrase_probability);
-        final double not_phrase_score =  words_not_phrase_prob * (1 - (phrase_prob - max_wrapper_phrase_probability));
+
+        final double max_wrapper_phrase_probability =
+            words.getIndexedSuperPhrases().stream()
+                .mapToDouble(
+                    p ->
+                        p.getConjunctionDocCount(field) <= 0
+                            ?
+                            // special case check -- we already know *our* conj count > 0,
+                            // but we need a similar check for wrapper phrases: if <= 0, their
+                            // probability is 0
+                            0.0D
+                            : ((double) p.getDocFreq(field) / p.getConjunctionDocCount(field)))
+                .max()
+                .orElse(0.0D);
+
+        final LongSummaryStatistics words_ttfs =
+            words.getIndividualIndexedTerms().stream()
+                .collect(Collectors.summarizingLong(t -> t.getTTF(field)));
+
+        final double words_phrase_prob = (phrase_ttf / (double) words_ttfs.getMin());
+        final double words_not_phrase_prob = (phrase_ttf / (double) words_ttfs.getMax());
+
+        final double phrase_prob = (phrase_conj_count / (double) words_conj_count);
+
+        final double phrase_score =
+            words_phrase_prob * (phrase_prob - max_wrapper_phrase_probability);
+        final double not_phrase_score =
+            words_not_phrase_prob * (1 - (phrase_prob - max_wrapper_phrase_probability));
         final double words_score = phrase_score - not_phrase_score;
-        
+
         field_score += words_score;
       }
 
-      // NOTE: the "scaling" factors below can "increase" negative scores (by reducing the unsigned value)
-      // when they should ideally be penalizing the scores further, but since we currently don't care
+      // NOTE: the "scaling" factors below can "increase" negative scores (by reducing the unsigned
+      // value)
+      // when they should ideally be penalizing the scores further, but since we currently don't
+      // care
       // about any score lower then 0, it's not worth worrying about.

Review comment:
       Fix this - "value)" and "care" are stranded on their own comment lines; rewrap the NOTE.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/PhrasesIdentificationComponent.java
##########
@@ -1028,78 +1112,93 @@ private static double computeFieldScore(final Phrase input,
         // sub-phrase of every other phrase (or at least: all siblings of diff sizes that add up to
         // an existing phrase).  As well as require us to give up on a predictible "range" of
         // legal values for scores (IIUC from the LLR docs)
-        
+
         final long phrase_ttf = words.getTTF(field);
         final long phrase_df = words.getDocFreq(field);
         final long words_conj_count = words.getConjunctionDocCount(field);
         max_sub_conj_count = Math.max(words_conj_count, max_sub_conj_count);
-        
-        final double max_wrapper_phrase_probability = 
-          words.getIndexedSuperPhrases().stream()
-          .mapToDouble(p -> p.getConjunctionDocCount(field) <= 0 ?
-                       // special case check -- we already know *our* conj count > 0,
-                       // but we need a similar check for wrapper phrases: if <= 0, their probability is 0
-                       0.0D : ((double)p.getDocFreq(field) / p.getConjunctionDocCount(field))).max().orElse(0.0D);
-        
-        final LongSummaryStatistics words_ttfs = 
-          words.getIndividualIndexedTerms().stream()
-          .collect(Collectors.summarizingLong(t -> t.getTTF(field)));
-        
-        final double words_phrase_prob = (phrase_ttf / (double)words_ttfs.getMin());
-        final double words_not_phrase_prob = (phrase_ttf / (double)words_ttfs.getMax());
-        
-        final double phrase_prob = (phrase_conj_count / (double)words_conj_count);
-        
-          
-        final double phrase_score = words_phrase_prob * (phrase_prob - max_wrapper_phrase_probability);
-        final double not_phrase_score =  words_not_phrase_prob * (1 - (phrase_prob - max_wrapper_phrase_probability));
+
+        final double max_wrapper_phrase_probability =
+            words.getIndexedSuperPhrases().stream()
+                .mapToDouble(
+                    p ->
+                        p.getConjunctionDocCount(field) <= 0
+                            ?
+                            // special case check -- we already know *our* conj count > 0,
+                            // but we need a similar check for wrapper phrases: if <= 0, their
+                            // probability is 0
+                            0.0D
+                            : ((double) p.getDocFreq(field) / p.getConjunctionDocCount(field)))
+                .max()
+                .orElse(0.0D);
+
+        final LongSummaryStatistics words_ttfs =
+            words.getIndividualIndexedTerms().stream()
+                .collect(Collectors.summarizingLong(t -> t.getTTF(field)));
+
+        final double words_phrase_prob = (phrase_ttf / (double) words_ttfs.getMin());
+        final double words_not_phrase_prob = (phrase_ttf / (double) words_ttfs.getMax());
+
+        final double phrase_prob = (phrase_conj_count / (double) words_conj_count);
+
+        final double phrase_score =
+            words_phrase_prob * (phrase_prob - max_wrapper_phrase_probability);
+        final double not_phrase_score =
+            words_not_phrase_prob * (1 - (phrase_prob - max_wrapper_phrase_probability));
         final double words_score = phrase_score - not_phrase_score;
-        
+
         field_score += words_score;
       }
 
-      // NOTE: the "scaling" factors below can "increase" negative scores (by reducing the unsigned value)
-      // when they should ideally be penalizing the scores further, but since we currently don't care
+      // NOTE: the "scaling" factors below can "increase" negative scores (by reducing the unsigned
+      // value)
+      // when they should ideally be penalizing the scores further, but since we currently don't
+      // care
       // about any score lower then 0, it's not worth worrying about.
-      
-      // Average the accumulated score over the number of actual indexed sub-phrases that contributed
+
+      // Average the accumulated score over the number of actual indexed sub-phrases that
+      // contributed
       //
-      // NOTE: since we subsequently want to multiply the score by a fraction with num_indexed_sub_phrases
+      // NOTE: since we subsequently want to multiply the score by a fraction with
+      // num_indexed_sub_phrases
       // in the numerator, we can skip this...
       // SEE BELOW // field_score /= (double) num_indexed_sub_phrases;
-      
+
       // If we leave field_score as is, then a phrase longer then the maxIndexedPositionLength
-      // will never score higher then the highest scoring sub-phrase it has (because we've averaged them)
+      // will never score higher then the highest scoring sub-phrase it has (because we've averaged
+      // them)
       // so we scale the scores against the longest possible phrase length we're considering
       //
       // NOTE: We don't use num_indexed_sub_phrases in the numerator since we skipped it when
       // averating above...
-      field_score *= ( 1.0D // SEE ABOVE // * ( (double)num_indexed_sub_phrases )
-                       / (1 + maxQueryPositionLength - maxIndexedPositionLength) );
-      
+      field_score *=
+          (1.0D // SEE ABOVE // * ( (double)num_indexed_sub_phrases )
+              / (1 + maxQueryPositionLength - maxIndexedPositionLength));
+
       // scale the field_score based on the ratio of the conjunction docCount for the whole phrase
-      // realtive to the largest conjunction docCount of it's (largest indexed) sub phrases, to penalize
+      // realtive to the largest conjunction docCount of it's (largest indexed) sub phrases, to
+      // penalize
       // the scores of very long phrases that exist very rarely relative to the how often their
       // sub phrases exist in the index

Review comment:
       Fix this - "penalize" is stranded mid-sentence; rejoin the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java
##########
@@ -121,16 +115,18 @@ public int size() {
     if (numRefinableValues < facetFieldOffset) {
       return Collections.<PivotFacetValue>emptyList();
     }
-    
-    final int offsetPlusCount = (facetFieldLimit >= 0) 
-      ? Math.min(facetFieldLimit + facetFieldOffset, numRefinableValues) 
-      : numRefinableValues;
-    
+
+    final int offsetPlusCount =
+        (facetFieldLimit >= 0)
+            ? Math.min(facetFieldLimit + facetFieldOffset, numRefinableValues)
+            : numRefinableValues;
+
     if (1 < facetFieldMinimumCount && facetFieldSort.equals(FacetParams.FACET_SORT_INDEX)) {
       // we have to skip any values that (still) don't meet the mincount
       //
       // TODO: in theory we could avoid this extra check by trimming sooner (SOLR-6331)
-      // but since that's a destructive op that blows away the `valuesMap` which we (might?) still need
+      // but since that's a destructive op that blows away the `valuesMap` which we (might?) still
+      // need
       // (and pre-emptively skips the offsets) we're avoiding re-working that optimization
       // for now until/unless someone gives it more careful thought...

Review comment:
       Fix this - "need" is stranded on its own comment line; rejoin the sentence.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
##########
@@ -1229,42 +1271,50 @@ protected void returnFields(ResponseBuilder rb, ShardRequest sreq) {
       for (ShardResponse srsp : sreq.responses) {
         if (srsp.getException() != null) {
           // Don't try to get the documents if there was an exception in the shard
-          if(rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
+          if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
             @SuppressWarnings("unchecked")
-            NamedList<Object> shardInfo = (NamedList<Object>) rb.rsp.getValues().get(ShardParams.SHARDS_INFO);
+            NamedList<Object> shardInfo =
+                (NamedList<Object>) rb.rsp.getValues().get(ShardParams.SHARDS_INFO);
             @SuppressWarnings("unchecked")
             SimpleOrderedMap<Object> nl = (SimpleOrderedMap<Object>) shardInfo.get(srsp.getShard());
             if (nl.get("error") == null) {
               // Add the error to the shards info section if it wasn't added before
               Throwable t = srsp.getException();
-              if(t instanceof SolrServerException) {
-                t = ((SolrServerException)t).getCause();
+              if (t instanceof SolrServerException) {
+                t = ((SolrServerException) t).getCause();
               }
-              nl.add("error", t.toString() );
+              nl.add("error", t.toString());
               StringWriter trace = new StringWriter();
               t.printStackTrace(new PrintWriter(trace));
-              nl.add("trace", trace.toString() );
+              nl.add("trace", trace.toString());
             }
           }
-          
+
           continue;
         }
         {
-          NamedList<?> responseHeader = (NamedList<?>)srsp.getSolrResponse().getResponse().get("responseHeader");
-          if (Boolean.TRUE.equals(responseHeader.getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY))) {
-            rb.rsp.getResponseHeader().asShallowMap()
-               .put(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
+          NamedList<?> responseHeader =
+              (NamedList<?>) srsp.getSolrResponse().getResponse().get("responseHeader");
+          if (Boolean.TRUE.equals(
+              responseHeader.getBooleanArg(
+                  SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY))) {
+            rb.rsp
+                .getResponseHeader()
+                .asShallowMap()
+                .put(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
           }
         }
-        SolrDocumentList docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
+        SolrDocumentList docs =
+            (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
         for (SolrDocument doc : docs) {
           Object id = doc.getFieldValue(keyFieldName);
           ShardDoc sdoc = rb.resultIds.get(id.toString());
           if (sdoc != null) {
             if (returnScores) {
               doc.setField("score", sdoc.score);
             } else {
-              // Score might have been added (in createMainQuery) to shard-requests (and therefore in shard-response-docs)
+              // Score might have been added (in createMainQuery) to shard-requests (and therefore
+              // in shard-response-docs)
               // Remove score if the outer request did not ask for it returned

Review comment:
       Fix this - the parenthetical "(and therefore in shard-response-docs)" is split across lines; rejoin the comment.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
##########
@@ -622,44 +669,57 @@ private void addDebugInfo(ResponseBuilder rb, Elevation elevation) {
     rb.addDebugInfo("queryBoosting", dbg);
   }
 
-  //---------------------------------------------------------------------------------
+  // ---------------------------------------------------------------------------------
   // Boosted docs helper
-  //---------------------------------------------------------------------------------
+  // ---------------------------------------------------------------------------------
 
   /**
-   * Resolves a set of boosted docs by uniqueKey to a map of docIds mapped to a priority value &gt; 0.
+   * Resolves a set of boosted docs by uniqueKey to a map of docIds mapped to a priority value &gt;
+   * 0.
+   *
    * @param indexSearcher the SolrIndexSearcher; required
-   * @param boosted are the set of uniqueKey values to be boosted in priority order.  If null; returns null.
-   * @param context the {@link SolrQueryRequest#getContext()} or null if none.  We'll cache our results here.
+   * @param boosted are the set of uniqueKey values to be boosted in priority order. If null;
+   *     returns null.
+   * @param context the {@link SolrQueryRequest#getContext()} or null if none. We'll cache our
+   *     results here.
    */
-  //TODO consider simplifying to remove "boosted" arg which can be looked up in context via BOOSTED key?
-  public static IntIntHashMap getBoostDocs(SolrIndexSearcher indexSearcher, Set<BytesRef> boosted,
-                                           Map<Object,Object> context) throws IOException {
+  // TODO consider simplifying to remove "boosted" arg which can be looked up in context via BOOSTED
+  // key?
+  public static IntIntHashMap getBoostDocs(
+      SolrIndexSearcher indexSearcher, Set<BytesRef> boosted, Map<Object, Object> context)
+      throws IOException {
 
     IntIntHashMap boostDocs = null;
 
     if (boosted != null) {
 
-      //First see if it's already in the request context. Could have been put there by another caller.
+      // First see if it's already in the request context. Could have been put there by another
+      // caller.
       if (context != null) {
         boostDocs = (IntIntHashMap) context.get(BOOSTED_DOCIDS);
         if (boostDocs != null) {
           return boostDocs;
         }
       }
 
-      //Not in the context yet so load it.
+      // Not in the context yet so load it.
       boostDocs = new IntIntHashMap(boosted.size()); // docId to boost
-      int priority = boosted.size() + 1; // the corresponding priority for each boosted key (starts at this; decrements down)
+      int priority =
+          boosted.size()
+              + 1; // the corresponding priority for each boosted key (starts at this; decrements
+      // down)

Review comment:
       Fix this - the trailing comment is split so "// down)" floats after the statement; move the comment above the assignment.
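       e.g. (placement only a suggestion, comment text unchanged):

           // the corresponding priority for each boosted key (starts at this; decrements down)
           int priority = boosted.size() + 1;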

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
##########
@@ -1183,7 +1277,8 @@ protected void doSetNextReader(LeafReaderContext context) {
 
         @Override
         public int compare(int slot1, int slot2) {
-          return values[slot1] - values[slot2];  // values will be small enough that there is no overflow concern
+          return values[slot1]
+              - values[slot2]; // values will be small enough that there is no overflow concern

Review comment:
       Fix this - the trailing comment forces the subtraction to wrap; move the comment above the return.
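       e.g. (placement only a suggestion, comment text unchanged):

           // values will be small enough that there is no overflow concern
           return values[slot1] - values[slot2];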

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
##########
@@ -208,23 +229,27 @@ public void getFacetRangeCounts(RangeFacetRequest rangeFacetRequest, NamedList<O
       intervals.add(after);
     }
 
-    IntervalFacets.FacetInterval[] intervalsArray = intervals.toArray(new IntervalFacets.FacetInterval[intervals.size()]);
+    IntervalFacets.FacetInterval[] intervalsArray =
+        intervals.toArray(new IntervalFacets.FacetInterval[intervals.size()]);
     // don't use the ArrayList anymore
     intervals = null;
 
     new IntervalFacets(sf, searcher, computeDocSet(docsOrig, rfr.getExcludeTags()), intervalsArray);
 
     int intervalIndex = 0;
     int lastIntervalIndex = intervalsArray.length - 1;
-    // if the user requested "BEFORE", it will be the first of the intervals. Needs to be added to the
+    // if the user requested "BEFORE", it will be the first of the intervals. Needs to be added to
+    // the
     // response named list instead of with the counts
     if (includeBefore) {
       res.add(intervalsArray[intervalIndex].getKey(), intervalsArray[intervalIndex].getCount());
       intervalIndex++;
     }
 
-    // if the user requested "BETWEEN", it will be the first or second of the intervals (depending on if
-    // "BEFORE" was also requested). Needs to be added to the response named list instead of with the counts
+    // if the user requested "BETWEEN", it will be the first or second of the intervals (depending
+    // on if
+    // "BEFORE" was also requested). Needs to be added to the response named list instead of with
+    // the counts

Review comment:
       Fix this - both comments are broken mid-sentence ("added to / the", "depending / on if", "with / the counts"); rewrap them.
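       A possible rewrap of the second comment, keeping the original wording:

           // if the user requested "BETWEEN", it will be the first or second of the intervals
           // (depending on if "BEFORE" was also requested). Needs to be added to the response
           // named list instead of with the counts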

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
##########
@@ -622,44 +669,57 @@ private void addDebugInfo(ResponseBuilder rb, Elevation elevation) {
     rb.addDebugInfo("queryBoosting", dbg);
   }
 
-  //---------------------------------------------------------------------------------
+  // ---------------------------------------------------------------------------------
   // Boosted docs helper
-  //---------------------------------------------------------------------------------
+  // ---------------------------------------------------------------------------------
 
   /**
-   * Resolves a set of boosted docs by uniqueKey to a map of docIds mapped to a priority value &gt; 0.
+   * Resolves a set of boosted docs by uniqueKey to a map of docIds mapped to a priority value &gt;
+   * 0.
+   *
    * @param indexSearcher the SolrIndexSearcher; required
-   * @param boosted are the set of uniqueKey values to be boosted in priority order.  If null; returns null.
-   * @param context the {@link SolrQueryRequest#getContext()} or null if none.  We'll cache our results here.
+   * @param boosted are the set of uniqueKey values to be boosted in priority order. If null;
+   *     returns null.
+   * @param context the {@link SolrQueryRequest#getContext()} or null if none. We'll cache our
+   *     results here.
    */
-  //TODO consider simplifying to remove "boosted" arg which can be looked up in context via BOOSTED key?
-  public static IntIntHashMap getBoostDocs(SolrIndexSearcher indexSearcher, Set<BytesRef> boosted,
-                                           Map<Object,Object> context) throws IOException {
+  // TODO consider simplifying to remove "boosted" arg which can be looked up in context via BOOSTED
+  // key?
+  public static IntIntHashMap getBoostDocs(
+      SolrIndexSearcher indexSearcher, Set<BytesRef> boosted, Map<Object, Object> context)
+      throws IOException {
 
     IntIntHashMap boostDocs = null;
 
     if (boosted != null) {
 
-      //First see if it's already in the request context. Could have been put there by another caller.
+      // First see if it's already in the request context. Could have been put there by another
+      // caller.
       if (context != null) {
         boostDocs = (IntIntHashMap) context.get(BOOSTED_DOCIDS);
         if (boostDocs != null) {
           return boostDocs;
         }
       }
 
-      //Not in the context yet so load it.
+      // Not in the context yet so load it.
       boostDocs = new IntIntHashMap(boosted.size()); // docId to boost
-      int priority = boosted.size() + 1; // the corresponding priority for each boosted key (starts at this; decrements down)
+      int priority =
+          boosted.size()
+              + 1; // the corresponding priority for each boosted key (starts at this; decrements
+      // down)
       for (BytesRef uniqueKey : boosted) {
         priority--; // therefore first == bosted.size(); last will be 1
-        long segAndId = indexSearcher.lookupId(uniqueKey); // higher 32 bits == segment ID, low 32 bits == doc ID
+        long segAndId =
+            indexSearcher.lookupId(
+                uniqueKey); // higher 32 bits == segment ID, low 32 bits == doc ID

Review comment:
       Fix this: the trailing comments force awkward wraps here ("// down)" ends up on its own line, and the lookupId() call is split just to fit its comment). Moving the comments onto their own lines above the statements would let the code format cleanly.
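       For example, the comments could move above the statements (a sketch only, not a formatter requirement):

      // the corresponding priority for each boosted key (starts at this; decrements down)
      int priority = boosted.size() + 1;

   and inside the for loop:

        // higher 32 bits == segment ID, low 32 bits == doc ID
        long segAndId = indexSearcher.lookupId(uniqueKey);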

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
##########
@@ -126,35 +122,40 @@ public void process(ResponseBuilder rb) throws IOException
       Replica.Type replicaType = cloudDesc.getReplicaType();
       if (replicaType != null) {
         if (replicaType == Replica.Type.PULL) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, 
-              String.format(Locale.ROOT, "%s can't handle realtime get requests. Replicas of type %s do not support these type of requests", 
+          throw new SolrException(
+              ErrorCode.BAD_REQUEST,
+              String.format(
+                  Locale.ROOT,
+                  "%s can't handle realtime get requests. Replicas of type %s do not support these type of requests",
                   cloudDesc.getCoreNodeName(),
                   Replica.Type.PULL));
-        } 
-        // non-leader TLOG replicas should not respond to distrib /get requests, but internal requests are OK
+        }
+        // non-leader TLOG replicas should not respond to distrib /get requests, but internal
+        // requests are OK
       }
     }
-    
+
     if (!params.getBool(COMPONENT_NAME, true)) {
       return;
     }
 
-    //TODO remove this at Solr 10
-    //After SOLR-14641 other nodes won't call RTG with this param.
-    //Just keeping here for backward-compatibility, if we remove this, nodes with older versions will
-    //assume that this node can't handle version ranges.
+    // TODO remove this at Solr 10
+    // After SOLR-14641 other nodes won't call RTG with this param.
+    // Just keeping here for backward-compatibility, if we remove this, nodes with older versions
+    // will
+    // assume that this node can't handle version ranges.

Review comment:
       Fix this: "// will" is stranded on its own line. Rejoin the sentence so the comment wraps at a natural word boundary.
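       For instance (suggestion only):

    // TODO remove this at Solr 10
    // After SOLR-14641 other nodes won't call RTG with this param.
    // Just keeping here for backward-compatibility; if we remove this, nodes with older
    // versions will assume that this node can't handle version ranges.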

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
##########
@@ -425,7 +449,8 @@ protected ElevationProvider loadElevationProvider(XmlConfigFile config) {
         elevationBuilder.addElevatedIds(Collections.singletonList(id));
       }
 
-      // It is allowed to define multiple times different elevations for the same query. In this case the elevations
+      // It is allowed to define multiple times different elevations for the same query. In this
+      // case the elevations
       // are merged in the ElevationBuilder (they will be triggered at the same time).

Review comment:
       Fix this: the reformat leaves "// case the elevations" split from its sentence while the next (untouched) line still wraps at the old width. Reflow the whole comment as one block.
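       Something like (suggestion only):

      // It is allowed to define multiple times different elevations for the same query. In this
      // case the elevations are merged in the ElevationBuilder (they will be triggered at the
      // same time).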

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
##########
@@ -823,255 +841,282 @@ protected void createMainQuery(ResponseBuilder rb) {
 
     rb.addRequest(this, sreq);
   }
-  
+
   protected boolean addFL(StringBuilder fl, String field, boolean additionalAdded) {
     if (additionalAdded) fl.append(",");
     fl.append(field);
     return true;
   }
 
   protected void mergeIds(ResponseBuilder rb, ShardRequest sreq) {
-      List<MergeStrategy> mergeStrategies = rb.getMergeStrategies();
-      if(mergeStrategies != null) {
-        mergeStrategies.sort(MergeStrategy.MERGE_COMP);
-        boolean idsMerged = false;
-        for(MergeStrategy mergeStrategy : mergeStrategies) {
-          mergeStrategy.merge(rb, sreq);
-          if(mergeStrategy.mergesIds()) {
-            idsMerged = true;
-          }
+    List<MergeStrategy> mergeStrategies = rb.getMergeStrategies();
+    if (mergeStrategies != null) {
+      mergeStrategies.sort(MergeStrategy.MERGE_COMP);
+      boolean idsMerged = false;
+      for (MergeStrategy mergeStrategy : mergeStrategies) {
+        mergeStrategy.merge(rb, sreq);
+        if (mergeStrategy.mergesIds()) {
+          idsMerged = true;
         }
+      }
 
-        if(idsMerged) {
-          return; //ids were merged above so return.
-        }
+      if (idsMerged) {
+        return; // ids were merged above so return.
       }
+    }
 
-      SortSpec ss = rb.getSortSpec();
-      Sort sort = ss.getSort();
+    SortSpec ss = rb.getSortSpec();
+    Sort sort = ss.getSort();
 
-      SortField[] sortFields = null;
-      if(sort != null) sortFields = sort.getSort();
-      else {
-        sortFields = new SortField[]{SortField.FIELD_SCORE};
-      }
- 
-      IndexSchema schema = rb.req.getSchema();
-      SchemaField uniqueKeyField = schema.getUniqueKeyField();
+    SortField[] sortFields = null;
+    if (sort != null) sortFields = sort.getSort();
+    else {
+      sortFields = new SortField[] {SortField.FIELD_SCORE};
+    }
 
+    IndexSchema schema = rb.req.getSchema();
+    SchemaField uniqueKeyField = schema.getUniqueKeyField();
 
-      // id to shard mapping, to eliminate any accidental dups
-      HashMap<Object,String> uniqueDoc = new HashMap<>();
+    // id to shard mapping, to eliminate any accidental dups
+    HashMap<Object, String> uniqueDoc = new HashMap<>();
 
-      // Merge the docs via a priority queue so we don't have to sort *all* of the
-      // documents... we only need to order the top (rows+start)
-      final ShardFieldSortedHitQueue queue = new ShardFieldSortedHitQueue(sortFields, ss.getOffset() + ss.getCount(), rb.req.getSearcher());
+    // Merge the docs via a priority queue so we don't have to sort *all* of the
+    // documents... we only need to order the top (rows+start)
+    final ShardFieldSortedHitQueue queue =
+        new ShardFieldSortedHitQueue(
+            sortFields, ss.getOffset() + ss.getCount(), rb.req.getSearcher());
 
-      NamedList<Object> shardInfo = null;
-      if(rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
-        shardInfo = new SimpleOrderedMap<>();
-        rb.rsp.getValues().add(ShardParams.SHARDS_INFO,shardInfo);
-      }
-      
-      long numFound = 0;
-      boolean hitCountIsExact = true;
-      Float maxScore=null;
-      boolean thereArePartialResults = false;
-      Boolean segmentTerminatedEarly = null;
-      for (ShardResponse srsp : sreq.responses) {
-        SolrDocumentList docs = null;
-        NamedList<?> responseHeader = null;
-
-        if(shardInfo!=null) {
-          SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();
-          
-          if (srsp.getException() != null) {
-            Throwable t = srsp.getException();
-            if(t instanceof SolrServerException) {
-              t = ((SolrServerException)t).getCause();
-            }
-            nl.add("error", t.toString() );
-            StringWriter trace = new StringWriter();
-            t.printStackTrace(new PrintWriter(trace));
-            nl.add("trace", trace.toString() );
-            if (srsp.getShardAddress() != null) {
-              nl.add("shardAddress", srsp.getShardAddress());
-            }
+    NamedList<Object> shardInfo = null;
+    if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
+      shardInfo = new SimpleOrderedMap<>();
+      rb.rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo);
+    }
+
+    long numFound = 0;
+    boolean hitCountIsExact = true;
+    Float maxScore = null;
+    boolean thereArePartialResults = false;
+    Boolean segmentTerminatedEarly = null;
+    for (ShardResponse srsp : sreq.responses) {
+      SolrDocumentList docs = null;
+      NamedList<?> responseHeader = null;
+
+      if (shardInfo != null) {
+        SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();
+
+        if (srsp.getException() != null) {
+          Throwable t = srsp.getException();
+          if (t instanceof SolrServerException) {
+            t = ((SolrServerException) t).getCause();
           }
-          else {
-            responseHeader = (NamedList<?>)srsp.getSolrResponse().getResponse().get("responseHeader");
-            final Object rhste = responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
-            if (rhste != null) {
-              nl.add(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY, rhste);
-            }
-            docs = (SolrDocumentList)srsp.getSolrResponse().getResponse().get("response");
-            nl.add("numFound", docs.getNumFound());
-            nl.add("numFoundExact", docs.getNumFoundExact());
-            nl.add("maxScore", docs.getMaxScore());
+          nl.add("error", t.toString());
+          StringWriter trace = new StringWriter();
+          t.printStackTrace(new PrintWriter(trace));
+          nl.add("trace", trace.toString());
+          if (srsp.getShardAddress() != null) {
             nl.add("shardAddress", srsp.getShardAddress());
           }
-          if(srsp.getSolrResponse()!=null) {
-            nl.add("time", srsp.getSolrResponse().getElapsedTime());
+        } else {
+          responseHeader =
+              (NamedList<?>) srsp.getSolrResponse().getResponse().get("responseHeader");
+          final Object rhste =
+              responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
+          if (rhste != null) {
+            nl.add(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY, rhste);
           }
-
-          shardInfo.add(srsp.getShard(), nl);
-        }
-        // now that we've added the shard info, let's only proceed if we have no error.
-        if (srsp.getException() != null) {
-          thereArePartialResults = true;
-          continue;
-        }
-
-        if (docs == null) { // could have been initialized in the shards info block above
-          docs = (SolrDocumentList)srsp.getSolrResponse().getResponse().get("response");
+          docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
+          nl.add("numFound", docs.getNumFound());
+          nl.add("numFoundExact", docs.getNumFoundExact());
+          nl.add("maxScore", docs.getMaxScore());
+          nl.add("shardAddress", srsp.getShardAddress());
         }
-        
-        if (responseHeader == null) { // could have been initialized in the shards info block above
-          responseHeader = (NamedList<?>)srsp.getSolrResponse().getResponse().get("responseHeader");
+        if (srsp.getSolrResponse() != null) {
+          nl.add("time", srsp.getSolrResponse().getElapsedTime());
         }
 
-        final boolean thisResponseIsPartial;
-        thisResponseIsPartial = Boolean.TRUE.equals(responseHeader.getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));
-        thereArePartialResults |= thisResponseIsPartial;
-        
-        if (!Boolean.TRUE.equals(segmentTerminatedEarly)) {
-          final Object ste = responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
-          if (Boolean.TRUE.equals(ste)) {
-            segmentTerminatedEarly = Boolean.TRUE;
-          } else if (Boolean.FALSE.equals(ste)) {
-            segmentTerminatedEarly = Boolean.FALSE;
-          }
-        }
-        
-        // calculate global maxScore and numDocsFound
-        if (docs.getMaxScore() != null) {
-          maxScore = maxScore==null ? docs.getMaxScore() : Math.max(maxScore, docs.getMaxScore());
-        }
-        numFound += docs.getNumFound();
-        
-        if (hitCountIsExact && Boolean.FALSE.equals(docs.getNumFoundExact())) {
-          hitCountIsExact = false;
-        }
+        shardInfo.add(srsp.getShard(), nl);
+      }
+      // now that we've added the shard info, let's only proceed if we have no error.
+      if (srsp.getException() != null) {
+        thereArePartialResults = true;
+        continue;
+      }
 
-        @SuppressWarnings("unchecked")
-        NamedList<List<Object>> sortFieldValues = (NamedList<List<Object>>)(srsp.getSolrResponse().getResponse().get("sort_values"));
-        if (null == sortFieldValues) {
-          sortFieldValues = new NamedList<>();
-        }
+      if (docs == null) { // could have been initialized in the shards info block above
+        docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
+      }
 
-        // if the SortSpec contains a field besides score or the Lucene docid, then the values will need to be unmarshalled from
-        // sortFieldValues.
-        boolean needsUnmarshalling = ss.includesNonScoreOrDocField();
+      if (responseHeader == null) { // could have been initialized in the shards info block above
+        responseHeader = (NamedList<?>) srsp.getSolrResponse().getResponse().get("responseHeader");
+      }
 
-        // if we need to unmarshal the sortFieldValues for sorting but we have none, which can happen if partial results are
-        // being returned from the shard, then skip merging the results for the shard. This avoids an exception below.
-        // if the shard returned partial results but we don't need to unmarshal (a normal scoring query), then merge what we got.
-        if (thisResponseIsPartial && sortFieldValues.size() == 0 && needsUnmarshalling) {
-          continue;
+      final boolean thisResponseIsPartial;
+      thisResponseIsPartial =
+          Boolean.TRUE.equals(
+              responseHeader.getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));
+      thereArePartialResults |= thisResponseIsPartial;
+
+      if (!Boolean.TRUE.equals(segmentTerminatedEarly)) {
+        final Object ste =
+            responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
+        if (Boolean.TRUE.equals(ste)) {
+          segmentTerminatedEarly = Boolean.TRUE;
+        } else if (Boolean.FALSE.equals(ste)) {
+          segmentTerminatedEarly = Boolean.FALSE;
         }
+      }
 
-        // Checking needsUnmarshalling saves on iterating the SortFields in the SortSpec again.
-        NamedList<List<Object>> unmarshalledSortFieldValues = needsUnmarshalling ? unmarshalSortValues(ss, sortFieldValues, schema) : new NamedList<>();
-
-        // go through every doc in this response, construct a ShardDoc, and
-        // put it in the priority queue so it can be ordered.
-        for (int i=0; i<docs.size(); i++) {
-          SolrDocument doc = docs.get(i);
-          Object id = doc.getFieldValue(uniqueKeyField.getName());
-
-          String prevShard = uniqueDoc.put(id, srsp.getShard());
-          if (prevShard != null) {
-            // duplicate detected
-            numFound--;
-
-            // For now, just always use the first encountered since we can't currently
-            // remove the previous one added to the priority queue.  If we switched
-            // to the Java5 PriorityQueue, this would be easier.
-            continue;
-            // make which duplicate is used deterministic based on shard
-            // if (prevShard.compareTo(srsp.shard) >= 0) {
-            //  TODO: remove previous from priority queue
-            //  continue;
-            // }
-          }
+      // calculate global maxScore and numDocsFound
+      if (docs.getMaxScore() != null) {
+        maxScore = maxScore == null ? docs.getMaxScore() : Math.max(maxScore, docs.getMaxScore());
+      }
+      numFound += docs.getNumFound();
 
-          ShardDoc shardDoc = new ShardDoc();
-          shardDoc.id = id;
-          shardDoc.shard = srsp.getShard();
-          shardDoc.orderInShard = i;
-          Object scoreObj = doc.getFieldValue("score");
-          if (scoreObj != null) {
-            if (scoreObj instanceof String) {
-              shardDoc.score = Float.parseFloat((String)scoreObj);
-            } else {
-              shardDoc.score = (Float)scoreObj;
-            }
-          }
+      if (hitCountIsExact && Boolean.FALSE.equals(docs.getNumFoundExact())) {
+        hitCountIsExact = false;
+      }
 
-          shardDoc.sortFieldValues = unmarshalledSortFieldValues;
-
-          queue.insertWithOverflow(shardDoc);
-        } // end for-each-doc-in-response
-      } // end for-each-response
-      
-      // The queue now has 0 -> queuesize docs, where queuesize <= start + rows
-      // So we want to pop the last documents off the queue to get
-      // the docs offset -> queuesize
-      int resultSize = queue.size() - ss.getOffset();
-      resultSize = Math.max(0, resultSize);  // there may not be any docs in range
-
-      Map<Object,ShardDoc> resultIds = new HashMap<>();
-      for (int i=resultSize-1; i>=0; i--) {
-        ShardDoc shardDoc = queue.pop();
-        shardDoc.positionInResponse = i;
-        // Need the toString() for correlation with other lists that must
-        // be strings (like keys in highlighting, explain, etc)
-        resultIds.put(shardDoc.id.toString(), shardDoc);
+      @SuppressWarnings("unchecked")
+      NamedList<List<Object>> sortFieldValues =
+          (NamedList<List<Object>>) (srsp.getSolrResponse().getResponse().get("sort_values"));
+      if (null == sortFieldValues) {
+        sortFieldValues = new NamedList<>();
       }
 
-      // Add hits for distributed requests
-      // https://issues.apache.org/jira/browse/SOLR-3518
-      rb.rsp.addToLog("hits", numFound);
-
-      SolrDocumentList responseDocs = new SolrDocumentList();
-      if (maxScore!=null) responseDocs.setMaxScore(maxScore);
-      responseDocs.setNumFound(numFound);
-      responseDocs.setNumFoundExact(hitCountIsExact);
-      responseDocs.setStart(ss.getOffset());
-      // size appropriately
-      for (int i=0; i<resultSize; i++) responseDocs.add(null);
-
-      // save these results in a private area so we can access them
-      // again when retrieving stored fields.
-      // TODO: use ResponseBuilder (w/ comments) or the request context?
-      rb.resultIds = resultIds;
-      rb.setResponseDocs(responseDocs);
-
-      populateNextCursorMarkFromMergedShards(rb);
-
-      if (thereArePartialResults) {
-         rb.rsp.getResponseHeader().asShallowMap()
-                   .put(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
+      // if the SortSpec contains a field besides score or the Lucene docid, then the values will
+      // need to be unmarshalled from
+      // sortFieldValues.

Review comment:
       Fix this: the wrap leaves "// sortFieldValues." as a one-word line. Rejoin the comment so it breaks naturally.
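       e.g. (suggestion only):

      // if the SortSpec contains a field besides score or the Lucene docid, then the values
      // will need to be unmarshalled from sortFieldValues.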

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
##########
@@ -208,23 +229,27 @@ public void getFacetRangeCounts(RangeFacetRequest rangeFacetRequest, NamedList<O
       intervals.add(after);
     }
 
-    IntervalFacets.FacetInterval[] intervalsArray = intervals.toArray(new IntervalFacets.FacetInterval[intervals.size()]);
+    IntervalFacets.FacetInterval[] intervalsArray =
+        intervals.toArray(new IntervalFacets.FacetInterval[intervals.size()]);
     // don't use the ArrayList anymore
     intervals = null;
 
     new IntervalFacets(sf, searcher, computeDocSet(docsOrig, rfr.getExcludeTags()), intervalsArray);
 
     int intervalIndex = 0;
     int lastIntervalIndex = intervalsArray.length - 1;
-    // if the user requested "BEFORE", it will be the first of the intervals. Needs to be added to the
+    // if the user requested "BEFORE", it will be the first of the intervals. Needs to be added to
+    // the
     // response named list instead of with the counts

Review comment:
       Fix this: "// the" is stranded on its own line and the following (unchanged) line keeps the old wrap. Reflow the comment as a whole.
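       e.g. (suggestion only):

    // if the user requested "BEFORE", it will be the first of the intervals. Needs to be
    // added to the response named list instead of with the counts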

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
##########
@@ -227,138 +228,158 @@ public void process(ResponseBuilder rb) throws IOException
     SolrDocumentList docList = new SolrDocumentList();
     UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
 
-    SearcherInfo searcherInfo =  new SearcherInfo(core);
-    
+    SearcherInfo searcherInfo = new SearcherInfo(core);
+
     // this is initialized & set on the context *after* any searcher (re-)opening
     ResultContext resultContext = null;
     final DocTransformer transformer = rsp.getReturnFields().getTransformer();
 
     // true in any situation where we have to use a realtime searcher rather then returning docs
     // directly from the UpdateLog
     final boolean mustUseRealtimeSearcher =
-      // if we have filters, we need to check those against the indexed form of the doc
-      (rb.getFilters() != null)
-      || ((null != transformer) && transformer.needsSolrIndexSearcher());
-
-   try {
-
-     boolean opennedRealtimeSearcher = false;
-     BytesRefBuilder idBytes = new BytesRefBuilder();
-     for (String idStr : reqIds.allIds) {
-       fieldType.readableToIndexed(idStr, idBytes);
-       // if _route_ is passed, id is a child doc.  TODO remove in SOLR-15064
-       if (!opennedRealtimeSearcher && !params.get(ShardParams._ROUTE_, idStr).equals(idStr)) {
-         searcherInfo.clear();
-         resultContext = null;
-         ulog.openRealtimeSearcher();  // force open a new realtime searcher
-         opennedRealtimeSearcher = true;
-       } else if (ulog != null) {
-         Object o = ulog.lookup(idBytes.get());
-         if (o != null) {
-           // should currently be a List<Oper,Ver,Doc/Id>
-           List<?> entry = (List<?>)o;
-           assert entry.size() >= 3;
-           int oper = (Integer)entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
-           switch (oper) {
-             case UpdateLog.UPDATE_INPLACE: // fall through to ADD
-             case UpdateLog.ADD:
-
-               if (mustUseRealtimeSearcher) {
-                 // close handles to current searchers & result context
-                 if (!opennedRealtimeSearcher) {
-                   searcherInfo.clear();
-                   resultContext = null;
-                   ulog.openRealtimeSearcher();  // force open a new realtime searcher
-                   opennedRealtimeSearcher = true;
-                 }
-                 o = null;  // pretend we never found this record and fall through to use the searcher
-                 break;
-               }
-
-               SolrDocument doc;
-               if (oper == UpdateLog.ADD) {
-                 doc = toSolrDoc((SolrInputDocument)entry.get(entry.size()-1), core.getLatestSchema());
-                 // toSolrDoc filtered copy-field targets already
-                 if (transformer!=null) {
-                   transformer.transform(doc, -1); // unknown docID
-                 }
-               } else if (oper == UpdateLog.UPDATE_INPLACE) {
-                 assert entry.size() == 5;
-                 // For in-place update case, we have obtained the partial document till now. We need to
-                 // resolve it to a full document to be returned to the user.
-                 // resolveFullDocument applies the transformer, if present.
-                 doc = resolveFullDocument(core, idBytes.get(), rsp.getReturnFields(), (SolrInputDocument)entry.get(entry.size()-1), entry);
-                 if (doc == null) {
-                   break; // document has been deleted as the resolve was going on
-                 }
-                 doc.visitSelfAndNestedDocs((label, d) -> removeCopyFieldTargets(d, req.getSchema()));
-               } else {
-                 throw new SolrException(ErrorCode.INVALID_STATE, "Expected ADD or UPDATE_INPLACE. Got: " + oper);
-               }
-
-              docList.add(doc);
-              break;
-             case UpdateLog.DELETE:
-              break;
-             default:
-               throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,  "Unknown Operation! " + oper);
-           }
-           if (o != null) continue;
-         }
-       }
-
-       // didn't find it in the update log, so it should be in the newest searcher opened
-       searcherInfo.init();
-       // don't bother with ResultContext yet, we won't need it if doc doesn't match filters
-
-       int docid = -1;
-       long segAndId = searcherInfo.getSearcher().lookupId(idBytes.get());
-       if (segAndId >= 0) {
-         int segid = (int) segAndId;
-         LeafReaderContext ctx = searcherInfo.getSearcher().getTopReaderContext().leaves().get((int) (segAndId >> 32));
-         docid = segid + ctx.docBase;
-
-         if (rb.getFilters() != null) {
-           for (Query raw : rb.getFilters()) {
-             Query q = raw.rewrite(searcherInfo.getSearcher().getIndexReader());
-             Scorer scorer = searcherInfo.getSearcher().createWeight(q, ScoreMode.COMPLETE_NO_SCORES, 1f).scorer(ctx);
-             if (scorer == null || segid != scorer.iterator().advance(segid)) {
-               // filter doesn't match.
-               docid = -1;
-               break;
-             }
-           }
-         }
-       }
-
-       if (docid < 0) continue;
-       
-       Document luceneDocument = searcherInfo.getSearcher().doc(docid, rsp.getReturnFields().getLuceneFieldNames());
-       SolrDocument doc = toSolrDoc(luceneDocument,  core.getLatestSchema());
-       SolrDocumentFetcher docFetcher = searcherInfo.getSearcher().getDocFetcher();
-       docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(true));
-       if ( null != transformer) {
-         if (null == resultContext) {
-           // either first pass, or we've re-opened searcher - either way now we setContext
-           resultContext = new RTGResultContext(rsp.getReturnFields(), searcherInfo.getSearcher(), req);
-           transformer.setContext(resultContext); // we avoid calling setContext unless searcher is new/changed
-         }
-         transformer.transform(doc, docid);
-       }
-       docList.add(doc);
-     } // loop on ids
-
-   } finally {
-     searcherInfo.clear();
-   }
-
-   addDocListToResponse(rb, docList);
+        // if we have filters, we need to check those against the indexed form of the doc
+        (rb.getFilters() != null)
+            || ((null != transformer) && transformer.needsSolrIndexSearcher());
+
+    try {
+
+      boolean opennedRealtimeSearcher = false;
+      BytesRefBuilder idBytes = new BytesRefBuilder();
+      for (String idStr : reqIds.allIds) {
+        fieldType.readableToIndexed(idStr, idBytes);
+        // if _route_ is passed, id is a child doc.  TODO remove in SOLR-15064
+        if (!opennedRealtimeSearcher && !params.get(ShardParams._ROUTE_, idStr).equals(idStr)) {
+          searcherInfo.clear();
+          resultContext = null;
+          ulog.openRealtimeSearcher(); // force open a new realtime searcher
+          opennedRealtimeSearcher = true;
+        } else if (ulog != null) {
+          Object o = ulog.lookup(idBytes.get());
+          if (o != null) {
+            // should currently be a List<Oper,Ver,Doc/Id>
+            List<?> entry = (List<?>) o;
+            assert entry.size() >= 3;
+            int oper = (Integer) entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
+            switch (oper) {
+              case UpdateLog.UPDATE_INPLACE: // fall through to ADD
+              case UpdateLog.ADD:
+                if (mustUseRealtimeSearcher) {
+                  // close handles to current searchers & result context
+                  if (!opennedRealtimeSearcher) {
+                    searcherInfo.clear();
+                    resultContext = null;
+                    ulog.openRealtimeSearcher(); // force open a new realtime searcher
+                    opennedRealtimeSearcher = true;
+                  }
+                  o = null; // pretend we never found this record and fall through to use the
+                  // searcher

Review comment:
       Fix this: the trailing comment gets split so "// searcher" dangles below the assignment at a misleading indent. Putting the comment on its own line above the statement avoids the bad wrap.
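       For example (sketch only):

                  // pretend we never found this record and fall through to use the searcher
                  o = null;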

##########
File path: solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
##########
@@ -823,255 +841,282 @@ protected void createMainQuery(ResponseBuilder rb) {
 
     rb.addRequest(this, sreq);
   }
-  
+
   protected boolean addFL(StringBuilder fl, String field, boolean additionalAdded) {
     if (additionalAdded) fl.append(",");
     fl.append(field);
     return true;
   }
 
   protected void mergeIds(ResponseBuilder rb, ShardRequest sreq) {
-      List<MergeStrategy> mergeStrategies = rb.getMergeStrategies();
-      if(mergeStrategies != null) {
-        mergeStrategies.sort(MergeStrategy.MERGE_COMP);
-        boolean idsMerged = false;
-        for(MergeStrategy mergeStrategy : mergeStrategies) {
-          mergeStrategy.merge(rb, sreq);
-          if(mergeStrategy.mergesIds()) {
-            idsMerged = true;
-          }
+    List<MergeStrategy> mergeStrategies = rb.getMergeStrategies();
+    if (mergeStrategies != null) {
+      mergeStrategies.sort(MergeStrategy.MERGE_COMP);
+      boolean idsMerged = false;
+      for (MergeStrategy mergeStrategy : mergeStrategies) {
+        mergeStrategy.merge(rb, sreq);
+        if (mergeStrategy.mergesIds()) {
+          idsMerged = true;
         }
+      }
 
-        if(idsMerged) {
-          return; //ids were merged above so return.
-        }
+      if (idsMerged) {
+        return; // ids were merged above so return.
       }
+    }
 
-      SortSpec ss = rb.getSortSpec();
-      Sort sort = ss.getSort();
+    SortSpec ss = rb.getSortSpec();
+    Sort sort = ss.getSort();
 
-      SortField[] sortFields = null;
-      if(sort != null) sortFields = sort.getSort();
-      else {
-        sortFields = new SortField[]{SortField.FIELD_SCORE};
-      }
- 
-      IndexSchema schema = rb.req.getSchema();
-      SchemaField uniqueKeyField = schema.getUniqueKeyField();
+    SortField[] sortFields = null;
+    if (sort != null) sortFields = sort.getSort();
+    else {
+      sortFields = new SortField[] {SortField.FIELD_SCORE};
+    }
 
+    IndexSchema schema = rb.req.getSchema();
+    SchemaField uniqueKeyField = schema.getUniqueKeyField();
 
-      // id to shard mapping, to eliminate any accidental dups
-      HashMap<Object,String> uniqueDoc = new HashMap<>();
+    // id to shard mapping, to eliminate any accidental dups
+    HashMap<Object, String> uniqueDoc = new HashMap<>();
 
-      // Merge the docs via a priority queue so we don't have to sort *all* of the
-      // documents... we only need to order the top (rows+start)
-      final ShardFieldSortedHitQueue queue = new ShardFieldSortedHitQueue(sortFields, ss.getOffset() + ss.getCount(), rb.req.getSearcher());
+    // Merge the docs via a priority queue so we don't have to sort *all* of the
+    // documents... we only need to order the top (rows+start)
+    final ShardFieldSortedHitQueue queue =
+        new ShardFieldSortedHitQueue(
+            sortFields, ss.getOffset() + ss.getCount(), rb.req.getSearcher());
 
-      NamedList<Object> shardInfo = null;
-      if(rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
-        shardInfo = new SimpleOrderedMap<>();
-        rb.rsp.getValues().add(ShardParams.SHARDS_INFO,shardInfo);
-      }
-      
-      long numFound = 0;
-      boolean hitCountIsExact = true;
-      Float maxScore=null;
-      boolean thereArePartialResults = false;
-      Boolean segmentTerminatedEarly = null;
-      for (ShardResponse srsp : sreq.responses) {
-        SolrDocumentList docs = null;
-        NamedList<?> responseHeader = null;
-
-        if(shardInfo!=null) {
-          SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();
-          
-          if (srsp.getException() != null) {
-            Throwable t = srsp.getException();
-            if(t instanceof SolrServerException) {
-              t = ((SolrServerException)t).getCause();
-            }
-            nl.add("error", t.toString() );
-            StringWriter trace = new StringWriter();
-            t.printStackTrace(new PrintWriter(trace));
-            nl.add("trace", trace.toString() );
-            if (srsp.getShardAddress() != null) {
-              nl.add("shardAddress", srsp.getShardAddress());
-            }
+    NamedList<Object> shardInfo = null;
+    if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
+      shardInfo = new SimpleOrderedMap<>();
+      rb.rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo);
+    }
+
+    long numFound = 0;
+    boolean hitCountIsExact = true;
+    Float maxScore = null;
+    boolean thereArePartialResults = false;
+    Boolean segmentTerminatedEarly = null;
+    for (ShardResponse srsp : sreq.responses) {
+      SolrDocumentList docs = null;
+      NamedList<?> responseHeader = null;
+
+      if (shardInfo != null) {
+        SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();
+
+        if (srsp.getException() != null) {
+          Throwable t = srsp.getException();
+          if (t instanceof SolrServerException) {
+            t = ((SolrServerException) t).getCause();
           }
-          else {
-            responseHeader = (NamedList<?>)srsp.getSolrResponse().getResponse().get("responseHeader");
-            final Object rhste = responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
-            if (rhste != null) {
-              nl.add(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY, rhste);
-            }
-            docs = (SolrDocumentList)srsp.getSolrResponse().getResponse().get("response");
-            nl.add("numFound", docs.getNumFound());
-            nl.add("numFoundExact", docs.getNumFoundExact());
-            nl.add("maxScore", docs.getMaxScore());
+          nl.add("error", t.toString());
+          StringWriter trace = new StringWriter();
+          t.printStackTrace(new PrintWriter(trace));
+          nl.add("trace", trace.toString());
+          if (srsp.getShardAddress() != null) {
             nl.add("shardAddress", srsp.getShardAddress());
           }
-          if(srsp.getSolrResponse()!=null) {
-            nl.add("time", srsp.getSolrResponse().getElapsedTime());
+        } else {
+          responseHeader =
+              (NamedList<?>) srsp.getSolrResponse().getResponse().get("responseHeader");
+          final Object rhste =
+              responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
+          if (rhste != null) {
+            nl.add(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY, rhste);
           }
-
-          shardInfo.add(srsp.getShard(), nl);
-        }
-        // now that we've added the shard info, let's only proceed if we have no error.
-        if (srsp.getException() != null) {
-          thereArePartialResults = true;
-          continue;
-        }
-
-        if (docs == null) { // could have been initialized in the shards info block above
-          docs = (SolrDocumentList)srsp.getSolrResponse().getResponse().get("response");
+          docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
+          nl.add("numFound", docs.getNumFound());
+          nl.add("numFoundExact", docs.getNumFoundExact());
+          nl.add("maxScore", docs.getMaxScore());
+          nl.add("shardAddress", srsp.getShardAddress());
         }
-        
-        if (responseHeader == null) { // could have been initialized in the shards info block above
-          responseHeader = (NamedList<?>)srsp.getSolrResponse().getResponse().get("responseHeader");
+        if (srsp.getSolrResponse() != null) {
+          nl.add("time", srsp.getSolrResponse().getElapsedTime());
         }
 
-        final boolean thisResponseIsPartial;
-        thisResponseIsPartial = Boolean.TRUE.equals(responseHeader.getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));
-        thereArePartialResults |= thisResponseIsPartial;
-        
-        if (!Boolean.TRUE.equals(segmentTerminatedEarly)) {
-          final Object ste = responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
-          if (Boolean.TRUE.equals(ste)) {
-            segmentTerminatedEarly = Boolean.TRUE;
-          } else if (Boolean.FALSE.equals(ste)) {
-            segmentTerminatedEarly = Boolean.FALSE;
-          }
-        }
-        
-        // calculate global maxScore and numDocsFound
-        if (docs.getMaxScore() != null) {
-          maxScore = maxScore==null ? docs.getMaxScore() : Math.max(maxScore, docs.getMaxScore());
-        }
-        numFound += docs.getNumFound();
-        
-        if (hitCountIsExact && Boolean.FALSE.equals(docs.getNumFoundExact())) {
-          hitCountIsExact = false;
-        }
+        shardInfo.add(srsp.getShard(), nl);
+      }
+      // now that we've added the shard info, let's only proceed if we have no error.
+      if (srsp.getException() != null) {
+        thereArePartialResults = true;
+        continue;
+      }
 
-        @SuppressWarnings("unchecked")
-        NamedList<List<Object>> sortFieldValues = (NamedList<List<Object>>)(srsp.getSolrResponse().getResponse().get("sort_values"));
-        if (null == sortFieldValues) {
-          sortFieldValues = new NamedList<>();
-        }
+      if (docs == null) { // could have been initialized in the shards info block above
+        docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
+      }
 
-        // if the SortSpec contains a field besides score or the Lucene docid, then the values will need to be unmarshalled from
-        // sortFieldValues.
-        boolean needsUnmarshalling = ss.includesNonScoreOrDocField();
+      if (responseHeader == null) { // could have been initialized in the shards info block above
+        responseHeader = (NamedList<?>) srsp.getSolrResponse().getResponse().get("responseHeader");
+      }
 
-        // if we need to unmarshal the sortFieldValues for sorting but we have none, which can happen if partial results are
-        // being returned from the shard, then skip merging the results for the shard. This avoids an exception below.
-        // if the shard returned partial results but we don't need to unmarshal (a normal scoring query), then merge what we got.
-        if (thisResponseIsPartial && sortFieldValues.size() == 0 && needsUnmarshalling) {
-          continue;
+      final boolean thisResponseIsPartial;
+      thisResponseIsPartial =
+          Boolean.TRUE.equals(
+              responseHeader.getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));
+      thereArePartialResults |= thisResponseIsPartial;
+
+      if (!Boolean.TRUE.equals(segmentTerminatedEarly)) {
+        final Object ste =
+            responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_SEGMENT_TERMINATED_EARLY_KEY);
+        if (Boolean.TRUE.equals(ste)) {
+          segmentTerminatedEarly = Boolean.TRUE;
+        } else if (Boolean.FALSE.equals(ste)) {
+          segmentTerminatedEarly = Boolean.FALSE;
         }
+      }
 
-        // Checking needsUnmarshalling saves on iterating the SortFields in the SortSpec again.
-        NamedList<List<Object>> unmarshalledSortFieldValues = needsUnmarshalling ? unmarshalSortValues(ss, sortFieldValues, schema) : new NamedList<>();
-
-        // go through every doc in this response, construct a ShardDoc, and
-        // put it in the priority queue so it can be ordered.
-        for (int i=0; i<docs.size(); i++) {
-          SolrDocument doc = docs.get(i);
-          Object id = doc.getFieldValue(uniqueKeyField.getName());
-
-          String prevShard = uniqueDoc.put(id, srsp.getShard());
-          if (prevShard != null) {
-            // duplicate detected
-            numFound--;
-
-            // For now, just always use the first encountered since we can't currently
-            // remove the previous one added to the priority queue.  If we switched
-            // to the Java5 PriorityQueue, this would be easier.
-            continue;
-            // make which duplicate is used deterministic based on shard
-            // if (prevShard.compareTo(srsp.shard) >= 0) {
-            //  TODO: remove previous from priority queue
-            //  continue;
-            // }
-          }
+      // calculate global maxScore and numDocsFound
+      if (docs.getMaxScore() != null) {
+        maxScore = maxScore == null ? docs.getMaxScore() : Math.max(maxScore, docs.getMaxScore());
+      }
+      numFound += docs.getNumFound();
 
-          ShardDoc shardDoc = new ShardDoc();
-          shardDoc.id = id;
-          shardDoc.shard = srsp.getShard();
-          shardDoc.orderInShard = i;
-          Object scoreObj = doc.getFieldValue("score");
-          if (scoreObj != null) {
-            if (scoreObj instanceof String) {
-              shardDoc.score = Float.parseFloat((String)scoreObj);
-            } else {
-              shardDoc.score = (Float)scoreObj;
-            }
-          }
+      if (hitCountIsExact && Boolean.FALSE.equals(docs.getNumFoundExact())) {
+        hitCountIsExact = false;
+      }
 
-          shardDoc.sortFieldValues = unmarshalledSortFieldValues;
-
-          queue.insertWithOverflow(shardDoc);
-        } // end for-each-doc-in-response
-      } // end for-each-response
-      
-      // The queue now has 0 -> queuesize docs, where queuesize <= start + rows
-      // So we want to pop the last documents off the queue to get
-      // the docs offset -> queuesize
-      int resultSize = queue.size() - ss.getOffset();
-      resultSize = Math.max(0, resultSize);  // there may not be any docs in range
-
-      Map<Object,ShardDoc> resultIds = new HashMap<>();
-      for (int i=resultSize-1; i>=0; i--) {
-        ShardDoc shardDoc = queue.pop();
-        shardDoc.positionInResponse = i;
-        // Need the toString() for correlation with other lists that must
-        // be strings (like keys in highlighting, explain, etc)
-        resultIds.put(shardDoc.id.toString(), shardDoc);
+      @SuppressWarnings("unchecked")
+      NamedList<List<Object>> sortFieldValues =
+          (NamedList<List<Object>>) (srsp.getSolrResponse().getResponse().get("sort_values"));
+      if (null == sortFieldValues) {
+        sortFieldValues = new NamedList<>();
       }
 
-      // Add hits for distributed requests
-      // https://issues.apache.org/jira/browse/SOLR-3518
-      rb.rsp.addToLog("hits", numFound);
-
-      SolrDocumentList responseDocs = new SolrDocumentList();
-      if (maxScore!=null) responseDocs.setMaxScore(maxScore);
-      responseDocs.setNumFound(numFound);
-      responseDocs.setNumFoundExact(hitCountIsExact);
-      responseDocs.setStart(ss.getOffset());
-      // size appropriately
-      for (int i=0; i<resultSize; i++) responseDocs.add(null);
-
-      // save these results in a private area so we can access them
-      // again when retrieving stored fields.
-      // TODO: use ResponseBuilder (w/ comments) or the request context?
-      rb.resultIds = resultIds;
-      rb.setResponseDocs(responseDocs);
-
-      populateNextCursorMarkFromMergedShards(rb);
-
-      if (thereArePartialResults) {
-         rb.rsp.getResponseHeader().asShallowMap()
-                   .put(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
+      // if the SortSpec contains a field besides score or the Lucene docid, then the values will
+      // need to be unmarshalled from
+      // sortFieldValues.
+      boolean needsUnmarshalling = ss.includesNonScoreOrDocField();
+
+      // if we need to unmarshal the sortFieldValues for sorting but we have none, which can happen
+      // if partial results are
+      // being returned from the shard, then skip merging the results for the shard. This avoids an
+      // exception below.
+      // if the shard returned partial results but we don't need to unmarshal (a normal scoring
+      // query), then merge what we got.

Review comment:
       Fix this: the reflow keeps the old line breaks, leaving fragments like "// if partial results are", "// exception below." and "// query), then merge what we got.". Rejoin the sentences and rewrap the block.
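       One possible rewrap (suggestion only):

      // if we need to unmarshal the sortFieldValues for sorting but we have none, which can
      // happen if partial results are being returned from the shard, then skip merging the
      // results for the shard. This avoids an exception below. If the shard returned partial
      // results but we don't need to unmarshal (a normal scoring query), then merge what we got.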

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
##########
@@ -227,138 +228,158 @@ public void process(ResponseBuilder rb) throws IOException
     SolrDocumentList docList = new SolrDocumentList();
     UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
 
-    SearcherInfo searcherInfo =  new SearcherInfo(core);
-    
+    SearcherInfo searcherInfo = new SearcherInfo(core);
+
     // this is initialized & set on the context *after* any searcher (re-)opening
     ResultContext resultContext = null;
     final DocTransformer transformer = rsp.getReturnFields().getTransformer();
 
     // true in any situation where we have to use a realtime searcher rather then returning docs
     // directly from the UpdateLog
     final boolean mustUseRealtimeSearcher =
-      // if we have filters, we need to check those against the indexed form of the doc
-      (rb.getFilters() != null)
-      || ((null != transformer) && transformer.needsSolrIndexSearcher());
-
-   try {
-
-     boolean opennedRealtimeSearcher = false;
-     BytesRefBuilder idBytes = new BytesRefBuilder();
-     for (String idStr : reqIds.allIds) {
-       fieldType.readableToIndexed(idStr, idBytes);
-       // if _route_ is passed, id is a child doc.  TODO remove in SOLR-15064
-       if (!opennedRealtimeSearcher && !params.get(ShardParams._ROUTE_, idStr).equals(idStr)) {
-         searcherInfo.clear();
-         resultContext = null;
-         ulog.openRealtimeSearcher();  // force open a new realtime searcher
-         opennedRealtimeSearcher = true;
-       } else if (ulog != null) {
-         Object o = ulog.lookup(idBytes.get());
-         if (o != null) {
-           // should currently be a List<Oper,Ver,Doc/Id>
-           List<?> entry = (List<?>)o;
-           assert entry.size() >= 3;
-           int oper = (Integer)entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
-           switch (oper) {
-             case UpdateLog.UPDATE_INPLACE: // fall through to ADD
-             case UpdateLog.ADD:
-
-               if (mustUseRealtimeSearcher) {
-                 // close handles to current searchers & result context
-                 if (!opennedRealtimeSearcher) {
-                   searcherInfo.clear();
-                   resultContext = null;
-                   ulog.openRealtimeSearcher();  // force open a new realtime searcher
-                   opennedRealtimeSearcher = true;
-                 }
-                 o = null;  // pretend we never found this record and fall through to use the searcher
-                 break;
-               }
-
-               SolrDocument doc;
-               if (oper == UpdateLog.ADD) {
-                 doc = toSolrDoc((SolrInputDocument)entry.get(entry.size()-1), core.getLatestSchema());
-                 // toSolrDoc filtered copy-field targets already
-                 if (transformer!=null) {
-                   transformer.transform(doc, -1); // unknown docID
-                 }
-               } else if (oper == UpdateLog.UPDATE_INPLACE) {
-                 assert entry.size() == 5;
-                 // For in-place update case, we have obtained the partial document till now. We need to
-                 // resolve it to a full document to be returned to the user.
-                 // resolveFullDocument applies the transformer, if present.
-                 doc = resolveFullDocument(core, idBytes.get(), rsp.getReturnFields(), (SolrInputDocument)entry.get(entry.size()-1), entry);
-                 if (doc == null) {
-                   break; // document has been deleted as the resolve was going on
-                 }
-                 doc.visitSelfAndNestedDocs((label, d) -> removeCopyFieldTargets(d, req.getSchema()));
-               } else {
-                 throw new SolrException(ErrorCode.INVALID_STATE, "Expected ADD or UPDATE_INPLACE. Got: " + oper);
-               }
-
-              docList.add(doc);
-              break;
-             case UpdateLog.DELETE:
-              break;
-             default:
-               throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,  "Unknown Operation! " + oper);
-           }
-           if (o != null) continue;
-         }
-       }
-
-       // didn't find it in the update log, so it should be in the newest searcher opened
-       searcherInfo.init();
-       // don't bother with ResultContext yet, we won't need it if doc doesn't match filters
-
-       int docid = -1;
-       long segAndId = searcherInfo.getSearcher().lookupId(idBytes.get());
-       if (segAndId >= 0) {
-         int segid = (int) segAndId;
-         LeafReaderContext ctx = searcherInfo.getSearcher().getTopReaderContext().leaves().get((int) (segAndId >> 32));
-         docid = segid + ctx.docBase;
-
-         if (rb.getFilters() != null) {
-           for (Query raw : rb.getFilters()) {
-             Query q = raw.rewrite(searcherInfo.getSearcher().getIndexReader());
-             Scorer scorer = searcherInfo.getSearcher().createWeight(q, ScoreMode.COMPLETE_NO_SCORES, 1f).scorer(ctx);
-             if (scorer == null || segid != scorer.iterator().advance(segid)) {
-               // filter doesn't match.
-               docid = -1;
-               break;
-             }
-           }
-         }
-       }
-
-       if (docid < 0) continue;
-       
-       Document luceneDocument = searcherInfo.getSearcher().doc(docid, rsp.getReturnFields().getLuceneFieldNames());
-       SolrDocument doc = toSolrDoc(luceneDocument,  core.getLatestSchema());
-       SolrDocumentFetcher docFetcher = searcherInfo.getSearcher().getDocFetcher();
-       docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(true));
-       if ( null != transformer) {
-         if (null == resultContext) {
-           // either first pass, or we've re-opened searcher - either way now we setContext
-           resultContext = new RTGResultContext(rsp.getReturnFields(), searcherInfo.getSearcher(), req);
-           transformer.setContext(resultContext); // we avoid calling setContext unless searcher is new/changed
-         }
-         transformer.transform(doc, docid);
-       }
-       docList.add(doc);
-     } // loop on ids
-
-   } finally {
-     searcherInfo.clear();
-   }
-
-   addDocListToResponse(rb, docList);
+        // if we have filters, we need to check those against the indexed form of the doc
+        (rb.getFilters() != null)
+            || ((null != transformer) && transformer.needsSolrIndexSearcher());
+
+    try {
+
+      boolean opennedRealtimeSearcher = false;
+      BytesRefBuilder idBytes = new BytesRefBuilder();
+      for (String idStr : reqIds.allIds) {
+        fieldType.readableToIndexed(idStr, idBytes);
+        // if _route_ is passed, id is a child doc.  TODO remove in SOLR-15064
+        if (!opennedRealtimeSearcher && !params.get(ShardParams._ROUTE_, idStr).equals(idStr)) {
+          searcherInfo.clear();
+          resultContext = null;
+          ulog.openRealtimeSearcher(); // force open a new realtime searcher
+          opennedRealtimeSearcher = true;
+        } else if (ulog != null) {
+          Object o = ulog.lookup(idBytes.get());
+          if (o != null) {
+            // should currently be a List<Oper,Ver,Doc/Id>
+            List<?> entry = (List<?>) o;
+            assert entry.size() >= 3;
+            int oper = (Integer) entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
+            switch (oper) {
+              case UpdateLog.UPDATE_INPLACE: // fall through to ADD
+              case UpdateLog.ADD:
+                if (mustUseRealtimeSearcher) {
+                  // close handles to current searchers & result context
+                  if (!opennedRealtimeSearcher) {
+                    searcherInfo.clear();
+                    resultContext = null;
+                    ulog.openRealtimeSearcher(); // force open a new realtime searcher
+                    opennedRealtimeSearcher = true;
+                  }
+                  o = null; // pretend we never found this record and fall through to use the
+                  // searcher
+                  break;
+                }
+
+                SolrDocument doc;
+                if (oper == UpdateLog.ADD) {
+                  doc =
+                      toSolrDoc(
+                          (SolrInputDocument) entry.get(entry.size() - 1), core.getLatestSchema());
+                  // toSolrDoc filtered copy-field targets already
+                  if (transformer != null) {
+                    transformer.transform(doc, -1); // unknown docID
+                  }
+                } else if (oper == UpdateLog.UPDATE_INPLACE) {
+                  assert entry.size() == 5;
+                  // For in-place update case, we have obtained the partial document till now. We
+                  // need to
+                  // resolve it to a full document to be returned to the user.
+                  // resolveFullDocument applies the transformer, if present.

Review comment:
       Fix this: "// need to" ends up alone on a line. Rejoin the first sentence and rewrap the comment block.
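       e.g. (suggestion only):

                  // For in-place update case, we have obtained the partial document till now.
                  // We need to resolve it to a full document to be returned to the user.
                  // resolveFullDocument applies the transformer, if present.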

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
##########
@@ -632,35 +694,43 @@ private static SolrInputDocument getInputDocumentFromTlog(
     return iDoc;
   }
 
-  /** returns the SolrInputDocument from the current tlog, or DELETED if it has been deleted, or
-   * null if there is no record of it in the current update log.  If null is returned, it could
-   * still be in the latest index.  Copy-field target fields are excluded.
+  /**
+   * returns the SolrInputDocument from the current tlog, or DELETED if it has been deleted, or null
+   * if there is no record of it in the current update log. If null is returned, it could still be
+   * in the latest index. Copy-field target fields are excluded.
+   *
    * @param idBytes doc ID to find; never a child doc.
-   * @param versionReturned If a non-null AtomicLong is passed in, it is set to the version of the update returned from the TLog.
+   * @param versionReturned If a non-null AtomicLong is passed in, it is set to the version of the
+   *     update returned from the TLog.
    */
   @SuppressWarnings({"fallthrough"})
-  public static SolrInputDocument getInputDocumentFromTlog(SolrCore core, BytesRef idBytes, AtomicLong versionReturned,
-      Set<String> onlyTheseFields, Resolution resolution) {
+  public static SolrInputDocument getInputDocumentFromTlog(
+      SolrCore core,
+      BytesRef idBytes,
+      AtomicLong versionReturned,
+      Set<String> onlyTheseFields,
+      Resolution resolution) {
     UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
 
     if (ulog != null) {
       Object o = ulog.lookup(idBytes);
       if (o != null) {
         // should currently be a List<Oper,Ver,Doc/Id>
-        List<?> entry = (List<?>)o;
+        List<?> entry = (List<?>) o;
         assert entry.size() >= 3;
-        int oper = (Integer)entry.get(0) & UpdateLog.OPERATION_MASK;
+        int oper = (Integer) entry.get(0) & UpdateLog.OPERATION_MASK;
         if (versionReturned != null) {
-          versionReturned.set((long)entry.get(UpdateLog.VERSION_IDX));
+          versionReturned.set((long) entry.get(UpdateLog.VERSION_IDX));
         }
         switch (oper) {
           case UpdateLog.UPDATE_INPLACE:
             assert entry.size() == 5;
-            
+
             if (resolution != Resolution.PARTIAL) {
-              SolrInputDocument doc = (SolrInputDocument)entry.get(entry.size()-1);
+              SolrInputDocument doc = (SolrInputDocument) entry.get(entry.size() - 1);
               try {
-                // For in-place update case, we have obtained the partial document till now. We need to
+                // For in-place update case, we have obtained the partial document till now. We need
+                // to
                 // resolve it to a full document to be returned to the user.

Review comment:
       Fix this: "// to" is stranded on its own line while the next (unchanged) line keeps the old wrap. Reflow the whole comment.
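       e.g. (suggestion only):

                // For in-place update case, we have obtained the partial document till now.
                // We need to resolve it to a full document to be returned to the user.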

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
##########
@@ -450,7 +483,8 @@ private static SolrDocument resolveFullDocument(SolrCore core, BytesRef idBytes,
     if (lastPrevPointer == -1) { // full document was not found in tlog, but exists in index
       return mergePartialDocWithFullDocFromIndex(core, idBytes, returnFields, partialDoc);
     } else if (lastPrevPointer > 0) {
-      // We were supposed to have found the last full doc also in the tlogs, but the prevPointer links led to nowhere
+      // We were supposed to have found the last full doc also in the tlogs, but the prevPointer
+      // links led to nowhere
       // We should reopen a new RT searcher and get the doc. This should be a rare occurrence

Review comment:
       Fix this: `// links led to nowhere` is left as an awkward stub line; reflow the whole comment so the sentences break at natural points.
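       Something like this would read better (sketch only, same text, just rebroken):

           // We were supposed to have found the last full doc also in the tlogs,
           // but the prevPointer links led to nowhere.
           // We should reopen a new RT searcher and get the doc. This should be a rare occurrence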

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
##########
@@ -775,59 +857,65 @@ private static boolean hasRootTerm(SolrIndexSearcher searcher, BytesRef rootIdBy
   }
 
   /** Traverse the doc looking for a doc with the specified ID. */
-  private static SolrInputDocument findNestedDocById(SolrInputDocument iDoc, BytesRef idBytes, IndexSchema schema) {
+  private static SolrInputDocument findNestedDocById(
+      SolrInputDocument iDoc, BytesRef idBytes, IndexSchema schema) {
     assert schema.printableUniqueKey(iDoc) != null : "need IDs";
     // traverse nested doc, looking for the node with the ID we are looking for
     SolrInputDocument[] found = new SolrInputDocument[1];
     String idStr = schema.printableUniqueKey(idBytes);
-    BiConsumer<String, SolrInputDocument> finder = (label, childDoc) -> {
-      if (found[0] == null && idStr.equals(schema.printableUniqueKey(childDoc))) {
-        found[0] = childDoc;
-      }
-    };
+    BiConsumer<String, SolrInputDocument> finder =
+        (label, childDoc) -> {
+          if (found[0] == null && idStr.equals(schema.printableUniqueKey(childDoc))) {
+            found[0] = childDoc;
+          }
+        };
     iDoc.visitSelfAndNestedDocs(finder);
     return found[0];
   }
 
-  private static SolrReturnFields makeReturnFields(SolrCore core, Set<String> requestedFields, Resolution resolution) {
+  private static SolrReturnFields makeReturnFields(
+      SolrCore core, Set<String> requestedFields, Resolution resolution) {
     DocTransformer docTransformer;
-    if (resolution == Resolution.ROOT_WITH_CHILDREN && core.getLatestSchema().isUsableForChildDocs()) {
+    if (resolution == Resolution.ROOT_WITH_CHILDREN
+        && core.getLatestSchema().isUsableForChildDocs()) {
       SolrParams params = new ModifiableSolrParams().set("limit", "-1");
       try (LocalSolrQueryRequest req = new LocalSolrQueryRequest(core, params)) {
         docTransformer = core.getTransformerFactory("child").create(null, params, req);
       }
     } else {
       docTransformer = null;
     }
-    // TODO optimization: add feature to SolrReturnFields to exclude copyFieldTargets from wildcard matching.
+    // TODO optimization: add feature to SolrReturnFields to exclude copyFieldTargets from wildcard
+    // matching.
     //   Today, we filter this data out later before returning, but it's already been fetched.

Review comment:
       Fix this: the TODO is now split with `// matching.` stranded on its own line; rewrap it manually.
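       One way to rewrap it (sketch only, original wording kept):

           // TODO optimization: add feature to SolrReturnFields to exclude copyFieldTargets
           //   from wildcard matching.
           //   Today, we filter this data out later before returning, but it's already been fetched.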

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
##########
@@ -881,7 +970,8 @@ private static IndexableField materialize(IndexableField in) {
     return new ClonedField(in);
   }
 
-  private static class ClonedField extends Field { // TODO Lucene Field has no copy constructor; maybe it should?
+  private static class ClonedField
+      extends Field { // TODO Lucene Field has no copy constructor; maybe it should?

Review comment:
       Fix this: splitting the class declaration across two lines just to keep the trailing TODO reads badly; move the TODO onto its own line above the declaration.
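       For example (sketch only, same TODO text hoisted above the declaration):

           // TODO Lucene Field has no copy constructor; maybe it should?
           private static class ClonedField extends Field {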

##########
File path: solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
##########
@@ -537,18 +580,28 @@ private static SolrDocument mergePartialDocWithFullDocFromIndex(SolrCore core, B
 
       SolrDocument doc = fetchSolrDoc(searcher, docid, returnFields);
       if (!doc.containsKey(VERSION_FIELD)) {
-        searcher.getDocFetcher().decorateDocValueFields(doc, docid, Collections.singleton(VERSION_FIELD));
+        searcher
+            .getDocFetcher()
+            .decorateDocValueFields(doc, docid, Collections.singleton(VERSION_FIELD));
       }
 
       long docVersion = (long) doc.getFirstValue(VERSION_FIELD);
       Object partialVersionObj = partialDoc.getFieldValue(VERSION_FIELD);
-      long partialDocVersion = partialVersionObj instanceof Field? ((Field) partialVersionObj).numericValue().longValue():
-        partialVersionObj instanceof Number? ((Number) partialVersionObj).longValue(): Long.parseLong(partialVersionObj.toString());
+      long partialDocVersion =
+          partialVersionObj instanceof Field
+              ? ((Field) partialVersionObj).numericValue().longValue()
+              : partialVersionObj instanceof Number
+                  ? ((Number) partialVersionObj).longValue()
+                  : Long.parseLong(partialVersionObj.toString());
       if (docVersion > partialDocVersion) {
         return doc;
       }
-      for (String fieldName: partialDoc.getFieldNames()) {
-        doc.setField(fieldName.toString(), partialDoc.getFieldValue(fieldName));  // since partial doc will only contain single valued fields, this is fine
+      for (String fieldName : partialDoc.getFieldNames()) {
+        doc.setField(
+            fieldName.toString(),
+            partialDoc.getFieldValue(
+                fieldName)); // since partial doc will only contain single valued fields, this is
+        // fine

Review comment:
       Fix this: the trailing comment got chopped into `this is` / `// fine` at mismatched indents; move it onto its own line above the setField call.
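       For example (sketch only, comment hoisted so the call fits on one line):

           // since partial doc will only contain single valued fields, this is fine
           doc.setField(fieldName.toString(), partialDoc.getFieldValue(fieldName));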

##########
File path: solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
##########
@@ -246,20 +248,24 @@ public ShardHandler getAndPrepShardHandler(SolrQueryRequest req, ResponseBuilder
       final String shards = req.getParams().get(ShardParams.SHARDS);
       rb.isDistrib = ((shards != null) && (shards.indexOf('/') > 0));
     }
-    
+
     if (rb.isDistrib) {
       shardHandler = shardHandlerFactory.getShardHandler();
       shardHandler.prepDistributed(rb);
       if (!rb.isDistrib) {
-        shardHandler = null; // request is not distributed after all and so the shard handler is not needed
+        shardHandler =
+            null; // request is not distributed after all and so the shard handler is not needed

Review comment:
       Fix this: wrapping `shardHandler = null;` across two lines just to keep the trailing comment is awkward; put the comment on its own line above the assignment.
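       For example (sketch only, same comment text moved above):

           // request is not distributed after all and so the shard handler is not needed
           shardHandler = null;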

##########
File path: solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
##########
@@ -317,32 +324,38 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
       if (trippedCircuitBreakers != null) {
         String errorMessage = CircuitBreakerManager.toErrorMessage(trippedCircuitBreakers);
         rsp.add(STATUS, FAILURE);
-        rsp.setException(new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Circuit Breakers tripped " + errorMessage));
+        rsp.setException(
+            new SolrException(
+                SolrException.ErrorCode.SERVICE_UNAVAILABLE,
+                "Circuit Breakers tripped " + errorMessage));
         return;
       }
     }
 
-    final ShardHandler shardHandler1 = getAndPrepShardHandler(req, rb); // creates a ShardHandler object only if it's needed
+    final ShardHandler shardHandler1 =
+        getAndPrepShardHandler(req, rb); // creates a ShardHandler object only if it's needed

Review comment:
       Fix this: same pattern; move `// creates a ShardHandler object only if it's needed` onto its own line above the declaration so the assignment stays on one line.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
##########
@@ -416,32 +430,34 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
         nextStage = ResponseBuilder.STAGE_DONE;
 
         // call all components
-        for( SearchComponent c : components ) {
+        for (SearchComponent c : components) {
           // the next stage is the minimum of what all components report
           nextStage = Math.min(nextStage, c.distributedProcess(rb));
         }
 
-
         // check the outgoing queue and send requests
         while (rb.outgoing.size() > 0) {
 
           // submit all current request tasks at once
           while (rb.outgoing.size() > 0) {
             ShardRequest sreq = rb.outgoing.remove(0);
             sreq.actualShards = sreq.shards;
-            if (sreq.actualShards==ShardRequest.ALL_SHARDS) {
+            if (sreq.actualShards == ShardRequest.ALL_SHARDS) {
               sreq.actualShards = rb.shards;
             }
-            sreq.responses = new ArrayList<>(sreq.actualShards.length); // presume we'll get a response from each shard we send to
+            sreq.responses =
+                new ArrayList<>(
+                    sreq.actualShards
+                        .length); // presume we'll get a response from each shard we send to

Review comment:
       Fix this: the trailing comment forces `sreq.actualShards.length` to be broken across three lines; move `// presume we'll get a response from each shard we send to` onto its own line above the assignment.

##########
File path: solr/core/src/java/org/apache/solr/handler/component/SpatialHeatmapFacets.java
##########
@@ -83,32 +99,43 @@ private static FacetRequest createHeatmapRequest(String fieldKey, String fieldNa
     return heatmapFacets;
   }
 
-  /** Called by FacetComponent's impl of
-   * {@link org.apache.solr.handler.component.SearchComponent#modifyRequest(ResponseBuilder, SearchComponent, ShardRequest)}. */
-  public static void distribModifyRequest(ShardRequest sreq, LinkedHashMap<String, HeatmapFacet> heatmapFacets) {
-    // Set the format to PNG because it's compressed and it's the only format we have code to read at the moment.
-    // We re-write the facet.heatmap list with PNG format in local-params where it has highest precedence.
-
-    //Remove existing heatmap field param vals; we will rewrite
+  /**
+   * Called by FacetComponent's impl of {@link
+   * org.apache.solr.handler.component.SearchComponent#modifyRequest(ResponseBuilder,
+   * SearchComponent, ShardRequest)}.
+   */
+  public static void distribModifyRequest(
+      ShardRequest sreq, LinkedHashMap<String, HeatmapFacet> heatmapFacets) {
+    // Set the format to PNG because it's compressed and it's the only format we have code to read
+    // at the moment.
+    // We re-write the facet.heatmap list with PNG format in local-params where it has highest
+    // precedence.

Review comment:
       Fix this: the reflow leaves `// at the moment.` and `// precedence.` as short stub lines; rewrap the comment by hand.
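       A possible rewrap (sketch only, same wording):

           // Set the format to PNG because it's compressed and it's the only format we have code to
           // read at the moment. We re-write the facet.heatmap list with PNG format in local-params
           // where it has highest precedence.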

##########
File path: solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
##########
@@ -468,117 +484,128 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
             }
           }
 
-
           // now wait for replies, but if anyone puts more requests on
           // the outgoing queue, send them out immediately (by exiting
           // this loop)
           boolean tolerant = ShardParams.getShardsTolerantAsBool(rb.req.getParams());
           while (rb.outgoing.size() == 0) {
-            ShardResponse srsp = tolerant ? 
-                shardHandler1.takeCompletedIncludingErrors():
-                shardHandler1.takeCompletedOrError();
-            if (srsp == null) break;  // no more requests to wait for
+            ShardResponse srsp =
+                tolerant
+                    ? shardHandler1.takeCompletedIncludingErrors()
+                    : shardHandler1.takeCompletedOrError();
+            if (srsp == null) break; // no more requests to wait for
 
-            // Was there an exception?  
+            // Was there an exception?
             if (srsp.getException() != null) {
               // If things are not tolerant, abort everything and rethrow
-              if(!tolerant) {
+              if (!tolerant) {
                 shardHandler1.cancelAll();
                 if (srsp.getException() instanceof SolrException) {
-                  throw (SolrException)srsp.getException();
+                  throw (SolrException) srsp.getException();
                 } else {
-                  throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, srsp.getException());
+                  throw new SolrException(
+                      SolrException.ErrorCode.SERVER_ERROR, srsp.getException());
                 }
               } else {
-                rsp.getResponseHeader().asShallowMap().put(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
+                rsp.getResponseHeader()
+                    .asShallowMap()
+                    .put(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
               }
             }
 
             rb.finished.add(srsp.getShardRequest());
 
             // let the components see the responses to the request
-            for(SearchComponent c : components) {
+            for (SearchComponent c : components) {
               c.handleResponses(rb, srsp.getShardRequest());
             }
           }
         }
 
-        for(SearchComponent c : components) {
+        for (SearchComponent c : components) {
           c.finishStage(rb);
         }
 
         // we are done when the next stage is MAX_VALUE
       } while (nextStage != Integer.MAX_VALUE);
     }
-    
+
     // SOLR-5550: still provide shards.info if requested even for a short circuited distrib request
-    if(!rb.isDistrib && req.getParams().getBool(ShardParams.SHARDS_INFO, false) && rb.shortCircuitedURL != null) {  
+    if (!rb.isDistrib
+        && req.getParams().getBool(ShardParams.SHARDS_INFO, false)
+        && rb.shortCircuitedURL != null) {
       NamedList<Object> shardInfo = new SimpleOrderedMap<Object>();
-      SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();        
+      SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();
       if (rsp.getException() != null) {
         Throwable cause = rsp.getException();
         if (cause instanceof SolrServerException) {
-          cause = ((SolrServerException)cause).getRootCause();
+          cause = ((SolrServerException) cause).getRootCause();
         } else {
           if (cause.getCause() != null) {
             cause = cause.getCause();
-          }          
+          }
         }
-        nl.add("error", cause.toString() );
+        nl.add("error", cause.toString());
         StringWriter trace = new StringWriter();
         cause.printStackTrace(new PrintWriter(trace));
-        nl.add("trace", trace.toString() );
+        nl.add("trace", trace.toString());
       } else if (rb.getResults() != null) {
         nl.add("numFound", rb.getResults().docList.matches());
-        nl.add("numFoundExact", rb.getResults().docList.hitCountRelation() == TotalHits.Relation.EQUAL_TO);
+        nl.add(
+            "numFoundExact",
+            rb.getResults().docList.hitCountRelation() == TotalHits.Relation.EQUAL_TO);
         nl.add("maxScore", rb.getResults().docList.maxScore());
       }
       nl.add("shardAddress", rb.shortCircuitedURL);
       nl.add("time", req.getRequestTimer().getTime()); // elapsed time of this request so far
-      
-      int pos = rb.shortCircuitedURL.indexOf("://");        
-      String shardInfoName = pos != -1 ? rb.shortCircuitedURL.substring(pos+3) : rb.shortCircuitedURL;
-      shardInfo.add(shardInfoName, nl);   
-      rsp.getValues().add(ShardParams.SHARDS_INFO,shardInfo);            
+
+      int pos = rb.shortCircuitedURL.indexOf("://");
+      String shardInfoName =
+          pos != -1 ? rb.shortCircuitedURL.substring(pos + 3) : rb.shortCircuitedURL;
+      shardInfo.add(shardInfoName, nl);
+      rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo);
     }
   }
 
   private void tagRequestWithRequestId(ResponseBuilder rb) {
-    final boolean ridTaggingDisabled = rb.req.getParams().getBool(CommonParams.DISABLE_REQUEST_ID, false);
-    if (! ridTaggingDisabled) {
+    final boolean ridTaggingDisabled =
+        rb.req.getParams().getBool(CommonParams.DISABLE_REQUEST_ID, false);
+    if (!ridTaggingDisabled) {
       String rid = getOrGenerateRequestId(rb.req);
 
       // NOTE: SearchHandler explicitly never clears/removes this MDC value...
-      // We want it to live for the entire request, beyond the scope of SearchHandler's processing, and trust
+      // We want it to live for the entire request, beyond the scope of SearchHandler's processing,
+      // and trust
       // SolrDispatchFilter to clean it up at the end of the request.

Review comment:
       Fix this: `// and trust` is stranded mid-sentence; rejoin it with the following line.
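       For example (sketch only, original text rejoined):

           // We want it to live for the entire request, beyond the scope of SearchHandler's
           // processing, and trust SolrDispatchFilter to clean it up at the end of the request.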

##########
File path: solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java
##########
@@ -317,32 +324,38 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw
       if (trippedCircuitBreakers != null) {
         String errorMessage = CircuitBreakerManager.toErrorMessage(trippedCircuitBreakers);
         rsp.add(STATUS, FAILURE);
-        rsp.setException(new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Circuit Breakers tripped " + errorMessage));
+        rsp.setException(
+            new SolrException(
+                SolrException.ErrorCode.SERVICE_UNAVAILABLE,
+                "Circuit Breakers tripped " + errorMessage));
         return;
       }
     }
 
-    final ShardHandler shardHandler1 = getAndPrepShardHandler(req, rb); // creates a ShardHandler object only if it's needed
+    final ShardHandler shardHandler1 =
+        getAndPrepShardHandler(req, rb); // creates a ShardHandler object only if it's needed
 
     if (timer == null) {
       // non-debugging prepare phase
-      for( SearchComponent c : components ) {
+      for (SearchComponent c : components) {
         c.prepare(rb);
       }
     } else {
       // debugging prepare phase
-      RTimerTree subt = timer.sub( "prepare" );
-      for( SearchComponent c : components ) {
-        rb.setTimer(subt.sub( c.getName() ) );
+      RTimerTree subt = timer.sub("prepare");
+      for (SearchComponent c : components) {
+        rb.setTimer(subt.sub(c.getName()));
         c.prepare(rb);
         rb.getTimer().stop();
       }
       subt.stop();
     }
 
     { // Once all of our components have been prepared, check if this request involves a SortSpec.
-      // If it does, and if our request includes a cursorMark param, then parse & init the CursorMark state
-      // (This must happen after the prepare() of all components, because any component may have modified the SortSpec)
+      // If it does, and if our request includes a cursorMark param, then parse & init the
+      // CursorMark state
+      // (This must happen after the prepare() of all components, because any component may have
+      // modified the SortSpec)

Review comment:
       Fix this: `// CursorMark state` and `// modified the SortSpec)` are left dangling; rewrap the two sentences manually.
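       One possible rewrap (sketch only):

           // If it does, and if our request includes a cursorMark param, then parse & init the
           // CursorMark state. (This must happen after the prepare() of all components, because
           // any component may have modified the SortSpec)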

##########
File path: solr/core/src/java/org/apache/solr/handler/component/SuggestComponent.java
##########
@@ -473,24 +498,32 @@ private SuggesterResult toSuggesterResult(SimpleOrderedMap<SimpleOrderedMap<Name
     }
     return result;
   }
-  
+
   /** Listener to build or reload the maintained {@link SolrSuggester} by this component */
   private static class SuggesterListener implements SolrEventListener {
     private final SolrCore core;
     private final SolrSuggester suggester;
     private final boolean buildOnCommit;
     private final boolean buildOnOptimize;
     private final boolean buildOnStartup;
-    
-    // On core reload, immediately after the core is created a new searcher is opened, causing the suggester
-    // to trigger a "buildOnCommit". The only event that we want to trigger in that situation is "buildOnStartup"
-    // so if buildOnCommit is true and this is a core being reloaded, we will skip the first time this listener 
-    // is called. 
+
+    // On core reload, immediately after the core is created a new searcher is opened, causing the
+    // suggester
+    // to trigger a "buildOnCommit". The only event that we want to trigger in that situation is
+    // "buildOnStartup"
+    // so if buildOnCommit is true and this is a core being reloaded, we will skip the first time
+    // this listener
+    // is called.

Review comment:
       Fix this: the comment is now broken into alternating long and one-word lines (`// suggester`, `// "buildOnStartup"`, `// this listener`); reflow the whole paragraph.
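       Reflowed, it could read (sketch only, same content):

           // On core reload, immediately after the core is created a new searcher is opened, causing
           // the suggester to trigger a "buildOnCommit". The only event that we want to trigger in
           // that situation is "buildOnStartup", so if buildOnCommit is true and this is a core being
           // reloaded, we will skip the first time this listener is called.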

##########
File path: solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
##########
@@ -211,25 +217,30 @@ public void process(ResponseBuilder rb) throws IOException {
         rb.rsp.add("spellcheck", response);
 
       } else {
-        throw new SolrException(SolrException.ErrorCode.NOT_FOUND,
-            "Specified dictionaries do not exist: " + getDictionaryNameAsSingleString(getDictionaryNames(params)));
+        throw new SolrException(
+            SolrException.ErrorCode.NOT_FOUND,
+            "Specified dictionaries do not exist: "
+                + getDictionaryNameAsSingleString(getDictionaryNames(params)));
       }
     }
   }
 
   private Integer maxResultsForSuggest(ResponseBuilder rb) {
     SolrParams params = rb.req.getParams();
-    float maxResultsForSuggestParamValue = params.getFloat(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, 0.0f);
+    float maxResultsForSuggestParamValue =
+        params.getFloat(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, 0.0f);
     Integer maxResultsForSuggest = null;
 
     if (maxResultsForSuggestParamValue > 0.0f) {
       if (maxResultsForSuggestParamValue == (int) maxResultsForSuggestParamValue) {
         // If a whole number was passed in, this is a discrete number of documents
         maxResultsForSuggest = (int) maxResultsForSuggestParamValue;
       } else {
-        // If a fractional value was passed in, this is the % of documents returned by the specified filter
+        // If a fractional value was passed in, this is the % of documents returned by the specified
+        // filter
         // If no specified filter, we use the most restrictive filter of the fq parameters

Review comment:
       Fix this: `// filter` is stranded on its own line; rejoin it with the next sentence.
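       For example (sketch only):

           // If a fractional value was passed in, this is the % of documents returned by the
           // specified filter. If no specified filter, we use the most restrictive filter of
           // the fq parameters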

##########
File path: solr/core/src/java/org/apache/solr/handler/designer/DefaultSchemaSuggester.java
##########
@@ -309,8 +365,10 @@ protected String isIntOrLong(List<Object> values, Locale locale) {
       }
     }
 
-    // if all values are less than some smallish threshold, then it's likely this field holds small numbers
-    // but be very conservative here as it's simply an optimization and we can always fall back to long
+    // if all values are less than some smallish threshold, then it's likely this field holds small
+    // numbers
+    // but be very conservative here as it's simply an optimization and we can always fall back to
+    // long

Review comment:
       Fix this: `// numbers` and `// long` are stranded on their own lines; rewrap the comment.
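       For example (sketch only, same wording rebroken):

           // if all values are less than some smallish threshold, then it's likely this field holds
           // small numbers, but be very conservative here as it's simply an optimization and we can
           // always fall back to long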

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -254,8 +265,10 @@ void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, CoreContainer cor
       rsp.setException(exp);
     }
 
-    // Even if Overseer does wait for the collection to be created, it sees a different cluster state than this node,
-    // so this wait is required to make sure the local node Zookeeper watches fired and now see the collection.
+    // Even if Overseer does wait for the collection to be created, it sees a different cluster
+    // state than this node,
+    // so this wait is required to make sure the local node Zookeeper watches fired and now see the
+    // collection.

Review comment:
       Fix this: the wrap leaves `// state than this node,` and `// collection.` as stubs; reflow the comment manually.
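       A possible rewrap (sketch only):

           // Even if Overseer does wait for the collection to be created, it sees a different
           // cluster state than this node, so this wait is required to make sure the local node
           // Zookeeper watches fired and now see the collection.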

##########
File path: solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java
##########
@@ -202,37 +208,52 @@ private void _write(OutputStream os) throws IOException {
     SortSpec sortSpec = info.getResponseBuilder().getSortSpec();
 
     if (sortSpec == null) {
-      writeException((new IOException(new SyntaxError("No sort criteria was provided."))), writer, true);
+      writeException(
+          (new IOException(new SyntaxError("No sort criteria was provided."))), writer, true);
       return;
     }
 
     SolrIndexSearcher searcher = req.getSearcher();
     Sort sort = searcher.weightSort(sortSpec.getSort());
 
     if (sort == null) {
-      writeException((new IOException(new SyntaxError("No sort criteria was provided."))), writer, true);
+      writeException(
+          (new IOException(new SyntaxError("No sort criteria was provided."))), writer, true);
       return;
     }
 
     if (sort != null && sort.needsScores()) {
-      writeException((new IOException(new SyntaxError("Scoring is not currently supported with xsort."))), writer, true);
+      writeException(
+          (new IOException(new SyntaxError("Scoring is not currently supported with xsort."))),
+          writer,
+          true);
       return;
     }
 
-    // There is a bailout in SolrIndexSearcher.getDocListNC when there are _no_ docs in the index at all.
+    // There is a bailout in SolrIndexSearcher.getDocListNC when there are _no_ docs in the index at
+    // all.
     // if (lastDocRequested <= 0) {
     // That causes the totalHits and export entries in the context to _not_ get set.
-    // The only time that really matters is when we search against an _empty_ set. That's too obscure
-    // a condition to handle as part of this patch, if someone wants to pursue it it can be reproduced with:
-    // ant test  -Dtestcase=StreamingTest -Dtests.method=testAllValidExportTypes -Dtests.seed=10F13879D0D1D6AD -Dtests.slow=true -Dtests.locale=es-PA -Dtests.timezone=America/Bahia_Banderas -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1
+    // The only time that really matters is when we search against an _empty_ set. That's too
+    // obscure
+    // a condition to handle as part of this patch, if someone wants to pursue it it can be
+    // reproduced with:
+    // ant test  -Dtestcase=StreamingTest -Dtests.method=testAllValidExportTypes
+    // -Dtests.seed=10F13879D0D1D6AD -Dtests.slow=true -Dtests.locale=es-PA
+    // -Dtests.timezone=America/Bahia_Banderas -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1

Review comment:
       Fix this: `// all.` and `// obscure` are stranded mid-sentence; rewrap the prose part of the comment. Breaking the long ant command at option boundaries, as done here, is fine.
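       For the prose part, something like this (sketch only; the ant command lines then follow unchanged):

           // There is a bailout in SolrIndexSearcher.getDocListNC when there are _no_ docs in the
           // index at all.
           // if (lastDocRequested <= 0) {
           // That causes the totalHits and export entries in the context to _not_ get set.
           // The only time that really matters is when we search against an _empty_ set. That's too
           // obscure a condition to handle as part of this patch; if someone wants to pursue it, it
           // can be reproduced with: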

##########
File path: solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java
##########
@@ -597,12 +630,15 @@ SortDoc getSortDoc(SolrIndexSearcher searcher, SortField[] sortFields) throws IO
           sortValues[i] = new StringValue(vals, field, new IntComp.IntAsc());
         }
       } else {
-        throw new IOException("Sort fields must be one of the following types: int,float,long,double,string,date,boolean,SortableText");
+        throw new IOException(
+            "Sort fields must be one of the following types: int,float,long,double,string,date,boolean,SortableText");
       }
     }
-    //SingleValueSortDoc etc are specialized classes which don't have array lookups. On benchmarking large datasets
-    //This is faster than the using an array in SortDoc . So upto 4 sort fields we still want to keep specialized classes.
-    //SOLR-12616 has more details
+    // SingleValueSortDoc etc are specialized classes which don't have array lookups. On
+    // benchmarking large datasets
+    // This is faster than the using an array in SortDoc . So upto 4 sort fields we still want to
+    // keep specialized classes.

Review comment:
       Fix this: `// benchmarking large datasets` and `// keep specialized classes.` break mid-sentence; reflow the comment.
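       For example, lightly reworded while rewrapping (sketch only; final wording is the author's call):

           // SingleValueSortDoc etc are specialized classes which don't have array lookups.
           // On benchmarking large datasets this is faster than using an array in SortDoc,
           // so up to 4 sort fields we still want to keep specialized classes.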

##########
File path: solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerAPI.java
##########
@@ -831,7 +955,8 @@ protected SchemaDesignerSettings getMutableSchemaForConfigSet(final String confi
       }
 
       if (publishedVersion != -1) {
-        // keep track of the version of the configSet the mutable is derived from in case another user
+        // keep track of the version of the configSet the mutable is derived from in case another
+        // user
         // changes the derived from schema before we publish the mutable on top of it

Review comment:
       Fix this: `// user` is stranded on its own line; pull it back up so the line breaks after `another user`.

##########
File path: solr/core/src/java/org/apache/solr/handler/designer/DefaultSchemaSuggester.java
##########
@@ -139,57 +172,74 @@ protected void validateNumericTypeChange(SchemaField field, FieldType toType, Li
       throw new IllegalStateException("FieldType '" + fieldTypeName + "' not found in the schema!");
     }
 
-    Map<String, String> fieldProps = guessFieldProps(fieldName, fieldType, sampleValues, isMV, schema);
+    Map<String, String> fieldProps =
+        guessFieldProps(fieldName, fieldType, sampleValues, isMV, schema);
     SchemaField schemaField = schema.newField(fieldName, fieldTypeName, fieldProps);
     return Optional.of(schemaField);
   }
 
   @Override
-  public ManagedIndexSchema adaptExistingFieldToData(SchemaField schemaField, List<Object> sampleValues, ManagedIndexSchema schema) {
+  public ManagedIndexSchema adaptExistingFieldToData(
+      SchemaField schemaField, List<Object> sampleValues, ManagedIndexSchema schema) {
     // Promote a single-valued to multi-valued if needed
     if (!schemaField.multiValued() && isMultiValued(sampleValues)) {
       // this existing field needs to be promoted to multi-valued
       SimpleOrderedMap<Object> fieldProps = schemaField.getNamedPropertyValues(false);
       fieldProps.add("multiValued", true);
       fieldProps.remove("name");
       fieldProps.remove("type");
-      schema = schema.replaceField(schemaField.getName(), schemaField.getType(), fieldProps.asShallowMap());
+      schema =
+          schema.replaceField(
+              schemaField.getName(), schemaField.getType(), fieldProps.asShallowMap());
     }
-    // TODO: other "healing" type operations here ... but we have to be careful about overriding explicit user changes
-    // such as a user making a text field a string field, we wouldn't want to revert that field back to text
+    // TODO: other "healing" type operations here ... but we have to be careful about overriding
+    // explicit user changes
+    // such as a user making a text field a string field, we wouldn't want to revert that field back
+    // to text

Review comment:
       Fix this: `// explicit user changes` and `// to text` are awkward stubs; rewrap the TODO.
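       One way to rewrap it (sketch only):

           // TODO: other "healing" type operations here ... but we have to be careful about
           // overriding explicit user changes, such as a user making a text field a string field;
           // we wouldn't want to revert that field back to text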

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -267,21 +280,33 @@ void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, CoreContainer cor
 
   public static long DEFAULT_COLLECTION_OP_TIMEOUT = 180 * 1000;
 
-  public SolrResponse submitCollectionApiCommand(ZkNodeProps m, CollectionAction action) throws KeeperException, InterruptedException {
+  public SolrResponse submitCollectionApiCommand(ZkNodeProps m, CollectionAction action)
+      throws KeeperException, InterruptedException {
     return submitCollectionApiCommand(m, action, DEFAULT_COLLECTION_OP_TIMEOUT);
   }
 
-  public SolrResponse submitCollectionApiCommand(ZkNodeProps m, CollectionAction action, long timeout) throws KeeperException, InterruptedException {
-    // Collection API messages are either sent to Overseer and processed there, or processed locally.
-    // Distributing Collection API implies we're also distributing Cluster State Updates. Indeed collection creation
-    // with non distributed cluster state updates requires for "Per Replica States" that the Collection API be running
-    // on Overseer, which means that it is not possible to distributed Collection API while keeping cluster state updates
-    // on Overseer. See the call to CollectionCommandContext.submitIntraProcessMessage() in CreateCollectionCmd.call() which
-    // can only be done if the Collection API command runs on the same JVM as the Overseer based cluster state update...
-    // The configuration handling includes these checks to not allow distributing collection API without distributing
+  public SolrResponse submitCollectionApiCommand(
+      ZkNodeProps m, CollectionAction action, long timeout)
+      throws KeeperException, InterruptedException {
+    // Collection API messages are either sent to Overseer and processed there, or processed
+    // locally.
+    // Distributing Collection API implies we're also distributing Cluster State Updates. Indeed

Review comment:
       Fix this: `// locally.` is stranded on its own line; rewrap this comment block by hand, and the rest of the paragraph below it the same way.
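       For the part shown here, something like (sketch only; the remainder of the paragraph continues in the same style):

           // Collection API messages are either sent to Overseer and processed there, or processed
           // locally. Distributing Collection API implies we're also distributing Cluster State
           // Updates.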

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -442,7 +450,9 @@ private String getIndexPropertyFromPropFile(Directory dir) throws IOException {
       // All other exceptions are will propagate to caller.
       return dataDir + "index/";
     }
-    final InputStream is = new PropertiesInputStream(input); // c'tor just assigns a variable here, no exception thrown.
+    final InputStream is =
+        new PropertiesInputStream(
+            input); // c'tor just assigns a variable here, no exception thrown.

Review comment:
       Fix this: the trailing comment forces `new PropertiesInputStream(input)` across three lines; move the comment onto its own line above the assignment.
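       For example (sketch only, comment hoisted so the assignment fits on one line):

           // c'tor just assigns a variable here, no exception thrown.
           final InputStream is = new PropertiesInputStream(input);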

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -846,44 +892,54 @@ void initIndex(boolean passOnPreviousState, boolean reload) throws IOException {
       log.debug("Solr index directory '{}' doesn't exist. Creating new index...", indexDir);
       SolrIndexWriter writer = null;
       try {
-        writer = SolrIndexWriter.create(this, "SolrCore.initIndex", indexDir, getDirectoryFactory(), true,
-            getLatestSchema(), solrConfig.indexConfig, solrDelPolicy, codec);
+        writer =
+            SolrIndexWriter.create(
+                this,
+                "SolrCore.initIndex",
+                indexDir,
+                getDirectoryFactory(),
+                true,
+                getLatestSchema(),
+                solrConfig.indexConfig,
+                solrDelPolicy,
+                codec);
       } finally {
         IOUtils.closeQuietly(writer);
       }
-
     }
 
     cleanupOldIndexDirectories(reload);
   }
 
-
   /**
-   * Creates an instance by trying a constructor that accepts a SolrCore before
-   * trying the default (no arg) constructor.
+   * Creates an instance by trying a constructor that accepts a SolrCore before trying the default
+   * (no arg) constructor.
    *
    * @param className the instance class to create
-   * @param cast      the class or interface that the instance should extend or implement
-   * @param msg       a message helping compose the exception error if any occurs.
-   * @param core      The SolrCore instance for which this object needs to be loaded
+   * @param cast the class or interface that the instance should extend or implement
+   * @param msg a message helping compose the exception error if any occurs.
+   * @param core The SolrCore instance for which this object needs to be loaded
    * @return the desired instance
    * @throws SolrException if the object could not be instantiated
    */
-  public static <T> T createInstance(String className, Class<T> cast, String msg, SolrCore core, ResourceLoader resourceLoader) {
+  public static <T> T createInstance(
+      String className, Class<T> cast, String msg, SolrCore core, ResourceLoader resourceLoader) {
     Class<? extends T> clazz = null;
     if (msg == null) msg = "SolrCore Object";
     try {
       clazz = resourceLoader.findClass(className, cast);
-      //most of the classes do not have constructors which takes SolrCore argument. It is recommended to obtain SolrCore by implementing SolrCoreAware.
-      // So invariably always it will cause a  NoSuchMethodException. So iterate though the list of available constructors
+      // most of the classes do not have constructors which takes SolrCore argument. It is
+      // recommended to obtain SolrCore by implementing SolrCoreAware.
+      // So invariably always it will cause a  NoSuchMethodException. So iterate though the list of
+      // available constructors

Review comment:
       Fix this: the wrap leaves `// available constructors` dangling and the sentence split oddly; rewrap it, and this is a good chance to tidy the grammar too.
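       For example, rewrapped with the grammar tidied (sketch only; the rewording is a suggestion, not the original text):

           // Most of the classes do not have constructors which take a SolrCore argument; it is
           // recommended to obtain SolrCore by implementing SolrCoreAware. So invariably it will
           // cause a NoSuchMethodException, so iterate through the list of available constructors.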

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -1957,56 +2110,84 @@ boolean areAllSearcherReferencesEmpty() {
   }
 
   /**
-   * Computes fingerprint of a segment and caches it only if all the version in segment are included in the fingerprint.
-   * We can't use computeIfAbsent as caching is conditional (as described above)
-   * There is chance that two threads may compute fingerprint on the same segment. It might be OK to do so rather than locking entire map.
+   * Computes fingerprint of a segment and caches it only if all the version in segment are included
+   * in the fingerprint. We can't use computeIfAbsent as caching is conditional (as described above)
+   * There is chance that two threads may compute fingerprint on the same segment. It might be OK to
+   * do so rather than locking entire map.
    *
-   * @param searcher   searcher that includes specified LeaderReaderContext
-   * @param ctx        LeafReaderContext of a segment to compute fingerprint of
+   * @param searcher searcher that includes specified LeaderReaderContext
+   * @param ctx LeafReaderContext of a segment to compute fingerprint of
    * @param maxVersion maximum version number to consider for fingerprint computation
    * @return IndexFingerprint of the segment
    * @throws IOException Can throw IOException
    */
-  public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafReaderContext ctx, long maxVersion)
-      throws IOException {
+  public IndexFingerprint getIndexFingerprint(
+      SolrIndexSearcher searcher, LeafReaderContext ctx, long maxVersion) throws IOException {
     IndexReader.CacheHelper cacheHelper = ctx.reader().getReaderCacheHelper();
     if (cacheHelper == null) {
       if (log.isDebugEnabled()) {
-        log.debug("Cannot cache IndexFingerprint as reader does not support caching. searcher:{} reader:{} readerHash:{} maxVersion:{}", searcher, ctx.reader(), ctx.reader().hashCode(), maxVersion);
+        log.debug(
+            "Cannot cache IndexFingerprint as reader does not support caching. searcher:{} reader:{} readerHash:{} maxVersion:{}",
+            searcher,
+            ctx.reader(),
+            ctx.reader().hashCode(),
+            maxVersion);
       }
       return IndexFingerprint.getFingerprint(searcher, ctx, maxVersion);
     }
 
     IndexFingerprint f = null;
     f = perSegmentFingerprintCache.get(cacheHelper.getKey());
     // fingerprint is either not cached or
-    // if we want fingerprint only up to a version less than maxVersionEncountered in the segment, or
+    // if we want fingerprint only up to a version less than maxVersionEncountered in the segment,
+    // or
     // documents were deleted from segment for which fingerprint was cached

Review comment:
       Fix this: `// or` is stranded on its own line; reflow the three comment lines as one sentence.
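       For example, rewrapped as one sentence (sketch only; lightly reworded):

           // The fingerprint is either not cached, or we want a fingerprint only up to a version
           // less than maxVersionEncountered in the segment, or documents were deleted from the
           // segment for which the fingerprint was cached.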

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -1494,78 +1902,87 @@ public static void waitForActiveCollection(String collectionName, CoreContainer
     Integer seconds = ccfg.getCreateCollectionWaitTimeTillActive();
     Boolean checkLeaderOnly = ccfg.isCreateCollectionCheckLeaderActive();
     if (log.isInfoEnabled()) {
-      log.info("Wait for new collection to be active for at most {} seconds. Check all shard {}"
-          , seconds, (checkLeaderOnly ? "leaders" : "replicas"));
+      log.info(
+          "Wait for new collection to be active for at most {} seconds. Check all shard {}",
+          seconds,
+          (checkLeaderOnly ? "leaders" : "replicas"));
     }
 
     try {
-      cc.getZkController().getZkStateReader().waitForState(collectionName, seconds, TimeUnit.SECONDS, (n, c) -> {
-
-        if (c == null) {
-          // the collection was not created, don't wait
-          return true;
-        }
-
-        if (c.getSlices() != null) {
-          Collection<Slice> shards = c.getSlices();
-          int replicaNotAliveCnt = 0;
-          for (Slice shard : shards) {
-            Collection<Replica> replicas;
-            if (!checkLeaderOnly) replicas = shard.getReplicas();
-            else {
-              replicas = new ArrayList<Replica>();
-              replicas.add(shard.getLeader());
-            }
-            for (Replica replica : replicas) {
-              String state = replica.getStr(ZkStateReader.STATE_PROP);
-              if (log.isDebugEnabled()) {
-                log.debug("Checking replica status, collection={} replica={} state={}", collectionName,
-                    replica.getCoreUrl(), state);
-              }
-              if (!n.contains(replica.getNodeName())
-                  || !state.equals(Replica.State.ACTIVE.toString())) {
-                replicaNotAliveCnt++;
+      cc.getZkController()
+          .getZkStateReader()
+          .waitForState(
+              collectionName,
+              seconds,
+              TimeUnit.SECONDS,
+              (n, c) -> {
+                if (c == null) {
+                  // the collection was not created, don't wait
+                  return true;
+                }
+
+                if (c.getSlices() != null) {
+                  Collection<Slice> shards = c.getSlices();
+                  int replicaNotAliveCnt = 0;
+                  for (Slice shard : shards) {
+                    Collection<Replica> replicas;
+                    if (!checkLeaderOnly) replicas = shard.getReplicas();
+                    else {
+                      replicas = new ArrayList<Replica>();
+                      replicas.add(shard.getLeader());
+                    }
+                    for (Replica replica : replicas) {
+                      String state = replica.getStr(ZkStateReader.STATE_PROP);
+                      if (log.isDebugEnabled()) {
+                        log.debug(
+                            "Checking replica status, collection={} replica={} state={}",
+                            collectionName,
+                            replica.getCoreUrl(),
+                            state);
+                      }
+                      if (!n.contains(replica.getNodeName())
+                          || !state.equals(Replica.State.ACTIVE.toString())) {
+                        replicaNotAliveCnt++;
+                        return false;
+                      }
+                    }
+                  }
+
+                  return (replicaNotAliveCnt == 0) || (replicaNotAliveCnt <= replicaFailCount);
+                }
                 return false;
-              }
-            }
-          }
-
-          return (replicaNotAliveCnt == 0) || (replicaNotAliveCnt <= replicaFailCount);
-        }
-        return false;
-      });
+              });
     } catch (TimeoutException | InterruptedException e) {
 
-      String error = "Timeout waiting for active collection " + collectionName + " with timeout=" + seconds;
+      String error =
+          "Timeout waiting for active collection " + collectionName + " with timeout=" + seconds;
       throw new NotInClusterStateException(ErrorCode.SERVER_ERROR, error);
     }
-
   }
 
-
   private static void verifyShardsParam(String shardsParam) {
     for (String shard : shardsParam.split(",")) {
       SolrIdentifierValidator.validateShardName(shard);
     }
   }
 
   interface CollectionOp {
-    Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception;
-
+    Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
+        throws Exception;
   }
 
   @Override
   public Boolean registerV2() {
     return Boolean.TRUE;
   }
 
-  // These "copy" methods were once SolrParams.getAll but were moved here as there is no universal way that
+  // These "copy" methods were once SolrParams.getAll but were moved here as there is no universal
+  // way that
   //  a SolrParams can be represented in a Map; there are various choices.

Review comment:
       Fix this: `// way that` is stranded mid-sentence; rejoin it with the following line.
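       For example (sketch only, original text rejoined):

           // These "copy" methods were once SolrParams.getAll but were moved here as there is no
           // universal way that a SolrParams can be represented in a Map; there are various choices.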

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -2491,45 +2718,47 @@ public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafRead
         }
 
         if (searchHolder != null) {
-          searchHolder.decref();      // decrement 1 for _searcher (searchHolder will never become _searcher now)
+          searchHolder
+              .decref(); // decrement 1 for _searcher (searchHolder will never become _searcher now)

Review comment:
       Fix this: wrapping `searchHolder.decref()` across two lines just to keep the trailing comment reads badly; move `// decrement 1 for _searcher (searchHolder will never become _searcher now)` onto its own line above the call.

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -2330,26 +2554,25 @@ public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafRead
     }
 
     // a signal to decrement onDeckSearchers if something goes wrong.
-    final boolean[] decrementOnDeckCount = new boolean[]{true};
-    RefCounted<SolrIndexSearcher> currSearcherHolder = null;     // searcher we are autowarming from
+    final boolean[] decrementOnDeckCount = new boolean[] {true};
+    RefCounted<SolrIndexSearcher> currSearcherHolder = null; // searcher we are autowarming from
     RefCounted<SolrIndexSearcher> searchHolder = null;
     boolean success = false;
 
     openSearcherLock.lock();
     Timer.Context timerContext = newSearcherTimer.time();
     try {
       searchHolder = openNewSearcher(updateHandlerReopens, false);
-      // the searchHolder will be incremented once already (and it will eventually be assigned to _searcher when registered)
+      // the searchHolder will be incremented once already (and it will eventually be assigned to
+      // _searcher when registered)
       // increment it again if we are going to return it to the caller.

Review comment:
       Fix this: the wrap plus the pre-existing continuation line reads oddly; reflow the comment as one paragraph.
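       For example (sketch only, same content reflowed):

           // the searchHolder will be incremented once already (and it will eventually be assigned
           // to _searcher when registered); increment it again if we are going to return it to the
           // caller.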

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -2182,21 +2386,32 @@ public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafRead
           } finally {
             writer.decref();
           }
-          tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
-              (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);
+          tmp =
+              new SolrIndexSearcher(
+                  this,
+                  newIndexDir,
+                  getLatestSchema(),
+                  (realtime ? "realtime" : "main"),
+                  newReader,
+                  true,
+                  !realtime,
+                  true,
+                  directoryFactory);
         }
       }
 
       List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers;
-      RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList);    // refcount now at 1
+      RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList); // refcount now at 1
 
       // Increment reference again for "realtimeSearcher" variable.  It should be at 2 after.
-      // When it's decremented by both the caller of this method, and by realtimeSearcher being replaced,
+      // When it's decremented by both the caller of this method, and by realtimeSearcher being
+      // replaced,
       // it will be closed.

Review comment:
       Fix this: `// replaced,` is stranded mid-sentence; reflow the comment.
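       For example (sketch only, same wording rebroken):

           // Increment reference again for "realtimeSearcher" variable. It should be at 2 after.
           // When it's decremented by both the caller of this method, and by realtimeSearcher
           // being replaced, it will be closed.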

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -388,982 +426,1324 @@ public Category getCategory() {
     return Category.ADMIN;
   }
 
-  private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
+  private static void createSysConfigSet(CoreContainer coreContainer)
+      throws KeeperException, InterruptedException {
     SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
     ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
     cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
-    cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
+    cmdExecutor.ensureExists(
+        ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
 
     try {
-      String path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
-      byte[] data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
+      String path =
+          ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
+      byte[] data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
-      path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/solrconfig.xml";
-      data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
+      path =
+          ZkStateReader.CONFIGS_ZKNODE
+              + "/"
+              + CollectionAdminParams.SYSTEM_COLL
+              + "/solrconfig.xml";
+      data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
     } catch (IOException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
-
-
   }
 
-  private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
+  private static void addStatusToResponse(
+      NamedList<Object> results, RequestStatusState state, String msg) {
     SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
     status.add("state", state.getKey());
     status.add("msg", msg);
     results.add("status", status);
   }
 
   public enum CollectionOperation implements CollectionOp {
-    CREATE_OP(CREATE, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams().required(), null, NAME);
-      props.put("fromApi", "true");
-      copy(req.getParams(), props,
-          REPLICATION_FACTOR,
-          COLL_CONF,
-          NUM_SLICES,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          SHARDS_PROP,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          NRT_REPLICAS,
-          WAIT_FOR_FINAL_STATE,
-          PER_REPLICA_STATE,
-          ALIAS);
-
-      if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
-        //TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for more details
-        int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
-        int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
-        if (replicationFactor != nrtReplicas) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
-        }
-      }
-      if (props.get(REPLICATION_FACTOR) != null) {
-        props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
-      } else if (props.get(NRT_REPLICAS) != null) {
-        props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
-      }
-
-      final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
-      final String shardsParam = (String) props.get(SHARDS_PROP);
-      if (StringUtils.isNotEmpty(shardsParam)) {
-        verifyShardsParam(shardsParam);
-      }
-      if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
-        //We must always create a .system collection with only a single shard
-        props.put(NUM_SLICES, 1);
-        props.remove(SHARDS_PROP);
-        createSysConfigSet(h.coreContainer);
-
-      }
-      if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
-      for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
-        h.copyFromClusterProp(props, prop);
-      copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-      return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+    CREATE_OP(
+        CREATE,
+        (req, rsp, h) -> {
+          Map<String, Object> props = copy(req.getParams().required(), null, NAME);
+          props.put("fromApi", "true");
+          copy(
+              req.getParams(),
+              props,
+              REPLICATION_FACTOR,
+              COLL_CONF,
+              NUM_SLICES,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              SHARDS_PROP,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              NRT_REPLICAS,
+              WAIT_FOR_FINAL_STATE,
+              PER_REPLICA_STATE,
+              ALIAS);
+
+          if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
+            // TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for
+            // more details
+            int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
+            int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
+            if (replicationFactor != nrtReplicas) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
+            }
+          }
+          if (props.get(REPLICATION_FACTOR) != null) {
+            props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
+          } else if (props.get(NRT_REPLICAS) != null) {
+            props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
+          }
 
-    }),
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
+          final String shardsParam = (String) props.get(SHARDS_PROP);
+          if (StringUtils.isNotEmpty(shardsParam)) {
+            verifyShardsParam(shardsParam);
+          }
+          if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
+            // We must always create a .system collection with only a single shard
+            props.put(NUM_SLICES, 1);
+            props.remove(SHARDS_PROP);
+            createSysConfigSet(h.coreContainer);
+          }
+          if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
+          for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
+            h.copyFromClusterProp(props, prop);
+          copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+          return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+        }),
     @SuppressWarnings({"unchecked"})
-    COLSTATUS_OP(COLSTATUS, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          ColStatus.CORE_INFO_PROP,
-          ColStatus.SEGMENTS_PROP,
-          ColStatus.FIELD_INFO_PROP,
-          ColStatus.RAW_SIZE_PROP,
-          ColStatus.RAW_SIZE_SUMMARY_PROP,
-          ColStatus.RAW_SIZE_DETAILS_PROP,
-          ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
-          ColStatus.SIZE_INFO_PROP);
-
-      new ColStatus(h.coreContainer.getSolrClientCache(),
-          h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props))
-          .getColStatus(rsp.getValues());
-      return null;
-    }),
-    DELETE_OP(DELETE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
+    COLSTATUS_OP(
+        COLSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  ColStatus.CORE_INFO_PROP,
+                  ColStatus.SEGMENTS_PROP,
+                  ColStatus.FIELD_INFO_PROP,
+                  ColStatus.RAW_SIZE_PROP,
+                  ColStatus.RAW_SIZE_SUMMARY_PROP,
+                  ColStatus.RAW_SIZE_DETAILS_PROP,
+                  ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
+                  ColStatus.SIZE_INFO_PROP);
+
+          new ColStatus(
+                  h.coreContainer.getSolrClientCache(),
+                  h.coreContainer.getZkController().getZkStateReader().getClusterState(),
+                  new ZkNodeProps(props))
+              .getColStatus(rsp.getValues());
+          return null;
+        }),
+    DELETE_OP(
+        DELETE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
     // XXX should this command support followAliases?
-    RELOAD_OP(RELOAD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map);
-    }),
-
-    RENAME_OP(RENAME, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
-
-    REINDEXCOLLECTION_OP(REINDEXCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams().required(), null, NAME);
-      copy(req.getParams(), m,
-          ReindexCollectionCmd.COMMAND,
-          ReindexCollectionCmd.REMOVE_SOURCE,
-          ReindexCollectionCmd.TARGET,
-          ZkStateReader.CONFIGNAME_PROP,
-          NUM_SLICES,
-          NRT_REPLICAS,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          REPLICATION_FACTOR,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          "shards",
-          CommonParams.ROWS,
-          CommonParams.Q,
-          CommonParams.FL,
-          FOLLOW_ALIASES);
-      if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
-        m.put(ZkStateReader.CONFIGNAME_PROP, req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
-      }
-      copyPropertiesWithPrefix(req.getParams(), m, "router.");
-      return m;
-    }),
-
-    SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get("collection");
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String shard = req.getParams().required().get("shard");
-
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-
-      DocCollection docCollection = clusterState.getCollection(collection);
-      ZkNodeProps leaderProps = docCollection.getLeader(shard);
-      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-
-      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-        RequestSyncShard reqSyncShard = new RequestSyncShard();
-        reqSyncShard.setCollection(collection);
-        reqSyncShard.setShard(shard);
-        reqSyncShard.setCoreName(nodeProps.getCoreName());
-        client.request(reqSyncShard);
-      }
-      return null;
-    }),
-
-    CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
-      String alias = req.getParams().get(NAME);
-      SolrIdentifierValidator.validateAliasName(alias);
-      String collections = req.getParams().get("collections");
-      RoutedAlias routedAlias = null;
-      Exception ex = null;
-      HashMap<String,Object> possiblyModifiedParams = new HashMap<>();
-      try {
-        // note that RA specific validation occurs here.
-        req.getParams().toMap(possiblyModifiedParams);
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        // This is awful because RoutedAlias lies about what types it wants
-        Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
-        routedAlias = RoutedAlias.fromProps(alias, temp);
-      } catch (SolrException e) {
-        // we'll throw this later if we are in fact creating a routed alias.
-        ex = e;
-      }
-      ModifiableSolrParams finalParams = new ModifiableSolrParams();
-      for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
-        if (entry.getValue().getClass().isArray() ) {
-          // v2 api hits this case
-          for (Object o : (Object[]) entry.getValue()) {
-            finalParams.add(entry.getKey(),o.toString());
+    RELOAD_OP(
+        RELOAD,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map);
+        }),
+
+    RENAME_OP(
+        RENAME,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
+
+    REINDEXCOLLECTION_OP(
+        REINDEXCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m = copy(req.getParams().required(), null, NAME);
+          copy(
+              req.getParams(),
+              m,
+              ReindexCollectionCmd.COMMAND,
+              ReindexCollectionCmd.REMOVE_SOURCE,
+              ReindexCollectionCmd.TARGET,
+              ZkStateReader.CONFIGNAME_PROP,
+              NUM_SLICES,
+              NRT_REPLICAS,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              REPLICATION_FACTOR,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              "shards",
+              CommonParams.ROWS,
+              CommonParams.Q,
+              CommonParams.FL,
+              FOLLOW_ALIASES);
+          if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
+            m.put(
+                ZkStateReader.CONFIGNAME_PROP,
+                req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
+          }
+          copyPropertiesWithPrefix(req.getParams(), m, "router.");
+          return m;
+        }),
+
+    SYNCSHARD_OP(
+        SYNCSHARD,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get("collection");
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String shard = req.getParams().required().get("shard");
+
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+
+          DocCollection docCollection = clusterState.getCollection(collection);
+          ZkNodeProps leaderProps = docCollection.getLeader(shard);
+          ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
+
+          try (HttpSolrClient client =
+              new Builder(nodeProps.getBaseUrl())
+                  .withConnectionTimeout(15000)
+                  .withSocketTimeout(60000)
+                  .build()) {
+            RequestSyncShard reqSyncShard = new RequestSyncShard();
+            reqSyncShard.setCollection(collection);
+            reqSyncShard.setShard(shard);
+            reqSyncShard.setCoreName(nodeProps.getCoreName());
+            client.request(reqSyncShard);
+          }
+          return null;
+        }),
+
+    CREATEALIAS_OP(
+        CREATEALIAS,
+        (req, rsp, h) -> {
+          String alias = req.getParams().get(NAME);
+          SolrIdentifierValidator.validateAliasName(alias);
+          String collections = req.getParams().get("collections");
+          RoutedAlias routedAlias = null;
+          Exception ex = null;
+          HashMap<String, Object> possiblyModifiedParams = new HashMap<>();
+          try {
+            // note that RA specific validation occurs here.
+            req.getParams().toMap(possiblyModifiedParams);
+            @SuppressWarnings({"unchecked", "rawtypes"})
+            // This is awful because RoutedAlias lies about what types it wants
+            Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
+            routedAlias = RoutedAlias.fromProps(alias, temp);
+          } catch (SolrException e) {
+            // we'll throw this later if we are in fact creating a routed alias.
+            ex = e;
+          }
+          ModifiableSolrParams finalParams = new ModifiableSolrParams();
+          for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
+            if (entry.getValue().getClass().isArray()) {
+              // v2 api hits this case
+              for (Object o : (Object[]) entry.getValue()) {
+                finalParams.add(entry.getKey(), o.toString());
+              }
+            } else {
+              finalParams.add(entry.getKey(), entry.getValue().toString());
+            }
           }
-        } else {
-          finalParams.add(entry.getKey(),entry.getValue().toString());
-        }
-      }
 
-      if (collections != null) {
-        if (routedAlias != null) {
-          throw new SolrException(BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
-        } else {
-          //////////////////////////////////////
-          // Regular alias creation indicated //
-          //////////////////////////////////////
-          return copy(finalParams.required(), null, NAME, "collections");
-        }
-      }
+          if (collections != null) {
+            if (routedAlias != null) {
+              throw new SolrException(
+                  BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
+            } else {
+              //////////////////////////////////////
+              // Regular alias creation indicated //
+              //////////////////////////////////////
+              return copy(finalParams.required(), null, NAME, "collections");
+            }
+          }
 
-      /////////////////////////////////////////////////
-      // We are creating a routed alias from here on //
-      /////////////////////////////////////////////////
+          /////////////////////////////////////////////////
+          // We are creating a routed alias from here on //
+          /////////////////////////////////////////////////
 
-      // If our prior creation attempt had issues expose them now.
-      if (ex != null) {
-        throw ex;
-      }
+          // If our prior creation attempt had issues expose them now.
+          if (ex != null) {
+            throw ex;
+          }
 
-      // Now filter out just the parameters we care about from the request
-      assert routedAlias != null;
-      Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
-      copy(finalParams, result, routedAlias.getOptionalParams());
-
-      ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
-
-      // add to result params that start with "create-collection.".
-      //   Additionally, save these without the prefix to createCollParams
-      for (Map.Entry<String, String[]> entry : finalParams) {
-        final String p = entry.getKey();
-        if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
-          // This is what SolrParams#getAll(Map, Collection)} does
-          final String[] v = entry.getValue();
-          if (v.length == 1) {
-            result.put(p, v[0]);
-          } else {
-            result.put(p, v);
+          // Now filter out just the parameters we care about from the request
+          assert routedAlias != null;
+          Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
+          copy(finalParams, result, routedAlias.getOptionalParams());
+
+          ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
+
+          // add to result params that start with "create-collection.".
+          //   Additionally, save these without the prefix to createCollParams
+          for (Map.Entry<String, String[]> entry : finalParams) {
+            final String p = entry.getKey();
+            if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
+              // This is what SolrParams#getAll(Map, Collection)} does
+              final String[] v = entry.getValue();
+              if (v.length == 1) {
+                result.put(p, v[0]);
+              } else {
+                result.put(p, v);
+              }
+              createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
+            }
           }
-          createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
-        }
-      }
 
-      // Verify that the create-collection prefix'ed params appear to be valid.
-      if (createCollParams.get(NAME) != null) {
-        throw new SolrException(BAD_REQUEST, "routed aliases calculate names for their " +
-            "dependent collections, you cannot specify the name.");
-      }
-      if (createCollParams.get(COLL_CONF) == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "We require an explicit " + COLL_CONF);
-      }
-      // note: could insist on a config name here as well.... or wait to throw at overseer
-      createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
-      CREATE_OP.execute(new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
+          // Verify that the create-collection prefix'ed params appear to be valid.
+          if (createCollParams.get(NAME) != null) {
+            throw new SolrException(
+                BAD_REQUEST,
+                "routed aliases calculate names for their "
+                    + "dependent collections, you cannot specify the name.");
+          }
+          if (createCollParams.get(COLL_CONF) == null) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST, "We require an explicit " + COLL_CONF);
+          }
+          // note: could insist on a config name here as well.... or wait to throw at overseer
+          createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
+          CREATE_OP.execute(
+              new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
 
-      return result;
-    }),
+          return result;
+        }),
 
     DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
 
     /**
      * Change properties for an alias (use CREATEALIAS_OP to change the actual value of the alias)
      */
-    ALIASPROP_OP(ALIASPROP, (req, rsp, h) -> {
-      Map<String, Object> params = copy(req.getParams().required(), null, NAME);
-
-      // Note: success/no-op in the event of no properties supplied is intentional. Keeps code simple and one less case
-      // for api-callers to check for.
-      return convertPrefixToMap(req.getParams(), params, "property");
-    }),
-
-    /**
-     * List the aliases and associated properties.
-     */
+    ALIASPROP_OP(
+        ALIASPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> params = copy(req.getParams().required(), null, NAME);
+
+          // Note: success/no-op in the event of no properties supplied is intentional. Keeps code
+          // simple and one less case
+          // for api-callers to check for.

Review comment:
       Fix this: the rewrapped comment now breaks mid-sentence ("simple and one less case" / "for api-callers to check for."). Reflow it by hand so each line reads cleanly.
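
       One possible manual reflow, sketched against the lambda body shown in the hunk above (not the change the PR actually makes):

       ```java
       ALIASPROP_OP(
           ALIASPROP,
           (req, rsp, h) -> {
             Map<String, Object> params = copy(req.getParams().required(), null, NAME);

             // Note: success/no-op when no properties are supplied is intentional; it keeps the
             // code simple and gives API callers one less case to check for.
             return convertPrefixToMap(req.getParams(), params, "property");
           }),
       ```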

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -958,15 +1048,21 @@ public CoreContainer getCoreContainer() {
   }
 
   SolrCore(CoreContainer coreContainer, CoreDescriptor cd, ConfigSet configSet) {
-    this(coreContainer, cd, configSet, null,
-        null, null, null, false);
+    this(coreContainer, cd, configSet, null, null, null, null, false);
   }
 
-  private SolrCore(CoreContainer coreContainer, CoreDescriptor coreDescriptor, ConfigSet configSet,
-                   String dataDir, UpdateHandler updateHandler,
-                   IndexDeletionPolicyWrapper delPolicy, SolrCore prev, boolean reload) {
+  private SolrCore(
+      CoreContainer coreContainer,
+      CoreDescriptor coreDescriptor,
+      ConfigSet configSet,
+      String dataDir,
+      UpdateHandler updateHandler,
+      IndexDeletionPolicyWrapper delPolicy,
+      SolrCore prev,
+      boolean reload) {
 
-    assert ObjectReleaseTracker.track(searcherExecutor); // ensure that in unclean shutdown tests we still close this
+    assert ObjectReleaseTracker.track(
+        searcherExecutor); // ensure that in unclean shutdown tests we still close this

Review comment:
       Fix this: Spotless now splits the assert across two lines just to keep the trailing comment attached; consider moving the comment above the statement instead.
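
       A possible manual tweak, shown as a sketch rather than the PR's actual change: moving the trailing comment above the statement lets the assert stay on a single line after formatting.

       ```java
       // ensure that in unclean shutdown tests we still close this
       assert ObjectReleaseTracker.track(searcherExecutor);
       ```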

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -2182,21 +2386,32 @@ public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafRead
           } finally {
             writer.decref();
           }
-          tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
-              (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);
+          tmp =
+              new SolrIndexSearcher(
+                  this,
+                  newIndexDir,
+                  getLatestSchema(),
+                  (realtime ? "realtime" : "main"),
+                  newReader,
+                  true,
+                  !realtime,
+                  true,
+                  directoryFactory);
         }
       }
 
       List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers;
-      RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList);    // refcount now at 1
+      RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList); // refcount now at 1
 
       // Increment reference again for "realtimeSearcher" variable.  It should be at 2 after.
-      // When it's decremented by both the caller of this method, and by realtimeSearcher being replaced,
+      // When it's decremented by both the caller of this method, and by realtimeSearcher being
+      // replaced,
       // it will be closed.
       newSearcher.incref();
 
       synchronized (searcherLock) {
-        // Check if the core is closed again inside the lock in case this method is racing with a close. If the core is
+        // Check if the core is closed again inside the lock in case this method is racing with a
+        // close. If the core is
         // closed, clean up the new searcher and bail.

Review comment:
       Fix this: the rewrapped comments now break mid-sentence ("being / replaced," and "with a / close. If the core is"); reflow them by hand.
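
       Both rewrapped comments in this hunk could be reflowed manually; one possible wording, sketched as fragments of the method above rather than the PR's actual text:

       ```java
       // Increment the reference again for the "realtimeSearcher" variable; it should be at 2 after.
       // It is closed once it has been decremented both by the caller of this method and by
       // realtimeSearcher being replaced.
       newSearcher.incref();

       // (inside the synchronized (searcherLock) block)
       // Check again inside the lock whether the core is closed, in case this method is racing
       // with a close. If the core is closed, clean up the new searcher and bail.
       ```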

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -894,25 +950,43 @@ void initIndex(boolean passOnPreviousState, boolean reload) throws IOException {
         throw inner;
       }
 
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error Instantiating " + msg + ", " + className + " failed to instantiate " + cast.getName(), e);
+      throw new SolrException(
+          ErrorCode.SERVER_ERROR,
+          "Error Instantiating "
+              + msg
+              + ", "
+              + className
+              + " failed to instantiate "
+              + cast.getName(),
+          e);
     }
   }
 
-  private UpdateHandler createReloadedUpdateHandler(String className, String msg, UpdateHandler updateHandler) {
+  private UpdateHandler createReloadedUpdateHandler(
+      String className, String msg, UpdateHandler updateHandler) {
     Class<? extends UpdateHandler> clazz = null;
     if (msg == null) msg = "SolrCore Object";
     try {
       clazz = getResourceLoader().findClass(className, UpdateHandler.class);
-      //most of the classes do not have constructors which takes SolrCore argument. It is recommended to obtain SolrCore by implementing SolrCoreAware.
-      // So invariably always it will cause a  NoSuchMethodException. So iterate though the list of available constructors
+      // most of the classes do not have constructors which takes SolrCore argument. It is
+      // recommended to obtain SolrCore by implementing SolrCoreAware.
+      // So invariably always it will cause a  NoSuchMethodException. So iterate though the list of
+      // available constructors

Review comment:
       Fix this: the rewrapped comment breaks awkwardly across lines; reword or reflow it so the sentences read cleanly.
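
       A possible rewording that survives the formatter's line-length limit cleanly (a sketch; the PR keeps the original wording):

       ```java
       // Most of these classes do not have a constructor that takes a SolrCore argument (the
       // recommended way to obtain the SolrCore is to implement SolrCoreAware), so this will
       // almost always throw NoSuchMethodException; iterate over the available constructors instead.
       ```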

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -388,982 +426,1324 @@ public Category getCategory() {
     return Category.ADMIN;
   }
 
-  private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
+  private static void createSysConfigSet(CoreContainer coreContainer)
+      throws KeeperException, InterruptedException {
     SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
     ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
     cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
-    cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
+    cmdExecutor.ensureExists(
+        ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
 
     try {
-      String path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
-      byte[] data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
+      String path =
+          ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
+      byte[] data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
-      path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/solrconfig.xml";
-      data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
+      path =
+          ZkStateReader.CONFIGS_ZKNODE
+              + "/"
+              + CollectionAdminParams.SYSTEM_COLL
+              + "/solrconfig.xml";
+      data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
     } catch (IOException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
-
-
   }
 
-  private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
+  private static void addStatusToResponse(
+      NamedList<Object> results, RequestStatusState state, String msg) {
     SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
     status.add("state", state.getKey());
     status.add("msg", msg);
     results.add("status", status);
   }
 
   public enum CollectionOperation implements CollectionOp {
-    CREATE_OP(CREATE, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams().required(), null, NAME);
-      props.put("fromApi", "true");
-      copy(req.getParams(), props,
-          REPLICATION_FACTOR,
-          COLL_CONF,
-          NUM_SLICES,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          SHARDS_PROP,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          NRT_REPLICAS,
-          WAIT_FOR_FINAL_STATE,
-          PER_REPLICA_STATE,
-          ALIAS);
-
-      if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
-        //TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for more details
-        int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
-        int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
-        if (replicationFactor != nrtReplicas) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
-        }
-      }
-      if (props.get(REPLICATION_FACTOR) != null) {
-        props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
-      } else if (props.get(NRT_REPLICAS) != null) {
-        props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
-      }
-
-      final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
-      final String shardsParam = (String) props.get(SHARDS_PROP);
-      if (StringUtils.isNotEmpty(shardsParam)) {
-        verifyShardsParam(shardsParam);
-      }
-      if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
-        //We must always create a .system collection with only a single shard
-        props.put(NUM_SLICES, 1);
-        props.remove(SHARDS_PROP);
-        createSysConfigSet(h.coreContainer);
-
-      }
-      if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
-      for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
-        h.copyFromClusterProp(props, prop);
-      copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-      return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+    CREATE_OP(
+        CREATE,
+        (req, rsp, h) -> {
+          Map<String, Object> props = copy(req.getParams().required(), null, NAME);
+          props.put("fromApi", "true");
+          copy(
+              req.getParams(),
+              props,
+              REPLICATION_FACTOR,
+              COLL_CONF,
+              NUM_SLICES,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              SHARDS_PROP,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              NRT_REPLICAS,
+              WAIT_FOR_FINAL_STATE,
+              PER_REPLICA_STATE,
+              ALIAS);
+
+          if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
+            // TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for
+            // more details
+            int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
+            int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
+            if (replicationFactor != nrtReplicas) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
+            }
+          }
+          if (props.get(REPLICATION_FACTOR) != null) {
+            props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
+          } else if (props.get(NRT_REPLICAS) != null) {
+            props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
+          }
 
-    }),
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
+          final String shardsParam = (String) props.get(SHARDS_PROP);
+          if (StringUtils.isNotEmpty(shardsParam)) {
+            verifyShardsParam(shardsParam);
+          }
+          if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
+            // We must always create a .system collection with only a single shard
+            props.put(NUM_SLICES, 1);
+            props.remove(SHARDS_PROP);
+            createSysConfigSet(h.coreContainer);
+          }
+          if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
+          for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
+            h.copyFromClusterProp(props, prop);
+          copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+          return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+        }),
     @SuppressWarnings({"unchecked"})
-    COLSTATUS_OP(COLSTATUS, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          ColStatus.CORE_INFO_PROP,
-          ColStatus.SEGMENTS_PROP,
-          ColStatus.FIELD_INFO_PROP,
-          ColStatus.RAW_SIZE_PROP,
-          ColStatus.RAW_SIZE_SUMMARY_PROP,
-          ColStatus.RAW_SIZE_DETAILS_PROP,
-          ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
-          ColStatus.SIZE_INFO_PROP);
-
-      new ColStatus(h.coreContainer.getSolrClientCache(),
-          h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props))
-          .getColStatus(rsp.getValues());
-      return null;
-    }),
-    DELETE_OP(DELETE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
+    COLSTATUS_OP(
+        COLSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  ColStatus.CORE_INFO_PROP,
+                  ColStatus.SEGMENTS_PROP,
+                  ColStatus.FIELD_INFO_PROP,
+                  ColStatus.RAW_SIZE_PROP,
+                  ColStatus.RAW_SIZE_SUMMARY_PROP,
+                  ColStatus.RAW_SIZE_DETAILS_PROP,
+                  ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
+                  ColStatus.SIZE_INFO_PROP);
+
+          new ColStatus(
+                  h.coreContainer.getSolrClientCache(),
+                  h.coreContainer.getZkController().getZkStateReader().getClusterState(),
+                  new ZkNodeProps(props))
+              .getColStatus(rsp.getValues());
+          return null;
+        }),
+    DELETE_OP(
+        DELETE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
     // XXX should this command support followAliases?
-    RELOAD_OP(RELOAD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map);
-    }),
-
-    RENAME_OP(RENAME, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
-
-    REINDEXCOLLECTION_OP(REINDEXCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams().required(), null, NAME);
-      copy(req.getParams(), m,
-          ReindexCollectionCmd.COMMAND,
-          ReindexCollectionCmd.REMOVE_SOURCE,
-          ReindexCollectionCmd.TARGET,
-          ZkStateReader.CONFIGNAME_PROP,
-          NUM_SLICES,
-          NRT_REPLICAS,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          REPLICATION_FACTOR,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          "shards",
-          CommonParams.ROWS,
-          CommonParams.Q,
-          CommonParams.FL,
-          FOLLOW_ALIASES);
-      if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
-        m.put(ZkStateReader.CONFIGNAME_PROP, req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
-      }
-      copyPropertiesWithPrefix(req.getParams(), m, "router.");
-      return m;
-    }),
-
-    SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get("collection");
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String shard = req.getParams().required().get("shard");
-
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-
-      DocCollection docCollection = clusterState.getCollection(collection);
-      ZkNodeProps leaderProps = docCollection.getLeader(shard);
-      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-
-      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-        RequestSyncShard reqSyncShard = new RequestSyncShard();
-        reqSyncShard.setCollection(collection);
-        reqSyncShard.setShard(shard);
-        reqSyncShard.setCoreName(nodeProps.getCoreName());
-        client.request(reqSyncShard);
-      }
-      return null;
-    }),
-
-    CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
-      String alias = req.getParams().get(NAME);
-      SolrIdentifierValidator.validateAliasName(alias);
-      String collections = req.getParams().get("collections");
-      RoutedAlias routedAlias = null;
-      Exception ex = null;
-      HashMap<String,Object> possiblyModifiedParams = new HashMap<>();
-      try {
-        // note that RA specific validation occurs here.
-        req.getParams().toMap(possiblyModifiedParams);
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        // This is awful because RoutedAlias lies about what types it wants
-        Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
-        routedAlias = RoutedAlias.fromProps(alias, temp);
-      } catch (SolrException e) {
-        // we'll throw this later if we are in fact creating a routed alias.
-        ex = e;
-      }
-      ModifiableSolrParams finalParams = new ModifiableSolrParams();
-      for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
-        if (entry.getValue().getClass().isArray() ) {
-          // v2 api hits this case
-          for (Object o : (Object[]) entry.getValue()) {
-            finalParams.add(entry.getKey(),o.toString());
+    RELOAD_OP(
+        RELOAD,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map);
+        }),
+
+    RENAME_OP(
+        RENAME,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
+
+    REINDEXCOLLECTION_OP(
+        REINDEXCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m = copy(req.getParams().required(), null, NAME);
+          copy(
+              req.getParams(),
+              m,
+              ReindexCollectionCmd.COMMAND,
+              ReindexCollectionCmd.REMOVE_SOURCE,
+              ReindexCollectionCmd.TARGET,
+              ZkStateReader.CONFIGNAME_PROP,
+              NUM_SLICES,
+              NRT_REPLICAS,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              REPLICATION_FACTOR,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              "shards",
+              CommonParams.ROWS,
+              CommonParams.Q,
+              CommonParams.FL,
+              FOLLOW_ALIASES);
+          if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
+            m.put(
+                ZkStateReader.CONFIGNAME_PROP,
+                req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
+          }
+          copyPropertiesWithPrefix(req.getParams(), m, "router.");
+          return m;
+        }),
+
+    SYNCSHARD_OP(
+        SYNCSHARD,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get("collection");
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String shard = req.getParams().required().get("shard");
+
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+
+          DocCollection docCollection = clusterState.getCollection(collection);
+          ZkNodeProps leaderProps = docCollection.getLeader(shard);
+          ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
+
+          try (HttpSolrClient client =
+              new Builder(nodeProps.getBaseUrl())
+                  .withConnectionTimeout(15000)
+                  .withSocketTimeout(60000)
+                  .build()) {
+            RequestSyncShard reqSyncShard = new RequestSyncShard();
+            reqSyncShard.setCollection(collection);
+            reqSyncShard.setShard(shard);
+            reqSyncShard.setCoreName(nodeProps.getCoreName());
+            client.request(reqSyncShard);
+          }
+          return null;
+        }),
+
+    CREATEALIAS_OP(
+        CREATEALIAS,
+        (req, rsp, h) -> {
+          String alias = req.getParams().get(NAME);
+          SolrIdentifierValidator.validateAliasName(alias);
+          String collections = req.getParams().get("collections");
+          RoutedAlias routedAlias = null;
+          Exception ex = null;
+          HashMap<String, Object> possiblyModifiedParams = new HashMap<>();
+          try {
+            // note that RA specific validation occurs here.
+            req.getParams().toMap(possiblyModifiedParams);
+            @SuppressWarnings({"unchecked", "rawtypes"})
+            // This is awful because RoutedAlias lies about what types it wants
+            Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
+            routedAlias = RoutedAlias.fromProps(alias, temp);
+          } catch (SolrException e) {
+            // we'll throw this later if we are in fact creating a routed alias.
+            ex = e;
+          }
+          ModifiableSolrParams finalParams = new ModifiableSolrParams();
+          for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
+            if (entry.getValue().getClass().isArray()) {
+              // v2 api hits this case
+              for (Object o : (Object[]) entry.getValue()) {
+                finalParams.add(entry.getKey(), o.toString());
+              }
+            } else {
+              finalParams.add(entry.getKey(), entry.getValue().toString());
+            }
           }
-        } else {
-          finalParams.add(entry.getKey(),entry.getValue().toString());
-        }
-      }
 
-      if (collections != null) {
-        if (routedAlias != null) {
-          throw new SolrException(BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
-        } else {
-          //////////////////////////////////////
-          // Regular alias creation indicated //
-          //////////////////////////////////////
-          return copy(finalParams.required(), null, NAME, "collections");
-        }
-      }
+          if (collections != null) {
+            if (routedAlias != null) {
+              throw new SolrException(
+                  BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
+            } else {
+              //////////////////////////////////////
+              // Regular alias creation indicated //
+              //////////////////////////////////////
+              return copy(finalParams.required(), null, NAME, "collections");
+            }
+          }
 
-      /////////////////////////////////////////////////
-      // We are creating a routed alias from here on //
-      /////////////////////////////////////////////////
+          /////////////////////////////////////////////////
+          // We are creating a routed alias from here on //
+          /////////////////////////////////////////////////
 
-      // If our prior creation attempt had issues expose them now.
-      if (ex != null) {
-        throw ex;
-      }
+          // If our prior creation attempt had issues expose them now.
+          if (ex != null) {
+            throw ex;
+          }
 
-      // Now filter out just the parameters we care about from the request
-      assert routedAlias != null;
-      Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
-      copy(finalParams, result, routedAlias.getOptionalParams());
-
-      ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
-
-      // add to result params that start with "create-collection.".
-      //   Additionally, save these without the prefix to createCollParams
-      for (Map.Entry<String, String[]> entry : finalParams) {
-        final String p = entry.getKey();
-        if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
-          // This is what SolrParams#getAll(Map, Collection)} does
-          final String[] v = entry.getValue();
-          if (v.length == 1) {
-            result.put(p, v[0]);
-          } else {
-            result.put(p, v);
+          // Now filter out just the parameters we care about from the request
+          assert routedAlias != null;
+          Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
+          copy(finalParams, result, routedAlias.getOptionalParams());
+
+          ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
+
+          // add to result params that start with "create-collection.".
+          //   Additionally, save these without the prefix to createCollParams
+          for (Map.Entry<String, String[]> entry : finalParams) {
+            final String p = entry.getKey();
+            if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
+              // This is what SolrParams#getAll(Map, Collection)} does
+              final String[] v = entry.getValue();
+              if (v.length == 1) {
+                result.put(p, v[0]);
+              } else {
+                result.put(p, v);
+              }
+              createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
+            }
           }
-          createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
-        }
-      }
 
-      // Verify that the create-collection prefix'ed params appear to be valid.
-      if (createCollParams.get(NAME) != null) {
-        throw new SolrException(BAD_REQUEST, "routed aliases calculate names for their " +
-            "dependent collections, you cannot specify the name.");
-      }
-      if (createCollParams.get(COLL_CONF) == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "We require an explicit " + COLL_CONF);
-      }
-      // note: could insist on a config name here as well.... or wait to throw at overseer
-      createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
-      CREATE_OP.execute(new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
+          // Verify that the create-collection prefix'ed params appear to be valid.
+          if (createCollParams.get(NAME) != null) {
+            throw new SolrException(
+                BAD_REQUEST,
+                "routed aliases calculate names for their "
+                    + "dependent collections, you cannot specify the name.");
+          }
+          if (createCollParams.get(COLL_CONF) == null) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST, "We require an explicit " + COLL_CONF);
+          }
+          // note: could insist on a config name here as well.... or wait to throw at overseer
+          createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
+          CREATE_OP.execute(
+              new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
 
-      return result;
-    }),
+          return result;
+        }),
 
     DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
 
     /**
      * Change properties for an alias (use CREATEALIAS_OP to change the actual value of the alias)
      */
-    ALIASPROP_OP(ALIASPROP, (req, rsp, h) -> {
-      Map<String, Object> params = copy(req.getParams().required(), null, NAME);
-
-      // Note: success/no-op in the event of no properties supplied is intentional. Keeps code simple and one less case
-      // for api-callers to check for.
-      return convertPrefixToMap(req.getParams(), params, "property");
-    }),
-
-    /**
-     * List the aliases and associated properties.
-     */
+    ALIASPROP_OP(
+        ALIASPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> params = copy(req.getParams().required(), null, NAME);
+
+          // Note: success/no-op in the event of no properties supplied is intentional. Keeps code
+          // simple and one less case
+          // for api-callers to check for.
+          return convertPrefixToMap(req.getParams(), params, "property");
+        }),
+
+    /** List the aliases and associated properties. */
     @SuppressWarnings({"unchecked"})
-    LISTALIASES_OP(LISTALIASES, (req, rsp, h) -> {
-      ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
-      // if someone calls listAliases, lets ensure we return an up to date response
-      zkStateReader.aliasesManager.update();
-      Aliases aliases = zkStateReader.getAliases();
-      if (aliases != null) {
-        // the aliases themselves...
-        rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
-        // Any properties for the above aliases.
-        Map<String, Map<String, String>> meta = new LinkedHashMap<>();
-        for (String alias : aliases.getCollectionAliasListMap().keySet()) {
-          Map<String, String> collectionAliasProperties = aliases.getCollectionAliasProperties(alias);
-          if (!collectionAliasProperties.isEmpty()) {
-            meta.put(alias, collectionAliasProperties);
+    LISTALIASES_OP(
+        LISTALIASES,
+        (req, rsp, h) -> {
+          ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
+          // if someone calls listAliases, lets ensure we return an up to date response
+          zkStateReader.aliasesManager.update();
+          Aliases aliases = zkStateReader.getAliases();
+          if (aliases != null) {
+            // the aliases themselves...
+            rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
+            // Any properties for the above aliases.
+            Map<String, Map<String, String>> meta = new LinkedHashMap<>();
+            for (String alias : aliases.getCollectionAliasListMap().keySet()) {
+              Map<String, String> collectionAliasProperties =
+                  aliases.getCollectionAliasProperties(alias);
+              if (!collectionAliasProperties.isEmpty()) {
+                meta.put(alias, collectionAliasProperties);
+              }
+            }
+            rsp.getValues().add("properties", meta);
+          }
+          return null;
+        }),
+    SPLITSHARD_OP(
+        SPLITSHARD,
+        DEFAULT_COLLECTION_OP_TIMEOUT * 5,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(COLLECTION_PROP);
+          // TODO : add support for multiple shards
+          String shard = req.getParams().get(SHARD_ID_PROP);
+          String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
+          String splitKey = req.getParams().get("split.key");
+          String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
+          String fuzz = req.getParams().get(SPLIT_FUZZ);
+
+          if (splitKey == null && shard == null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
+          }
+          if (splitKey != null && shard != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'shard' or 'split.key' should be specified");
+          }
+          if (splitKey != null && rangesStr != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'ranges' or 'split.key' should be specified");
+          }
+          if (numSubShards != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "numSubShards can not be specified with split.key or ranges parameters");
+          }
+          if (fuzz != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "fuzz can not be specified with split.key or ranges parameters");
           }
-        }
-        rsp.getValues().add("properties", meta);
-      }
-      return null;
-    }),
-    SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, (req, rsp, h) -> {
-      String name = req.getParams().required().get(COLLECTION_PROP);
-      // TODO : add support for multiple shards
-      String shard = req.getParams().get(SHARD_ID_PROP);
-      String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
-      String splitKey = req.getParams().get("split.key");
-      String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
-      String fuzz = req.getParams().get(SPLIT_FUZZ);
-
-      if (splitKey == null && shard == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
-      }
-      if (splitKey != null && shard != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'shard' or 'split.key' should be specified");
-      }
-      if (splitKey != null && rangesStr != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'ranges' or 'split.key' should be specified");
-      }
-      if (numSubShards != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "numSubShards can not be specified with split.key or ranges parameters");
-      }
-      if (fuzz != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "fuzz can not be specified with split.key or ranges parameters");
-      }
 
-      Map<String, Object> map = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          "split.key",
-          CoreAdminParams.RANGES,
-          WAIT_FOR_FINAL_STATE,
-          TIMING,
-          SPLIT_METHOD,
-          NUM_SUB_SHARDS,
-          SPLIT_FUZZ,
-          SPLIT_BY_PREFIX,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          FOLLOW_ALIASES);
-      return map;
-    }),
-    FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> {
-      forceLeaderElection(req, h);
-      return null;
-    }),
-    CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
-      boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      if (!ImplicitDocRouter.NAME.equals(((Map<?,?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
-      copy(req.getParams(), map,
-          REPLICATION_FACTOR,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          WAIT_FOR_FINAL_STATE,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP);
-
-      return copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          COUNT_PROP, REPLICA_PROP,
-          SHARD_ID_PROP,
-          ONLY_IF_DOWN,
-          FOLLOW_ALIASES);
-    }),
-    MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, "split.key", "target.collection");
-      return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
-    }),
-    ADDROLE_OP(ADDROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> {
-      String name = req.getParams().required().get(NAME);
-      String val = req.getParams().get(VALUE_LONG);
-      ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setClusterProperty(name, val);
-      return null;
-    }),
-    COLLECTIONPROP_OP(COLLECTIONPROP, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get(NAME);
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String name = req.getParams().required().get(PROPERTY_NAME);
-      String val = req.getParams().get(PROPERTY_VALUE);
-      CollectionProperties cp = new CollectionProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setCollectionProperty(collection, name, val);
-      return null;
-    }),
+          Map<String, Object> map =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  SHARD_ID_PROP,
+                  "split.key",
+                  CoreAdminParams.RANGES,
+                  WAIT_FOR_FINAL_STATE,
+                  TIMING,
+                  SPLIT_METHOD,
+                  NUM_SUB_SHARDS,
+                  SPLIT_FUZZ,
+                  SPLIT_BY_PREFIX,
+                  FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETESHARD_OP(
+        DELETESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              FOLLOW_ALIASES);
+          return map;
+        }),
+    FORCELEADER_OP(
+        FORCELEADER,
+        (req, rsp, h) -> {
+          forceLeaderElection(req, h);
+          return null;
+        }),
+    CREATESHARD_OP(
+        CREATESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          final String newShardName =
+              SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
+          boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          if (!ImplicitDocRouter.NAME.equals(
+              ((Map<?, ?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
+          copy(
+              req.getParams(),
+              map,
+              REPLICATION_FACTOR,
+              NRT_REPLICAS,
+              TLOG_REPLICAS,
+              PULL_REPLICAS,
+              CREATE_NODE_SET,
+              WAIT_FOR_FINAL_STATE,
+              FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETEREPLICA_OP(
+        DELETEREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP);
+
+          return copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              COUNT_PROP,
+              REPLICA_PROP,
+              SHARD_ID_PROP,
+              ONLY_IF_DOWN,
+              FOLLOW_ALIASES);
+        }),
+    MIGRATE_OP(
+        MIGRATE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  "split.key",
+                  "target.collection");
+          return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
+        }),
+    ADDROLE_OP(
+        ADDROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    REMOVEROLE_OP(
+        REMOVEROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    CLUSTERPROP_OP(
+        CLUSTERPROP,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(NAME);
+          String val = req.getParams().get(VALUE_LONG);
+          ClusterProperties cp =
+              new ClusterProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setClusterProperty(name, val);
+          return null;
+        }),
+    COLLECTIONPROP_OP(
+        COLLECTIONPROP,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get(NAME);
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String name = req.getParams().required().get(PROPERTY_NAME);
+          String val = req.getParams().get(PROPERTY_VALUE);
+          CollectionProperties cp =
+              new CollectionProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setCollectionProperty(collection, name, val);
+          return null;
+        }),
     @SuppressWarnings({"unchecked"})
-    REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> {
-      req.getParams().required().check(REQUESTID);
-
-      final CoreContainer coreContainer = h.coreContainer;
-      final String requestId = req.getParams().get(REQUESTID);
-      final ZkController zkController = coreContainer.getZkController();
-
-      final NamedList<Object> status = new NamedList<>();
-      if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-        if (zkController.getOverseerCompletedMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, COMPLETED, "found [" + requestId + "] in completed tasks");
-        } else if (zkController.getOverseerFailureMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
-        } else if (zkController.getOverseerRunningMap().contains(requestId)) {
-          addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
-        } else if (h.overseerCollectionQueueContains(requestId)) {
-          addStatusToResponse(status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
-        } else {
-          addStatusToResponse(status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
-        }
-      } else {
-        Pair<RequestStatusState, OverseerSolrResponse> sr =
-            coreContainer.getDistributedCollectionCommandRunner().get().getAsyncTaskRequestStatus(requestId);
-        final String message;
-        switch (sr.first()) {
-          case COMPLETED:
-            message = "found [" + requestId + "] in completed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case FAILED:
-            message = "found [" + requestId + "] in failed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case RUNNING:
-            message = "found [" + requestId + "] in running tasks";
-            break;
-          case SUBMITTED:
-            message = "found [" + requestId + "] in submitted tasks";
-            break;
-          default:
-            message = "Did not find [" + requestId + "] in any tasks queue";
-        }
-        addStatusToResponse(status, sr.first(), message);
-      }
-
-      rsp.getValues().addAll(status);
-      return null;
-    }),
-    DELETESTATUS_OP(DELETESTATUS, new CollectionOp() {
-      @SuppressWarnings("unchecked")
-      @Override
-      public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
-        final CoreContainer coreContainer = h.coreContainer;
-        final String requestId = req.getParams().get(REQUESTID);
-        final ZkController zkController = coreContainer.getZkController();
-        Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
-
-        if (requestId == null && !flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
-        }
-
-        if (requestId != null && flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Both requestid and flush parameters can not be specified together.");
-        }
-
-        if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-          if (flush) {
-            Collection<String> completed = zkController.getOverseerCompletedMap().keys();
-            Collection<String> failed = zkController.getOverseerFailureMap().keys();
-            for (String asyncId : completed) {
-              zkController.getOverseerCompletedMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
-            }
-            for (String asyncId : failed) {
-              zkController.getOverseerFailureMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
+    REQUESTSTATUS_OP(
+        REQUESTSTATUS,
+        (req, rsp, h) -> {
+          req.getParams().required().check(REQUESTID);
+
+          final CoreContainer coreContainer = h.coreContainer;
+          final String requestId = req.getParams().get(REQUESTID);
+          final ZkController zkController = coreContainer.getZkController();
+
+          final NamedList<Object> status = new NamedList<>();
+          if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+            if (zkController.getOverseerCompletedMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(
+                  status, COMPLETED, "found [" + requestId + "] in completed tasks");
+            } else if (zkController.getOverseerFailureMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
+            } else if (zkController.getOverseerRunningMap().contains(requestId)) {
+              addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
+            } else if (h.overseerCollectionQueueContains(requestId)) {
+              addStatusToResponse(
+                  status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
+            } else {
+              addStatusToResponse(
+                  status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
             }
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
           } else {
-            // Request to cleanup
-            if (zkController.getOverseerCompletedMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else if (zkController.getOverseerFailureMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
-              // Don't call zkController.clearAsyncId for this, since it could be a running/pending task
+            Pair<RequestStatusState, OverseerSolrResponse> sr =
+                coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .getAsyncTaskRequestStatus(requestId);
+            final String message;
+            switch (sr.first()) {
+              case COMPLETED:
+                message = "found [" + requestId + "] in completed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case FAILED:
+                message = "found [" + requestId + "] in failed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case RUNNING:
+                message = "found [" + requestId + "] in running tasks";
+                break;
+              case SUBMITTED:
+                message = "found [" + requestId + "] in submitted tasks";
+                break;
+              default:
+                message = "Did not find [" + requestId + "] in any tasks queue";
             }
+            addStatusToResponse(status, sr.first(), message);
           }
-        } else {
-          if (flush) {
-            coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
-          } else {
-            if (coreContainer.getDistributedCollectionCommandRunner().get().deleteSingleAsyncId(requestId)) {
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
+
+          rsp.getValues().addAll(status);
+          return null;
+        }),
+    DELETESTATUS_OP(
+        DELETESTATUS,
+        new CollectionOp() {
+          @SuppressWarnings("unchecked")
+          @Override
+          public Map<String, Object> execute(
+              SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
+            final CoreContainer coreContainer = h.coreContainer;
+            final String requestId = req.getParams().get(REQUESTID);
+            final ZkController zkController = coreContainer.getZkController();
+            Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
+
+            if (requestId == null && !flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
+            }
+
+            if (requestId != null && flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Both requestid and flush parameters can not be specified together.");
+            }
+
+            if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+              if (flush) {
+                Collection<String> completed = zkController.getOverseerCompletedMap().keys();
+                Collection<String> failed = zkController.getOverseerFailureMap().keys();
+                for (String asyncId : completed) {
+                  zkController.getOverseerCompletedMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                for (String asyncId : failed) {
+                  zkController.getOverseerFailureMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                // Request to cleanup
+                if (zkController.getOverseerCompletedMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else if (zkController.getOverseerFailureMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                  // Don't call zkController.clearAsyncId for this, since it could be a
+                  // running/pending task
+                }
+              }
             } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
+              if (flush) {
+                coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                if (coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .deleteSingleAsyncId(requestId)) {
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                }
+              }
             }
+            return null;
           }
-        }
-        return null;
-      }
-    }),
-    ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          "node",
-          SHARD_ID_PROP,
-          _ROUTE_,
-          CoreAdminParams.NAME,
-          INSTANCE_DIR,
-          DATA_DIR,
-          ULOG_DIR,
-          REPLICA_TYPE,
-          WAIT_FOR_FINAL_STATE,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          FOLLOW_ALIASES,
-          SKIP_NODE_ASSIGNMENT);
-      return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-    }),
+        }),
+    ADDREPLICA_OP(
+        ADDREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  "node",
+                  SHARD_ID_PROP,
+                  _ROUTE_,
+                  CoreAdminParams.NAME,
+                  INSTANCE_DIR,
+                  DATA_DIR,
+                  ULOG_DIR,
+                  REPLICA_TYPE,
+                  WAIT_FOR_FINAL_STATE,
+                  NRT_REPLICAS,
+                  TLOG_REPLICAS,
+                  PULL_REPLICAS,
+                  CREATE_NODE_SET,
+                  FOLLOW_ALIASES,
+                  SKIP_NODE_ASSIGNMENT);
+          return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+        }),
     OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> new LinkedHashMap<>()),
     @SuppressWarnings({"unchecked"})
-    DISTRIBUTEDAPIPROCESSING_OP(DISTRIBUTEDAPIPROCESSING, (req, rsp, h)  -> {
-      NamedList<Object> results = new NamedList<>();
-      boolean isDistributedApi = h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
-      results.add("isDistributedApi", isDistributedApi);
-      rsp.getValues().addAll(results);
-      return null;
-    }),
-    /**
-     * Handle list collection request.
-     * Do list collection request to zk host
-     */
+    DISTRIBUTEDAPIPROCESSING_OP(
+        DISTRIBUTEDAPIPROCESSING,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          boolean isDistributedApi =
+              h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
+          results.add("isDistributedApi", isDistributedApi);
+          rsp.getValues().addAll(results);
+          return null;
+        }),
+    /** Handle list collection request. Do list collection request to zk host */
     @SuppressWarnings({"unchecked"})
-    LIST_OP(LIST, (req, rsp, h) -> {
-      NamedList<Object> results = new NamedList<>();
-      Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
-      List<String> collectionList = new ArrayList<>(collections.keySet());
-      Collections.sort(collectionList);
-      // XXX should we add aliases here?
-      results.add("collections", collectionList);
-      SolrResponse response = new OverseerSolrResponse(results);
-      rsp.getValues().addAll(response.getResponse());
-      return null;
-    }),
+    LIST_OP(
+        LIST,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          Map<String, DocCollection> collections =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getClusterState()
+                  .getCollectionsMap();
+          List<String> collectionList = new ArrayList<>(collections.keySet());
+          Collections.sort(collectionList);
+          // XXX should we add aliases here?
+          results.add("collections", collectionList);
+          SolrResponse response = new OverseerSolrResponse(results);
+          rsp.getValues().addAll(response.getResponse());
+          return null;
+        }),
     /**
-     * Handle cluster status request.
-     * Can return status per specific collection/shard or per all collections.
+     * Handle cluster status request. Can return status per specific collection/shard or per all
+     * collections.
      */
-    CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> {
-      Map<String, Object> all = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          _ROUTE_);
-      new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(),
-          new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
-      return null;
-    }),
-    ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP,
-          PROPERTY_VALUE_PROP);
-      copy(req.getParams(), map, SHARD_UNIQUE);
-      String property = (String) map.get(PROPERTY_PROP);
-      if (!property.startsWith(PROPERTY_PREFIX)) {
-        property = PROPERTY_PREFIX + property;
-      }
+    CLUSTERSTATUS_OP(
+        CLUSTERSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> all =
+              copy(req.getParams(), null, COLLECTION_PROP, SHARD_ID_PROP, _ROUTE_);
+          new ClusterStatus(
+                  h.coreContainer.getZkController().getZkStateReader(), new ZkNodeProps(all))
+              .getClusterStatus(rsp.getValues());
+          return null;
+        }),
+    ADDREPLICAPROP_OP(
+        ADDREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP,
+                  PROPERTY_VALUE_PROP);
+          copy(req.getParams(), map, SHARD_UNIQUE);
+          String property = (String) map.get(PROPERTY_PROP);
+          if (!property.startsWith(PROPERTY_PREFIX)) {
+            property = PROPERTY_PREFIX + property;
+          }
 
-      boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
-
-      // Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
-      // in a slice on properties that are known to only be one-per-slice and error out if so.
-      if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
-          SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
-          uniquePerSlice == false) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Overseer replica property command received for property " + property +
-                " with the " + SHARD_UNIQUE +
-                " parameter set to something other than 'true'. No action taken.");
-      }
-      return map;
-    }),
+          boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
+
+          // Check if we're trying to set a property with parameters that allow us to set the
+          // property on multiple replicas
+          // in a slice on properties that are known to only be one-per-slice and error out if so.

Review comment:
       Fix this comment wrapping - the sentence got split mid-phrase by the reformat ("...set the / property on multiple replicas / in a slice..."); please reflow it so it reads as one sentence.
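       Something along these lines might work (just a sketch of the intended wrapping, not actually run through spotless):

           // Check if we're trying to set a property with parameters that allow us to set the
           // property on multiple replicas in a slice on properties that are known to only be
           // one-per-slice, and error out if so.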

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -2631,9 +2864,12 @@ public void execute(SolrRequestHandler handler, SolrQueryRequest req, SolrQueryR
       requestLog.debug(rsp.getToLogAsString());
     }
 
-    // TODO: this doesn't seem to be working correctly and causes problems with the example server and distrib (for example /spell)
-    // if (req.getParams().getBool(ShardParams.IS_SHARD,false) && !(handler instanceof SearchHandler))
-    //   throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"isShard is only acceptable with search handlers");
+    // TODO: this doesn't seem to be working correctly and causes problems with the example server
+    // and distrib (for example /spell)
+    // if (req.getParams().getBool(ShardParams.IS_SHARD,false) && !(handler instanceof
+    // SearchHandler))
+    //   throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"isShard is only acceptable
+    // with search handlers");

Review comment:
       Fix this - the commented-out code got wrapped mid-expression, which makes it hard to read and hard to uncomment later.
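       One option (a sketch only, assuming the goal is to keep the commented-out code copy-pasteable) is to reflow just the prose part of the TODO and leave each commented-out statement on a single line, even if that exceeds the line-length limit:

           // TODO: this doesn't seem to be working correctly and causes problems with the example
           // server and distrib (for example /spell)
           // if (req.getParams().getBool(ShardParams.IS_SHARD,false) && !(handler instanceof SearchHandler))
           //   throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"isShard is only acceptable with search handlers");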

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -388,982 +426,1324 @@ public Category getCategory() {
     return Category.ADMIN;
   }
 
-  private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
+  private static void createSysConfigSet(CoreContainer coreContainer)
+      throws KeeperException, InterruptedException {
     SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
     ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
     cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
-    cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
+    cmdExecutor.ensureExists(
+        ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
 
     try {
-      String path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
-      byte[] data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
+      String path =
+          ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
+      byte[] data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
-      path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/solrconfig.xml";
-      data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
+      path =
+          ZkStateReader.CONFIGS_ZKNODE
+              + "/"
+              + CollectionAdminParams.SYSTEM_COLL
+              + "/solrconfig.xml";
+      data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
     } catch (IOException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
-
-
   }
 
-  private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
+  private static void addStatusToResponse(
+      NamedList<Object> results, RequestStatusState state, String msg) {
     SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
     status.add("state", state.getKey());
     status.add("msg", msg);
     results.add("status", status);
   }
 
   public enum CollectionOperation implements CollectionOp {
-    CREATE_OP(CREATE, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams().required(), null, NAME);
-      props.put("fromApi", "true");
-      copy(req.getParams(), props,
-          REPLICATION_FACTOR,
-          COLL_CONF,
-          NUM_SLICES,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          SHARDS_PROP,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          NRT_REPLICAS,
-          WAIT_FOR_FINAL_STATE,
-          PER_REPLICA_STATE,
-          ALIAS);
-
-      if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
-        //TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for more details
-        int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
-        int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
-        if (replicationFactor != nrtReplicas) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
-        }
-      }
-      if (props.get(REPLICATION_FACTOR) != null) {
-        props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
-      } else if (props.get(NRT_REPLICAS) != null) {
-        props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
-      }
-
-      final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
-      final String shardsParam = (String) props.get(SHARDS_PROP);
-      if (StringUtils.isNotEmpty(shardsParam)) {
-        verifyShardsParam(shardsParam);
-      }
-      if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
-        //We must always create a .system collection with only a single shard
-        props.put(NUM_SLICES, 1);
-        props.remove(SHARDS_PROP);
-        createSysConfigSet(h.coreContainer);
-
-      }
-      if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
-      for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
-        h.copyFromClusterProp(props, prop);
-      copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-      return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+    CREATE_OP(
+        CREATE,
+        (req, rsp, h) -> {
+          Map<String, Object> props = copy(req.getParams().required(), null, NAME);
+          props.put("fromApi", "true");
+          copy(
+              req.getParams(),
+              props,
+              REPLICATION_FACTOR,
+              COLL_CONF,
+              NUM_SLICES,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              SHARDS_PROP,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              NRT_REPLICAS,
+              WAIT_FOR_FINAL_STATE,
+              PER_REPLICA_STATE,
+              ALIAS);
+
+          if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
+            // TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for
+            // more details
+            int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
+            int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
+            if (replicationFactor != nrtReplicas) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
+            }
+          }
+          if (props.get(REPLICATION_FACTOR) != null) {
+            props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
+          } else if (props.get(NRT_REPLICAS) != null) {
+            props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
+          }
 
-    }),
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
+          final String shardsParam = (String) props.get(SHARDS_PROP);
+          if (StringUtils.isNotEmpty(shardsParam)) {
+            verifyShardsParam(shardsParam);
+          }
+          if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
+            // We must always create a .system collection with only a single shard
+            props.put(NUM_SLICES, 1);
+            props.remove(SHARDS_PROP);
+            createSysConfigSet(h.coreContainer);
+          }
+          if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
+          for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
+            h.copyFromClusterProp(props, prop);
+          copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+          return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+        }),
     @SuppressWarnings({"unchecked"})
-    COLSTATUS_OP(COLSTATUS, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          ColStatus.CORE_INFO_PROP,
-          ColStatus.SEGMENTS_PROP,
-          ColStatus.FIELD_INFO_PROP,
-          ColStatus.RAW_SIZE_PROP,
-          ColStatus.RAW_SIZE_SUMMARY_PROP,
-          ColStatus.RAW_SIZE_DETAILS_PROP,
-          ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
-          ColStatus.SIZE_INFO_PROP);
-
-      new ColStatus(h.coreContainer.getSolrClientCache(),
-          h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props))
-          .getColStatus(rsp.getValues());
-      return null;
-    }),
-    DELETE_OP(DELETE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
+    COLSTATUS_OP(
+        COLSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  ColStatus.CORE_INFO_PROP,
+                  ColStatus.SEGMENTS_PROP,
+                  ColStatus.FIELD_INFO_PROP,
+                  ColStatus.RAW_SIZE_PROP,
+                  ColStatus.RAW_SIZE_SUMMARY_PROP,
+                  ColStatus.RAW_SIZE_DETAILS_PROP,
+                  ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
+                  ColStatus.SIZE_INFO_PROP);
+
+          new ColStatus(
+                  h.coreContainer.getSolrClientCache(),
+                  h.coreContainer.getZkController().getZkStateReader().getClusterState(),
+                  new ZkNodeProps(props))
+              .getColStatus(rsp.getValues());
+          return null;
+        }),
+    DELETE_OP(
+        DELETE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
     // XXX should this command support followAliases?
-    RELOAD_OP(RELOAD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map);
-    }),
-
-    RENAME_OP(RENAME, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
-
-    REINDEXCOLLECTION_OP(REINDEXCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams().required(), null, NAME);
-      copy(req.getParams(), m,
-          ReindexCollectionCmd.COMMAND,
-          ReindexCollectionCmd.REMOVE_SOURCE,
-          ReindexCollectionCmd.TARGET,
-          ZkStateReader.CONFIGNAME_PROP,
-          NUM_SLICES,
-          NRT_REPLICAS,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          REPLICATION_FACTOR,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          "shards",
-          CommonParams.ROWS,
-          CommonParams.Q,
-          CommonParams.FL,
-          FOLLOW_ALIASES);
-      if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
-        m.put(ZkStateReader.CONFIGNAME_PROP, req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
-      }
-      copyPropertiesWithPrefix(req.getParams(), m, "router.");
-      return m;
-    }),
-
-    SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get("collection");
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String shard = req.getParams().required().get("shard");
-
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-
-      DocCollection docCollection = clusterState.getCollection(collection);
-      ZkNodeProps leaderProps = docCollection.getLeader(shard);
-      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-
-      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-        RequestSyncShard reqSyncShard = new RequestSyncShard();
-        reqSyncShard.setCollection(collection);
-        reqSyncShard.setShard(shard);
-        reqSyncShard.setCoreName(nodeProps.getCoreName());
-        client.request(reqSyncShard);
-      }
-      return null;
-    }),
-
-    CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
-      String alias = req.getParams().get(NAME);
-      SolrIdentifierValidator.validateAliasName(alias);
-      String collections = req.getParams().get("collections");
-      RoutedAlias routedAlias = null;
-      Exception ex = null;
-      HashMap<String,Object> possiblyModifiedParams = new HashMap<>();
-      try {
-        // note that RA specific validation occurs here.
-        req.getParams().toMap(possiblyModifiedParams);
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        // This is awful because RoutedAlias lies about what types it wants
-        Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
-        routedAlias = RoutedAlias.fromProps(alias, temp);
-      } catch (SolrException e) {
-        // we'll throw this later if we are in fact creating a routed alias.
-        ex = e;
-      }
-      ModifiableSolrParams finalParams = new ModifiableSolrParams();
-      for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
-        if (entry.getValue().getClass().isArray() ) {
-          // v2 api hits this case
-          for (Object o : (Object[]) entry.getValue()) {
-            finalParams.add(entry.getKey(),o.toString());
+    RELOAD_OP(
+        RELOAD,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map);
+        }),
+
+    RENAME_OP(
+        RENAME,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
+
+    REINDEXCOLLECTION_OP(
+        REINDEXCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m = copy(req.getParams().required(), null, NAME);
+          copy(
+              req.getParams(),
+              m,
+              ReindexCollectionCmd.COMMAND,
+              ReindexCollectionCmd.REMOVE_SOURCE,
+              ReindexCollectionCmd.TARGET,
+              ZkStateReader.CONFIGNAME_PROP,
+              NUM_SLICES,
+              NRT_REPLICAS,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              REPLICATION_FACTOR,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              "shards",
+              CommonParams.ROWS,
+              CommonParams.Q,
+              CommonParams.FL,
+              FOLLOW_ALIASES);
+          if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
+            m.put(
+                ZkStateReader.CONFIGNAME_PROP,
+                req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
+          }
+          copyPropertiesWithPrefix(req.getParams(), m, "router.");
+          return m;
+        }),
+
+    SYNCSHARD_OP(
+        SYNCSHARD,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get("collection");
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String shard = req.getParams().required().get("shard");
+
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+
+          DocCollection docCollection = clusterState.getCollection(collection);
+          ZkNodeProps leaderProps = docCollection.getLeader(shard);
+          ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
+
+          try (HttpSolrClient client =
+              new Builder(nodeProps.getBaseUrl())
+                  .withConnectionTimeout(15000)
+                  .withSocketTimeout(60000)
+                  .build()) {
+            RequestSyncShard reqSyncShard = new RequestSyncShard();
+            reqSyncShard.setCollection(collection);
+            reqSyncShard.setShard(shard);
+            reqSyncShard.setCoreName(nodeProps.getCoreName());
+            client.request(reqSyncShard);
+          }
+          return null;
+        }),
+
+    CREATEALIAS_OP(
+        CREATEALIAS,
+        (req, rsp, h) -> {
+          String alias = req.getParams().get(NAME);
+          SolrIdentifierValidator.validateAliasName(alias);
+          String collections = req.getParams().get("collections");
+          RoutedAlias routedAlias = null;
+          Exception ex = null;
+          HashMap<String, Object> possiblyModifiedParams = new HashMap<>();
+          try {
+            // note that RA specific validation occurs here.
+            req.getParams().toMap(possiblyModifiedParams);
+            @SuppressWarnings({"unchecked", "rawtypes"})
+            // This is awful because RoutedAlias lies about what types it wants
+            Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
+            routedAlias = RoutedAlias.fromProps(alias, temp);
+          } catch (SolrException e) {
+            // we'll throw this later if we are in fact creating a routed alias.
+            ex = e;
+          }
+          ModifiableSolrParams finalParams = new ModifiableSolrParams();
+          for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
+            if (entry.getValue().getClass().isArray()) {
+              // v2 api hits this case
+              for (Object o : (Object[]) entry.getValue()) {
+                finalParams.add(entry.getKey(), o.toString());
+              }
+            } else {
+              finalParams.add(entry.getKey(), entry.getValue().toString());
+            }
           }
-        } else {
-          finalParams.add(entry.getKey(),entry.getValue().toString());
-        }
-      }
 
-      if (collections != null) {
-        if (routedAlias != null) {
-          throw new SolrException(BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
-        } else {
-          //////////////////////////////////////
-          // Regular alias creation indicated //
-          //////////////////////////////////////
-          return copy(finalParams.required(), null, NAME, "collections");
-        }
-      }
+          if (collections != null) {
+            if (routedAlias != null) {
+              throw new SolrException(
+                  BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
+            } else {
+              //////////////////////////////////////
+              // Regular alias creation indicated //
+              //////////////////////////////////////
+              return copy(finalParams.required(), null, NAME, "collections");
+            }
+          }
 
-      /////////////////////////////////////////////////
-      // We are creating a routed alias from here on //
-      /////////////////////////////////////////////////
+          /////////////////////////////////////////////////
+          // We are creating a routed alias from here on //
+          /////////////////////////////////////////////////
 
-      // If our prior creation attempt had issues expose them now.
-      if (ex != null) {
-        throw ex;
-      }
+          // If our prior creation attempt had issues expose them now.
+          if (ex != null) {
+            throw ex;
+          }
 
-      // Now filter out just the parameters we care about from the request
-      assert routedAlias != null;
-      Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
-      copy(finalParams, result, routedAlias.getOptionalParams());
-
-      ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
-
-      // add to result params that start with "create-collection.".
-      //   Additionally, save these without the prefix to createCollParams
-      for (Map.Entry<String, String[]> entry : finalParams) {
-        final String p = entry.getKey();
-        if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
-          // This is what SolrParams#getAll(Map, Collection)} does
-          final String[] v = entry.getValue();
-          if (v.length == 1) {
-            result.put(p, v[0]);
-          } else {
-            result.put(p, v);
+          // Now filter out just the parameters we care about from the request
+          assert routedAlias != null;
+          Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
+          copy(finalParams, result, routedAlias.getOptionalParams());
+
+          ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
+
+          // add to result params that start with "create-collection.".
+          //   Additionally, save these without the prefix to createCollParams
+          for (Map.Entry<String, String[]> entry : finalParams) {
+            final String p = entry.getKey();
+            if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
+              // This is what SolrParams#getAll(Map, Collection)} does
+              final String[] v = entry.getValue();
+              if (v.length == 1) {
+                result.put(p, v[0]);
+              } else {
+                result.put(p, v);
+              }
+              createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
+            }
           }
-          createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
-        }
-      }
 
-      // Verify that the create-collection prefix'ed params appear to be valid.
-      if (createCollParams.get(NAME) != null) {
-        throw new SolrException(BAD_REQUEST, "routed aliases calculate names for their " +
-            "dependent collections, you cannot specify the name.");
-      }
-      if (createCollParams.get(COLL_CONF) == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "We require an explicit " + COLL_CONF);
-      }
-      // note: could insist on a config name here as well.... or wait to throw at overseer
-      createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
-      CREATE_OP.execute(new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
+          // Verify that the create-collection prefix'ed params appear to be valid.
+          if (createCollParams.get(NAME) != null) {
+            throw new SolrException(
+                BAD_REQUEST,
+                "routed aliases calculate names for their "
+                    + "dependent collections, you cannot specify the name.");
+          }
+          if (createCollParams.get(COLL_CONF) == null) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST, "We require an explicit " + COLL_CONF);
+          }
+          // note: could insist on a config name here as well.... or wait to throw at overseer
+          createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
+          CREATE_OP.execute(
+              new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
 
-      return result;
-    }),
+          return result;
+        }),
 
     DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
 
     /**
      * Change properties for an alias (use CREATEALIAS_OP to change the actual value of the alias)
      */
-    ALIASPROP_OP(ALIASPROP, (req, rsp, h) -> {
-      Map<String, Object> params = copy(req.getParams().required(), null, NAME);
-
-      // Note: success/no-op in the event of no properties supplied is intentional. Keeps code simple and one less case
-      // for api-callers to check for.
-      return convertPrefixToMap(req.getParams(), params, "property");
-    }),
-
-    /**
-     * List the aliases and associated properties.
-     */
+    ALIASPROP_OP(
+        ALIASPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> params = copy(req.getParams().required(), null, NAME);
+
+          // Note: success/no-op in the event of no properties supplied is intentional. Keeps code
+          // simple and one less case
+          // for api-callers to check for.
+          return convertPrefixToMap(req.getParams(), params, "property");
+        }),
+
+    /** List the aliases and associated properties. */
     @SuppressWarnings({"unchecked"})
-    LISTALIASES_OP(LISTALIASES, (req, rsp, h) -> {
-      ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
-      // if someone calls listAliases, lets ensure we return an up to date response
-      zkStateReader.aliasesManager.update();
-      Aliases aliases = zkStateReader.getAliases();
-      if (aliases != null) {
-        // the aliases themselves...
-        rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
-        // Any properties for the above aliases.
-        Map<String, Map<String, String>> meta = new LinkedHashMap<>();
-        for (String alias : aliases.getCollectionAliasListMap().keySet()) {
-          Map<String, String> collectionAliasProperties = aliases.getCollectionAliasProperties(alias);
-          if (!collectionAliasProperties.isEmpty()) {
-            meta.put(alias, collectionAliasProperties);
+    LISTALIASES_OP(
+        LISTALIASES,
+        (req, rsp, h) -> {
+          ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
+          // if someone calls listAliases, lets ensure we return an up to date response
+          zkStateReader.aliasesManager.update();
+          Aliases aliases = zkStateReader.getAliases();
+          if (aliases != null) {
+            // the aliases themselves...
+            rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
+            // Any properties for the above aliases.
+            Map<String, Map<String, String>> meta = new LinkedHashMap<>();
+            for (String alias : aliases.getCollectionAliasListMap().keySet()) {
+              Map<String, String> collectionAliasProperties =
+                  aliases.getCollectionAliasProperties(alias);
+              if (!collectionAliasProperties.isEmpty()) {
+                meta.put(alias, collectionAliasProperties);
+              }
+            }
+            rsp.getValues().add("properties", meta);
+          }
+          return null;
+        }),
+    SPLITSHARD_OP(
+        SPLITSHARD,
+        DEFAULT_COLLECTION_OP_TIMEOUT * 5,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(COLLECTION_PROP);
+          // TODO : add support for multiple shards
+          String shard = req.getParams().get(SHARD_ID_PROP);
+          String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
+          String splitKey = req.getParams().get("split.key");
+          String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
+          String fuzz = req.getParams().get(SPLIT_FUZZ);
+
+          if (splitKey == null && shard == null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
+          }
+          if (splitKey != null && shard != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'shard' or 'split.key' should be specified");
+          }
+          if (splitKey != null && rangesStr != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'ranges' or 'split.key' should be specified");
+          }
+          if (numSubShards != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "numSubShards can not be specified with split.key or ranges parameters");
+          }
+          if (fuzz != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "fuzz can not be specified with split.key or ranges parameters");
           }
-        }
-        rsp.getValues().add("properties", meta);
-      }
-      return null;
-    }),
-    SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, (req, rsp, h) -> {
-      String name = req.getParams().required().get(COLLECTION_PROP);
-      // TODO : add support for multiple shards
-      String shard = req.getParams().get(SHARD_ID_PROP);
-      String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
-      String splitKey = req.getParams().get("split.key");
-      String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
-      String fuzz = req.getParams().get(SPLIT_FUZZ);
-
-      if (splitKey == null && shard == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
-      }
-      if (splitKey != null && shard != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'shard' or 'split.key' should be specified");
-      }
-      if (splitKey != null && rangesStr != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'ranges' or 'split.key' should be specified");
-      }
-      if (numSubShards != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "numSubShards can not be specified with split.key or ranges parameters");
-      }
-      if (fuzz != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "fuzz can not be specified with split.key or ranges parameters");
-      }
 
-      Map<String, Object> map = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          "split.key",
-          CoreAdminParams.RANGES,
-          WAIT_FOR_FINAL_STATE,
-          TIMING,
-          SPLIT_METHOD,
-          NUM_SUB_SHARDS,
-          SPLIT_FUZZ,
-          SPLIT_BY_PREFIX,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          FOLLOW_ALIASES);
-      return map;
-    }),
-    FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> {
-      forceLeaderElection(req, h);
-      return null;
-    }),
-    CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
-      boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      if (!ImplicitDocRouter.NAME.equals(((Map<?,?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
-      copy(req.getParams(), map,
-          REPLICATION_FACTOR,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          WAIT_FOR_FINAL_STATE,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP);
-
-      return copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          COUNT_PROP, REPLICA_PROP,
-          SHARD_ID_PROP,
-          ONLY_IF_DOWN,
-          FOLLOW_ALIASES);
-    }),
-    MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, "split.key", "target.collection");
-      return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
-    }),
-    ADDROLE_OP(ADDROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> {
-      String name = req.getParams().required().get(NAME);
-      String val = req.getParams().get(VALUE_LONG);
-      ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setClusterProperty(name, val);
-      return null;
-    }),
-    COLLECTIONPROP_OP(COLLECTIONPROP, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get(NAME);
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String name = req.getParams().required().get(PROPERTY_NAME);
-      String val = req.getParams().get(PROPERTY_VALUE);
-      CollectionProperties cp = new CollectionProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setCollectionProperty(collection, name, val);
-      return null;
-    }),
+          Map<String, Object> map =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  SHARD_ID_PROP,
+                  "split.key",
+                  CoreAdminParams.RANGES,
+                  WAIT_FOR_FINAL_STATE,
+                  TIMING,
+                  SPLIT_METHOD,
+                  NUM_SUB_SHARDS,
+                  SPLIT_FUZZ,
+                  SPLIT_BY_PREFIX,
+                  FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETESHARD_OP(
+        DELETESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              FOLLOW_ALIASES);
+          return map;
+        }),
+    FORCELEADER_OP(
+        FORCELEADER,
+        (req, rsp, h) -> {
+          forceLeaderElection(req, h);
+          return null;
+        }),
+    CREATESHARD_OP(
+        CREATESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          final String newShardName =
+              SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
+          boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          if (!ImplicitDocRouter.NAME.equals(
+              ((Map<?, ?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
+          copy(
+              req.getParams(),
+              map,
+              REPLICATION_FACTOR,
+              NRT_REPLICAS,
+              TLOG_REPLICAS,
+              PULL_REPLICAS,
+              CREATE_NODE_SET,
+              WAIT_FOR_FINAL_STATE,
+              FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETEREPLICA_OP(
+        DELETEREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP);
+
+          return copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              COUNT_PROP,
+              REPLICA_PROP,
+              SHARD_ID_PROP,
+              ONLY_IF_DOWN,
+              FOLLOW_ALIASES);
+        }),
+    MIGRATE_OP(
+        MIGRATE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  "split.key",
+                  "target.collection");
+          return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
+        }),
+    ADDROLE_OP(
+        ADDROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    REMOVEROLE_OP(
+        REMOVEROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    CLUSTERPROP_OP(
+        CLUSTERPROP,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(NAME);
+          String val = req.getParams().get(VALUE_LONG);
+          ClusterProperties cp =
+              new ClusterProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setClusterProperty(name, val);
+          return null;
+        }),
+    COLLECTIONPROP_OP(
+        COLLECTIONPROP,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get(NAME);
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String name = req.getParams().required().get(PROPERTY_NAME);
+          String val = req.getParams().get(PROPERTY_VALUE);
+          CollectionProperties cp =
+              new CollectionProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setCollectionProperty(collection, name, val);
+          return null;
+        }),
     @SuppressWarnings({"unchecked"})
-    REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> {
-      req.getParams().required().check(REQUESTID);
-
-      final CoreContainer coreContainer = h.coreContainer;
-      final String requestId = req.getParams().get(REQUESTID);
-      final ZkController zkController = coreContainer.getZkController();
-
-      final NamedList<Object> status = new NamedList<>();
-      if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-        if (zkController.getOverseerCompletedMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, COMPLETED, "found [" + requestId + "] in completed tasks");
-        } else if (zkController.getOverseerFailureMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
-        } else if (zkController.getOverseerRunningMap().contains(requestId)) {
-          addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
-        } else if (h.overseerCollectionQueueContains(requestId)) {
-          addStatusToResponse(status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
-        } else {
-          addStatusToResponse(status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
-        }
-      } else {
-        Pair<RequestStatusState, OverseerSolrResponse> sr =
-            coreContainer.getDistributedCollectionCommandRunner().get().getAsyncTaskRequestStatus(requestId);
-        final String message;
-        switch (sr.first()) {
-          case COMPLETED:
-            message = "found [" + requestId + "] in completed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case FAILED:
-            message = "found [" + requestId + "] in failed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case RUNNING:
-            message = "found [" + requestId + "] in running tasks";
-            break;
-          case SUBMITTED:
-            message = "found [" + requestId + "] in submitted tasks";
-            break;
-          default:
-            message = "Did not find [" + requestId + "] in any tasks queue";
-        }
-        addStatusToResponse(status, sr.first(), message);
-      }
-
-      rsp.getValues().addAll(status);
-      return null;
-    }),
-    DELETESTATUS_OP(DELETESTATUS, new CollectionOp() {
-      @SuppressWarnings("unchecked")
-      @Override
-      public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
-        final CoreContainer coreContainer = h.coreContainer;
-        final String requestId = req.getParams().get(REQUESTID);
-        final ZkController zkController = coreContainer.getZkController();
-        Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
-
-        if (requestId == null && !flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
-        }
-
-        if (requestId != null && flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Both requestid and flush parameters can not be specified together.");
-        }
-
-        if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-          if (flush) {
-            Collection<String> completed = zkController.getOverseerCompletedMap().keys();
-            Collection<String> failed = zkController.getOverseerFailureMap().keys();
-            for (String asyncId : completed) {
-              zkController.getOverseerCompletedMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
-            }
-            for (String asyncId : failed) {
-              zkController.getOverseerFailureMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
+    REQUESTSTATUS_OP(
+        REQUESTSTATUS,
+        (req, rsp, h) -> {
+          req.getParams().required().check(REQUESTID);
+
+          final CoreContainer coreContainer = h.coreContainer;
+          final String requestId = req.getParams().get(REQUESTID);
+          final ZkController zkController = coreContainer.getZkController();
+
+          final NamedList<Object> status = new NamedList<>();
+          if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+            if (zkController.getOverseerCompletedMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(
+                  status, COMPLETED, "found [" + requestId + "] in completed tasks");
+            } else if (zkController.getOverseerFailureMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
+            } else if (zkController.getOverseerRunningMap().contains(requestId)) {
+              addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
+            } else if (h.overseerCollectionQueueContains(requestId)) {
+              addStatusToResponse(
+                  status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
+            } else {
+              addStatusToResponse(
+                  status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
             }
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
           } else {
-            // Request to cleanup
-            if (zkController.getOverseerCompletedMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else if (zkController.getOverseerFailureMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
-              // Don't call zkController.clearAsyncId for this, since it could be a running/pending task
+            Pair<RequestStatusState, OverseerSolrResponse> sr =
+                coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .getAsyncTaskRequestStatus(requestId);
+            final String message;
+            switch (sr.first()) {
+              case COMPLETED:
+                message = "found [" + requestId + "] in completed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case FAILED:
+                message = "found [" + requestId + "] in failed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case RUNNING:
+                message = "found [" + requestId + "] in running tasks";
+                break;
+              case SUBMITTED:
+                message = "found [" + requestId + "] in submitted tasks";
+                break;
+              default:
+                message = "Did not find [" + requestId + "] in any tasks queue";
             }
+            addStatusToResponse(status, sr.first(), message);
           }
-        } else {
-          if (flush) {
-            coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
-          } else {
-            if (coreContainer.getDistributedCollectionCommandRunner().get().deleteSingleAsyncId(requestId)) {
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
+
+          rsp.getValues().addAll(status);
+          return null;
+        }),
+    DELETESTATUS_OP(
+        DELETESTATUS,
+        new CollectionOp() {
+          @SuppressWarnings("unchecked")
+          @Override
+          public Map<String, Object> execute(
+              SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
+            final CoreContainer coreContainer = h.coreContainer;
+            final String requestId = req.getParams().get(REQUESTID);
+            final ZkController zkController = coreContainer.getZkController();
+            Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
+
+            if (requestId == null && !flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
+            }
+
+            if (requestId != null && flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Both requestid and flush parameters can not be specified together.");
+            }
+
+            if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+              if (flush) {
+                Collection<String> completed = zkController.getOverseerCompletedMap().keys();
+                Collection<String> failed = zkController.getOverseerFailureMap().keys();
+                for (String asyncId : completed) {
+                  zkController.getOverseerCompletedMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                for (String asyncId : failed) {
+                  zkController.getOverseerFailureMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                // Request to cleanup
+                if (zkController.getOverseerCompletedMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else if (zkController.getOverseerFailureMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                  // Don't call zkController.clearAsyncId for this, since it could be a
+                  // running/pending task
+                }
+              }
             } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
+              if (flush) {
+                coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                if (coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .deleteSingleAsyncId(requestId)) {
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                }
+              }
             }
+            return null;
           }
-        }
-        return null;
-      }
-    }),
-    ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          "node",
-          SHARD_ID_PROP,
-          _ROUTE_,
-          CoreAdminParams.NAME,
-          INSTANCE_DIR,
-          DATA_DIR,
-          ULOG_DIR,
-          REPLICA_TYPE,
-          WAIT_FOR_FINAL_STATE,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          FOLLOW_ALIASES,
-          SKIP_NODE_ASSIGNMENT);
-      return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-    }),
+        }),
+    ADDREPLICA_OP(
+        ADDREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  "node",
+                  SHARD_ID_PROP,
+                  _ROUTE_,
+                  CoreAdminParams.NAME,
+                  INSTANCE_DIR,
+                  DATA_DIR,
+                  ULOG_DIR,
+                  REPLICA_TYPE,
+                  WAIT_FOR_FINAL_STATE,
+                  NRT_REPLICAS,
+                  TLOG_REPLICAS,
+                  PULL_REPLICAS,
+                  CREATE_NODE_SET,
+                  FOLLOW_ALIASES,
+                  SKIP_NODE_ASSIGNMENT);
+          return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+        }),
     OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> new LinkedHashMap<>()),
     @SuppressWarnings({"unchecked"})
-    DISTRIBUTEDAPIPROCESSING_OP(DISTRIBUTEDAPIPROCESSING, (req, rsp, h)  -> {
-      NamedList<Object> results = new NamedList<>();
-      boolean isDistributedApi = h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
-      results.add("isDistributedApi", isDistributedApi);
-      rsp.getValues().addAll(results);
-      return null;
-    }),
-    /**
-     * Handle list collection request.
-     * Do list collection request to zk host
-     */
+    DISTRIBUTEDAPIPROCESSING_OP(
+        DISTRIBUTEDAPIPROCESSING,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          boolean isDistributedApi =
+              h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
+          results.add("isDistributedApi", isDistributedApi);
+          rsp.getValues().addAll(results);
+          return null;
+        }),
+    /** Handle list collection request. Do list collection request to zk host */
     @SuppressWarnings({"unchecked"})
-    LIST_OP(LIST, (req, rsp, h) -> {
-      NamedList<Object> results = new NamedList<>();
-      Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
-      List<String> collectionList = new ArrayList<>(collections.keySet());
-      Collections.sort(collectionList);
-      // XXX should we add aliases here?
-      results.add("collections", collectionList);
-      SolrResponse response = new OverseerSolrResponse(results);
-      rsp.getValues().addAll(response.getResponse());
-      return null;
-    }),
+    LIST_OP(
+        LIST,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          Map<String, DocCollection> collections =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getClusterState()
+                  .getCollectionsMap();
+          List<String> collectionList = new ArrayList<>(collections.keySet());
+          Collections.sort(collectionList);
+          // XXX should we add aliases here?
+          results.add("collections", collectionList);
+          SolrResponse response = new OverseerSolrResponse(results);
+          rsp.getValues().addAll(response.getResponse());
+          return null;
+        }),
     /**
-     * Handle cluster status request.
-     * Can return status per specific collection/shard or per all collections.
+     * Handle cluster status request. Can return status per specific collection/shard or per all
+     * collections.
      */
-    CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> {
-      Map<String, Object> all = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          _ROUTE_);
-      new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(),
-          new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
-      return null;
-    }),
-    ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP,
-          PROPERTY_VALUE_PROP);
-      copy(req.getParams(), map, SHARD_UNIQUE);
-      String property = (String) map.get(PROPERTY_PROP);
-      if (!property.startsWith(PROPERTY_PREFIX)) {
-        property = PROPERTY_PREFIX + property;
-      }
+    CLUSTERSTATUS_OP(
+        CLUSTERSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> all =
+              copy(req.getParams(), null, COLLECTION_PROP, SHARD_ID_PROP, _ROUTE_);
+          new ClusterStatus(
+                  h.coreContainer.getZkController().getZkStateReader(), new ZkNodeProps(all))
+              .getClusterStatus(rsp.getValues());
+          return null;
+        }),
+    ADDREPLICAPROP_OP(
+        ADDREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP,
+                  PROPERTY_VALUE_PROP);
+          copy(req.getParams(), map, SHARD_UNIQUE);
+          String property = (String) map.get(PROPERTY_PROP);
+          if (!property.startsWith(PROPERTY_PREFIX)) {
+            property = PROPERTY_PREFIX + property;
+          }
 
-      boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
-
-      // Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
-      // in a slice on properties that are known to only be one-per-slice and error out if so.
-      if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
-          SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
-          uniquePerSlice == false) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Overseer replica property command received for property " + property +
-                " with the " + SHARD_UNIQUE +
-                " parameter set to something other than 'true'. No action taken.");
-      }
-      return map;
-    }),
+          boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
+
+          // Check if we're trying to set a property with parameters that allow us to set the
+          // property on multiple replicas
+          // in a slice on properties that are known to only be one-per-slice and error out if so.
+          if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE))
+              && SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(
+                  property.toLowerCase(Locale.ROOT))
+              && uniquePerSlice == false) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Overseer replica property command received for property "
+                    + property
+                    + " with the "
+                    + SHARD_UNIQUE
+                    + " parameter set to something other than 'true'. No action taken.");
+          }
+          return map;
+        }),
     // XXX should this command support followAliases?
-    DELETEREPLICAPROP_OP(DELETEREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP);
-      return copy(req.getParams(), map, PROPERTY_PROP);
-    }),
+    DELETEREPLICAPROP_OP(
+        DELETEREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP);
+          return copy(req.getParams(), map, PROPERTY_PROP);
+        }),
     // XXX should this command support followAliases?
-    BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP);
-      Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
-      String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
-      if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
-        prop = PROPERTY_PREFIX + prop;
-      }
+    BALANCESHARDUNIQUE_OP(
+        BALANCESHARDUNIQUE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, PROPERTY_PROP);
+          Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
+          String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
+          if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
+            prop = PROPERTY_PREFIX + prop;
+          }
 
-      if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
-            + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
-            " Property: " + prop + " shardUnique: " + shardUnique);
-      }
+          if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Balancing properties amongst replicas in a slice requires that"
+                    + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. "
+                    + " Property: "
+                    + prop
+                    + " shardUnique: "
+                    + shardUnique);
+          }
 
-      return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
-    }),
-    REBALANCELEADERS_OP(REBALANCELEADERS, (req, rsp, h) -> {
-      new RebalanceLeaders(req, rsp, h).execute();
-      return null;
-    }),
+          return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
+        }),
+    REBALANCELEADERS_OP(
+        REBALANCELEADERS,
+        (req, rsp, h) -> {
+          new RebalanceLeaders(req, rsp, h).execute();
+          return null;
+        }),
     // XXX should this command support followAliases?
-    MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
-      copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
-      if (m.isEmpty()) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            formatString("no supported values provided {0}", CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
-      }
-      copy(req.getParams().required(), m, COLLECTION_PROP);
-      for (Map.Entry<String, Object> entry : m.entrySet()) {
-        String prop = entry.getKey();
-        if ("".equals(entry.getValue())) {
-          // set to an empty string is equivalent to removing the property, see SOLR-12507
-          entry.setValue(null);
-        }
-        DocCollection.verifyProp(m, prop);
-      }
-      if (m.get(REPLICATION_FACTOR) != null) {
-        m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
-      }
-      return m;
-    }),
-    BACKUP_OP(BACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
-
-      final String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      final String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-      }
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      BackupRepository repository = cc.newBackupRepository(repo);
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
-      boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
-
-      // Check if the specified location is valid for this repository.
-      final URI uri = repository.createDirectoryURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-      }
+    MODIFYCOLLECTION_OP(
+        MODIFYCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m =
+              copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
+          copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
+          if (m.isEmpty()) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                formatString(
+                    "no supported values provided {0}",
+                    CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
+          }
+          copy(req.getParams().required(), m, COLLECTION_PROP);
+          for (Map.Entry<String, Object> entry : m.entrySet()) {
+            String prop = entry.getKey();
+            if ("".equals(entry.getValue())) {
+              // set to an empty string is equivalent to removing the property, see SOLR-12507
+              entry.setValue(null);
+            }
+            DocCollection.verifyProp(m, prop);
+          }
+          if (m.get(REPLICATION_FACTOR) != null) {
+            m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
+          }
+          return m;
+        }),
+    BACKUP_OP(
+        BACKUP,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME, COLLECTION_PROP);
+
+          final String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          final String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          if (!clusterState.hasCollection(collectionName)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Collection '" + collectionName + "' does not exist, no action taken.");
+          }
 
-      String strategy = req.getParams().get(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-      if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
-      }
+          CoreContainer cc = h.coreContainer;
+          String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          BackupRepository repository = cc.newBackupRepository(repo);
 
-      Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP,
-              FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME, CoreAdminParams.MAX_NUM_BACKUP_POINTS);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      if (repo != null) {
-        params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-      }
+          String location =
+              repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
+          if (location == null) {
+            // Refresh the cluster property file to make sure the value set for location is the
+            // latest
+            // Check if the location is specified in the cluster property.

Review comment:
       Fix this
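
       One possible cleanup, offered only as a sketch and not part of the PR diff: merge the two comment lines so the wrap reads as a single sentence instead of a dangling "latest", e.g.

           if (location == null) {
             // Refresh the cluster property file to make sure the value set for location is the
             // latest, and check whether the location is specified as a cluster property.
             location =
                 new ClusterProperties(h.coreContainer.getZkController().getZkClient())
                     .getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);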

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -2548,30 +2777,33 @@ private void registerSearcher(RefCounted<SolrIndexSearcher> newSearcherHolder) {
     synchronized (searcherLock) {
       try {
         if (_searcher == newSearcherHolder) {
-          // trying to re-register the same searcher... this can now happen when a commit has been done but
+          // trying to re-register the same searcher... this can now happen when a commit has been
+          // done but
           // there were no changes to the index.
-          newSearcherHolder.decref();  // decref since the caller should have still incref'd (since they didn't know the searcher was the same)
-          return;  // still execute the finally block to notify anyone waiting.
+          newSearcherHolder
+              .decref(); // decref since the caller should have still incref'd (since they didn't
+          // know the searcher was the same)

Review comment:
       Fix this
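
       One way the wrap could be avoided, shown purely as a sketch: hoist the trailing comment above the call so the statement stays on a single line, e.g.

           // decref since the caller should have still incref'd
           // (since they didn't know the searcher was the same)
           newSearcherHolder.decref();
           return; // still execute the finally block to notify anyone waiting.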

##########
File path: solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java
##########
@@ -428,29 +451,32 @@ RollbackUpdateCommand parseRollback() throws IOException {
     void parseCommitOptions(CommitUpdateCommand cmd) throws IOException {
       assertNextEvent(JSONParser.OBJECT_START);
       @SuppressWarnings({"unchecked"})
-      final Map<String, Object> map = (Map<String,Object>) ObjectBuilder.getVal(parser);
+      final Map<String, Object> map = (Map<String, Object>) ObjectBuilder.getVal(parser);
 
       // SolrParams currently expects string values...
-      SolrParams p = new SolrParams() {
-        @Override
-        public String get(String param) {
-          Object o = map.get(param);
-          return o == null ? null : o.toString();
-        }
+      SolrParams p =
+          new SolrParams() {
+            @Override
+            public String get(String param) {
+              Object o = map.get(param);
+              return o == null ? null : o.toString();
+            }
 
-        @Override
-        public String[] getParams(String param) {
-          return new String[]{get(param)};
-        }
+            @Override
+            public String[] getParams(String param) {
+              return new String[] {get(param)};
+            }
 
-        @Override
-        public Iterator<String> getParameterNamesIterator() {
-          return map.keySet().iterator();
-        }
-      };
+            @Override
+            public Iterator<String> getParameterNamesIterator() {
+              return map.keySet().iterator();
+            }
+          };
 
       RequestHandlerUtils.validateCommitParams(p);
-      p = SolrParams.wrapDefaults(p, req.getParams());   // default to the normal request params for commit options
+      p =
+          SolrParams.wrapDefaults(
+              p, req.getParams()); // default to the normal request params for commit options

Review comment:
       Fix this
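
       A possible fix, as a sketch only: move the trailing comment above the assignment so the formatter has no reason to split the call, e.g.

       // default to the normal request params for commit options
       p = SolrParams.wrapDefaults(p, req.getParams());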

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -2548,30 +2777,33 @@ private void registerSearcher(RefCounted<SolrIndexSearcher> newSearcherHolder) {
     synchronized (searcherLock) {
       try {
         if (_searcher == newSearcherHolder) {
-          // trying to re-register the same searcher... this can now happen when a commit has been done but
+          // trying to re-register the same searcher... this can now happen when a commit has been
+          // done but
           // there were no changes to the index.

Review comment:
       Fix this
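
       A sketch of one possible cleanup (not what the PR currently contains): rewrap the whole comment as a block so no line ends mid-clause, e.g.

           // Trying to re-register the same searcher... this can now happen when a commit
           // has been done but there were no changes to the index.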

##########
File path: solr/core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java
##########
@@ -16,75 +16,73 @@
  */
 package org.apache.solr.handler.loader;
 
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
+import java.io.*;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+import org.apache.commons.io.IOUtils;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.params.UpdateParams;
-import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.common.util.ContentStream;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.internal.csv.CSVParser;
+import org.apache.solr.internal.csv.CSVStrategy;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.update.*;
 import org.apache.solr.update.processor.UpdateRequestProcessor;
-import org.apache.solr.internal.csv.CSVStrategy;
-import org.apache.solr.internal.csv.CSVParser;
-import org.apache.commons.io.IOUtils;
-
-import java.util.Map;
-import java.util.regex.Pattern;
-import java.util.List;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.io.*;
 
 public abstract class CSVLoaderBase extends ContentStreamLoader {
-  public static final String SEPARATOR="separator";
-  public static final String FIELDNAMES="fieldnames";
-  public static final String HEADER="header";
-  public static final String SKIP="skip";
-  public static final String SKIPLINES="skipLines";
-  public static final String MAP="map";
-  public static final String TRIM="trim";
-  public static final String EMPTY="keepEmpty";
-  public static final String SPLIT="split";
-  public static final String ENCAPSULATOR="encapsulator";
-  public static final String ESCAPE="escape";
-  public static final String OVERWRITE="overwrite";
+  public static final String SEPARATOR = "separator";
+  public static final String FIELDNAMES = "fieldnames";
+  public static final String HEADER = "header";
+  public static final String SKIP = "skip";
+  public static final String SKIPLINES = "skipLines";
+  public static final String MAP = "map";
+  public static final String TRIM = "trim";
+  public static final String EMPTY = "keepEmpty";
+  public static final String SPLIT = "split";
+  public static final String ENCAPSULATOR = "encapsulator";
+  public static final String ESCAPE = "escape";
+  public static final String OVERWRITE = "overwrite";
   public static final String LITERALS_PREFIX = "literal.";
   public static final String ROW_ID = "rowid";
   public static final String ROW_ID_OFFSET = "rowidOffset";
 
   private static Pattern colonSplit = Pattern.compile(":");
   private static Pattern commaSplit = Pattern.compile(",");
-  
+
   final SolrParams params;
   final CSVStrategy strategy;
   protected final UpdateRequestProcessor processor;
   // hashmap to save any literal fields and their values
-  HashMap <String, String> literals;
+  HashMap<String, String> literals;
 
   String[] fieldnames;
   CSVLoaderBase.FieldAdder[] adders;
 
-  String rowId = null;// if not null, add a special field by the name given with the line number/row id as the value
-  int rowIdOffset = 0; //add to line/rowid before creating the field
+  String rowId =
+      null; // if not null, add a special field by the name given with the line number/row id as the
+  // value

Review comment:
       Fix this
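
       One way to avoid the split trailing comment, shown only as a sketch: put the comments above the field declarations instead of after them, e.g.

   // If not null, add a special field by the given name, with the line number/row id as the value.
   String rowId = null;
   // Added to the line number/row id before creating the field.
   int rowIdOffset = 0;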

##########
File path: solr/core/src/java/org/apache/solr/core/SolrCore.java
##########
@@ -2491,45 +2718,47 @@ public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafRead
         }
 
         if (searchHolder != null) {
-          searchHolder.decref();      // decrement 1 for _searcher (searchHolder will never become _searcher now)
+          searchHolder
+              .decref(); // decrement 1 for _searcher (searchHolder will never become _searcher now)
           if (returnSearcher) {
-            searchHolder.decref();    // decrement 1 because we won't be returning the searcher to the user
+            searchHolder
+                .decref(); // decrement 1 because we won't be returning the searcher to the user
           }
         }
       }
 
       // we want to do this after we decrement onDeckSearchers so another thread
       // doesn't increment first and throw a false warning.
       openSearcherLock.unlock();
-
     }
-
   }
 
-
-  private RefCounted<SolrIndexSearcher> newHolder(SolrIndexSearcher newSearcher, final List<RefCounted<SolrIndexSearcher>> searcherList) {
-    RefCounted<SolrIndexSearcher> holder = new RefCounted<SolrIndexSearcher>(newSearcher) {
-      @Override
-      public void close() {
-        try {
-          synchronized (searcherLock) {
-            // it's possible for someone to get a reference via the _searchers queue
-            // and increment the refcount while RefCounted.close() is being called.
-            // we check the refcount again to see if this has happened and abort the close.
-            // This relies on the RefCounted class allowing close() to be called every
-            // time the counter hits zero.
-            if (refcount.get() > 0) return;
-            searcherList.remove(this);
+  private RefCounted<SolrIndexSearcher> newHolder(
+      SolrIndexSearcher newSearcher, final List<RefCounted<SolrIndexSearcher>> searcherList) {
+    RefCounted<SolrIndexSearcher> holder =
+        new RefCounted<SolrIndexSearcher>(newSearcher) {
+          @Override
+          public void close() {
+            try {
+              synchronized (searcherLock) {
+                // it's possible for someone to get a reference via the _searchers queue
+                // and increment the refcount while RefCounted.close() is being called.
+                // we check the refcount again to see if this has happened and abort the close.
+                // This relies on the RefCounted class allowing close() to be called every
+                // time the counter hits zero.
+                if (refcount.get() > 0) return;
+                searcherList.remove(this);
+              }
+              resource.close();
+            } catch (Exception e) {
+              // do not allow decref() operations to fail since they are typically called in finally
+              // blocks
+              // and throwing another exception would be very unexpected.

Review comment:
       Fix this
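
       A possible rewording, purely as a sketch: rewrap the comment so "blocks" is not stranded on its own line, e.g.

             } catch (Exception e) {
               // Do not allow decref() operations to fail, since they are typically called in
               // finally blocks and throwing another exception would be very unexpected.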

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -388,982 +426,1324 @@ public Category getCategory() {
     return Category.ADMIN;
   }
 
-  private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
+  private static void createSysConfigSet(CoreContainer coreContainer)
+      throws KeeperException, InterruptedException {
     SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
     ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
     cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
-    cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
+    cmdExecutor.ensureExists(
+        ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
 
     try {
-      String path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
-      byte[] data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
+      String path =
+          ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
+      byte[] data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
-      path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/solrconfig.xml";
-      data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
+      path =
+          ZkStateReader.CONFIGS_ZKNODE
+              + "/"
+              + CollectionAdminParams.SYSTEM_COLL
+              + "/solrconfig.xml";
+      data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
     } catch (IOException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
-
-
   }
 
-  private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
+  private static void addStatusToResponse(
+      NamedList<Object> results, RequestStatusState state, String msg) {
     SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
     status.add("state", state.getKey());
     status.add("msg", msg);
     results.add("status", status);
   }
 
   public enum CollectionOperation implements CollectionOp {
-    CREATE_OP(CREATE, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams().required(), null, NAME);
-      props.put("fromApi", "true");
-      copy(req.getParams(), props,
-          REPLICATION_FACTOR,
-          COLL_CONF,
-          NUM_SLICES,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          SHARDS_PROP,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          NRT_REPLICAS,
-          WAIT_FOR_FINAL_STATE,
-          PER_REPLICA_STATE,
-          ALIAS);
-
-      if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
-        //TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for more details
-        int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
-        int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
-        if (replicationFactor != nrtReplicas) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
-        }
-      }
-      if (props.get(REPLICATION_FACTOR) != null) {
-        props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
-      } else if (props.get(NRT_REPLICAS) != null) {
-        props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
-      }
-
-      final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
-      final String shardsParam = (String) props.get(SHARDS_PROP);
-      if (StringUtils.isNotEmpty(shardsParam)) {
-        verifyShardsParam(shardsParam);
-      }
-      if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
-        //We must always create a .system collection with only a single shard
-        props.put(NUM_SLICES, 1);
-        props.remove(SHARDS_PROP);
-        createSysConfigSet(h.coreContainer);
-
-      }
-      if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
-      for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
-        h.copyFromClusterProp(props, prop);
-      copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-      return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+    CREATE_OP(
+        CREATE,
+        (req, rsp, h) -> {
+          Map<String, Object> props = copy(req.getParams().required(), null, NAME);
+          props.put("fromApi", "true");
+          copy(
+              req.getParams(),
+              props,
+              REPLICATION_FACTOR,
+              COLL_CONF,
+              NUM_SLICES,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              SHARDS_PROP,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              NRT_REPLICAS,
+              WAIT_FOR_FINAL_STATE,
+              PER_REPLICA_STATE,
+              ALIAS);
+
+          if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
+            // TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for
+            // more details
+            int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
+            int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
+            if (replicationFactor != nrtReplicas) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
+            }
+          }
+          if (props.get(REPLICATION_FACTOR) != null) {
+            props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
+          } else if (props.get(NRT_REPLICAS) != null) {
+            props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
+          }
 
-    }),
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
+          final String shardsParam = (String) props.get(SHARDS_PROP);
+          if (StringUtils.isNotEmpty(shardsParam)) {
+            verifyShardsParam(shardsParam);
+          }
+          if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
+            // We must always create a .system collection with only a single shard
+            props.put(NUM_SLICES, 1);
+            props.remove(SHARDS_PROP);
+            createSysConfigSet(h.coreContainer);
+          }
+          if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
+          for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
+            h.copyFromClusterProp(props, prop);
+          copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+          return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+        }),
     @SuppressWarnings({"unchecked"})
-    COLSTATUS_OP(COLSTATUS, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          ColStatus.CORE_INFO_PROP,
-          ColStatus.SEGMENTS_PROP,
-          ColStatus.FIELD_INFO_PROP,
-          ColStatus.RAW_SIZE_PROP,
-          ColStatus.RAW_SIZE_SUMMARY_PROP,
-          ColStatus.RAW_SIZE_DETAILS_PROP,
-          ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
-          ColStatus.SIZE_INFO_PROP);
-
-      new ColStatus(h.coreContainer.getSolrClientCache(),
-          h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props))
-          .getColStatus(rsp.getValues());
-      return null;
-    }),
-    DELETE_OP(DELETE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
+    COLSTATUS_OP(
+        COLSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  ColStatus.CORE_INFO_PROP,
+                  ColStatus.SEGMENTS_PROP,
+                  ColStatus.FIELD_INFO_PROP,
+                  ColStatus.RAW_SIZE_PROP,
+                  ColStatus.RAW_SIZE_SUMMARY_PROP,
+                  ColStatus.RAW_SIZE_DETAILS_PROP,
+                  ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
+                  ColStatus.SIZE_INFO_PROP);
+
+          new ColStatus(
+                  h.coreContainer.getSolrClientCache(),
+                  h.coreContainer.getZkController().getZkStateReader().getClusterState(),
+                  new ZkNodeProps(props))
+              .getColStatus(rsp.getValues());
+          return null;
+        }),
+    DELETE_OP(
+        DELETE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
     // XXX should this command support followAliases?
-    RELOAD_OP(RELOAD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map);
-    }),
-
-    RENAME_OP(RENAME, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
-
-    REINDEXCOLLECTION_OP(REINDEXCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams().required(), null, NAME);
-      copy(req.getParams(), m,
-          ReindexCollectionCmd.COMMAND,
-          ReindexCollectionCmd.REMOVE_SOURCE,
-          ReindexCollectionCmd.TARGET,
-          ZkStateReader.CONFIGNAME_PROP,
-          NUM_SLICES,
-          NRT_REPLICAS,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          REPLICATION_FACTOR,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          "shards",
-          CommonParams.ROWS,
-          CommonParams.Q,
-          CommonParams.FL,
-          FOLLOW_ALIASES);
-      if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
-        m.put(ZkStateReader.CONFIGNAME_PROP, req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
-      }
-      copyPropertiesWithPrefix(req.getParams(), m, "router.");
-      return m;
-    }),
-
-    SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get("collection");
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String shard = req.getParams().required().get("shard");
-
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-
-      DocCollection docCollection = clusterState.getCollection(collection);
-      ZkNodeProps leaderProps = docCollection.getLeader(shard);
-      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-
-      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-        RequestSyncShard reqSyncShard = new RequestSyncShard();
-        reqSyncShard.setCollection(collection);
-        reqSyncShard.setShard(shard);
-        reqSyncShard.setCoreName(nodeProps.getCoreName());
-        client.request(reqSyncShard);
-      }
-      return null;
-    }),
-
-    CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
-      String alias = req.getParams().get(NAME);
-      SolrIdentifierValidator.validateAliasName(alias);
-      String collections = req.getParams().get("collections");
-      RoutedAlias routedAlias = null;
-      Exception ex = null;
-      HashMap<String,Object> possiblyModifiedParams = new HashMap<>();
-      try {
-        // note that RA specific validation occurs here.
-        req.getParams().toMap(possiblyModifiedParams);
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        // This is awful because RoutedAlias lies about what types it wants
-        Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
-        routedAlias = RoutedAlias.fromProps(alias, temp);
-      } catch (SolrException e) {
-        // we'll throw this later if we are in fact creating a routed alias.
-        ex = e;
-      }
-      ModifiableSolrParams finalParams = new ModifiableSolrParams();
-      for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
-        if (entry.getValue().getClass().isArray() ) {
-          // v2 api hits this case
-          for (Object o : (Object[]) entry.getValue()) {
-            finalParams.add(entry.getKey(),o.toString());
+    RELOAD_OP(
+        RELOAD,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map);
+        }),
+
+    RENAME_OP(
+        RENAME,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
+
+    REINDEXCOLLECTION_OP(
+        REINDEXCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m = copy(req.getParams().required(), null, NAME);
+          copy(
+              req.getParams(),
+              m,
+              ReindexCollectionCmd.COMMAND,
+              ReindexCollectionCmd.REMOVE_SOURCE,
+              ReindexCollectionCmd.TARGET,
+              ZkStateReader.CONFIGNAME_PROP,
+              NUM_SLICES,
+              NRT_REPLICAS,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              REPLICATION_FACTOR,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              "shards",
+              CommonParams.ROWS,
+              CommonParams.Q,
+              CommonParams.FL,
+              FOLLOW_ALIASES);
+          if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
+            m.put(
+                ZkStateReader.CONFIGNAME_PROP,
+                req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
+          }
+          copyPropertiesWithPrefix(req.getParams(), m, "router.");
+          return m;
+        }),
+
+    SYNCSHARD_OP(
+        SYNCSHARD,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get("collection");
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String shard = req.getParams().required().get("shard");
+
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+
+          DocCollection docCollection = clusterState.getCollection(collection);
+          ZkNodeProps leaderProps = docCollection.getLeader(shard);
+          ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
+
+          try (HttpSolrClient client =
+              new Builder(nodeProps.getBaseUrl())
+                  .withConnectionTimeout(15000)
+                  .withSocketTimeout(60000)
+                  .build()) {
+            RequestSyncShard reqSyncShard = new RequestSyncShard();
+            reqSyncShard.setCollection(collection);
+            reqSyncShard.setShard(shard);
+            reqSyncShard.setCoreName(nodeProps.getCoreName());
+            client.request(reqSyncShard);
+          }
+          return null;
+        }),
+
+    CREATEALIAS_OP(
+        CREATEALIAS,
+        (req, rsp, h) -> {
+          String alias = req.getParams().get(NAME);
+          SolrIdentifierValidator.validateAliasName(alias);
+          String collections = req.getParams().get("collections");
+          RoutedAlias routedAlias = null;
+          Exception ex = null;
+          HashMap<String, Object> possiblyModifiedParams = new HashMap<>();
+          try {
+            // note that RA specific validation occurs here.
+            req.getParams().toMap(possiblyModifiedParams);
+            @SuppressWarnings({"unchecked", "rawtypes"})
+            // This is awful because RoutedAlias lies about what types it wants
+            Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
+            routedAlias = RoutedAlias.fromProps(alias, temp);
+          } catch (SolrException e) {
+            // we'll throw this later if we are in fact creating a routed alias.
+            ex = e;
+          }
+          ModifiableSolrParams finalParams = new ModifiableSolrParams();
+          for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
+            if (entry.getValue().getClass().isArray()) {
+              // v2 api hits this case
+              for (Object o : (Object[]) entry.getValue()) {
+                finalParams.add(entry.getKey(), o.toString());
+              }
+            } else {
+              finalParams.add(entry.getKey(), entry.getValue().toString());
+            }
           }
-        } else {
-          finalParams.add(entry.getKey(),entry.getValue().toString());
-        }
-      }
 
-      if (collections != null) {
-        if (routedAlias != null) {
-          throw new SolrException(BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
-        } else {
-          //////////////////////////////////////
-          // Regular alias creation indicated //
-          //////////////////////////////////////
-          return copy(finalParams.required(), null, NAME, "collections");
-        }
-      }
+          if (collections != null) {
+            if (routedAlias != null) {
+              throw new SolrException(
+                  BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
+            } else {
+              //////////////////////////////////////
+              // Regular alias creation indicated //
+              //////////////////////////////////////
+              return copy(finalParams.required(), null, NAME, "collections");
+            }
+          }
 
-      /////////////////////////////////////////////////
-      // We are creating a routed alias from here on //
-      /////////////////////////////////////////////////
+          /////////////////////////////////////////////////
+          // We are creating a routed alias from here on //
+          /////////////////////////////////////////////////
 
-      // If our prior creation attempt had issues expose them now.
-      if (ex != null) {
-        throw ex;
-      }
+          // If our prior creation attempt had issues expose them now.
+          if (ex != null) {
+            throw ex;
+          }
 
-      // Now filter out just the parameters we care about from the request
-      assert routedAlias != null;
-      Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
-      copy(finalParams, result, routedAlias.getOptionalParams());
-
-      ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
-
-      // add to result params that start with "create-collection.".
-      //   Additionally, save these without the prefix to createCollParams
-      for (Map.Entry<String, String[]> entry : finalParams) {
-        final String p = entry.getKey();
-        if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
-          // This is what SolrParams#getAll(Map, Collection)} does
-          final String[] v = entry.getValue();
-          if (v.length == 1) {
-            result.put(p, v[0]);
-          } else {
-            result.put(p, v);
+          // Now filter out just the parameters we care about from the request
+          assert routedAlias != null;
+          Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
+          copy(finalParams, result, routedAlias.getOptionalParams());
+
+          ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
+
+          // add to result params that start with "create-collection.".
+          //   Additionally, save these without the prefix to createCollParams
+          for (Map.Entry<String, String[]> entry : finalParams) {
+            final String p = entry.getKey();
+            if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
+              // This is what SolrParams#getAll(Map, Collection)} does
+              final String[] v = entry.getValue();
+              if (v.length == 1) {
+                result.put(p, v[0]);
+              } else {
+                result.put(p, v);
+              }
+              createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
+            }
           }
-          createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
-        }
-      }
 
-      // Verify that the create-collection prefix'ed params appear to be valid.
-      if (createCollParams.get(NAME) != null) {
-        throw new SolrException(BAD_REQUEST, "routed aliases calculate names for their " +
-            "dependent collections, you cannot specify the name.");
-      }
-      if (createCollParams.get(COLL_CONF) == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "We require an explicit " + COLL_CONF);
-      }
-      // note: could insist on a config name here as well.... or wait to throw at overseer
-      createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
-      CREATE_OP.execute(new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
+          // Verify that the create-collection prefix'ed params appear to be valid.
+          if (createCollParams.get(NAME) != null) {
+            throw new SolrException(
+                BAD_REQUEST,
+                "routed aliases calculate names for their "
+                    + "dependent collections, you cannot specify the name.");
+          }
+          if (createCollParams.get(COLL_CONF) == null) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST, "We require an explicit " + COLL_CONF);
+          }
+          // note: could insist on a config name here as well.... or wait to throw at overseer
+          createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
+          CREATE_OP.execute(
+              new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
 
-      return result;
-    }),
+          return result;
+        }),
 
     DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
 
     /**
      * Change properties for an alias (use CREATEALIAS_OP to change the actual value of the alias)
      */
-    ALIASPROP_OP(ALIASPROP, (req, rsp, h) -> {
-      Map<String, Object> params = copy(req.getParams().required(), null, NAME);
-
-      // Note: success/no-op in the event of no properties supplied is intentional. Keeps code simple and one less case
-      // for api-callers to check for.
-      return convertPrefixToMap(req.getParams(), params, "property");
-    }),
-
-    /**
-     * List the aliases and associated properties.
-     */
+    ALIASPROP_OP(
+        ALIASPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> params = copy(req.getParams().required(), null, NAME);
+
+          // Note: success/no-op in the event of no properties supplied is intentional. Keeps code
+          // simple and one less case
+          // for api-callers to check for.
+          return convertPrefixToMap(req.getParams(), params, "property");
+        }),
+
+    /** List the aliases and associated properties. */
     @SuppressWarnings({"unchecked"})
-    LISTALIASES_OP(LISTALIASES, (req, rsp, h) -> {
-      ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
-      // if someone calls listAliases, lets ensure we return an up to date response
-      zkStateReader.aliasesManager.update();
-      Aliases aliases = zkStateReader.getAliases();
-      if (aliases != null) {
-        // the aliases themselves...
-        rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
-        // Any properties for the above aliases.
-        Map<String, Map<String, String>> meta = new LinkedHashMap<>();
-        for (String alias : aliases.getCollectionAliasListMap().keySet()) {
-          Map<String, String> collectionAliasProperties = aliases.getCollectionAliasProperties(alias);
-          if (!collectionAliasProperties.isEmpty()) {
-            meta.put(alias, collectionAliasProperties);
+    LISTALIASES_OP(
+        LISTALIASES,
+        (req, rsp, h) -> {
+          ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
+          // if someone calls listAliases, lets ensure we return an up to date response
+          zkStateReader.aliasesManager.update();
+          Aliases aliases = zkStateReader.getAliases();
+          if (aliases != null) {
+            // the aliases themselves...
+            rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
+            // Any properties for the above aliases.
+            Map<String, Map<String, String>> meta = new LinkedHashMap<>();
+            for (String alias : aliases.getCollectionAliasListMap().keySet()) {
+              Map<String, String> collectionAliasProperties =
+                  aliases.getCollectionAliasProperties(alias);
+              if (!collectionAliasProperties.isEmpty()) {
+                meta.put(alias, collectionAliasProperties);
+              }
+            }
+            rsp.getValues().add("properties", meta);
+          }
+          return null;
+        }),
+    SPLITSHARD_OP(
+        SPLITSHARD,
+        DEFAULT_COLLECTION_OP_TIMEOUT * 5,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(COLLECTION_PROP);
+          // TODO : add support for multiple shards
+          String shard = req.getParams().get(SHARD_ID_PROP);
+          String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
+          String splitKey = req.getParams().get("split.key");
+          String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
+          String fuzz = req.getParams().get(SPLIT_FUZZ);
+
+          if (splitKey == null && shard == null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
+          }
+          if (splitKey != null && shard != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'shard' or 'split.key' should be specified");
+          }
+          if (splitKey != null && rangesStr != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'ranges' or 'split.key' should be specified");
+          }
+          if (numSubShards != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "numSubShards can not be specified with split.key or ranges parameters");
+          }
+          if (fuzz != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "fuzz can not be specified with split.key or ranges parameters");
           }
-        }
-        rsp.getValues().add("properties", meta);
-      }
-      return null;
-    }),
-    SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, (req, rsp, h) -> {
-      String name = req.getParams().required().get(COLLECTION_PROP);
-      // TODO : add support for multiple shards
-      String shard = req.getParams().get(SHARD_ID_PROP);
-      String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
-      String splitKey = req.getParams().get("split.key");
-      String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
-      String fuzz = req.getParams().get(SPLIT_FUZZ);
-
-      if (splitKey == null && shard == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
-      }
-      if (splitKey != null && shard != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'shard' or 'split.key' should be specified");
-      }
-      if (splitKey != null && rangesStr != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'ranges' or 'split.key' should be specified");
-      }
-      if (numSubShards != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "numSubShards can not be specified with split.key or ranges parameters");
-      }
-      if (fuzz != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "fuzz can not be specified with split.key or ranges parameters");
-      }
 
-      Map<String, Object> map = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          "split.key",
-          CoreAdminParams.RANGES,
-          WAIT_FOR_FINAL_STATE,
-          TIMING,
-          SPLIT_METHOD,
-          NUM_SUB_SHARDS,
-          SPLIT_FUZZ,
-          SPLIT_BY_PREFIX,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          FOLLOW_ALIASES);
-      return map;
-    }),
-    FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> {
-      forceLeaderElection(req, h);
-      return null;
-    }),
-    CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
-      boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      if (!ImplicitDocRouter.NAME.equals(((Map<?,?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
-      copy(req.getParams(), map,
-          REPLICATION_FACTOR,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          WAIT_FOR_FINAL_STATE,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP);
-
-      return copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          COUNT_PROP, REPLICA_PROP,
-          SHARD_ID_PROP,
-          ONLY_IF_DOWN,
-          FOLLOW_ALIASES);
-    }),
-    MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, "split.key", "target.collection");
-      return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
-    }),
-    ADDROLE_OP(ADDROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> {
-      String name = req.getParams().required().get(NAME);
-      String val = req.getParams().get(VALUE_LONG);
-      ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setClusterProperty(name, val);
-      return null;
-    }),
-    COLLECTIONPROP_OP(COLLECTIONPROP, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get(NAME);
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String name = req.getParams().required().get(PROPERTY_NAME);
-      String val = req.getParams().get(PROPERTY_VALUE);
-      CollectionProperties cp = new CollectionProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setCollectionProperty(collection, name, val);
-      return null;
-    }),
+          Map<String, Object> map =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  SHARD_ID_PROP,
+                  "split.key",
+                  CoreAdminParams.RANGES,
+                  WAIT_FOR_FINAL_STATE,
+                  TIMING,
+                  SPLIT_METHOD,
+                  NUM_SUB_SHARDS,
+                  SPLIT_FUZZ,
+                  SPLIT_BY_PREFIX,
+                  FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETESHARD_OP(
+        DELETESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              FOLLOW_ALIASES);
+          return map;
+        }),
+    FORCELEADER_OP(
+        FORCELEADER,
+        (req, rsp, h) -> {
+          forceLeaderElection(req, h);
+          return null;
+        }),
+    CREATESHARD_OP(
+        CREATESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          final String newShardName =
+              SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
+          boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          if (!ImplicitDocRouter.NAME.equals(
+              ((Map<?, ?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
+          copy(
+              req.getParams(),
+              map,
+              REPLICATION_FACTOR,
+              NRT_REPLICAS,
+              TLOG_REPLICAS,
+              PULL_REPLICAS,
+              CREATE_NODE_SET,
+              WAIT_FOR_FINAL_STATE,
+              FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETEREPLICA_OP(
+        DELETEREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP);
+
+          return copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              COUNT_PROP,
+              REPLICA_PROP,
+              SHARD_ID_PROP,
+              ONLY_IF_DOWN,
+              FOLLOW_ALIASES);
+        }),
+    MIGRATE_OP(
+        MIGRATE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  "split.key",
+                  "target.collection");
+          return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
+        }),
+    ADDROLE_OP(
+        ADDROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    REMOVEROLE_OP(
+        REMOVEROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    CLUSTERPROP_OP(
+        CLUSTERPROP,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(NAME);
+          String val = req.getParams().get(VALUE_LONG);
+          ClusterProperties cp =
+              new ClusterProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setClusterProperty(name, val);
+          return null;
+        }),
+    COLLECTIONPROP_OP(
+        COLLECTIONPROP,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get(NAME);
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String name = req.getParams().required().get(PROPERTY_NAME);
+          String val = req.getParams().get(PROPERTY_VALUE);
+          CollectionProperties cp =
+              new CollectionProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setCollectionProperty(collection, name, val);
+          return null;
+        }),
     @SuppressWarnings({"unchecked"})
-    REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> {
-      req.getParams().required().check(REQUESTID);
-
-      final CoreContainer coreContainer = h.coreContainer;
-      final String requestId = req.getParams().get(REQUESTID);
-      final ZkController zkController = coreContainer.getZkController();
-
-      final NamedList<Object> status = new NamedList<>();
-      if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-        if (zkController.getOverseerCompletedMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, COMPLETED, "found [" + requestId + "] in completed tasks");
-        } else if (zkController.getOverseerFailureMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
-        } else if (zkController.getOverseerRunningMap().contains(requestId)) {
-          addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
-        } else if (h.overseerCollectionQueueContains(requestId)) {
-          addStatusToResponse(status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
-        } else {
-          addStatusToResponse(status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
-        }
-      } else {
-        Pair<RequestStatusState, OverseerSolrResponse> sr =
-            coreContainer.getDistributedCollectionCommandRunner().get().getAsyncTaskRequestStatus(requestId);
-        final String message;
-        switch (sr.first()) {
-          case COMPLETED:
-            message = "found [" + requestId + "] in completed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case FAILED:
-            message = "found [" + requestId + "] in failed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case RUNNING:
-            message = "found [" + requestId + "] in running tasks";
-            break;
-          case SUBMITTED:
-            message = "found [" + requestId + "] in submitted tasks";
-            break;
-          default:
-            message = "Did not find [" + requestId + "] in any tasks queue";
-        }
-        addStatusToResponse(status, sr.first(), message);
-      }
-
-      rsp.getValues().addAll(status);
-      return null;
-    }),
-    DELETESTATUS_OP(DELETESTATUS, new CollectionOp() {
-      @SuppressWarnings("unchecked")
-      @Override
-      public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
-        final CoreContainer coreContainer = h.coreContainer;
-        final String requestId = req.getParams().get(REQUESTID);
-        final ZkController zkController = coreContainer.getZkController();
-        Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
-
-        if (requestId == null && !flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
-        }
-
-        if (requestId != null && flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Both requestid and flush parameters can not be specified together.");
-        }
-
-        if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-          if (flush) {
-            Collection<String> completed = zkController.getOverseerCompletedMap().keys();
-            Collection<String> failed = zkController.getOverseerFailureMap().keys();
-            for (String asyncId : completed) {
-              zkController.getOverseerCompletedMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
-            }
-            for (String asyncId : failed) {
-              zkController.getOverseerFailureMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
+    REQUESTSTATUS_OP(
+        REQUESTSTATUS,
+        (req, rsp, h) -> {
+          req.getParams().required().check(REQUESTID);
+
+          final CoreContainer coreContainer = h.coreContainer;
+          final String requestId = req.getParams().get(REQUESTID);
+          final ZkController zkController = coreContainer.getZkController();
+
+          final NamedList<Object> status = new NamedList<>();
+          if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+            if (zkController.getOverseerCompletedMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(
+                  status, COMPLETED, "found [" + requestId + "] in completed tasks");
+            } else if (zkController.getOverseerFailureMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
+            } else if (zkController.getOverseerRunningMap().contains(requestId)) {
+              addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
+            } else if (h.overseerCollectionQueueContains(requestId)) {
+              addStatusToResponse(
+                  status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
+            } else {
+              addStatusToResponse(
+                  status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
             }
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
           } else {
-            // Request to cleanup
-            if (zkController.getOverseerCompletedMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else if (zkController.getOverseerFailureMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
-              // Don't call zkController.clearAsyncId for this, since it could be a running/pending task
+            Pair<RequestStatusState, OverseerSolrResponse> sr =
+                coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .getAsyncTaskRequestStatus(requestId);
+            final String message;
+            switch (sr.first()) {
+              case COMPLETED:
+                message = "found [" + requestId + "] in completed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case FAILED:
+                message = "found [" + requestId + "] in failed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case RUNNING:
+                message = "found [" + requestId + "] in running tasks";
+                break;
+              case SUBMITTED:
+                message = "found [" + requestId + "] in submitted tasks";
+                break;
+              default:
+                message = "Did not find [" + requestId + "] in any tasks queue";
             }
+            addStatusToResponse(status, sr.first(), message);
           }
-        } else {
-          if (flush) {
-            coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
-          } else {
-            if (coreContainer.getDistributedCollectionCommandRunner().get().deleteSingleAsyncId(requestId)) {
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
+
+          rsp.getValues().addAll(status);
+          return null;
+        }),
+    DELETESTATUS_OP(
+        DELETESTATUS,
+        new CollectionOp() {
+          @SuppressWarnings("unchecked")
+          @Override
+          public Map<String, Object> execute(
+              SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
+            final CoreContainer coreContainer = h.coreContainer;
+            final String requestId = req.getParams().get(REQUESTID);
+            final ZkController zkController = coreContainer.getZkController();
+            Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
+
+            if (requestId == null && !flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
+            }
+
+            if (requestId != null && flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Both requestid and flush parameters can not be specified together.");
+            }
+
+            if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+              if (flush) {
+                Collection<String> completed = zkController.getOverseerCompletedMap().keys();
+                Collection<String> failed = zkController.getOverseerFailureMap().keys();
+                for (String asyncId : completed) {
+                  zkController.getOverseerCompletedMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                for (String asyncId : failed) {
+                  zkController.getOverseerFailureMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                // Request to cleanup
+                if (zkController.getOverseerCompletedMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else if (zkController.getOverseerFailureMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                  // Don't call zkController.clearAsyncId for this, since it could be a
+                  // running/pending task
+                }
+              }
             } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
+              if (flush) {
+                coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                if (coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .deleteSingleAsyncId(requestId)) {
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                }
+              }
             }
+            return null;
           }
-        }
-        return null;
-      }
-    }),
-    ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          "node",
-          SHARD_ID_PROP,
-          _ROUTE_,
-          CoreAdminParams.NAME,
-          INSTANCE_DIR,
-          DATA_DIR,
-          ULOG_DIR,
-          REPLICA_TYPE,
-          WAIT_FOR_FINAL_STATE,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          FOLLOW_ALIASES,
-          SKIP_NODE_ASSIGNMENT);
-      return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-    }),
+        }),
+    ADDREPLICA_OP(
+        ADDREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  "node",
+                  SHARD_ID_PROP,
+                  _ROUTE_,
+                  CoreAdminParams.NAME,
+                  INSTANCE_DIR,
+                  DATA_DIR,
+                  ULOG_DIR,
+                  REPLICA_TYPE,
+                  WAIT_FOR_FINAL_STATE,
+                  NRT_REPLICAS,
+                  TLOG_REPLICAS,
+                  PULL_REPLICAS,
+                  CREATE_NODE_SET,
+                  FOLLOW_ALIASES,
+                  SKIP_NODE_ASSIGNMENT);
+          return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+        }),
     OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> new LinkedHashMap<>()),
     @SuppressWarnings({"unchecked"})
-    DISTRIBUTEDAPIPROCESSING_OP(DISTRIBUTEDAPIPROCESSING, (req, rsp, h)  -> {
-      NamedList<Object> results = new NamedList<>();
-      boolean isDistributedApi = h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
-      results.add("isDistributedApi", isDistributedApi);
-      rsp.getValues().addAll(results);
-      return null;
-    }),
-    /**
-     * Handle list collection request.
-     * Do list collection request to zk host
-     */
+    DISTRIBUTEDAPIPROCESSING_OP(
+        DISTRIBUTEDAPIPROCESSING,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          boolean isDistributedApi =
+              h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
+          results.add("isDistributedApi", isDistributedApi);
+          rsp.getValues().addAll(results);
+          return null;
+        }),
+    /** Handle list collection request. Do list collection request to zk host */
     @SuppressWarnings({"unchecked"})
-    LIST_OP(LIST, (req, rsp, h) -> {
-      NamedList<Object> results = new NamedList<>();
-      Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
-      List<String> collectionList = new ArrayList<>(collections.keySet());
-      Collections.sort(collectionList);
-      // XXX should we add aliases here?
-      results.add("collections", collectionList);
-      SolrResponse response = new OverseerSolrResponse(results);
-      rsp.getValues().addAll(response.getResponse());
-      return null;
-    }),
+    LIST_OP(
+        LIST,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          Map<String, DocCollection> collections =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getClusterState()
+                  .getCollectionsMap();
+          List<String> collectionList = new ArrayList<>(collections.keySet());
+          Collections.sort(collectionList);
+          // XXX should we add aliases here?
+          results.add("collections", collectionList);
+          SolrResponse response = new OverseerSolrResponse(results);
+          rsp.getValues().addAll(response.getResponse());
+          return null;
+        }),
     /**
-     * Handle cluster status request.
-     * Can return status per specific collection/shard or per all collections.
+     * Handle cluster status request. Can return status per specific collection/shard or per all
+     * collections.
      */
-    CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> {
-      Map<String, Object> all = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          _ROUTE_);
-      new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(),
-          new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
-      return null;
-    }),
-    ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP,
-          PROPERTY_VALUE_PROP);
-      copy(req.getParams(), map, SHARD_UNIQUE);
-      String property = (String) map.get(PROPERTY_PROP);
-      if (!property.startsWith(PROPERTY_PREFIX)) {
-        property = PROPERTY_PREFIX + property;
-      }
+    CLUSTERSTATUS_OP(
+        CLUSTERSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> all =
+              copy(req.getParams(), null, COLLECTION_PROP, SHARD_ID_PROP, _ROUTE_);
+          new ClusterStatus(
+                  h.coreContainer.getZkController().getZkStateReader(), new ZkNodeProps(all))
+              .getClusterStatus(rsp.getValues());
+          return null;
+        }),
+    ADDREPLICAPROP_OP(
+        ADDREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP,
+                  PROPERTY_VALUE_PROP);
+          copy(req.getParams(), map, SHARD_UNIQUE);
+          String property = (String) map.get(PROPERTY_PROP);
+          if (!property.startsWith(PROPERTY_PREFIX)) {
+            property = PROPERTY_PREFIX + property;
+          }
 
-      boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
-
-      // Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
-      // in a slice on properties that are known to only be one-per-slice and error out if so.
-      if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
-          SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
-          uniquePerSlice == false) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Overseer replica property command received for property " + property +
-                " with the " + SHARD_UNIQUE +
-                " parameter set to something other than 'true'. No action taken.");
-      }
-      return map;
-    }),
+          boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
+
+          // Check if we're trying to set a property with parameters that allow us to set the
+          // property on multiple replicas
+          // in a slice on properties that are known to only be one-per-slice and error out if so.
+          if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE))
+              && SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(
+                  property.toLowerCase(Locale.ROOT))
+              && uniquePerSlice == false) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Overseer replica property command received for property "
+                    + property
+                    + " with the "
+                    + SHARD_UNIQUE
+                    + " parameter set to something other than 'true'. No action taken.");
+          }
+          return map;
+        }),
     // XXX should this command support followAliases?
-    DELETEREPLICAPROP_OP(DELETEREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP);
-      return copy(req.getParams(), map, PROPERTY_PROP);
-    }),
+    DELETEREPLICAPROP_OP(
+        DELETEREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP);
+          return copy(req.getParams(), map, PROPERTY_PROP);
+        }),
     // XXX should this command support followAliases?
-    BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP);
-      Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
-      String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
-      if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
-        prop = PROPERTY_PREFIX + prop;
-      }
+    BALANCESHARDUNIQUE_OP(
+        BALANCESHARDUNIQUE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, PROPERTY_PROP);
+          Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
+          String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
+          if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
+            prop = PROPERTY_PREFIX + prop;
+          }
 
-      if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
-            + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
-            " Property: " + prop + " shardUnique: " + shardUnique);
-      }
+          if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Balancing properties amongst replicas in a slice requires that"
+                    + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. "
+                    + " Property: "
+                    + prop
+                    + " shardUnique: "
+                    + shardUnique);
+          }
 
-      return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
-    }),
-    REBALANCELEADERS_OP(REBALANCELEADERS, (req, rsp, h) -> {
-      new RebalanceLeaders(req, rsp, h).execute();
-      return null;
-    }),
+          return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
+        }),
+    REBALANCELEADERS_OP(
+        REBALANCELEADERS,
+        (req, rsp, h) -> {
+          new RebalanceLeaders(req, rsp, h).execute();
+          return null;
+        }),
     // XXX should this command support followAliases?
-    MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
-      copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
-      if (m.isEmpty()) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            formatString("no supported values provided {0}", CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
-      }
-      copy(req.getParams().required(), m, COLLECTION_PROP);
-      for (Map.Entry<String, Object> entry : m.entrySet()) {
-        String prop = entry.getKey();
-        if ("".equals(entry.getValue())) {
-          // set to an empty string is equivalent to removing the property, see SOLR-12507
-          entry.setValue(null);
-        }
-        DocCollection.verifyProp(m, prop);
-      }
-      if (m.get(REPLICATION_FACTOR) != null) {
-        m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
-      }
-      return m;
-    }),
-    BACKUP_OP(BACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
-
-      final String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      final String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-      }
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      BackupRepository repository = cc.newBackupRepository(repo);
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
-      boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
-
-      // Check if the specified location is valid for this repository.
-      final URI uri = repository.createDirectoryURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-      }
+    MODIFYCOLLECTION_OP(
+        MODIFYCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m =
+              copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
+          copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
+          if (m.isEmpty()) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                formatString(
+                    "no supported values provided {0}",
+                    CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
+          }
+          copy(req.getParams().required(), m, COLLECTION_PROP);
+          for (Map.Entry<String, Object> entry : m.entrySet()) {
+            String prop = entry.getKey();
+            if ("".equals(entry.getValue())) {
+              // set to an empty string is equivalent to removing the property, see SOLR-12507
+              entry.setValue(null);
+            }
+            DocCollection.verifyProp(m, prop);
+          }
+          if (m.get(REPLICATION_FACTOR) != null) {
+            m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
+          }
+          return m;
+        }),
+    BACKUP_OP(
+        BACKUP,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME, COLLECTION_PROP);
+
+          final String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          final String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          if (!clusterState.hasCollection(collectionName)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Collection '" + collectionName + "' does not exist, no action taken.");
+          }
 
-      String strategy = req.getParams().get(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-      if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
-      }
+          CoreContainer cc = h.coreContainer;
+          String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          BackupRepository repository = cc.newBackupRepository(repo);
 
-      Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP,
-              FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME, CoreAdminParams.MAX_NUM_BACKUP_POINTS);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      if (repo != null) {
-        params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-      }
+          String location =
+              repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
+          if (location == null) {
+            // Refresh the cluster property file to make sure the value set for location is the
+            // latest
+            // Check if the location is specified in the cluster property.
+            location =
+                new ClusterProperties(h.coreContainer.getZkController().getZkClient())
+                    .getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
+            if (location == null) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "'location' is not specified as a query"
+                      + " parameter or as a default repository property or as a cluster property.");
+            }
+          }
+          boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
 
-      params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy);
-      params.put(CoreAdminParams.BACKUP_INCREMENTAL, incremental);
-      return params;
-    }),
-    RESTORE_OP(RESTORE, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
+          // Check if the specified location is valid for this repository.
+          final URI uri = repository.createDirectoryURI(location);
+          try {
+            if (!repository.exists(uri)) {
+              throw new SolrException(
+                  ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
+            }
+          } catch (IOException ex) {
+            throw new SolrException(
+                ErrorCode.SERVER_ERROR,
+                "Failed to check the existence of " + uri + ". Is it valid?",
+                ex);
+          }
 
-      final String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
-      if (h.coreContainer.getZkController().getZkStateReader().getAliases().hasAlias(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' is an existing alias, no action taken.");
-      }
+          String strategy =
+              req.getParams()
+                  .get(
+                      CollectionAdminParams.INDEX_BACKUP_STRATEGY,
+                      CollectionAdminParams.COPY_FILES_STRATEGY);
+          if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
+          }
 
-      final CoreContainer cc = h.coreContainer;
-      final String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      final BackupRepository repository = cc.newBackupRepository(repo);
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty("location", null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
+          Map<String, Object> params =
+              copy(
+                  req.getParams(),
+                  null,
+                  NAME,
+                  COLLECTION_PROP,
+                  FOLLOW_ALIASES,
+                  CoreAdminParams.COMMIT_NAME,
+                  CoreAdminParams.MAX_NUM_BACKUP_POINTS);
+          params.put(CoreAdminParams.BACKUP_LOCATION, location);
+          if (repo != null) {
+            params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
+          }
 
-      // Check if the specified location is valid for this repository.
-      final URI uri = repository.createDirectoryURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-      }
+          params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy);
+          params.put(CoreAdminParams.BACKUP_INCREMENTAL, incremental);
+          return params;
+        }),
+    RESTORE_OP(
+        RESTORE,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME, COLLECTION_PROP);
+
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
+          if (h.coreContainer
+              .getZkController()
+              .getZkStateReader()
+              .getAliases()
+              .hasAlias(collectionName)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Collection '" + collectionName + "' is an existing alias, no action taken.");
+          }
 
-      final String createNodeArg = req.getParams().get(CREATE_NODE_SET);
-      if (CREATE_NODE_SET_EMPTY.equals(createNodeArg)) {
-        throw new SolrException(
-            SolrException.ErrorCode.BAD_REQUEST,
-            "Cannot restore with a CREATE_NODE_SET of CREATE_NODE_SET_EMPTY."
-        );
-      }
-      if (req.getParams().get(NRT_REPLICAS) != null && req.getParams().get(REPLICATION_FACTOR) != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Cannot set both replicationFactor and nrtReplicas as they mean the same thing");
-      }
+          final CoreContainer cc = h.coreContainer;
+          final String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          final BackupRepository repository = cc.newBackupRepository(repo);
 
-      final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      if (repo != null) {
-        params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-      }
-      // from CREATE_OP:
-      copy(req.getParams(), params, COLL_CONF, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS,
-          PULL_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE, BACKUP_ID);
-      copyPropertiesWithPrefix(req.getParams(), params, PROPERTY_PREFIX);
-      return params;
-    }),
-    DELETEBACKUP_OP(DELETEBACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME);
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      try (BackupRepository repository = cc.newBackupRepository(repo)) {
-
-        String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-        if (location == null) {
-          //Refresh the cluster property file to make sure the value set for location is the latest
-          // Check if the location is specified in the cluster property.
-          location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty("location", null);
+          String location =
+              repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
           if (location == null) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-                    + " parameter or as a default repository property or as a cluster property.");
+            // Refresh the cluster property file to make sure the value set for location is the
+            // latest
+            // Check if the location is specified in the cluster property.

Review comment:
       Fix this
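
       A minimal sketch (not part of the diff above) of how the flagged spot could read once the
       Spotless-wrapped comment lines are merged back into whole sentences, assuming that wrapping
       is what "Fix this" points at; the surrounding statements are copied from the BACKUP_OP hunk
       earlier in this message and the property key there is an assumption for this op:

           String location =
               repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
           if (location == null) {
             // Refresh the cluster property file to make sure the value set for location is the latest.
             // Check if the location is specified in the cluster property.
             location =
                 new ClusterProperties(h.coreContainer.getZkController().getZkClient())
                     .getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
             if (location == null) {
               throw new SolrException(
                   ErrorCode.BAD_REQUEST,
                   "'location' is not specified as a query parameter or as a default repository"
                       + " property or as a cluster property.");
             }
           }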

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -388,982 +426,1324 @@ public Category getCategory() {
     return Category.ADMIN;
   }
 
-  private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
+  private static void createSysConfigSet(CoreContainer coreContainer)
+      throws KeeperException, InterruptedException {
     SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
     ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
     cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
-    cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
+    cmdExecutor.ensureExists(
+        ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
 
     try {
-      String path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
-      byte[] data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
+      String path =
+          ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
+      byte[] data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
-      path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/solrconfig.xml";
-      data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
+      path =
+          ZkStateReader.CONFIGS_ZKNODE
+              + "/"
+              + CollectionAdminParams.SYSTEM_COLL
+              + "/solrconfig.xml";
+      data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
     } catch (IOException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
-
-
   }
 
-  private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
+  private static void addStatusToResponse(
+      NamedList<Object> results, RequestStatusState state, String msg) {
     SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
     status.add("state", state.getKey());
     status.add("msg", msg);
     results.add("status", status);
   }
 
   public enum CollectionOperation implements CollectionOp {
-    CREATE_OP(CREATE, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams().required(), null, NAME);
-      props.put("fromApi", "true");
-      copy(req.getParams(), props,
-          REPLICATION_FACTOR,
-          COLL_CONF,
-          NUM_SLICES,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          SHARDS_PROP,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          NRT_REPLICAS,
-          WAIT_FOR_FINAL_STATE,
-          PER_REPLICA_STATE,
-          ALIAS);
-
-      if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
-        //TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for more details
-        int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
-        int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
-        if (replicationFactor != nrtReplicas) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
-        }
-      }
-      if (props.get(REPLICATION_FACTOR) != null) {
-        props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
-      } else if (props.get(NRT_REPLICAS) != null) {
-        props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
-      }
-
-      final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
-      final String shardsParam = (String) props.get(SHARDS_PROP);
-      if (StringUtils.isNotEmpty(shardsParam)) {
-        verifyShardsParam(shardsParam);
-      }
-      if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
-        //We must always create a .system collection with only a single shard
-        props.put(NUM_SLICES, 1);
-        props.remove(SHARDS_PROP);
-        createSysConfigSet(h.coreContainer);
-
-      }
-      if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
-      for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
-        h.copyFromClusterProp(props, prop);
-      copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-      return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+    CREATE_OP(
+        CREATE,
+        (req, rsp, h) -> {
+          Map<String, Object> props = copy(req.getParams().required(), null, NAME);
+          props.put("fromApi", "true");
+          copy(
+              req.getParams(),
+              props,
+              REPLICATION_FACTOR,
+              COLL_CONF,
+              NUM_SLICES,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              SHARDS_PROP,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              NRT_REPLICAS,
+              WAIT_FOR_FINAL_STATE,
+              PER_REPLICA_STATE,
+              ALIAS);
+
+          if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
+            // TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for
+            // more details
+            int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
+            int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
+            if (replicationFactor != nrtReplicas) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
+            }
+          }
+          if (props.get(REPLICATION_FACTOR) != null) {
+            props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
+          } else if (props.get(NRT_REPLICAS) != null) {
+            props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
+          }
 
-    }),
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
+          final String shardsParam = (String) props.get(SHARDS_PROP);
+          if (StringUtils.isNotEmpty(shardsParam)) {
+            verifyShardsParam(shardsParam);
+          }
+          if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
+            // We must always create a .system collection with only a single shard
+            props.put(NUM_SLICES, 1);
+            props.remove(SHARDS_PROP);
+            createSysConfigSet(h.coreContainer);
+          }
+          if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
+          for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
+            h.copyFromClusterProp(props, prop);
+          copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+          return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+        }),
     @SuppressWarnings({"unchecked"})
-    COLSTATUS_OP(COLSTATUS, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          ColStatus.CORE_INFO_PROP,
-          ColStatus.SEGMENTS_PROP,
-          ColStatus.FIELD_INFO_PROP,
-          ColStatus.RAW_SIZE_PROP,
-          ColStatus.RAW_SIZE_SUMMARY_PROP,
-          ColStatus.RAW_SIZE_DETAILS_PROP,
-          ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
-          ColStatus.SIZE_INFO_PROP);
-
-      new ColStatus(h.coreContainer.getSolrClientCache(),
-          h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props))
-          .getColStatus(rsp.getValues());
-      return null;
-    }),
-    DELETE_OP(DELETE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
+    COLSTATUS_OP(
+        COLSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  ColStatus.CORE_INFO_PROP,
+                  ColStatus.SEGMENTS_PROP,
+                  ColStatus.FIELD_INFO_PROP,
+                  ColStatus.RAW_SIZE_PROP,
+                  ColStatus.RAW_SIZE_SUMMARY_PROP,
+                  ColStatus.RAW_SIZE_DETAILS_PROP,
+                  ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
+                  ColStatus.SIZE_INFO_PROP);
+
+          new ColStatus(
+                  h.coreContainer.getSolrClientCache(),
+                  h.coreContainer.getZkController().getZkStateReader().getClusterState(),
+                  new ZkNodeProps(props))
+              .getColStatus(rsp.getValues());
+          return null;
+        }),
+    DELETE_OP(
+        DELETE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
     // XXX should this command support followAliases?
-    RELOAD_OP(RELOAD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map);
-    }),
-
-    RENAME_OP(RENAME, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
-
-    REINDEXCOLLECTION_OP(REINDEXCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams().required(), null, NAME);
-      copy(req.getParams(), m,
-          ReindexCollectionCmd.COMMAND,
-          ReindexCollectionCmd.REMOVE_SOURCE,
-          ReindexCollectionCmd.TARGET,
-          ZkStateReader.CONFIGNAME_PROP,
-          NUM_SLICES,
-          NRT_REPLICAS,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          REPLICATION_FACTOR,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          "shards",
-          CommonParams.ROWS,
-          CommonParams.Q,
-          CommonParams.FL,
-          FOLLOW_ALIASES);
-      if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
-        m.put(ZkStateReader.CONFIGNAME_PROP, req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
-      }
-      copyPropertiesWithPrefix(req.getParams(), m, "router.");
-      return m;
-    }),
-
-    SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get("collection");
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String shard = req.getParams().required().get("shard");
-
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-
-      DocCollection docCollection = clusterState.getCollection(collection);
-      ZkNodeProps leaderProps = docCollection.getLeader(shard);
-      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-
-      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-        RequestSyncShard reqSyncShard = new RequestSyncShard();
-        reqSyncShard.setCollection(collection);
-        reqSyncShard.setShard(shard);
-        reqSyncShard.setCoreName(nodeProps.getCoreName());
-        client.request(reqSyncShard);
-      }
-      return null;
-    }),
-
-    CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
-      String alias = req.getParams().get(NAME);
-      SolrIdentifierValidator.validateAliasName(alias);
-      String collections = req.getParams().get("collections");
-      RoutedAlias routedAlias = null;
-      Exception ex = null;
-      HashMap<String,Object> possiblyModifiedParams = new HashMap<>();
-      try {
-        // note that RA specific validation occurs here.
-        req.getParams().toMap(possiblyModifiedParams);
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        // This is awful because RoutedAlias lies about what types it wants
-        Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
-        routedAlias = RoutedAlias.fromProps(alias, temp);
-      } catch (SolrException e) {
-        // we'll throw this later if we are in fact creating a routed alias.
-        ex = e;
-      }
-      ModifiableSolrParams finalParams = new ModifiableSolrParams();
-      for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
-        if (entry.getValue().getClass().isArray() ) {
-          // v2 api hits this case
-          for (Object o : (Object[]) entry.getValue()) {
-            finalParams.add(entry.getKey(),o.toString());
+    RELOAD_OP(
+        RELOAD,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map);
+        }),
+
+    RENAME_OP(
+        RENAME,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
+
+    REINDEXCOLLECTION_OP(
+        REINDEXCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m = copy(req.getParams().required(), null, NAME);
+          copy(
+              req.getParams(),
+              m,
+              ReindexCollectionCmd.COMMAND,
+              ReindexCollectionCmd.REMOVE_SOURCE,
+              ReindexCollectionCmd.TARGET,
+              ZkStateReader.CONFIGNAME_PROP,
+              NUM_SLICES,
+              NRT_REPLICAS,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              REPLICATION_FACTOR,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              "shards",
+              CommonParams.ROWS,
+              CommonParams.Q,
+              CommonParams.FL,
+              FOLLOW_ALIASES);
+          if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
+            m.put(
+                ZkStateReader.CONFIGNAME_PROP,
+                req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
+          }
+          copyPropertiesWithPrefix(req.getParams(), m, "router.");
+          return m;
+        }),
+
+    SYNCSHARD_OP(
+        SYNCSHARD,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get("collection");
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String shard = req.getParams().required().get("shard");
+
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+
+          DocCollection docCollection = clusterState.getCollection(collection);
+          ZkNodeProps leaderProps = docCollection.getLeader(shard);
+          ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
+
+          try (HttpSolrClient client =
+              new Builder(nodeProps.getBaseUrl())
+                  .withConnectionTimeout(15000)
+                  .withSocketTimeout(60000)
+                  .build()) {
+            RequestSyncShard reqSyncShard = new RequestSyncShard();
+            reqSyncShard.setCollection(collection);
+            reqSyncShard.setShard(shard);
+            reqSyncShard.setCoreName(nodeProps.getCoreName());
+            client.request(reqSyncShard);
+          }
+          return null;
+        }),
+
+    CREATEALIAS_OP(
+        CREATEALIAS,
+        (req, rsp, h) -> {
+          String alias = req.getParams().get(NAME);
+          SolrIdentifierValidator.validateAliasName(alias);
+          String collections = req.getParams().get("collections");
+          RoutedAlias routedAlias = null;
+          Exception ex = null;
+          HashMap<String, Object> possiblyModifiedParams = new HashMap<>();
+          try {
+            // note that RA specific validation occurs here.
+            req.getParams().toMap(possiblyModifiedParams);
+            @SuppressWarnings({"unchecked", "rawtypes"})
+            // This is awful because RoutedAlias lies about what types it wants
+            Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
+            routedAlias = RoutedAlias.fromProps(alias, temp);
+          } catch (SolrException e) {
+            // we'll throw this later if we are in fact creating a routed alias.
+            ex = e;
+          }
+          ModifiableSolrParams finalParams = new ModifiableSolrParams();
+          for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
+            if (entry.getValue().getClass().isArray()) {
+              // v2 api hits this case
+              for (Object o : (Object[]) entry.getValue()) {
+                finalParams.add(entry.getKey(), o.toString());
+              }
+            } else {
+              finalParams.add(entry.getKey(), entry.getValue().toString());
+            }
           }
-        } else {
-          finalParams.add(entry.getKey(),entry.getValue().toString());
-        }
-      }
 
-      if (collections != null) {
-        if (routedAlias != null) {
-          throw new SolrException(BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
-        } else {
-          //////////////////////////////////////
-          // Regular alias creation indicated //
-          //////////////////////////////////////
-          return copy(finalParams.required(), null, NAME, "collections");
-        }
-      }
+          if (collections != null) {
+            if (routedAlias != null) {
+              throw new SolrException(
+                  BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
+            } else {
+              //////////////////////////////////////
+              // Regular alias creation indicated //
+              //////////////////////////////////////
+              return copy(finalParams.required(), null, NAME, "collections");
+            }
+          }
 
-      /////////////////////////////////////////////////
-      // We are creating a routed alias from here on //
-      /////////////////////////////////////////////////
+          /////////////////////////////////////////////////
+          // We are creating a routed alias from here on //
+          /////////////////////////////////////////////////
 
-      // If our prior creation attempt had issues expose them now.
-      if (ex != null) {
-        throw ex;
-      }
+          // If our prior creation attempt had issues expose them now.
+          if (ex != null) {
+            throw ex;
+          }
 
-      // Now filter out just the parameters we care about from the request
-      assert routedAlias != null;
-      Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
-      copy(finalParams, result, routedAlias.getOptionalParams());
-
-      ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
-
-      // add to result params that start with "create-collection.".
-      //   Additionally, save these without the prefix to createCollParams
-      for (Map.Entry<String, String[]> entry : finalParams) {
-        final String p = entry.getKey();
-        if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
-          // This is what SolrParams#getAll(Map, Collection)} does
-          final String[] v = entry.getValue();
-          if (v.length == 1) {
-            result.put(p, v[0]);
-          } else {
-            result.put(p, v);
+          // Now filter out just the parameters we care about from the request
+          assert routedAlias != null;
+          Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
+          copy(finalParams, result, routedAlias.getOptionalParams());
+
+          ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
+
+          // add to result params that start with "create-collection.".
+          //   Additionally, save these without the prefix to createCollParams
+          for (Map.Entry<String, String[]> entry : finalParams) {
+            final String p = entry.getKey();
+            if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
+              // This is what SolrParams#getAll(Map, Collection)} does
+              final String[] v = entry.getValue();
+              if (v.length == 1) {
+                result.put(p, v[0]);
+              } else {
+                result.put(p, v);
+              }
+              createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
+            }
           }
-          createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
-        }
-      }
 
-      // Verify that the create-collection prefix'ed params appear to be valid.
-      if (createCollParams.get(NAME) != null) {
-        throw new SolrException(BAD_REQUEST, "routed aliases calculate names for their " +
-            "dependent collections, you cannot specify the name.");
-      }
-      if (createCollParams.get(COLL_CONF) == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "We require an explicit " + COLL_CONF);
-      }
-      // note: could insist on a config name here as well.... or wait to throw at overseer
-      createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
-      CREATE_OP.execute(new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
+          // Verify that the create-collection prefix'ed params appear to be valid.
+          if (createCollParams.get(NAME) != null) {
+            throw new SolrException(
+                BAD_REQUEST,
+                "routed aliases calculate names for their "
+                    + "dependent collections, you cannot specify the name.");
+          }
+          if (createCollParams.get(COLL_CONF) == null) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST, "We require an explicit " + COLL_CONF);
+          }
+          // note: could insist on a config name here as well.... or wait to throw at overseer
+          createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
+          CREATE_OP.execute(
+              new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
 
-      return result;
-    }),
+          return result;
+        }),
 
     DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
 
     /**
      * Change properties for an alias (use CREATEALIAS_OP to change the actual value of the alias)
      */
-    ALIASPROP_OP(ALIASPROP, (req, rsp, h) -> {
-      Map<String, Object> params = copy(req.getParams().required(), null, NAME);
-
-      // Note: success/no-op in the event of no properties supplied is intentional. Keeps code simple and one less case
-      // for api-callers to check for.
-      return convertPrefixToMap(req.getParams(), params, "property");
-    }),
-
-    /**
-     * List the aliases and associated properties.
-     */
+    ALIASPROP_OP(
+        ALIASPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> params = copy(req.getParams().required(), null, NAME);
+
+          // Note: success/no-op in the event of no properties supplied is intentional. Keeps code
+          // simple and one less case
+          // for api-callers to check for.
+          return convertPrefixToMap(req.getParams(), params, "property");
+        }),
+
+    /** List the aliases and associated properties. */
     @SuppressWarnings({"unchecked"})
-    LISTALIASES_OP(LISTALIASES, (req, rsp, h) -> {
-      ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
-      // if someone calls listAliases, lets ensure we return an up to date response
-      zkStateReader.aliasesManager.update();
-      Aliases aliases = zkStateReader.getAliases();
-      if (aliases != null) {
-        // the aliases themselves...
-        rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
-        // Any properties for the above aliases.
-        Map<String, Map<String, String>> meta = new LinkedHashMap<>();
-        for (String alias : aliases.getCollectionAliasListMap().keySet()) {
-          Map<String, String> collectionAliasProperties = aliases.getCollectionAliasProperties(alias);
-          if (!collectionAliasProperties.isEmpty()) {
-            meta.put(alias, collectionAliasProperties);
+    LISTALIASES_OP(
+        LISTALIASES,
+        (req, rsp, h) -> {
+          ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
+          // if someone calls listAliases, lets ensure we return an up to date response
+          zkStateReader.aliasesManager.update();
+          Aliases aliases = zkStateReader.getAliases();
+          if (aliases != null) {
+            // the aliases themselves...
+            rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
+            // Any properties for the above aliases.
+            Map<String, Map<String, String>> meta = new LinkedHashMap<>();
+            for (String alias : aliases.getCollectionAliasListMap().keySet()) {
+              Map<String, String> collectionAliasProperties =
+                  aliases.getCollectionAliasProperties(alias);
+              if (!collectionAliasProperties.isEmpty()) {
+                meta.put(alias, collectionAliasProperties);
+              }
+            }
+            rsp.getValues().add("properties", meta);
+          }
+          return null;
+        }),
+    SPLITSHARD_OP(
+        SPLITSHARD,
+        DEFAULT_COLLECTION_OP_TIMEOUT * 5,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(COLLECTION_PROP);
+          // TODO : add support for multiple shards
+          String shard = req.getParams().get(SHARD_ID_PROP);
+          String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
+          String splitKey = req.getParams().get("split.key");
+          String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
+          String fuzz = req.getParams().get(SPLIT_FUZZ);
+
+          if (splitKey == null && shard == null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
+          }
+          if (splitKey != null && shard != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'shard' or 'split.key' should be specified");
+          }
+          if (splitKey != null && rangesStr != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'ranges' or 'split.key' should be specified");
+          }
+          if (numSubShards != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "numSubShards can not be specified with split.key or ranges parameters");
+          }
+          if (fuzz != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "fuzz can not be specified with split.key or ranges parameters");
           }
-        }
-        rsp.getValues().add("properties", meta);
-      }
-      return null;
-    }),
-    SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, (req, rsp, h) -> {
-      String name = req.getParams().required().get(COLLECTION_PROP);
-      // TODO : add support for multiple shards
-      String shard = req.getParams().get(SHARD_ID_PROP);
-      String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
-      String splitKey = req.getParams().get("split.key");
-      String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
-      String fuzz = req.getParams().get(SPLIT_FUZZ);
-
-      if (splitKey == null && shard == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
-      }
-      if (splitKey != null && shard != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'shard' or 'split.key' should be specified");
-      }
-      if (splitKey != null && rangesStr != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'ranges' or 'split.key' should be specified");
-      }
-      if (numSubShards != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "numSubShards can not be specified with split.key or ranges parameters");
-      }
-      if (fuzz != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "fuzz can not be specified with split.key or ranges parameters");
-      }
 
-      Map<String, Object> map = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          "split.key",
-          CoreAdminParams.RANGES,
-          WAIT_FOR_FINAL_STATE,
-          TIMING,
-          SPLIT_METHOD,
-          NUM_SUB_SHARDS,
-          SPLIT_FUZZ,
-          SPLIT_BY_PREFIX,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          FOLLOW_ALIASES);
-      return map;
-    }),
-    FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> {
-      forceLeaderElection(req, h);
-      return null;
-    }),
-    CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
-      boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      if (!ImplicitDocRouter.NAME.equals(((Map<?,?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
-      copy(req.getParams(), map,
-          REPLICATION_FACTOR,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          WAIT_FOR_FINAL_STATE,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP);
-
-      return copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          COUNT_PROP, REPLICA_PROP,
-          SHARD_ID_PROP,
-          ONLY_IF_DOWN,
-          FOLLOW_ALIASES);
-    }),
-    MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, "split.key", "target.collection");
-      return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
-    }),
-    ADDROLE_OP(ADDROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> {
-      String name = req.getParams().required().get(NAME);
-      String val = req.getParams().get(VALUE_LONG);
-      ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setClusterProperty(name, val);
-      return null;
-    }),
-    COLLECTIONPROP_OP(COLLECTIONPROP, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get(NAME);
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String name = req.getParams().required().get(PROPERTY_NAME);
-      String val = req.getParams().get(PROPERTY_VALUE);
-      CollectionProperties cp = new CollectionProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setCollectionProperty(collection, name, val);
-      return null;
-    }),
+          Map<String, Object> map =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  SHARD_ID_PROP,
+                  "split.key",
+                  CoreAdminParams.RANGES,
+                  WAIT_FOR_FINAL_STATE,
+                  TIMING,
+                  SPLIT_METHOD,
+                  NUM_SUB_SHARDS,
+                  SPLIT_FUZZ,
+                  SPLIT_BY_PREFIX,
+                  FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETESHARD_OP(
+        DELETESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              FOLLOW_ALIASES);
+          return map;
+        }),
+    FORCELEADER_OP(
+        FORCELEADER,
+        (req, rsp, h) -> {
+          forceLeaderElection(req, h);
+          return null;
+        }),
+    CREATESHARD_OP(
+        CREATESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          final String newShardName =
+              SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
+          boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          if (!ImplicitDocRouter.NAME.equals(
+              ((Map<?, ?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
+          copy(
+              req.getParams(),
+              map,
+              REPLICATION_FACTOR,
+              NRT_REPLICAS,
+              TLOG_REPLICAS,
+              PULL_REPLICAS,
+              CREATE_NODE_SET,
+              WAIT_FOR_FINAL_STATE,
+              FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETEREPLICA_OP(
+        DELETEREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP);
+
+          return copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              COUNT_PROP,
+              REPLICA_PROP,
+              SHARD_ID_PROP,
+              ONLY_IF_DOWN,
+              FOLLOW_ALIASES);
+        }),
+    MIGRATE_OP(
+        MIGRATE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  "split.key",
+                  "target.collection");
+          return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
+        }),
+    ADDROLE_OP(
+        ADDROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    REMOVEROLE_OP(
+        REMOVEROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    CLUSTERPROP_OP(
+        CLUSTERPROP,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(NAME);
+          String val = req.getParams().get(VALUE_LONG);
+          ClusterProperties cp =
+              new ClusterProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setClusterProperty(name, val);
+          return null;
+        }),
+    COLLECTIONPROP_OP(
+        COLLECTIONPROP,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get(NAME);
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String name = req.getParams().required().get(PROPERTY_NAME);
+          String val = req.getParams().get(PROPERTY_VALUE);
+          CollectionProperties cp =
+              new CollectionProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setCollectionProperty(collection, name, val);
+          return null;
+        }),
     @SuppressWarnings({"unchecked"})
-    REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> {
-      req.getParams().required().check(REQUESTID);
-
-      final CoreContainer coreContainer = h.coreContainer;
-      final String requestId = req.getParams().get(REQUESTID);
-      final ZkController zkController = coreContainer.getZkController();
-
-      final NamedList<Object> status = new NamedList<>();
-      if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-        if (zkController.getOverseerCompletedMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, COMPLETED, "found [" + requestId + "] in completed tasks");
-        } else if (zkController.getOverseerFailureMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
-        } else if (zkController.getOverseerRunningMap().contains(requestId)) {
-          addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
-        } else if (h.overseerCollectionQueueContains(requestId)) {
-          addStatusToResponse(status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
-        } else {
-          addStatusToResponse(status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
-        }
-      } else {
-        Pair<RequestStatusState, OverseerSolrResponse> sr =
-            coreContainer.getDistributedCollectionCommandRunner().get().getAsyncTaskRequestStatus(requestId);
-        final String message;
-        switch (sr.first()) {
-          case COMPLETED:
-            message = "found [" + requestId + "] in completed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case FAILED:
-            message = "found [" + requestId + "] in failed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case RUNNING:
-            message = "found [" + requestId + "] in running tasks";
-            break;
-          case SUBMITTED:
-            message = "found [" + requestId + "] in submitted tasks";
-            break;
-          default:
-            message = "Did not find [" + requestId + "] in any tasks queue";
-        }
-        addStatusToResponse(status, sr.first(), message);
-      }
-
-      rsp.getValues().addAll(status);
-      return null;
-    }),
-    DELETESTATUS_OP(DELETESTATUS, new CollectionOp() {
-      @SuppressWarnings("unchecked")
-      @Override
-      public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
-        final CoreContainer coreContainer = h.coreContainer;
-        final String requestId = req.getParams().get(REQUESTID);
-        final ZkController zkController = coreContainer.getZkController();
-        Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
-
-        if (requestId == null && !flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
-        }
-
-        if (requestId != null && flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Both requestid and flush parameters can not be specified together.");
-        }
-
-        if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-          if (flush) {
-            Collection<String> completed = zkController.getOverseerCompletedMap().keys();
-            Collection<String> failed = zkController.getOverseerFailureMap().keys();
-            for (String asyncId : completed) {
-              zkController.getOverseerCompletedMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
-            }
-            for (String asyncId : failed) {
-              zkController.getOverseerFailureMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
+    REQUESTSTATUS_OP(
+        REQUESTSTATUS,
+        (req, rsp, h) -> {
+          req.getParams().required().check(REQUESTID);
+
+          final CoreContainer coreContainer = h.coreContainer;
+          final String requestId = req.getParams().get(REQUESTID);
+          final ZkController zkController = coreContainer.getZkController();
+
+          final NamedList<Object> status = new NamedList<>();
+          if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+            if (zkController.getOverseerCompletedMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(
+                  status, COMPLETED, "found [" + requestId + "] in completed tasks");
+            } else if (zkController.getOverseerFailureMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
+            } else if (zkController.getOverseerRunningMap().contains(requestId)) {
+              addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
+            } else if (h.overseerCollectionQueueContains(requestId)) {
+              addStatusToResponse(
+                  status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
+            } else {
+              addStatusToResponse(
+                  status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
             }
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
           } else {
-            // Request to cleanup
-            if (zkController.getOverseerCompletedMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else if (zkController.getOverseerFailureMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
-              // Don't call zkController.clearAsyncId for this, since it could be a running/pending task
+            Pair<RequestStatusState, OverseerSolrResponse> sr =
+                coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .getAsyncTaskRequestStatus(requestId);
+            final String message;
+            switch (sr.first()) {
+              case COMPLETED:
+                message = "found [" + requestId + "] in completed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case FAILED:
+                message = "found [" + requestId + "] in failed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case RUNNING:
+                message = "found [" + requestId + "] in running tasks";
+                break;
+              case SUBMITTED:
+                message = "found [" + requestId + "] in submitted tasks";
+                break;
+              default:
+                message = "Did not find [" + requestId + "] in any tasks queue";
             }
+            addStatusToResponse(status, sr.first(), message);
           }
-        } else {
-          if (flush) {
-            coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
-          } else {
-            if (coreContainer.getDistributedCollectionCommandRunner().get().deleteSingleAsyncId(requestId)) {
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
+
+          rsp.getValues().addAll(status);
+          return null;
+        }),
+    DELETESTATUS_OP(
+        DELETESTATUS,
+        new CollectionOp() {
+          @SuppressWarnings("unchecked")
+          @Override
+          public Map<String, Object> execute(
+              SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
+            final CoreContainer coreContainer = h.coreContainer;
+            final String requestId = req.getParams().get(REQUESTID);
+            final ZkController zkController = coreContainer.getZkController();
+            Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
+
+            if (requestId == null && !flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
+            }
+
+            if (requestId != null && flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Both requestid and flush parameters can not be specified together.");
+            }
+
+            if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+              if (flush) {
+                Collection<String> completed = zkController.getOverseerCompletedMap().keys();
+                Collection<String> failed = zkController.getOverseerFailureMap().keys();
+                for (String asyncId : completed) {
+                  zkController.getOverseerCompletedMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                for (String asyncId : failed) {
+                  zkController.getOverseerFailureMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                // Request to cleanup
+                if (zkController.getOverseerCompletedMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else if (zkController.getOverseerFailureMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                  // Don't call zkController.clearAsyncId for this, since it could be a
+                  // running/pending task
+                }
+              }
             } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
+              if (flush) {
+                coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                if (coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .deleteSingleAsyncId(requestId)) {
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                }
+              }
             }
+            return null;
           }
-        }
-        return null;
-      }
-    }),
-    ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          "node",
-          SHARD_ID_PROP,
-          _ROUTE_,
-          CoreAdminParams.NAME,
-          INSTANCE_DIR,
-          DATA_DIR,
-          ULOG_DIR,
-          REPLICA_TYPE,
-          WAIT_FOR_FINAL_STATE,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          FOLLOW_ALIASES,
-          SKIP_NODE_ASSIGNMENT);
-      return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-    }),
+        }),
+    ADDREPLICA_OP(
+        ADDREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  "node",
+                  SHARD_ID_PROP,
+                  _ROUTE_,
+                  CoreAdminParams.NAME,
+                  INSTANCE_DIR,
+                  DATA_DIR,
+                  ULOG_DIR,
+                  REPLICA_TYPE,
+                  WAIT_FOR_FINAL_STATE,
+                  NRT_REPLICAS,
+                  TLOG_REPLICAS,
+                  PULL_REPLICAS,
+                  CREATE_NODE_SET,
+                  FOLLOW_ALIASES,
+                  SKIP_NODE_ASSIGNMENT);
+          return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+        }),
     OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> new LinkedHashMap<>()),
     @SuppressWarnings({"unchecked"})
-    DISTRIBUTEDAPIPROCESSING_OP(DISTRIBUTEDAPIPROCESSING, (req, rsp, h)  -> {
-      NamedList<Object> results = new NamedList<>();
-      boolean isDistributedApi = h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
-      results.add("isDistributedApi", isDistributedApi);
-      rsp.getValues().addAll(results);
-      return null;
-    }),
-    /**
-     * Handle list collection request.
-     * Do list collection request to zk host
-     */
+    DISTRIBUTEDAPIPROCESSING_OP(
+        DISTRIBUTEDAPIPROCESSING,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          boolean isDistributedApi =
+              h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
+          results.add("isDistributedApi", isDistributedApi);
+          rsp.getValues().addAll(results);
+          return null;
+        }),
+    /** Handle list collection request. Do list collection request to zk host */
     @SuppressWarnings({"unchecked"})
-    LIST_OP(LIST, (req, rsp, h) -> {
-      NamedList<Object> results = new NamedList<>();
-      Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
-      List<String> collectionList = new ArrayList<>(collections.keySet());
-      Collections.sort(collectionList);
-      // XXX should we add aliases here?
-      results.add("collections", collectionList);
-      SolrResponse response = new OverseerSolrResponse(results);
-      rsp.getValues().addAll(response.getResponse());
-      return null;
-    }),
+    LIST_OP(
+        LIST,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          Map<String, DocCollection> collections =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getClusterState()
+                  .getCollectionsMap();
+          List<String> collectionList = new ArrayList<>(collections.keySet());
+          Collections.sort(collectionList);
+          // XXX should we add aliases here?
+          results.add("collections", collectionList);
+          SolrResponse response = new OverseerSolrResponse(results);
+          rsp.getValues().addAll(response.getResponse());
+          return null;
+        }),
     /**
-     * Handle cluster status request.
-     * Can return status per specific collection/shard or per all collections.
+     * Handle cluster status request. Can return status per specific collection/shard or per all
+     * collections.
      */
-    CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> {
-      Map<String, Object> all = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          _ROUTE_);
-      new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(),
-          new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
-      return null;
-    }),
-    ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP,
-          PROPERTY_VALUE_PROP);
-      copy(req.getParams(), map, SHARD_UNIQUE);
-      String property = (String) map.get(PROPERTY_PROP);
-      if (!property.startsWith(PROPERTY_PREFIX)) {
-        property = PROPERTY_PREFIX + property;
-      }
+    CLUSTERSTATUS_OP(
+        CLUSTERSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> all =
+              copy(req.getParams(), null, COLLECTION_PROP, SHARD_ID_PROP, _ROUTE_);
+          new ClusterStatus(
+                  h.coreContainer.getZkController().getZkStateReader(), new ZkNodeProps(all))
+              .getClusterStatus(rsp.getValues());
+          return null;
+        }),
+    ADDREPLICAPROP_OP(
+        ADDREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP,
+                  PROPERTY_VALUE_PROP);
+          copy(req.getParams(), map, SHARD_UNIQUE);
+          String property = (String) map.get(PROPERTY_PROP);
+          if (!property.startsWith(PROPERTY_PREFIX)) {
+            property = PROPERTY_PREFIX + property;
+          }
 
-      boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
-
-      // Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
-      // in a slice on properties that are known to only be one-per-slice and error out if so.
-      if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
-          SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
-          uniquePerSlice == false) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Overseer replica property command received for property " + property +
-                " with the " + SHARD_UNIQUE +
-                " parameter set to something other than 'true'. No action taken.");
-      }
-      return map;
-    }),
+          boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
+
+          // Check if we're trying to set a property with parameters that allow us to set the
+          // property on multiple replicas
+          // in a slice on properties that are known to only be one-per-slice and error out if so.
+          if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE))
+              && SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(
+                  property.toLowerCase(Locale.ROOT))
+              && uniquePerSlice == false) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Overseer replica property command received for property "
+                    + property
+                    + " with the "
+                    + SHARD_UNIQUE
+                    + " parameter set to something other than 'true'. No action taken.");
+          }
+          return map;
+        }),
     // XXX should this command support followAliases?
-    DELETEREPLICAPROP_OP(DELETEREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP);
-      return copy(req.getParams(), map, PROPERTY_PROP);
-    }),
+    DELETEREPLICAPROP_OP(
+        DELETEREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP);
+          return copy(req.getParams(), map, PROPERTY_PROP);
+        }),
     // XXX should this command support followAliases?
-    BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP);
-      Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
-      String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
-      if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
-        prop = PROPERTY_PREFIX + prop;
-      }
+    BALANCESHARDUNIQUE_OP(
+        BALANCESHARDUNIQUE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, PROPERTY_PROP);
+          Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
+          String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
+          if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
+            prop = PROPERTY_PREFIX + prop;
+          }
 
-      if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
-            + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
-            " Property: " + prop + " shardUnique: " + shardUnique);
-      }
+          if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Balancing properties amongst replicas in a slice requires that"
+                    + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. "
+                    + " Property: "
+                    + prop
+                    + " shardUnique: "
+                    + shardUnique);
+          }
 
-      return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
-    }),
-    REBALANCELEADERS_OP(REBALANCELEADERS, (req, rsp, h) -> {
-      new RebalanceLeaders(req, rsp, h).execute();
-      return null;
-    }),
+          return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
+        }),
+    REBALANCELEADERS_OP(
+        REBALANCELEADERS,
+        (req, rsp, h) -> {
+          new RebalanceLeaders(req, rsp, h).execute();
+          return null;
+        }),
     // XXX should this command support followAliases?
-    MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
-      copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
-      if (m.isEmpty()) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            formatString("no supported values provided {0}", CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
-      }
-      copy(req.getParams().required(), m, COLLECTION_PROP);
-      for (Map.Entry<String, Object> entry : m.entrySet()) {
-        String prop = entry.getKey();
-        if ("".equals(entry.getValue())) {
-          // set to an empty string is equivalent to removing the property, see SOLR-12507
-          entry.setValue(null);
-        }
-        DocCollection.verifyProp(m, prop);
-      }
-      if (m.get(REPLICATION_FACTOR) != null) {
-        m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
-      }
-      return m;
-    }),
-    BACKUP_OP(BACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
-
-      final String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      final String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-      }
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      BackupRepository repository = cc.newBackupRepository(repo);
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
-      boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
-
-      // Check if the specified location is valid for this repository.
-      final URI uri = repository.createDirectoryURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-      }
+    MODIFYCOLLECTION_OP(
+        MODIFYCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m =
+              copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
+          copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
+          if (m.isEmpty()) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                formatString(
+                    "no supported values provided {0}",
+                    CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
+          }
+          copy(req.getParams().required(), m, COLLECTION_PROP);
+          for (Map.Entry<String, Object> entry : m.entrySet()) {
+            String prop = entry.getKey();
+            if ("".equals(entry.getValue())) {
+              // set to an empty string is equivalent to removing the property, see SOLR-12507
+              entry.setValue(null);
+            }
+            DocCollection.verifyProp(m, prop);
+          }
+          if (m.get(REPLICATION_FACTOR) != null) {
+            m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
+          }
+          return m;
+        }),
+    BACKUP_OP(
+        BACKUP,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME, COLLECTION_PROP);
+
+          final String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          final String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          if (!clusterState.hasCollection(collectionName)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Collection '" + collectionName + "' does not exist, no action taken.");
+          }
 
-      String strategy = req.getParams().get(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-      if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
-      }
+          CoreContainer cc = h.coreContainer;
+          String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          BackupRepository repository = cc.newBackupRepository(repo);
 
-      Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP,
-              FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME, CoreAdminParams.MAX_NUM_BACKUP_POINTS);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      if (repo != null) {
-        params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-      }
+          String location =
+              repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
+          if (location == null) {
+            // Refresh the cluster property file to make sure the value set for location is the
+            // latest
+            // Check if the location is specified in the cluster property.
+            location =
+                new ClusterProperties(h.coreContainer.getZkController().getZkClient())
+                    .getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
+            if (location == null) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "'location' is not specified as a query"
+                      + " parameter or as a default repository property or as a cluster property.");
+            }
+          }
+          boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
 
-      params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy);
-      params.put(CoreAdminParams.BACKUP_INCREMENTAL, incremental);
-      return params;
-    }),
-    RESTORE_OP(RESTORE, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
+          // Check if the specified location is valid for this repository.
+          final URI uri = repository.createDirectoryURI(location);
+          try {
+            if (!repository.exists(uri)) {
+              throw new SolrException(
+                  ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
+            }
+          } catch (IOException ex) {
+            throw new SolrException(
+                ErrorCode.SERVER_ERROR,
+                "Failed to check the existence of " + uri + ". Is it valid?",
+                ex);
+          }
 
-      final String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
-      if (h.coreContainer.getZkController().getZkStateReader().getAliases().hasAlias(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' is an existing alias, no action taken.");
-      }
+          String strategy =
+              req.getParams()
+                  .get(
+                      CollectionAdminParams.INDEX_BACKUP_STRATEGY,
+                      CollectionAdminParams.COPY_FILES_STRATEGY);
+          if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
+          }
 
-      final CoreContainer cc = h.coreContainer;
-      final String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      final BackupRepository repository = cc.newBackupRepository(repo);
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty("location", null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
+          Map<String, Object> params =
+              copy(
+                  req.getParams(),
+                  null,
+                  NAME,
+                  COLLECTION_PROP,
+                  FOLLOW_ALIASES,
+                  CoreAdminParams.COMMIT_NAME,
+                  CoreAdminParams.MAX_NUM_BACKUP_POINTS);
+          params.put(CoreAdminParams.BACKUP_LOCATION, location);
+          if (repo != null) {
+            params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
+          }
 
-      // Check if the specified location is valid for this repository.
-      final URI uri = repository.createDirectoryURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-      }
+          params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy);
+          params.put(CoreAdminParams.BACKUP_INCREMENTAL, incremental);
+          return params;
+        }),
+    RESTORE_OP(
+        RESTORE,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME, COLLECTION_PROP);
+
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
+          if (h.coreContainer
+              .getZkController()
+              .getZkStateReader()
+              .getAliases()
+              .hasAlias(collectionName)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Collection '" + collectionName + "' is an existing alias, no action taken.");
+          }
 
-      final String createNodeArg = req.getParams().get(CREATE_NODE_SET);
-      if (CREATE_NODE_SET_EMPTY.equals(createNodeArg)) {
-        throw new SolrException(
-            SolrException.ErrorCode.BAD_REQUEST,
-            "Cannot restore with a CREATE_NODE_SET of CREATE_NODE_SET_EMPTY."
-        );
-      }
-      if (req.getParams().get(NRT_REPLICAS) != null && req.getParams().get(REPLICATION_FACTOR) != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Cannot set both replicationFactor and nrtReplicas as they mean the same thing");
-      }
+          final CoreContainer cc = h.coreContainer;
+          final String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          final BackupRepository repository = cc.newBackupRepository(repo);
 
-      final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      if (repo != null) {
-        params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-      }
-      // from CREATE_OP:
-      copy(req.getParams(), params, COLL_CONF, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS,
-          PULL_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE, BACKUP_ID);
-      copyPropertiesWithPrefix(req.getParams(), params, PROPERTY_PREFIX);
-      return params;
-    }),
-    DELETEBACKUP_OP(DELETEBACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME);
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      try (BackupRepository repository = cc.newBackupRepository(repo)) {
-
-        String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-        if (location == null) {
-          //Refresh the cluster property file to make sure the value set for location is the latest
-          // Check if the location is specified in the cluster property.
-          location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty("location", null);
+          String location =
+              repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
           if (location == null) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-                    + " parameter or as a default repository property or as a cluster property.");
+            // Refresh the cluster property file to make sure the value set for location is the
+            // latest
+            // Check if the location is specified in the cluster property.
+            location =
+                new ClusterProperties(h.coreContainer.getZkController().getZkClient())
+                    .getClusterProperty("location", null);
+            if (location == null) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "'location' is not specified as a query"
+                      + " parameter or as a default repository property or as a cluster property.");
+            }
           }
-        }
 
-        // Check if the specified location is valid for this repository.
-        URI uri = repository.createDirectoryURI(location);
-        try {
-          if (!repository.exists(uri)) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "specified location " + uri + " does not exist.");
+          // Check if the specified location is valid for this repository.
+          final URI uri = repository.createDirectoryURI(location);
+          try {
+            if (!repository.exists(uri)) {
+              throw new SolrException(
+                  ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
+            }
+          } catch (IOException ex) {
+            throw new SolrException(
+                ErrorCode.SERVER_ERROR,
+                "Failed to check the existence of " + uri + ". Is it valid?",
+                ex);
           }
-        } catch (IOException ex) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-        }
 
-        int deletionModesProvided = 0;
-        if (req.getParams().get(MAX_NUM_BACKUP_POINTS) != null) deletionModesProvided++;
-        if (req.getParams().get(BACKUP_PURGE_UNUSED) != null) deletionModesProvided++;
-        if (req.getParams().get(BACKUP_ID) != null) deletionModesProvided++;
-        if (deletionModesProvided != 1) {
-          throw new SolrException(BAD_REQUEST,
-                  String.format(Locale.ROOT, "Exactly one of %s, %s, and %s parameters must be provided",
-                          MAX_NUM_BACKUP_POINTS, BACKUP_PURGE_UNUSED, BACKUP_ID));
-        }
-
-        final Map<String, Object> params = copy(req.getParams(), null, NAME, BACKUP_REPOSITORY,
-                BACKUP_LOCATION, BACKUP_ID, MAX_NUM_BACKUP_POINTS, BACKUP_PURGE_UNUSED);
-        params.put(BACKUP_LOCATION, location);
-        if (repo != null) {
-          params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-        }
-        return params;
-      }
-    }),
-    LISTBACKUP_OP(LISTBACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME);
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      try (BackupRepository repository = cc.newBackupRepository(repo)) {
-
-        String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-        if (location == null) {
-          //Refresh the cluster property file to make sure the value set for location is the latest
-          // Check if the location is specified in the cluster property.
-          location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
-          if (location == null) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-                    + " parameter or as a default repository property or as a cluster property.");
+          final String createNodeArg = req.getParams().get(CREATE_NODE_SET);
+          if (CREATE_NODE_SET_EMPTY.equals(createNodeArg)) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST,
+                "Cannot restore with a CREATE_NODE_SET of CREATE_NODE_SET_EMPTY.");
+          }
+          if (req.getParams().get(NRT_REPLICAS) != null
+              && req.getParams().get(REPLICATION_FACTOR) != null) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST,
+                "Cannot set both replicationFactor and nrtReplicas as they mean the same thing");
           }
-        }
 
-        String backupName = req.getParams().get(NAME);
-        final URI locationURI = repository.createDirectoryURI(location);
-        try {
-          if (!repository.exists(locationURI)) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "specified location " + locationURI + " does not exist.");
+          final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP);
+          params.put(CoreAdminParams.BACKUP_LOCATION, location);
+          if (repo != null) {
+            params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
           }
-        } catch (IOException ex) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + locationURI + ". Is it valid?", ex);
-        }
-        URI backupLocation = BackupFilePaths.buildExistingBackupLocationURI(repository, locationURI, backupName);
-        if (repository.exists(repository.resolve(backupLocation, BackupManager.TRADITIONAL_BACKUP_PROPS_FILE))) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The backup name [" + backupName + "] at " +
-                  "location [" + location + "] holds a non-incremental (legacy) backup, but " +
-                  "backup-listing is only supported on incremental backups");
-        }
+          // from CREATE_OP:
+          copy(
+              req.getParams(),
+              params,
+              COLL_CONF,
+              REPLICATION_FACTOR,
+              NRT_REPLICAS,
+              TLOG_REPLICAS,
+              PULL_REPLICAS,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              BACKUP_ID);
+          copyPropertiesWithPrefix(req.getParams(), params, PROPERTY_PREFIX);
+          return params;
+        }),
+    DELETEBACKUP_OP(
+        DELETEBACKUP,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME);
+
+          CoreContainer cc = h.coreContainer;
+          String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          try (BackupRepository repository = cc.newBackupRepository(repo)) {
+
+            String location =
+                repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
+            if (location == null) {
+              // Refresh the cluster property file to make sure the value set for location is the
+              // latest
+              // Check if the location is specified in the cluster property.

Review comment:
       Fix this
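       Presumably this points at the comment that spotless re-wrapped just above, leaving
       "latest" on a comment line by itself. A hedged sketch of one possible consolidation
       (an assumption about the reviewer's intent, not a change that is in the PR):

             // Refresh the cluster property file so the value set for location is the latest,
             // then check whether the location is specified in the cluster property.
             // (assumption: this rejoined wording is what "Fix this" is asking for)

       The surrounding code would stay as-is; only the comment text changes so the formatter
       no longer splits it awkwardly.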

##########
File path: solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
##########
@@ -388,982 +426,1324 @@ public Category getCategory() {
     return Category.ADMIN;
   }
 
-  private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
+  private static void createSysConfigSet(CoreContainer coreContainer)
+      throws KeeperException, InterruptedException {
     SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
     ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
     cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
-    cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
+    cmdExecutor.ensureExists(
+        ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL, zk);
 
     try {
-      String path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
-      byte[] data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
+      String path =
+          ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/schema.xml";
+      byte[] data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSchema.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
-      path = ZkStateReader.CONFIGS_ZKNODE + "/" + CollectionAdminParams.SYSTEM_COLL + "/solrconfig.xml";
-      data = IOUtils.toByteArray(CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
+      path =
+          ZkStateReader.CONFIGS_ZKNODE
+              + "/"
+              + CollectionAdminParams.SYSTEM_COLL
+              + "/solrconfig.xml";
+      data =
+          IOUtils.toByteArray(
+              CollectionsHandler.class.getResourceAsStream("/SystemCollectionSolrConfig.xml"));
       assert data != null && data.length > 0;
       cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
     } catch (IOException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
-
-
   }
 
-  private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
+  private static void addStatusToResponse(
+      NamedList<Object> results, RequestStatusState state, String msg) {
     SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
     status.add("state", state.getKey());
     status.add("msg", msg);
     results.add("status", status);
   }
 
   public enum CollectionOperation implements CollectionOp {
-    CREATE_OP(CREATE, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams().required(), null, NAME);
-      props.put("fromApi", "true");
-      copy(req.getParams(), props,
-          REPLICATION_FACTOR,
-          COLL_CONF,
-          NUM_SLICES,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          SHARDS_PROP,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          NRT_REPLICAS,
-          WAIT_FOR_FINAL_STATE,
-          PER_REPLICA_STATE,
-          ALIAS);
-
-      if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
-        //TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for more details
-        int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
-        int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
-        if (replicationFactor != nrtReplicas) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
-        }
-      }
-      if (props.get(REPLICATION_FACTOR) != null) {
-        props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
-      } else if (props.get(NRT_REPLICAS) != null) {
-        props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
-      }
-
-      final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
-      final String shardsParam = (String) props.get(SHARDS_PROP);
-      if (StringUtils.isNotEmpty(shardsParam)) {
-        verifyShardsParam(shardsParam);
-      }
-      if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
-        //We must always create a .system collection with only a single shard
-        props.put(NUM_SLICES, 1);
-        props.remove(SHARDS_PROP);
-        createSysConfigSet(h.coreContainer);
-
-      }
-      if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
-      for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
-        h.copyFromClusterProp(props, prop);
-      copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-      return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+    CREATE_OP(
+        CREATE,
+        (req, rsp, h) -> {
+          Map<String, Object> props = copy(req.getParams().required(), null, NAME);
+          props.put("fromApi", "true");
+          copy(
+              req.getParams(),
+              props,
+              REPLICATION_FACTOR,
+              COLL_CONF,
+              NUM_SLICES,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              SHARDS_PROP,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              NRT_REPLICAS,
+              WAIT_FOR_FINAL_STATE,
+              PER_REPLICA_STATE,
+              ALIAS);
+
+          if (props.get(REPLICATION_FACTOR) != null && props.get(NRT_REPLICAS) != null) {
+            // TODO: Remove this in 8.0 . Keep this for SolrJ client back-compat. See SOLR-11676 for
+            // more details
+            int replicationFactor = Integer.parseInt((String) props.get(REPLICATION_FACTOR));
+            int nrtReplicas = Integer.parseInt((String) props.get(NRT_REPLICAS));
+            if (replicationFactor != nrtReplicas) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Cannot specify both replicationFactor and nrtReplicas as they mean the same thing");
+            }
+          }
+          if (props.get(REPLICATION_FACTOR) != null) {
+            props.put(NRT_REPLICAS, props.get(REPLICATION_FACTOR));
+          } else if (props.get(NRT_REPLICAS) != null) {
+            props.put(REPLICATION_FACTOR, props.get(NRT_REPLICAS));
+          }
 
-    }),
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
+          final String shardsParam = (String) props.get(SHARDS_PROP);
+          if (StringUtils.isNotEmpty(shardsParam)) {
+            verifyShardsParam(shardsParam);
+          }
+          if (CollectionAdminParams.SYSTEM_COLL.equals(collectionName)) {
+            // We must always create a .system collection with only a single shard
+            props.put(NUM_SLICES, 1);
+            props.remove(SHARDS_PROP);
+            createSysConfigSet(h.coreContainer);
+          }
+          if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
+          for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
+            h.copyFromClusterProp(props, prop);
+          copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+          return copyPropertiesWithPrefix(req.getParams(), props, "router.");
+        }),
     @SuppressWarnings({"unchecked"})
-    COLSTATUS_OP(COLSTATUS, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          ColStatus.CORE_INFO_PROP,
-          ColStatus.SEGMENTS_PROP,
-          ColStatus.FIELD_INFO_PROP,
-          ColStatus.RAW_SIZE_PROP,
-          ColStatus.RAW_SIZE_SUMMARY_PROP,
-          ColStatus.RAW_SIZE_DETAILS_PROP,
-          ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
-          ColStatus.SIZE_INFO_PROP);
-
-      new ColStatus(h.coreContainer.getSolrClientCache(),
-          h.coreContainer.getZkController().getZkStateReader().getClusterState(), new ZkNodeProps(props))
-          .getColStatus(rsp.getValues());
-      return null;
-    }),
-    DELETE_OP(DELETE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
+    COLSTATUS_OP(
+        COLSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  ColStatus.CORE_INFO_PROP,
+                  ColStatus.SEGMENTS_PROP,
+                  ColStatus.FIELD_INFO_PROP,
+                  ColStatus.RAW_SIZE_PROP,
+                  ColStatus.RAW_SIZE_SUMMARY_PROP,
+                  ColStatus.RAW_SIZE_DETAILS_PROP,
+                  ColStatus.RAW_SIZE_SAMPLING_PERCENT_PROP,
+                  ColStatus.SIZE_INFO_PROP);
+
+          new ColStatus(
+                  h.coreContainer.getSolrClientCache(),
+                  h.coreContainer.getZkController().getZkStateReader().getClusterState(),
+                  new ZkNodeProps(props))
+              .getColStatus(rsp.getValues());
+          return null;
+        }),
+    DELETE_OP(
+        DELETE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
     // XXX should this command support followAliases?
-    RELOAD_OP(RELOAD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME);
-      return copy(req.getParams(), map);
-    }),
-
-    RENAME_OP(RENAME, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
-      return copy(req.getParams(), map, FOLLOW_ALIASES);
-    }),
-
-    REINDEXCOLLECTION_OP(REINDEXCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams().required(), null, NAME);
-      copy(req.getParams(), m,
-          ReindexCollectionCmd.COMMAND,
-          ReindexCollectionCmd.REMOVE_SOURCE,
-          ReindexCollectionCmd.TARGET,
-          ZkStateReader.CONFIGNAME_PROP,
-          NUM_SLICES,
-          NRT_REPLICAS,
-          PULL_REPLICAS,
-          TLOG_REPLICAS,
-          REPLICATION_FACTOR,
-          CREATE_NODE_SET,
-          CREATE_NODE_SET_SHUFFLE,
-          "shards",
-          CommonParams.ROWS,
-          CommonParams.Q,
-          CommonParams.FL,
-          FOLLOW_ALIASES);
-      if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
-        m.put(ZkStateReader.CONFIGNAME_PROP, req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
-      }
-      copyPropertiesWithPrefix(req.getParams(), m, "router.");
-      return m;
-    }),
-
-    SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get("collection");
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String shard = req.getParams().required().get("shard");
-
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-
-      DocCollection docCollection = clusterState.getCollection(collection);
-      ZkNodeProps leaderProps = docCollection.getLeader(shard);
-      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
-
-      try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl())
-          .withConnectionTimeout(15000)
-          .withSocketTimeout(60000)
-          .build()) {
-        RequestSyncShard reqSyncShard = new RequestSyncShard();
-        reqSyncShard.setCollection(collection);
-        reqSyncShard.setShard(shard);
-        reqSyncShard.setCoreName(nodeProps.getCoreName());
-        client.request(reqSyncShard);
-      }
-      return null;
-    }),
-
-    CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
-      String alias = req.getParams().get(NAME);
-      SolrIdentifierValidator.validateAliasName(alias);
-      String collections = req.getParams().get("collections");
-      RoutedAlias routedAlias = null;
-      Exception ex = null;
-      HashMap<String,Object> possiblyModifiedParams = new HashMap<>();
-      try {
-        // note that RA specific validation occurs here.
-        req.getParams().toMap(possiblyModifiedParams);
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        // This is awful because RoutedAlias lies about what types it wants
-        Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
-        routedAlias = RoutedAlias.fromProps(alias, temp);
-      } catch (SolrException e) {
-        // we'll throw this later if we are in fact creating a routed alias.
-        ex = e;
-      }
-      ModifiableSolrParams finalParams = new ModifiableSolrParams();
-      for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
-        if (entry.getValue().getClass().isArray() ) {
-          // v2 api hits this case
-          for (Object o : (Object[]) entry.getValue()) {
-            finalParams.add(entry.getKey(),o.toString());
+    RELOAD_OP(
+        RELOAD,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, NAME);
+          return copy(req.getParams(), map);
+        }),
+
+    RENAME_OP(
+        RENAME,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, NAME, CollectionAdminParams.TARGET);
+          return copy(req.getParams(), map, FOLLOW_ALIASES);
+        }),
+
+    REINDEXCOLLECTION_OP(
+        REINDEXCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m = copy(req.getParams().required(), null, NAME);
+          copy(
+              req.getParams(),
+              m,
+              ReindexCollectionCmd.COMMAND,
+              ReindexCollectionCmd.REMOVE_SOURCE,
+              ReindexCollectionCmd.TARGET,
+              ZkStateReader.CONFIGNAME_PROP,
+              NUM_SLICES,
+              NRT_REPLICAS,
+              PULL_REPLICAS,
+              TLOG_REPLICAS,
+              REPLICATION_FACTOR,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              "shards",
+              CommonParams.ROWS,
+              CommonParams.Q,
+              CommonParams.FL,
+              FOLLOW_ALIASES);
+          if (req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP) != null) {
+            m.put(
+                ZkStateReader.CONFIGNAME_PROP,
+                req.getParams().get("collection." + ZkStateReader.CONFIGNAME_PROP));
+          }
+          copyPropertiesWithPrefix(req.getParams(), m, "router.");
+          return m;
+        }),
+
+    SYNCSHARD_OP(
+        SYNCSHARD,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get("collection");
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String shard = req.getParams().required().get("shard");
+
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+
+          DocCollection docCollection = clusterState.getCollection(collection);
+          ZkNodeProps leaderProps = docCollection.getLeader(shard);
+          ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
+
+          try (HttpSolrClient client =
+              new Builder(nodeProps.getBaseUrl())
+                  .withConnectionTimeout(15000)
+                  .withSocketTimeout(60000)
+                  .build()) {
+            RequestSyncShard reqSyncShard = new RequestSyncShard();
+            reqSyncShard.setCollection(collection);
+            reqSyncShard.setShard(shard);
+            reqSyncShard.setCoreName(nodeProps.getCoreName());
+            client.request(reqSyncShard);
+          }
+          return null;
+        }),
+
+    CREATEALIAS_OP(
+        CREATEALIAS,
+        (req, rsp, h) -> {
+          String alias = req.getParams().get(NAME);
+          SolrIdentifierValidator.validateAliasName(alias);
+          String collections = req.getParams().get("collections");
+          RoutedAlias routedAlias = null;
+          Exception ex = null;
+          HashMap<String, Object> possiblyModifiedParams = new HashMap<>();
+          try {
+            // note that RA specific validation occurs here.
+            req.getParams().toMap(possiblyModifiedParams);
+            @SuppressWarnings({"unchecked", "rawtypes"})
+            // This is awful because RoutedAlias lies about what types it wants
+            Map<String, String> temp = (Map<String, String>) (Map) possiblyModifiedParams;
+            routedAlias = RoutedAlias.fromProps(alias, temp);
+          } catch (SolrException e) {
+            // we'll throw this later if we are in fact creating a routed alias.
+            ex = e;
+          }
+          ModifiableSolrParams finalParams = new ModifiableSolrParams();
+          for (Map.Entry<String, Object> entry : possiblyModifiedParams.entrySet()) {
+            if (entry.getValue().getClass().isArray()) {
+              // v2 api hits this case
+              for (Object o : (Object[]) entry.getValue()) {
+                finalParams.add(entry.getKey(), o.toString());
+              }
+            } else {
+              finalParams.add(entry.getKey(), entry.getValue().toString());
+            }
           }
-        } else {
-          finalParams.add(entry.getKey(),entry.getValue().toString());
-        }
-      }
 
-      if (collections != null) {
-        if (routedAlias != null) {
-          throw new SolrException(BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
-        } else {
-          //////////////////////////////////////
-          // Regular alias creation indicated //
-          //////////////////////////////////////
-          return copy(finalParams.required(), null, NAME, "collections");
-        }
-      }
+          if (collections != null) {
+            if (routedAlias != null) {
+              throw new SolrException(
+                  BAD_REQUEST, "Collections cannot be specified when creating a routed alias.");
+            } else {
+              //////////////////////////////////////
+              // Regular alias creation indicated //
+              //////////////////////////////////////
+              return copy(finalParams.required(), null, NAME, "collections");
+            }
+          }
 
-      /////////////////////////////////////////////////
-      // We are creating a routed alias from here on //
-      /////////////////////////////////////////////////
+          /////////////////////////////////////////////////
+          // We are creating a routed alias from here on //
+          /////////////////////////////////////////////////
 
-      // If our prior creation attempt had issues expose them now.
-      if (ex != null) {
-        throw ex;
-      }
+          // If our prior creation attempt had issues expose them now.
+          if (ex != null) {
+            throw ex;
+          }
 
-      // Now filter out just the parameters we care about from the request
-      assert routedAlias != null;
-      Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
-      copy(finalParams, result, routedAlias.getOptionalParams());
-
-      ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
-
-      // add to result params that start with "create-collection.".
-      //   Additionally, save these without the prefix to createCollParams
-      for (Map.Entry<String, String[]> entry : finalParams) {
-        final String p = entry.getKey();
-        if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
-          // This is what SolrParams#getAll(Map, Collection)} does
-          final String[] v = entry.getValue();
-          if (v.length == 1) {
-            result.put(p, v[0]);
-          } else {
-            result.put(p, v);
+          // Now filter out just the parameters we care about from the request
+          assert routedAlias != null;
+          Map<String, Object> result = copy(finalParams, null, routedAlias.getRequiredParams());
+          copy(finalParams, result, routedAlias.getOptionalParams());
+
+          ModifiableSolrParams createCollParams = new ModifiableSolrParams(); // without prefix
+
+          // add to result params that start with "create-collection.".
+          //   Additionally, save these without the prefix to createCollParams
+          for (Map.Entry<String, String[]> entry : finalParams) {
+            final String p = entry.getKey();
+            if (p.startsWith(CREATE_COLLECTION_PREFIX)) {
+              // This is what SolrParams#getAll(Map, Collection)} does
+              final String[] v = entry.getValue();
+              if (v.length == 1) {
+                result.put(p, v[0]);
+              } else {
+                result.put(p, v);
+              }
+              createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
+            }
           }
-          createCollParams.set(p.substring(CREATE_COLLECTION_PREFIX.length()), v);
-        }
-      }
 
-      // Verify that the create-collection prefix'ed params appear to be valid.
-      if (createCollParams.get(NAME) != null) {
-        throw new SolrException(BAD_REQUEST, "routed aliases calculate names for their " +
-            "dependent collections, you cannot specify the name.");
-      }
-      if (createCollParams.get(COLL_CONF) == null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "We require an explicit " + COLL_CONF);
-      }
-      // note: could insist on a config name here as well.... or wait to throw at overseer
-      createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
-      CREATE_OP.execute(new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
+          // Verify that the create-collection prefix'ed params appear to be valid.
+          if (createCollParams.get(NAME) != null) {
+            throw new SolrException(
+                BAD_REQUEST,
+                "routed aliases calculate names for their "
+                    + "dependent collections, you cannot specify the name.");
+          }
+          if (createCollParams.get(COLL_CONF) == null) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST, "We require an explicit " + COLL_CONF);
+          }
+          // note: could insist on a config name here as well.... or wait to throw at overseer
+          createCollParams.add(NAME, "TMP_name_TMP_name_TMP"); // just to pass validation
+          CREATE_OP.execute(
+              new LocalSolrQueryRequest(null, createCollParams), rsp, h); // ignore results
 
-      return result;
-    }),
+          return result;
+        }),
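
Side note (not part of this PR): the reformatted CREATEALIAS_OP above distinguishes two request shapes. If "collections" is present the handler returns early with a plain alias; otherwise the router properties are validated through RoutedAlias.fromProps and the "create-collection."-prefixed params must carry a configName but never a collection name. A minimal SolrJ-side sketch of both shapes follows, assuming a local node at http://localhost:8983/solr, placeholder alias/collection names, and the standard time-routed alias router.* properties.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class CreateAliasSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // Plain alias: "collections" is present, so CREATEALIAS_OP returns name + collections only.
      ModifiableSolrParams plain = new ModifiableSolrParams();
      plain.set("action", "CREATEALIAS");
      plain.set("name", "books");
      plain.set("collections", "books_2021,books_2022");
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", plain));

      // Routed alias: no "collections"; router.* params go through RoutedAlias.fromProps, and the
      // "create-collection." params must include a configName but must not include a name.
      ModifiableSolrParams routed = new ModifiableSolrParams();
      routed.set("action", "CREATEALIAS");
      routed.set("name", "logs");
      routed.set("router.name", "time");          // assumed time-routed alias parameters
      routed.set("router.field", "timestamp_dt");
      routed.set("router.start", "NOW/DAY");
      routed.set("router.interval", "+1DAY");
      routed.set("create-collection.collection.configName", "_default");
      routed.set("create-collection.numShards", "1");
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", routed));
    }
  }
}
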
 
     DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> copy(req.getParams().required(), null, NAME)),
 
     /**
      * Change properties for an alias (use CREATEALIAS_OP to change the actual value of the alias)
      */
-    ALIASPROP_OP(ALIASPROP, (req, rsp, h) -> {
-      Map<String, Object> params = copy(req.getParams().required(), null, NAME);
-
-      // Note: success/no-op in the event of no properties supplied is intentional. Keeps code simple and one less case
-      // for api-callers to check for.
-      return convertPrefixToMap(req.getParams(), params, "property");
-    }),
-
-    /**
-     * List the aliases and associated properties.
-     */
+    ALIASPROP_OP(
+        ALIASPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> params = copy(req.getParams().required(), null, NAME);
+
+          // Note: success/no-op in the event of no properties supplied is intentional. Keeps code
+          // simple and one less case
+          // for api-callers to check for.
+          return convertPrefixToMap(req.getParams(), params, "property");
+        }),
+
+    /** List the aliases and associated properties. */
     @SuppressWarnings({"unchecked"})
-    LISTALIASES_OP(LISTALIASES, (req, rsp, h) -> {
-      ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
-      // if someone calls listAliases, lets ensure we return an up to date response
-      zkStateReader.aliasesManager.update();
-      Aliases aliases = zkStateReader.getAliases();
-      if (aliases != null) {
-        // the aliases themselves...
-        rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
-        // Any properties for the above aliases.
-        Map<String, Map<String, String>> meta = new LinkedHashMap<>();
-        for (String alias : aliases.getCollectionAliasListMap().keySet()) {
-          Map<String, String> collectionAliasProperties = aliases.getCollectionAliasProperties(alias);
-          if (!collectionAliasProperties.isEmpty()) {
-            meta.put(alias, collectionAliasProperties);
+    LISTALIASES_OP(
+        LISTALIASES,
+        (req, rsp, h) -> {
+          ZkStateReader zkStateReader = h.coreContainer.getZkController().getZkStateReader();
+          // if someone calls listAliases, lets ensure we return an up to date response
+          zkStateReader.aliasesManager.update();
+          Aliases aliases = zkStateReader.getAliases();
+          if (aliases != null) {
+            // the aliases themselves...
+            rsp.getValues().add("aliases", aliases.getCollectionAliasMap());
+            // Any properties for the above aliases.
+            Map<String, Map<String, String>> meta = new LinkedHashMap<>();
+            for (String alias : aliases.getCollectionAliasListMap().keySet()) {
+              Map<String, String> collectionAliasProperties =
+                  aliases.getCollectionAliasProperties(alias);
+              if (!collectionAliasProperties.isEmpty()) {
+                meta.put(alias, collectionAliasProperties);
+              }
+            }
+            rsp.getValues().add("properties", meta);
+          }
+          return null;
+        }),
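
Side note (not part of this PR): LISTALIASES_OP above forces an aliases refresh and then returns two top-level entries, "aliases" (alias name to target collections) and "properties" (only aliases that carry non-empty properties). A short sketch of reading both entries with SolrJ, using the same placeholder base URL as above.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class ListAliasesSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "LISTALIASES");
      NamedList<Object> response =
          client.request(
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params));
      // "aliases" maps each alias to its target collections; "properties" lists only the
      // aliases that have non-empty property maps, as built by the handler above.
      System.out.println("aliases: " + response.get("aliases"));
      System.out.println("properties: " + response.get("properties"));
    }
  }
}
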
+    SPLITSHARD_OP(
+        SPLITSHARD,
+        DEFAULT_COLLECTION_OP_TIMEOUT * 5,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(COLLECTION_PROP);
+          // TODO : add support for multiple shards
+          String shard = req.getParams().get(SHARD_ID_PROP);
+          String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
+          String splitKey = req.getParams().get("split.key");
+          String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
+          String fuzz = req.getParams().get(SPLIT_FUZZ);
+
+          if (splitKey == null && shard == null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
+          }
+          if (splitKey != null && shard != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'shard' or 'split.key' should be specified");
+          }
+          if (splitKey != null && rangesStr != null) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Only one of 'ranges' or 'split.key' should be specified");
+          }
+          if (numSubShards != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "numSubShards can not be specified with split.key or ranges parameters");
+          }
+          if (fuzz != null && (splitKey != null || rangesStr != null)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "fuzz can not be specified with split.key or ranges parameters");
           }
-        }
-        rsp.getValues().add("properties", meta);
-      }
-      return null;
-    }),
-    SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, (req, rsp, h) -> {
-      String name = req.getParams().required().get(COLLECTION_PROP);
-      // TODO : add support for multiple shards
-      String shard = req.getParams().get(SHARD_ID_PROP);
-      String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
-      String splitKey = req.getParams().get("split.key");
-      String numSubShards = req.getParams().get(NUM_SUB_SHARDS);
-      String fuzz = req.getParams().get(SPLIT_FUZZ);
-
-      if (splitKey == null && shard == null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
-      }
-      if (splitKey != null && shard != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'shard' or 'split.key' should be specified");
-      }
-      if (splitKey != null && rangesStr != null) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Only one of 'ranges' or 'split.key' should be specified");
-      }
-      if (numSubShards != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "numSubShards can not be specified with split.key or ranges parameters");
-      }
-      if (fuzz != null && (splitKey != null || rangesStr != null)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "fuzz can not be specified with split.key or ranges parameters");
-      }
 
-      Map<String, Object> map = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          "split.key",
-          CoreAdminParams.RANGES,
-          WAIT_FOR_FINAL_STATE,
-          TIMING,
-          SPLIT_METHOD,
-          NUM_SUB_SHARDS,
-          SPLIT_FUZZ,
-          SPLIT_BY_PREFIX,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          FOLLOW_ALIASES);
-      return map;
-    }),
-    FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> {
-      forceLeaderElection(req, h);
-      return null;
-    }),
-    CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP);
-      ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
-      boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      if (!ImplicitDocRouter.NAME.equals(((Map<?,?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
-      copy(req.getParams(), map,
-          REPLICATION_FACTOR,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          WAIT_FOR_FINAL_STATE,
-          FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
-    }),
-    DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP);
-
-      return copy(req.getParams(), map,
-          DELETE_INDEX,
-          DELETE_DATA_DIR,
-          DELETE_INSTANCE_DIR,
-          COUNT_PROP, REPLICA_PROP,
-          SHARD_ID_PROP,
-          ONLY_IF_DOWN,
-          FOLLOW_ALIASES);
-    }),
-    MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP, "split.key", "target.collection");
-      return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
-    }),
-    ADDROLE_OP(ADDROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
-      if (!KNOWN_ROLES.contains(map.get("role")))
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
-      return map;
-    }),
-    CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> {
-      String name = req.getParams().required().get(NAME);
-      String val = req.getParams().get(VALUE_LONG);
-      ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setClusterProperty(name, val);
-      return null;
-    }),
-    COLLECTIONPROP_OP(COLLECTIONPROP, (req, rsp, h) -> {
-      String extCollection = req.getParams().required().get(NAME);
-      String collection = h.coreContainer.getZkController().getZkStateReader().getAliases().resolveSimpleAlias(extCollection);
-      String name = req.getParams().required().get(PROPERTY_NAME);
-      String val = req.getParams().get(PROPERTY_VALUE);
-      CollectionProperties cp = new CollectionProperties(h.coreContainer.getZkController().getZkClient());
-      cp.setCollectionProperty(collection, name, val);
-      return null;
-    }),
+          Map<String, Object> map =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  SHARD_ID_PROP,
+                  "split.key",
+                  CoreAdminParams.RANGES,
+                  WAIT_FOR_FINAL_STATE,
+                  TIMING,
+                  SPLIT_METHOD,
+                  NUM_SUB_SHARDS,
+                  SPLIT_FUZZ,
+                  SPLIT_BY_PREFIX,
+                  FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
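
Side note (not part of this PR): the SPLITSHARD_OP validation above is all about mutually exclusive parameters. Exactly one of "shard" or "split.key" must be given, "ranges" cannot be combined with "split.key", and numSubShards/fuzz cannot be combined with either "split.key" or "ranges". A hedged sketch of a request that passes those checks, with placeholder names.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class SplitShardSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      ModifiableSolrParams split = new ModifiableSolrParams();
      split.set("action", "SPLITSHARD");
      split.set("collection", "books");
      split.set("shard", "shard1");          // exactly one of "shard" or "split.key"
      split.set("numSubShards", "4");        // allowed with "shard", rejected with split.key/ranges
      // split.set("split.key", "user123!"); // adding this would fail: only one of shard/split.key
      // split.set("ranges", "0-7fffffff");  // combined with split.key this would also be rejected
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", split));
    }
  }
}
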
+    DELETESHARD_OP(
+        DELETESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              FOLLOW_ALIASES);
+          return map;
+        }),
+    FORCELEADER_OP(
+        FORCELEADER,
+        (req, rsp, h) -> {
+          forceLeaderElection(req, h);
+          return null;
+        }),
+    CREATESHARD_OP(
+        CREATESHARD,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, SHARD_ID_PROP);
+          ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          final String newShardName =
+              SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
+          boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          if (!ImplicitDocRouter.NAME.equals(
+              ((Map<?, ?>) clusterState.getCollection(collectionName).get(DOC_ROUTER)).get(NAME)))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
+          copy(
+              req.getParams(),
+              map,
+              REPLICATION_FACTOR,
+              NRT_REPLICAS,
+              TLOG_REPLICAS,
+              PULL_REPLICAS,
+              CREATE_NODE_SET,
+              WAIT_FOR_FINAL_STATE,
+              FOLLOW_ALIASES);
+          return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
+        }),
+    DELETEREPLICA_OP(
+        DELETEREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, COLLECTION_PROP);
+
+          return copy(
+              req.getParams(),
+              map,
+              DELETE_INDEX,
+              DELETE_DATA_DIR,
+              DELETE_INSTANCE_DIR,
+              COUNT_PROP,
+              REPLICA_PROP,
+              SHARD_ID_PROP,
+              ONLY_IF_DOWN,
+              FOLLOW_ALIASES);
+        }),
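
Side note (not part of this PR): DELETEREPLICA_OP above only requires "collection"; the remaining parameters select either a named replica ("shard" plus "replica") or a count-based deletion ("count"), plus flags such as "onlyIfDown" and the deleteIndex/deleteDataDir/deleteInstanceDir switches. A small sketch of both forms with placeholder names.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class DeleteReplicaSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // Delete one named replica, but only if it is already down.
      ModifiableSolrParams byName = new ModifiableSolrParams();
      byName.set("action", "DELETEREPLICA");
      byName.set("collection", "books");
      byName.set("shard", "shard1");
      byName.set("replica", "core_node5");   // placeholder replica name
      byName.set("onlyIfDown", "true");
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", byName));

      // Count-based form: remove two replicas from shard1 and let Solr pick which ones.
      ModifiableSolrParams byCount = new ModifiableSolrParams();
      byCount.set("action", "DELETEREPLICA");
      byCount.set("collection", "books");
      byCount.set("shard", "shard1");
      byCount.set("count", "2");
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", byCount));
    }
  }
}
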
+    MIGRATE_OP(
+        MIGRATE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  "split.key",
+                  "target.collection");
+          return copy(req.getParams(), map, "forward.timeout", FOLLOW_ALIASES);
+        }),
+    ADDROLE_OP(
+        ADDROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    REMOVEROLE_OP(
+        REMOVEROLE,
+        (req, rsp, h) -> {
+          Map<String, Object> map = copy(req.getParams().required(), null, "role", "node");
+          if (!KNOWN_ROLES.contains(map.get("role")))
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
+          return map;
+        }),
+    CLUSTERPROP_OP(
+        CLUSTERPROP,
+        (req, rsp, h) -> {
+          String name = req.getParams().required().get(NAME);
+          String val = req.getParams().get(VALUE_LONG);
+          ClusterProperties cp =
+              new ClusterProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setClusterProperty(name, val);
+          return null;
+        }),
+    COLLECTIONPROP_OP(
+        COLLECTIONPROP,
+        (req, rsp, h) -> {
+          String extCollection = req.getParams().required().get(NAME);
+          String collection =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getAliases()
+                  .resolveSimpleAlias(extCollection);
+          String name = req.getParams().required().get(PROPERTY_NAME);
+          String val = req.getParams().get(PROPERTY_VALUE);
+          CollectionProperties cp =
+              new CollectionProperties(h.coreContainer.getZkController().getZkClient());
+          cp.setCollectionProperty(collection, name, val);
+          return null;
+        }),
     @SuppressWarnings({"unchecked"})
-    REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> {
-      req.getParams().required().check(REQUESTID);
-
-      final CoreContainer coreContainer = h.coreContainer;
-      final String requestId = req.getParams().get(REQUESTID);
-      final ZkController zkController = coreContainer.getZkController();
-
-      final NamedList<Object> status = new NamedList<>();
-      if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-        if (zkController.getOverseerCompletedMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, COMPLETED, "found [" + requestId + "] in completed tasks");
-        } else if (zkController.getOverseerFailureMap().contains(requestId)) {
-          final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
-          rsp.getValues().addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
-          addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
-        } else if (zkController.getOverseerRunningMap().contains(requestId)) {
-          addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
-        } else if (h.overseerCollectionQueueContains(requestId)) {
-          addStatusToResponse(status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
-        } else {
-          addStatusToResponse(status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
-        }
-      } else {
-        Pair<RequestStatusState, OverseerSolrResponse> sr =
-            coreContainer.getDistributedCollectionCommandRunner().get().getAsyncTaskRequestStatus(requestId);
-        final String message;
-        switch (sr.first()) {
-          case COMPLETED:
-            message = "found [" + requestId + "] in completed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case FAILED:
-            message = "found [" + requestId + "] in failed tasks";
-            rsp.getValues().addAll(sr.second().getResponse());
-            break;
-          case RUNNING:
-            message = "found [" + requestId + "] in running tasks";
-            break;
-          case SUBMITTED:
-            message = "found [" + requestId + "] in submitted tasks";
-            break;
-          default:
-            message = "Did not find [" + requestId + "] in any tasks queue";
-        }
-        addStatusToResponse(status, sr.first(), message);
-      }
-
-      rsp.getValues().addAll(status);
-      return null;
-    }),
-    DELETESTATUS_OP(DELETESTATUS, new CollectionOp() {
-      @SuppressWarnings("unchecked")
-      @Override
-      public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
-        final CoreContainer coreContainer = h.coreContainer;
-        final String requestId = req.getParams().get(REQUESTID);
-        final ZkController zkController = coreContainer.getZkController();
-        Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
-
-        if (requestId == null && !flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
-        }
-
-        if (requestId != null && flush) {
-          throw new SolrException(ErrorCode.BAD_REQUEST,
-              "Both requestid and flush parameters can not be specified together.");
-        }
-
-        if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
-          if (flush) {
-            Collection<String> completed = zkController.getOverseerCompletedMap().keys();
-            Collection<String> failed = zkController.getOverseerFailureMap().keys();
-            for (String asyncId : completed) {
-              zkController.getOverseerCompletedMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
-            }
-            for (String asyncId : failed) {
-              zkController.getOverseerFailureMap().remove(asyncId);
-              zkController.clearAsyncId(asyncId);
+    REQUESTSTATUS_OP(
+        REQUESTSTATUS,
+        (req, rsp, h) -> {
+          req.getParams().required().check(REQUESTID);
+
+          final CoreContainer coreContainer = h.coreContainer;
+          final String requestId = req.getParams().get(REQUESTID);
+          final ZkController zkController = coreContainer.getZkController();
+
+          final NamedList<Object> status = new NamedList<>();
+          if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+            if (zkController.getOverseerCompletedMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(
+                  status, COMPLETED, "found [" + requestId + "] in completed tasks");
+            } else if (zkController.getOverseerFailureMap().contains(requestId)) {
+              final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
+              rsp.getValues()
+                  .addAll(OverseerSolrResponseSerializer.deserialize(mapEntry).getResponse());
+              addStatusToResponse(status, FAILED, "found [" + requestId + "] in failed tasks");
+            } else if (zkController.getOverseerRunningMap().contains(requestId)) {
+              addStatusToResponse(status, RUNNING, "found [" + requestId + "] in running tasks");
+            } else if (h.overseerCollectionQueueContains(requestId)) {
+              addStatusToResponse(
+                  status, SUBMITTED, "found [" + requestId + "] in submitted tasks");
+            } else {
+              addStatusToResponse(
+                  status, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
             }
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
           } else {
-            // Request to cleanup
-            if (zkController.getOverseerCompletedMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else if (zkController.getOverseerFailureMap().remove(requestId)) {
-              zkController.clearAsyncId(requestId);
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
-            } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
-              // Don't call zkController.clearAsyncId for this, since it could be a running/pending task
+            Pair<RequestStatusState, OverseerSolrResponse> sr =
+                coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .getAsyncTaskRequestStatus(requestId);
+            final String message;
+            switch (sr.first()) {
+              case COMPLETED:
+                message = "found [" + requestId + "] in completed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case FAILED:
+                message = "found [" + requestId + "] in failed tasks";
+                rsp.getValues().addAll(sr.second().getResponse());
+                break;
+              case RUNNING:
+                message = "found [" + requestId + "] in running tasks";
+                break;
+              case SUBMITTED:
+                message = "found [" + requestId + "] in submitted tasks";
+                break;
+              default:
+                message = "Did not find [" + requestId + "] in any tasks queue";
             }
+            addStatusToResponse(status, sr.first(), message);
           }
-        } else {
-          if (flush) {
-            coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
-            rsp.getValues().add("status", "successfully cleared stored collection api responses");
-          } else {
-            if (coreContainer.getDistributedCollectionCommandRunner().get().deleteSingleAsyncId(requestId)) {
-              rsp.getValues().add("status", "successfully removed stored response for [" + requestId + "]");
+
+          rsp.getValues().addAll(status);
+          return null;
+        }),
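
Side note (not part of this PR): REQUESTSTATUS_OP answers with one of the states visible above (COMPLETED, FAILED, RUNNING, SUBMITTED, or NOT_FOUND) regardless of whether the request was tracked in the Overseer maps or by the distributed command runner. A minimal polling sketch for an async request id; the id, sleep interval, and the rough string check on the "status" entry are placeholder assumptions.

import java.util.Locale;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class RequestStatusSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "REQUESTSTATUS");
      params.set("requestid", "split-books-1");   // async id supplied on the original request
      for (int attempt = 0; attempt < 30; attempt++) {
        NamedList<Object> response =
            client.request(
                new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params));
        // The handler adds a "status" entry describing the state and a human readable message.
        Object status = response.get("status");
        System.out.println("attempt " + attempt + ": " + status);
        // Rough string check; real code would read the state field out of the "status" entry.
        if (status != null
            && status.toString().toLowerCase(Locale.ROOT).contains("completed")) {
          break;
        }
        Thread.sleep(2000);
      }
    }
  }
}
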
+    DELETESTATUS_OP(
+        DELETESTATUS,
+        new CollectionOp() {
+          @SuppressWarnings("unchecked")
+          @Override
+          public Map<String, Object> execute(
+              SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
+            final CoreContainer coreContainer = h.coreContainer;
+            final String requestId = req.getParams().get(REQUESTID);
+            final ZkController zkController = coreContainer.getZkController();
+            Boolean flush = req.getParams().getBool(CollectionAdminParams.FLUSH, false);
+
+            if (requestId == null && !flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST, "Either requestid or flush parameter must be specified.");
+            }
+
+            if (requestId != null && flush) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "Both requestid and flush parameters can not be specified together.");
+            }
+
+            if (coreContainer.getDistributedCollectionCommandRunner().isEmpty()) {
+              if (flush) {
+                Collection<String> completed = zkController.getOverseerCompletedMap().keys();
+                Collection<String> failed = zkController.getOverseerFailureMap().keys();
+                for (String asyncId : completed) {
+                  zkController.getOverseerCompletedMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                for (String asyncId : failed) {
+                  zkController.getOverseerFailureMap().remove(asyncId);
+                  zkController.clearAsyncId(asyncId);
+                }
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                // Request to cleanup
+                if (zkController.getOverseerCompletedMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else if (zkController.getOverseerFailureMap().remove(requestId)) {
+                  zkController.clearAsyncId(requestId);
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                  // Don't call zkController.clearAsyncId for this, since it could be a
+                  // running/pending task
+                }
+              }
             } else {
-              rsp.getValues().add("status", "[" + requestId + "] not found in stored responses");
+              if (flush) {
+                coreContainer.getDistributedCollectionCommandRunner().get().deleteAllAsyncIds();
+                rsp.getValues()
+                    .add("status", "successfully cleared stored collection api responses");
+              } else {
+                if (coreContainer
+                    .getDistributedCollectionCommandRunner()
+                    .get()
+                    .deleteSingleAsyncId(requestId)) {
+                  rsp.getValues()
+                      .add(
+                          "status", "successfully removed stored response for [" + requestId + "]");
+                } else {
+                  rsp.getValues()
+                      .add("status", "[" + requestId + "] not found in stored responses");
+                }
+              }
             }
+            return null;
           }
-        }
-        return null;
-      }
-    }),
-    ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> {
-      Map<String, Object> props = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          "node",
-          SHARD_ID_PROP,
-          _ROUTE_,
-          CoreAdminParams.NAME,
-          INSTANCE_DIR,
-          DATA_DIR,
-          ULOG_DIR,
-          REPLICA_TYPE,
-          WAIT_FOR_FINAL_STATE,
-          NRT_REPLICAS,
-          TLOG_REPLICAS,
-          PULL_REPLICAS,
-          CREATE_NODE_SET,
-          FOLLOW_ALIASES,
-          SKIP_NODE_ASSIGNMENT);
-      return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
-    }),
+        }),
+    ADDREPLICA_OP(
+        ADDREPLICA,
+        (req, rsp, h) -> {
+          Map<String, Object> props =
+              copy(
+                  req.getParams(),
+                  null,
+                  COLLECTION_PROP,
+                  "node",
+                  SHARD_ID_PROP,
+                  _ROUTE_,
+                  CoreAdminParams.NAME,
+                  INSTANCE_DIR,
+                  DATA_DIR,
+                  ULOG_DIR,
+                  REPLICA_TYPE,
+                  WAIT_FOR_FINAL_STATE,
+                  NRT_REPLICAS,
+                  TLOG_REPLICAS,
+                  PULL_REPLICAS,
+                  CREATE_NODE_SET,
+                  FOLLOW_ALIASES,
+                  SKIP_NODE_ASSIGNMENT);
+          return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
+        }),
     OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> new LinkedHashMap<>()),
     @SuppressWarnings({"unchecked"})
-    DISTRIBUTEDAPIPROCESSING_OP(DISTRIBUTEDAPIPROCESSING, (req, rsp, h)  -> {
-      NamedList<Object> results = new NamedList<>();
-      boolean isDistributedApi = h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
-      results.add("isDistributedApi", isDistributedApi);
-      rsp.getValues().addAll(results);
-      return null;
-    }),
-    /**
-     * Handle list collection request.
-     * Do list collection request to zk host
-     */
+    DISTRIBUTEDAPIPROCESSING_OP(
+        DISTRIBUTEDAPIPROCESSING,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          boolean isDistributedApi =
+              h.coreContainer.getDistributedCollectionCommandRunner().isPresent();
+          results.add("isDistributedApi", isDistributedApi);
+          rsp.getValues().addAll(results);
+          return null;
+        }),
+    /** Handle list collection request. Do list collection request to zk host */
     @SuppressWarnings({"unchecked"})
-    LIST_OP(LIST, (req, rsp, h) -> {
-      NamedList<Object> results = new NamedList<>();
-      Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
-      List<String> collectionList = new ArrayList<>(collections.keySet());
-      Collections.sort(collectionList);
-      // XXX should we add aliases here?
-      results.add("collections", collectionList);
-      SolrResponse response = new OverseerSolrResponse(results);
-      rsp.getValues().addAll(response.getResponse());
-      return null;
-    }),
+    LIST_OP(
+        LIST,
+        (req, rsp, h) -> {
+          NamedList<Object> results = new NamedList<>();
+          Map<String, DocCollection> collections =
+              h.coreContainer
+                  .getZkController()
+                  .getZkStateReader()
+                  .getClusterState()
+                  .getCollectionsMap();
+          List<String> collectionList = new ArrayList<>(collections.keySet());
+          Collections.sort(collectionList);
+          // XXX should we add aliases here?
+          results.add("collections", collectionList);
+          SolrResponse response = new OverseerSolrResponse(results);
+          rsp.getValues().addAll(response.getResponse());
+          return null;
+        }),
     /**
-     * Handle cluster status request.
-     * Can return status per specific collection/shard or per all collections.
+     * Handle cluster status request. Can return status per specific collection/shard or per all
+     * collections.
      */
-    CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> {
-      Map<String, Object> all = copy(req.getParams(), null,
-          COLLECTION_PROP,
-          SHARD_ID_PROP,
-          _ROUTE_);
-      new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(),
-          new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
-      return null;
-    }),
-    ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP,
-          PROPERTY_VALUE_PROP);
-      copy(req.getParams(), map, SHARD_UNIQUE);
-      String property = (String) map.get(PROPERTY_PROP);
-      if (!property.startsWith(PROPERTY_PREFIX)) {
-        property = PROPERTY_PREFIX + property;
-      }
+    CLUSTERSTATUS_OP(
+        CLUSTERSTATUS,
+        (req, rsp, h) -> {
+          Map<String, Object> all =
+              copy(req.getParams(), null, COLLECTION_PROP, SHARD_ID_PROP, _ROUTE_);
+          new ClusterStatus(
+                  h.coreContainer.getZkController().getZkStateReader(), new ZkNodeProps(all))
+              .getClusterStatus(rsp.getValues());
+          return null;
+        }),
+    ADDREPLICAPROP_OP(
+        ADDREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP,
+                  PROPERTY_VALUE_PROP);
+          copy(req.getParams(), map, SHARD_UNIQUE);
+          String property = (String) map.get(PROPERTY_PROP);
+          if (!property.startsWith(PROPERTY_PREFIX)) {
+            property = PROPERTY_PREFIX + property;
+          }
 
-      boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
-
-      // Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
-      // in a slice on properties that are known to only be one-per-slice and error out if so.
-      if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
-          SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
-          uniquePerSlice == false) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            "Overseer replica property command received for property " + property +
-                " with the " + SHARD_UNIQUE +
-                " parameter set to something other than 'true'. No action taken.");
-      }
-      return map;
-    }),
+          boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
+
+          // Check if we're trying to set a property with parameters that allow us to set the
+          // property on multiple replicas
+          // in a slice on properties that are known to only be one-per-slice and error out if so.
+          if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE))
+              && SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(
+                  property.toLowerCase(Locale.ROOT))
+              && uniquePerSlice == false) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Overseer replica property command received for property "
+                    + property
+                    + " with the "
+                    + SHARD_UNIQUE
+                    + " parameter set to something other than 'true'. No action taken.");
+          }
+          return map;
+        }),
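
Side note (not part of this PR): the check above guards the slice-unique boolean properties. If "shardUnique" is supplied for a property listed in SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES (preferredLeader being the usual example) it must be "true"; any other value is rejected, and plain property names get the "property." prefix added for them. A hedged request sketch with placeholder names.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class AddReplicaPropSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "ADDREPLICAPROP");
      params.set("collection", "books");
      params.set("shard", "shard1");
      params.set("replica", "core_node5");        // placeholder replica name
      params.set("property", "preferredLeader");  // stored as property.preferredLeader
      params.set("property.value", "true");
      // preferredLeader is slice-unique: shardUnique may be omitted or "true", never "false".
      params.set("shardUnique", "true");
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params));
    }
  }
}
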
     // XXX should this command support followAliases?
-    DELETEREPLICAPROP_OP(DELETEREPLICAPROP, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP,
-          SHARD_ID_PROP,
-          REPLICA_PROP);
-      return copy(req.getParams(), map, PROPERTY_PROP);
-    }),
+    DELETEREPLICAPROP_OP(
+        DELETEREPLICAPROP,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(
+                  req.getParams().required(),
+                  null,
+                  COLLECTION_PROP,
+                  PROPERTY_PROP,
+                  SHARD_ID_PROP,
+                  REPLICA_PROP);
+          return copy(req.getParams(), map, PROPERTY_PROP);
+        }),
     // XXX should this command support followAliases?
-    BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE, (req, rsp, h) -> {
-      Map<String, Object> map = copy(req.getParams().required(), null,
-          COLLECTION_PROP,
-          PROPERTY_PROP);
-      Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
-      String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
-      if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
-        prop = PROPERTY_PREFIX + prop;
-      }
+    BALANCESHARDUNIQUE_OP(
+        BALANCESHARDUNIQUE,
+        (req, rsp, h) -> {
+          Map<String, Object> map =
+              copy(req.getParams().required(), null, COLLECTION_PROP, PROPERTY_PROP);
+          Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
+          String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
+          if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
+            prop = PROPERTY_PREFIX + prop;
+          }
 
-      if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
-            + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
-            " Property: " + prop + " shardUnique: " + shardUnique);
-      }
+          if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Balancing properties amongst replicas in a slice requires that"
+                    + " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. "
+                    + " Property: "
+                    + prop
+                    + " shardUnique: "
+                    + shardUnique);
+          }
 
-      return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
-    }),
-    REBALANCELEADERS_OP(REBALANCELEADERS, (req, rsp, h) -> {
-      new RebalanceLeaders(req, rsp, h).execute();
-      return null;
-    }),
+          return copy(req.getParams(), map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
+        }),
+    REBALANCELEADERS_OP(
+        REBALANCELEADERS,
+        (req, rsp, h) -> {
+          new RebalanceLeaders(req, rsp, h).execute();
+          return null;
+        }),
     // XXX should this command support followAliases?
-    MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> {
-      Map<String, Object> m = copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
-      copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
-      if (m.isEmpty()) {
-        throw new SolrException(ErrorCode.BAD_REQUEST,
-            formatString("no supported values provided {0}", CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
-      }
-      copy(req.getParams().required(), m, COLLECTION_PROP);
-      for (Map.Entry<String, Object> entry : m.entrySet()) {
-        String prop = entry.getKey();
-        if ("".equals(entry.getValue())) {
-          // set to an empty string is equivalent to removing the property, see SOLR-12507
-          entry.setValue(null);
-        }
-        DocCollection.verifyProp(m, prop);
-      }
-      if (m.get(REPLICATION_FACTOR) != null) {
-        m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
-      }
-      return m;
-    }),
-    BACKUP_OP(BACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
-
-      final String extCollectionName = req.getParams().get(COLLECTION_PROP);
-      final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
-      final String collectionName = followAliases ? h.coreContainer.getZkController().getZkStateReader()
-          .getAliases().resolveSimpleAlias(extCollectionName) : extCollectionName;
-      final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
-      if (!clusterState.hasCollection(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
-      }
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      BackupRepository repository = cc.newBackupRepository(repo);
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
-      boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
-
-      // Check if the specified location is valid for this repository.
-      final URI uri = repository.createDirectoryURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-      }
+    MODIFYCOLLECTION_OP(
+        MODIFYCOLLECTION,
+        (req, rsp, h) -> {
+          Map<String, Object> m =
+              copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
+          copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
+          if (m.isEmpty()) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                formatString(
+                    "no supported values provided {0}",
+                    CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
+          }
+          copy(req.getParams().required(), m, COLLECTION_PROP);
+          for (Map.Entry<String, Object> entry : m.entrySet()) {
+            String prop = entry.getKey();
+            if ("".equals(entry.getValue())) {
+              // set to an empty string is equivalent to removing the property, see SOLR-12507
+              entry.setValue(null);
+            }
+            DocCollection.verifyProp(m, prop);
+          }
+          if (m.get(REPLICATION_FACTOR) != null) {
+            m.put(NRT_REPLICAS, m.get(REPLICATION_FACTOR));
+          }
+          return m;
+        }),
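
Side note (not part of this PR): two behaviours in MODIFYCOLLECTION_OP above are worth spelling out: an empty string value unsets a property (SOLR-12507), and a replicationFactor change is mirrored onto nrtReplicas before being submitted. A small sketch of both, where "readOnly" is used only as an example of a modifiable property being cleared and all values are placeholders.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class ModifyCollectionSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "MODIFYCOLLECTION");
      params.set("collection", "books");
      params.set("replicationFactor", "3");   // also copied to nrtReplicas by the handler
      params.set("readOnly", "");             // empty string removes the property (SOLR-12507)
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params));
    }
  }
}
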
+    BACKUP_OP(
+        BACKUP,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME, COLLECTION_PROP);
+
+          final String extCollectionName = req.getParams().get(COLLECTION_PROP);
+          final boolean followAliases = req.getParams().getBool(FOLLOW_ALIASES, false);
+          final String collectionName =
+              followAliases
+                  ? h.coreContainer
+                      .getZkController()
+                      .getZkStateReader()
+                      .getAliases()
+                      .resolveSimpleAlias(extCollectionName)
+                  : extCollectionName;
+          final ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
+          if (!clusterState.hasCollection(collectionName)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Collection '" + collectionName + "' does not exist, no action taken.");
+          }
 
-      String strategy = req.getParams().get(CollectionAdminParams.INDEX_BACKUP_STRATEGY, CollectionAdminParams.COPY_FILES_STRATEGY);
-      if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
-      }
+          CoreContainer cc = h.coreContainer;
+          String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          BackupRepository repository = cc.newBackupRepository(repo);
 
-      Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP,
-              FOLLOW_ALIASES, CoreAdminParams.COMMIT_NAME, CoreAdminParams.MAX_NUM_BACKUP_POINTS);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      if (repo != null) {
-        params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-      }
+          String location =
+              repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
+          if (location == null) {
+            // Refresh the cluster property file to make sure the value set for location is the
+            // latest
+            // Check if the location is specified in the cluster property.
+            location =
+                new ClusterProperties(h.coreContainer.getZkController().getZkClient())
+                    .getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
+            if (location == null) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "'location' is not specified as a query"
+                      + " parameter or as a default repository property or as a cluster property.");
+            }
+          }
+          boolean incremental = req.getParams().getBool(CoreAdminParams.BACKUP_INCREMENTAL, true);
 
-      params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy);
-      params.put(CoreAdminParams.BACKUP_INCREMENTAL, incremental);
-      return params;
-    }),
-    RESTORE_OP(RESTORE, (req, rsp, h) -> {
-      req.getParams().required().check(NAME, COLLECTION_PROP);
+          // Check if the specified location is valid for this repository.
+          final URI uri = repository.createDirectoryURI(location);
+          try {
+            if (!repository.exists(uri)) {
+              throw new SolrException(
+                  ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
+            }
+          } catch (IOException ex) {
+            throw new SolrException(
+                ErrorCode.SERVER_ERROR,
+                "Failed to check the existence of " + uri + ". Is it valid?",
+                ex);
+          }
 
-      final String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
-      if (h.coreContainer.getZkController().getZkStateReader().getAliases().hasAlias(collectionName)) {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' is an existing alias, no action taken.");
-      }
+          String strategy =
+              req.getParams()
+                  .get(
+                      CollectionAdminParams.INDEX_BACKUP_STRATEGY,
+                      CollectionAdminParams.COPY_FILES_STRATEGY);
+          if (!CollectionAdminParams.INDEX_BACKUP_STRATEGIES.contains(strategy)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST, "Unknown index backup strategy " + strategy);
+          }
 
-      final CoreContainer cc = h.coreContainer;
-      final String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      final BackupRepository repository = cc.newBackupRepository(repo);
-
-      String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-      if (location == null) {
-        //Refresh the cluster property file to make sure the value set for location is the latest
-        // Check if the location is specified in the cluster property.
-        location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty("location", null);
-        if (location == null) {
-          throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-              + " parameter or as a default repository property or as a cluster property.");
-        }
-      }
+          Map<String, Object> params =
+              copy(
+                  req.getParams(),
+                  null,
+                  NAME,
+                  COLLECTION_PROP,
+                  FOLLOW_ALIASES,
+                  CoreAdminParams.COMMIT_NAME,
+                  CoreAdminParams.MAX_NUM_BACKUP_POINTS);
+          params.put(CoreAdminParams.BACKUP_LOCATION, location);
+          if (repo != null) {
+            params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
+          }
 
-      // Check if the specified location is valid for this repository.
-      final URI uri = repository.createDirectoryURI(location);
-      try {
-        if (!repository.exists(uri)) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
-        }
-      } catch (IOException ex) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-      }
+          params.put(CollectionAdminParams.INDEX_BACKUP_STRATEGY, strategy);
+          params.put(CoreAdminParams.BACKUP_INCREMENTAL, incremental);
+          return params;
+        }),
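
Side note (not part of this PR): the location resolution in BACKUP_OP above falls back in three steps: the request's "location" parameter, then the repository default, then the "location" cluster property; backups are incremental by default and the strategy must be one of the known index backup strategies. A hedged sketch of a request that supplies the location explicitly, with placeholder paths and names.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class BackupCollectionSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "BACKUP");
      params.set("name", "books_nightly");     // backup name (required)
      params.set("collection", "books");       // collection to back up (required)
      params.set("location", "/backups/solr"); // skips the repository/cluster-property fallback
      // "incremental" defaults to true; setting it to false requests the older full backup format.
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params));
    }
  }
}
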
+    RESTORE_OP(
+        RESTORE,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME, COLLECTION_PROP);
+
+          final String collectionName =
+              SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
+          if (h.coreContainer
+              .getZkController()
+              .getZkStateReader()
+              .getAliases()
+              .hasAlias(collectionName)) {
+            throw new SolrException(
+                ErrorCode.BAD_REQUEST,
+                "Collection '" + collectionName + "' is an existing alias, no action taken.");
+          }
 
-      final String createNodeArg = req.getParams().get(CREATE_NODE_SET);
-      if (CREATE_NODE_SET_EMPTY.equals(createNodeArg)) {
-        throw new SolrException(
-            SolrException.ErrorCode.BAD_REQUEST,
-            "Cannot restore with a CREATE_NODE_SET of CREATE_NODE_SET_EMPTY."
-        );
-      }
-      if (req.getParams().get(NRT_REPLICAS) != null && req.getParams().get(REPLICATION_FACTOR) != null) {
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-            "Cannot set both replicationFactor and nrtReplicas as they mean the same thing");
-      }
+          final CoreContainer cc = h.coreContainer;
+          final String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          final BackupRepository repository = cc.newBackupRepository(repo);
 
-      final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP);
-      params.put(CoreAdminParams.BACKUP_LOCATION, location);
-      if (repo != null) {
-        params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-      }
-      // from CREATE_OP:
-      copy(req.getParams(), params, COLL_CONF, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS,
-          PULL_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE, BACKUP_ID);
-      copyPropertiesWithPrefix(req.getParams(), params, PROPERTY_PREFIX);
-      return params;
-    }),
-    DELETEBACKUP_OP(DELETEBACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME);
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      try (BackupRepository repository = cc.newBackupRepository(repo)) {
-
-        String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-        if (location == null) {
-          //Refresh the cluster property file to make sure the value set for location is the latest
-          // Check if the location is specified in the cluster property.
-          location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty("location", null);
+          String location =
+              repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
           if (location == null) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-                    + " parameter or as a default repository property or as a cluster property.");
+            // Refresh the cluster property file to make sure the value set for location is the
+            // latest
+            // Check if the location is specified in the cluster property.
+            location =
+                new ClusterProperties(h.coreContainer.getZkController().getZkClient())
+                    .getClusterProperty("location", null);
+            if (location == null) {
+              throw new SolrException(
+                  ErrorCode.BAD_REQUEST,
+                  "'location' is not specified as a query"
+                      + " parameter or as a default repository property or as a cluster property.");
+            }
           }
-        }
 
-        // Check if the specified location is valid for this repository.
-        URI uri = repository.createDirectoryURI(location);
-        try {
-          if (!repository.exists(uri)) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "specified location " + uri + " does not exist.");
+          // Check if the specified location is valid for this repository.
+          final URI uri = repository.createDirectoryURI(location);
+          try {
+            if (!repository.exists(uri)) {
+              throw new SolrException(
+                  ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
+            }
+          } catch (IOException ex) {
+            throw new SolrException(
+                ErrorCode.SERVER_ERROR,
+                "Failed to check the existence of " + uri + ". Is it valid?",
+                ex);
           }
-        } catch (IOException ex) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + uri + ". Is it valid?", ex);
-        }
 
-        int deletionModesProvided = 0;
-        if (req.getParams().get(MAX_NUM_BACKUP_POINTS) != null) deletionModesProvided++;
-        if (req.getParams().get(BACKUP_PURGE_UNUSED) != null) deletionModesProvided++;
-        if (req.getParams().get(BACKUP_ID) != null) deletionModesProvided++;
-        if (deletionModesProvided != 1) {
-          throw new SolrException(BAD_REQUEST,
-                  String.format(Locale.ROOT, "Exactly one of %s, %s, and %s parameters must be provided",
-                          MAX_NUM_BACKUP_POINTS, BACKUP_PURGE_UNUSED, BACKUP_ID));
-        }
-
-        final Map<String, Object> params = copy(req.getParams(), null, NAME, BACKUP_REPOSITORY,
-                BACKUP_LOCATION, BACKUP_ID, MAX_NUM_BACKUP_POINTS, BACKUP_PURGE_UNUSED);
-        params.put(BACKUP_LOCATION, location);
-        if (repo != null) {
-          params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
-        }
-        return params;
-      }
-    }),
-    LISTBACKUP_OP(LISTBACKUP, (req, rsp, h) -> {
-      req.getParams().required().check(NAME);
-
-      CoreContainer cc = h.coreContainer;
-      String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
-      try (BackupRepository repository = cc.newBackupRepository(repo)) {
-
-        String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
-        if (location == null) {
-          //Refresh the cluster property file to make sure the value set for location is the latest
-          // Check if the location is specified in the cluster property.
-          location = new ClusterProperties(h.coreContainer.getZkController().getZkClient()).getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
-          if (location == null) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
-                    + " parameter or as a default repository property or as a cluster property.");
+          final String createNodeArg = req.getParams().get(CREATE_NODE_SET);
+          if (CREATE_NODE_SET_EMPTY.equals(createNodeArg)) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST,
+                "Cannot restore with a CREATE_NODE_SET of CREATE_NODE_SET_EMPTY.");
+          }
+          if (req.getParams().get(NRT_REPLICAS) != null
+              && req.getParams().get(REPLICATION_FACTOR) != null) {
+            throw new SolrException(
+                SolrException.ErrorCode.BAD_REQUEST,
+                "Cannot set both replicationFactor and nrtReplicas as they mean the same thing");
           }
-        }
 
-        String backupName = req.getParams().get(NAME);
-        final URI locationURI = repository.createDirectoryURI(location);
-        try {
-          if (!repository.exists(locationURI)) {
-            throw new SolrException(ErrorCode.BAD_REQUEST, "specified location " + locationURI + " does not exist.");
+          final Map<String, Object> params = copy(req.getParams(), null, NAME, COLLECTION_PROP);
+          params.put(CoreAdminParams.BACKUP_LOCATION, location);
+          if (repo != null) {
+            params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
           }
-        } catch (IOException ex) {
-          throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existence of " + locationURI + ". Is it valid?", ex);
-        }
-        URI backupLocation = BackupFilePaths.buildExistingBackupLocationURI(repository, locationURI, backupName);
-        if (repository.exists(repository.resolve(backupLocation, BackupManager.TRADITIONAL_BACKUP_PROPS_FILE))) {
-          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The backup name [" + backupName + "] at " +
-                  "location [" + location + "] holds a non-incremental (legacy) backup, but " +
-                  "backup-listing is only supported on incremental backups");
-        }
+          // from CREATE_OP:
+          copy(
+              req.getParams(),
+              params,
+              COLL_CONF,
+              REPLICATION_FACTOR,
+              NRT_REPLICAS,
+              TLOG_REPLICAS,
+              PULL_REPLICAS,
+              CREATE_NODE_SET,
+              CREATE_NODE_SET_SHUFFLE,
+              BACKUP_ID);
+          copyPropertiesWithPrefix(req.getParams(), params, PROPERTY_PREFIX);
+          return params;
+        }),
+    DELETEBACKUP_OP(
+        DELETEBACKUP,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME);
+
+          CoreContainer cc = h.coreContainer;
+          String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          try (BackupRepository repository = cc.newBackupRepository(repo)) {
+
+            String location =
+                repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
+            if (location == null) {
+              // Refresh the cluster property file to make sure the value set for location is the
+              // latest
+              // Check if the location is specified in the cluster property.
+              location =
+                  new ClusterProperties(h.coreContainer.getZkController().getZkClient())
+                      .getClusterProperty("location", null);
+              if (location == null) {
+                throw new SolrException(
+                    ErrorCode.BAD_REQUEST,
+                    "'location' is not specified as a query"
+                        + " parameter or as a default repository property or as a cluster property.");
+              }
+            }
+
+            // Check if the specified location is valid for this repository.
+            URI uri = repository.createDirectoryURI(location);
+            try {
+              if (!repository.exists(uri)) {
+                throw new SolrException(
+                    ErrorCode.BAD_REQUEST, "specified location " + uri + " does not exist.");
+              }
+            } catch (IOException ex) {
+              throw new SolrException(
+                  ErrorCode.SERVER_ERROR,
+                  "Failed to check the existence of " + uri + ". Is it valid?",
+                  ex);
+            }
+
+            int deletionModesProvided = 0;
+            if (req.getParams().get(MAX_NUM_BACKUP_POINTS) != null) deletionModesProvided++;
+            if (req.getParams().get(BACKUP_PURGE_UNUSED) != null) deletionModesProvided++;
+            if (req.getParams().get(BACKUP_ID) != null) deletionModesProvided++;
+            if (deletionModesProvided != 1) {
+              throw new SolrException(
+                  BAD_REQUEST,
+                  String.format(
+                      Locale.ROOT,
+                      "Exactly one of %s, %s, and %s parameters must be provided",
+                      MAX_NUM_BACKUP_POINTS,
+                      BACKUP_PURGE_UNUSED,
+                      BACKUP_ID));
+            }
 
-        String[] subFiles = repository.listAllOrEmpty(backupLocation);
-        List<BackupId> propsFiles = BackupFilePaths.findAllBackupIdsFromFileListing(subFiles);
-
-        NamedList<Object> results = new NamedList<>();
-        ArrayList<Map<Object,Object>> backups = new ArrayList<>();
-        String collectionName = null;
-        for (BackupId backupId: propsFiles) {
-          BackupProperties properties = BackupProperties.readFrom(repository, backupLocation, BackupFilePaths.getBackupPropsName(backupId));
-          if (collectionName == null) {
-            collectionName = properties.getCollection();
-            results.add(BackupManager.COLLECTION_NAME_PROP, collectionName);
+            final Map<String, Object> params =
+                copy(
+                    req.getParams(),
+                    null,
+                    NAME,
+                    BACKUP_REPOSITORY,
+                    BACKUP_LOCATION,
+                    BACKUP_ID,
+                    MAX_NUM_BACKUP_POINTS,
+                    BACKUP_PURGE_UNUSED);
+            params.put(BACKUP_LOCATION, location);
+            if (repo != null) {
+              params.put(CoreAdminParams.BACKUP_REPOSITORY, repo);
+            }
+            return params;
           }
+        }),
+    LISTBACKUP_OP(
+        LISTBACKUP,
+        (req, rsp, h) -> {
+          req.getParams().required().check(NAME);
+
+          CoreContainer cc = h.coreContainer;
+          String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
+          try (BackupRepository repository = cc.newBackupRepository(repo)) {
+
+            String location =
+                repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
+            if (location == null) {
+              // Refresh the cluster property file to make sure the value set for location is the
+              // latest
+              // Check if the location is specified in the cluster property.

Review comment:
       Fix this
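
       The "Fix this" above most likely refers to the comment that spotless left split mid-sentence ("... is the / latest"); the same wrapped block shows up in several of the backup-related ops above. One way the block could read once the comment is reflowed (a sketch of the wording only, not the committed change):

              // Refresh the cluster properties so the value set for 'location' is the latest,
              // i.e. check whether the location is specified as a cluster property.
              location =
                  new ClusterProperties(h.coreContainer.getZkController().getZkClient())
                      .getClusterProperty("location", null);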

##########
File path: solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java
##########
@@ -171,28 +182,34 @@ protected void processUpdate(SolrQueryRequest req, UpdateRequestProcessor proces
             }
 
           } else if ("doc".equals(currTag)) {
-            if(addCmd != null) {
+            if (addCmd != null) {
               log.trace("adding doc...");
               addCmd.clear();
               addCmd.solrDoc = readDoc(parser);
               processor.processAdd(addCmd);
             } else {
-              throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unexpected <doc> tag without an <add> tag surrounding it.");
+              throw new SolrException(
+                  SolrException.ErrorCode.BAD_REQUEST,
+                  "Unexpected <doc> tag without an <add> tag surrounding it.");
             }
-          } else if (UpdateRequestHandler.COMMIT.equals(currTag) || UpdateRequestHandler.OPTIMIZE.equals(currTag)) {
+          } else if (UpdateRequestHandler.COMMIT.equals(currTag)
+              || UpdateRequestHandler.OPTIMIZE.equals(currTag)) {
             log.trace("parsing {}", currTag);
 
-            CommitUpdateCommand cmd = new CommitUpdateCommand(req, UpdateRequestHandler.OPTIMIZE.equals(currTag));
+            CommitUpdateCommand cmd =
+                new CommitUpdateCommand(req, UpdateRequestHandler.OPTIMIZE.equals(currTag));
             ModifiableSolrParams mp = new ModifiableSolrParams();
-            
+
             for (int i = 0; i < parser.getAttributeCount(); i++) {
               String attrName = parser.getAttributeLocalName(i);
               String attrVal = parser.getAttributeValue(i);
               mp.set(attrName, attrVal);
             }
 
             RequestHandlerUtils.validateCommitParams(mp);
-            SolrParams p = SolrParams.wrapDefaults(mp, req.getParams());   // default to the normal request params for commit options
+            SolrParams p =
+                SolrParams.wrapDefaults(
+                    mp, req.getParams()); // default to the normal request params for commit options

Review comment:
       Fix this
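
       Here the "Fix this" presumably targets the trailing comment that the reformatting pushed onto the wrapped second argument. One option would be to hoist the comment above the statement so the call fits on a single line again (a sketch of one possibility, not the committed change):

            // Default to the normal request params for commit options.
            SolrParams p = SolrParams.wrapDefaults(mp, req.getParams());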



