Posted to issues@solr.apache.org by GitBox <gi...@apache.org> on 2022/02/25 04:44:35 UTC

[GitHub] [solr] risdenk commented on a change in pull request #706: SOLR-14920: Spotless formatting for solrj

risdenk commented on a change in pull request #706:
URL: https://github.com/apache/solr/pull/706#discussion_r814364912



##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateHttp2SolrClient.java
##########
@@ -469,18 +481,20 @@ public synchronized void blockUntilFinished() throws IOException {
 
       synchronized (runners) {
 
-        // NOTE: if the executor is shut down, runners may never become empty (a scheduled task may never be run,
-        // which means it would never remove itself from the runners list. This is why we don't wait forever
+        // NOTE: if the executor is shut down, runners may never become empty (a scheduled task may
+        // never be run,
+        // which means it would never remove itself from the runners list. This is why we don't wait
+        // forever

Review comment:
       Fix this
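
The complaint: Spotless (presumably via google-java-format's 100-column limit) wrapped the sentence mid-clause, stranding "never be run," and "forever" on their own lines. A hand-reflowed version that fits the limit, so the formatter leaves it alone, might look like this (a sketch, not the committed fix; it also closes the dangling parenthesis):

        // NOTE: if the executor is shut down, runners may never become empty (a scheduled task
        // may never be run, which means it would never remove itself from the runners list).
        // This is why we don't wait forever and periodically check if the scheduler is
        // shutting down.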

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
##########
@@ -225,85 +227,90 @@ void sendUpdateStream() throws Exception {
           } finally {
             inPoll = false;
           }
-          if (update == null)
-            break;
+          if (update == null) break;
 
           String contentType = client.requestWriter.getUpdateContentType();
           final boolean isXml = ClientUtils.TEXT_XML.equals(contentType);
 
-          final ModifiableSolrParams origParams = new ModifiableSolrParams(update.getRequest().getParams());
+          final ModifiableSolrParams origParams =
+              new ModifiableSolrParams(update.getRequest().getParams());
           final String origTargetCollection = update.getCollection();
 
-          EntityTemplate template = new EntityTemplate(new ContentProducer() {
-            
-            @Override
-            public void writeTo(OutputStream out) throws IOException {
+          EntityTemplate template =
+              new EntityTemplate(
+                  new ContentProducer() {
 
-              if (isXml) {
-                out.write("<stream>".getBytes(StandardCharsets.UTF_8)); // can be anything
-              }
-              Update upd = update;
-              while (upd != null) {
-                UpdateRequest req = upd.getRequest();
-                SolrParams currentParams = new ModifiableSolrParams(req.getParams());
-                if (!origParams.toNamedList().equals(currentParams.toNamedList()) || !StringUtils.equals(origTargetCollection, upd.getCollection())) {
-                  queue.add(upd); // Request has different params or destination core/collection, return to queue
-                  break;
-                }
+                    @Override
+                    public void writeTo(OutputStream out) throws IOException {
 
-                client.requestWriter.write(req, out);
-                if (isXml) {
-                  // check for commit or optimize
-                  SolrParams params = req.getParams();
-                  if (params != null) {
-                    String fmt = null;
-                    if (params.getBool(UpdateParams.OPTIMIZE, false)) {
-                      fmt = "<optimize waitSearcher=\"%s\" />";
-                    } else if (params.getBool(UpdateParams.COMMIT, false)) {
-                      fmt = "<commit waitSearcher=\"%s\" />";
-                    }
-                    if (fmt != null) {
-                      byte[] content = String.format(Locale.ROOT,
-                          fmt, params.getBool(UpdateParams.WAIT_SEARCHER, false)
-                              + "")
-                          .getBytes(StandardCharsets.UTF_8);
-                      out.write(content);
-                    }
-                  }
-                }
-                out.flush();
-
-                notifyQueueAndRunnersIfEmptyQueue();
-                inPoll = true;
-                try {
-                  while (true) {
-                    try {
-                      upd = queue.poll(pollQueueTime, TimeUnit.MILLISECONDS);
-                      break;
-                    } catch (InterruptedException e) {
-                      if (log.isDebugEnabled()) pollInterrupts.incrementAndGet();
-                      if (!queue.isEmpty()) {
-                        continue;
+                      if (isXml) {
+                        out.write("<stream>".getBytes(StandardCharsets.UTF_8)); // can be anything
+                      }
+                      Update upd = update;
+                      while (upd != null) {
+                        UpdateRequest req = upd.getRequest();
+                        SolrParams currentParams = new ModifiableSolrParams(req.getParams());
+                        if (!origParams.toNamedList().equals(currentParams.toNamedList())
+                            || !StringUtils.equals(origTargetCollection, upd.getCollection())) {
+                          queue.add(
+                              upd); // Request has different params or destination core/collection,
+                          // return to queue

Review comment:
       Fix this
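
Here the formatter split queue.add(upd) across two lines purely to keep the trailing comment near the call, then broke the comment itself in half. A common remedy is to hoist the comment onto its own line so there is nothing left to wrap (a sketch, not the committed fix):

                          // Request has different params or destination core/collection;
                          // return it to the queue.
                          queue.add(upd);
                          break;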

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/BiJoinStream.java
##########
@@ -28,67 +28,78 @@
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 /**
- * Joins leftStream with rightStream based on a Equalitor. Both streams must be sorted by the fields being joined on.
- * Resulting stream is sorted by the equalitor.
+ * Joins leftStream with rightStream based on a Equalitor. Both streams must be sorted by the fields
+ * being joined on. Resulting stream is sorted by the equalitor.
+ *
  * @since 6.0.0
- **/
-
+ */
 public abstract class BiJoinStream extends JoinStream implements Expressible {
-  
+
   protected PushBackStream leftStream;
   protected PushBackStream rightStream;
-  
-  // This is used to determine whether we should iterate the left or right side (depending on stream order).
+
+  // This is used to determine whether we should iterate the left or right side (depending on stream
+  // order).
   // It is built from the incoming equalitor and streams' comparators.
   protected StreamComparator iterationComparator;
   protected StreamComparator leftStreamComparator, rightStreamComparator;
-  
-  public BiJoinStream(TupleStream leftStream, TupleStream rightStream, StreamEqualitor eq) throws IOException {
+
+  public BiJoinStream(TupleStream leftStream, TupleStream rightStream, StreamEqualitor eq)
+      throws IOException {
     super(eq, leftStream, rightStream);
     init();
   }
-  
+
   public BiJoinStream(StreamExpression expression, StreamFactory factory) throws IOException {
     super(expression, factory);
     init();
   }
-  
+
   private void init() throws IOException {
-    
+
     // Validates all incoming streams for tuple order
     validateTupleOrder();
-    
+
     leftStream = getStream(0);
     rightStream = getStream(1);
-    
-    // iterationComparator is a combination of the equalitor and the comp from each stream. This can easily be done by
-    // grabbing the first N parts of each comp where N is the number of parts in the equalitor. Because we've already
+
+    // iterationComparator is a combination of the equalitor and the comp from each stream. This can
+    // easily be done by
+    // grabbing the first N parts of each comp where N is the number of parts in the equalitor.
+    // Because we've already

Review comment:
       Fix this
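
Same pattern: a long comment wrapped mid-clause, leaving "easily be done by" and "Because we've already" hanging. A manual reflow within the column limit reads cleanly; the hunk cuts the last sentence off at "Because we've already", so its tail is left elided here (a sketch only):

    // iterationComparator is a combination of the equalitor and the comp from each stream. This
    // can easily be done by grabbing the first N parts of each comp, where N is the number of
    // parts in the equalitor. Because we've already ...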

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/OuterHashJoinStream.java
##########
@@ -18,116 +18,121 @@
 
 import java.io.IOException;
 import java.util.List;
-
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.stream.expr.Expressible;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 /**
- * Takes two streams (fullStream and hashStream) and joins them similar to an LeftOuterJoinStream. The difference
- * in a OuterHashJoinStream is that the tuples in the hashStream will all be read and hashed when this stream is
- * opened. This provides a few optimizations iff the hashStream has a relatively small number of documents.
- * The difference between this and a HashJoinStream is that a tuple in the fullStream will be returned even
- * if it doesn't have any matching tuples in the hashStream. 
- * You are expected to provide a set of fields for which the hash will be calculated from. If a tuple from the 
- * hashStream does not contain a value (ie, null) for one of the fields the hash is being computed on then that 
- * tuple will not be considered a match to anything. If a tuple from the fullStream does not contain a value (ie, null) 
- * for one of the fields the hash is being computed on then that tuple will be returned without any joined tuples
- * from the hashStream
+ * Takes two streams (fullStream and hashStream) and joins them similar to an LeftOuterJoinStream.
+ * The difference in a OuterHashJoinStream is that the tuples in the hashStream will all be read and
+ * hashed when this stream is opened. This provides a few optimizations iff the hashStream has a
+ * relatively small number of documents. The difference between this and a HashJoinStream is that a
+ * tuple in the fullStream will be returned even if it doesn't have any matching tuples in the
+ * hashStream. You are expected to provide a set of fields for which the hash will be calculated
+ * from. If a tuple from the hashStream does not contain a value (ie, null) for one of the fields
+ * the hash is being computed on then that tuple will not be considered a match to anything. If a
+ * tuple from the fullStream does not contain a value (ie, null) for one of the fields the hash is
+ * being computed on then that tuple will be returned without any joined tuples from the hashStream
+ *
  * @since 6.0.0
-**/
+ */
 public class OuterHashJoinStream extends HashJoinStream implements Expressible {
-  
+
   private static final long serialVersionUID = 1L;
 
-  public OuterHashJoinStream(TupleStream fullStream, TupleStream hashStream, List<String> hashOn) throws IOException {
+  public OuterHashJoinStream(TupleStream fullStream, TupleStream hashStream, List<String> hashOn)
+      throws IOException {
     super(fullStream, hashStream, hashOn);
   }
-  
-  public OuterHashJoinStream(StreamExpression expression,StreamFactory factory) throws IOException {
+
+  public OuterHashJoinStream(StreamExpression expression, StreamFactory factory)
+      throws IOException {
     super(expression, factory);
   }
-    
+
   @Override
-  public StreamExpression toExpression(StreamFactory factory) throws IOException {    
+  public StreamExpression toExpression(StreamFactory factory) throws IOException {
     // function name
     StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass()));
-    
+
     // streams
-    if(hashStream instanceof Expressible && fullStream instanceof Expressible){
-      expression.addParameter(((Expressible)fullStream).toExpression(factory));
-      expression.addParameter(new StreamExpressionNamedParameter("hashed", ((Expressible)hashStream).toExpression(factory)));
+    if (hashStream instanceof Expressible && fullStream instanceof Expressible) {
+      expression.addParameter(((Expressible) fullStream).toExpression(factory));
+      expression.addParameter(
+          new StreamExpressionNamedParameter(
+              "hashed", ((Expressible) hashStream).toExpression(factory)));
+    } else {
+      throw new IOException(
+          "This OuterHashJoinStream contains a non-expressible TupleStream - it cannot be converted to an expression");
     }
-    else{
-      throw new IOException("This OuterHashJoinStream contains a non-expressible TupleStream - it cannot be converted to an expression");
-    }
-    
+
     // on
     StringBuilder sb = new StringBuilder();
-    for(int idx = 0; idx < leftHashOn.size(); ++idx){
-      if(sb.length() > 0){ sb.append(","); }
-      
+    for (int idx = 0; idx < leftHashOn.size(); ++idx) {
+      if (sb.length() > 0) {
+        sb.append(",");
+      }
+
       // we know that left and right hashOns are the same size
       String left = leftHashOn.get(idx);
       String right = rightHashOn.get(idx);
-      
-      if(left.equals(right)){ 
-        sb.append(left); 
-      }
-      else{
+
+      if (left.equals(right)) {
+        sb.append(left);
+      } else {
         sb.append(left);
         sb.append("=");
         sb.append(right);
       }
     }
-    expression.addParameter(new StreamExpressionNamedParameter("on",sb.toString()));
-    
-    return expression;   
+    expression.addParameter(new StreamExpressionNamedParameter("on", sb.toString()));
+
+    return expression;
   }
 
   public Tuple read() throws IOException {
-    
-    if(null == workingFullTuple){
+
+    if (null == workingFullTuple) {
       Tuple fullTuple = fullStream.read();
-      
+
       // We're at the end of the line
-      if(fullTuple.EOF){
+      if (fullTuple.EOF) {
         return fullTuple;
       }
-      
+
       // If fullTuple doesn't have a valid hash or the hash cannot be found in the hashedTuples then
       // return the tuple from fullStream.
-      // This is an outer join so there is no requirement there be a matching value in the hashed stream
+      // This is an outer join so there is no requirement there be a matching value in the hashed
+      // stream
       String fullHash = computeHash(fullTuple, leftHashOn);
-      if(null == fullHash || !hashedTuples.containsKey(fullHash)){
+      if (null == fullHash || !hashedTuples.containsKey(fullHash)) {
         return fullTuple.clone();
       }
-      
+
       workingFullTuple = fullTuple;
       workingFullHash = fullHash;
-      workngHashSetIdx = 0;      
+      workngHashSetIdx = 0;
     }
-  
+
     // At this point we know we have at least one doc to match on
-    // Due to the check at the end, before returning, we know we have at least one to match with left
+    // Due to the check at the end, before returning, we know we have at least one to match with
+    // left

Review comment:
       Fix this
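
The wrap stranded the single word "left". Breaking the line earlier keeps both lines under the limit and readable (a sketch):

    // At this point we know we have at least one doc to match on.
    // Due to the check at the end, before returning, we know we have at least one to
    // match with left.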

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
##########
@@ -129,60 +129,66 @@ public static State getState(String stateStr) {
 
   public static final String REPLICAS = "replicas";
   public static final String RANGE = "range";
-  public static final String LEADER = "leader";       // FUTURE: do we want to record the leader as a slice property in the JSON (as opposed to isLeader as a replica property?)
+  public static final String LEADER =
+      "leader"; // FUTURE: do we want to record the leader as a slice property in the JSON (as
+  // opposed to isLeader as a replica property?)

Review comment:
       Fix this
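
The FUTURE note was cut mid-sentence and its continuation dropped to a different indent level than the constant it describes. Moving the comment above the field avoids the trailing-comment wrap entirely (a sketch, not the committed fix):

  // FUTURE: do we want to record the leader as a slice property in the JSON
  // (as opposed to isLeader as a replica property)?
  public static final String LEADER = "leader";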

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrClient.java
##########
@@ -535,23 +555,25 @@ private HttpEntityEnclosingRequestBase fillContentStream(
     }
 
     method.setConfig(requestConfigBuilder.build());
-    
+
     HttpEntity entity = null;
     InputStream respBody = null;
     boolean shouldClose = true;
     try {
       // Execute the method.
-      HttpClientContext httpClientRequestContext = HttpClientUtil.createNewHttpClientRequestContext();
+      HttpClientContext httpClientRequestContext =
+          HttpClientUtil.createNewHttpClientRequestContext();
       if (userPrincipal != null) {
         // Normally the context contains a static userToken to enable reuse resources.
         // However, if a personal Principal object exists, we use that instead, also as a means
-        // to transfer authentication information to Auth plugins that wish to intercept the request later
+        // to transfer authentication information to Auth plugins that wish to intercept the request
+        // later

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrClient.java
##########
@@ -687,15 +729,18 @@ private HttpEntityEnclosingRequestBase fillContentStream(
     }
   }
 
-  // When raising an error using HTTP sendError, mime types can be mismatched. This is specifically true when
-  // SolrDispatchFilter uses the sendError mechanism since the expected MIME type of response is not HTML but
+  // When raising an error using HTTP sendError, mime types can be mismatched. This is specifically
+  // true when
+  // SolrDispatchFilter uses the sendError mechanism since the expected MIME type of response is not
+  // HTML but
   // HTTP sendError generates a HTML output, which can lead to mismatch

Review comment:
       Fix this
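
Another mid-clause wrap ("true when" and "not HTML but" left dangling). A hand-reflowed sketch that also smooths the grammar:

  // When raising an error using HTTP sendError, MIME types can be mismatched. This is
  // specifically true when SolrDispatchFilter uses the sendError mechanism, since the expected
  // MIME type of the response is not HTML, but HTTP sendError generates HTML output, which can
  // lead to a mismatch.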

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateHttp2SolrClient.java
##########
@@ -322,20 +325,25 @@ private void notifyQueueAndRunnersIfEmptyQueue() {
       }
       synchronized (runners) {
         // we notify runners too - if there is a high queue poll time and this is the update
-        // that emptied the queue, we make an attempt to avoid the 250ms timeout in blockUntilFinished
+        // that emptied the queue, we make an attempt to avoid the 250ms timeout in
+        // blockUntilFinished
         runners.notifyAll();
       }
     }
   }
 
   // *must* be called with runners monitor held, e.g. synchronized(runners){ addRunner() }
   private void addRunner() {
-    MDC.put("ConcurrentUpdateHttp2SolrClient.url", String.valueOf(client.getBaseURL())); // MDC can't have null value
+    MDC.put(
+        "ConcurrentUpdateHttp2SolrClient.url",
+        String.valueOf(client.getBaseURL())); // MDC can't have null value
     try {
       Runner r = new Runner();
       runners.add(r);
       try {
-        scheduler.execute(r);  // this can throw an exception if the scheduler has been shutdown, but that should be fine.
+        scheduler.execute(
+            r); // this can throw an exception if the scheduler has been shutdown, but that should
+        // be fine.

Review comment:
       Fix this
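
The formatter split scheduler.execute(r) onto two lines to preserve the trailing comment, then split the comment anyway. Hoisting the comment above the call avoids both (a sketch, not the committed fix):

        // This can throw an exception if the scheduler has been shutdown; that should be fine.
        scheduler.execute(r);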

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/SolrQuery.java
##########
@@ -31,33 +30,36 @@
 import org.apache.solr.common.params.StatsParams;
 import org.apache.solr.common.params.TermsParams;
 
-
 /**
- * This is an augmented SolrParams with get/set/add fields for common fields used
- * in the Standard and Dismax request handlers
- * 
+ * This is an augmented SolrParams with get/set/add fields for common fields used in the Standard
+ * and Dismax request handlers
  *
  * @since solr 1.3
  */
-public class SolrQuery extends ModifiableSolrParams 
-{  
-  public static final String DOCID = "_docid_"; // duplicate of org.apache.solr.search.SortSpecParsing.DOCID which is not accessible from here
-  
-  public enum ORDER { desc, asc;
+public class SolrQuery extends ModifiableSolrParams {
+  public static final String DOCID =
+      "_docid_"; // duplicate of org.apache.solr.search.SortSpecParsing.DOCID which is not
+  // accessible from here

Review comment:
       Fix this
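
Same trailing-comment split on a constant. Placing the comment above the field keeps the cross-reference intact (a sketch, not the committed fix):

  // Duplicate of org.apache.solr.search.SortSpecParsing.DOCID,
  // which is not accessible from here.
  public static final String DOCID = "_docid_";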

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/request/FieldAnalysisRequest.java
##########
@@ -112,14 +107,13 @@ static String listToCommaDelimitedString(List<String> list) {
     return result.toString();
   }
 
-
-  //============================================ Setter/Getter Methods ===============================================
+  // ============================================ Setter/Getter Methods
+  // ===============================================
 
   /**
    * Sets the field value to be analyzed.
    *
    * @param fieldValue The field value to be analyzed.

Review comment:
       Fix this
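
Banner comments like this overflow the 100-column limit, so the formatter pushes the trailing rule of "=" characters onto its own line. Shortening the rule keeps the banner on one line and stable under reformatting (a cosmetic sketch):

  // =============================== Setter/Getter Methods ===============================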

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
##########
@@ -1769,31 +1920,37 @@ public SolrParams getParams() {
       params.set("collections", aliasedCollections);
       return params;
     }
-
   }
 
   /**
-   * Returns a SolrRequest to create a time routed alias. For time based routing, the start
-   * should be a standard Solr timestamp string (possibly with "date math").
+   * Returns a SolrRequest to create a time routed alias. For time based routing, the start should
+   * be a standard Solr timestamp string (possibly with "date math").
    *
    * @param aliasName the name of the alias to create.
-   * @param start the start of the routing.  A standard Solr date: ISO-8601 or NOW with date math.
-   * @param interval date math representing the time duration of each collection (e.g. {@code +1DAY})
+   * @param start the start of the routing. A standard Solr date: ISO-8601 or NOW with date math.
+   * @param interval date math representing the time duration of each collection (e.g. {@code
+   *     +1DAY})
    * @param routerField the document field to contain the timestamp to route on
-   * @param createCollTemplate Holds options to create a collection.  The "name" is ignored.
+   * @param createCollTemplate Holds options to create a collection. The "name" is ignored.
    */
-  public static CreateTimeRoutedAlias createTimeRoutedAlias(String aliasName, String start,
-                                                            String interval,
-                                                            String routerField,
-                                                            Create createCollTemplate) {
+  public static CreateTimeRoutedAlias createTimeRoutedAlias(
+      String aliasName,
+      String start,
+      String interval,
+      String routerField,
+      Create createCollTemplate) {
 
     return new CreateTimeRoutedAlias(aliasName, routerField, start, interval, createCollTemplate);
   }
 
-  public static class CreateTimeRoutedAlias extends AsyncCollectionAdminRequest implements RoutedAliasAdminRequest {
-    // TODO: This and other commands in this file seem to need to share some sort of constants class with core
-    // to allow this stuff not to be duplicated. (this is pasted from CreateAliasCmd.java), however I think
-    // a comprehensive cleanup of this for all the requests in this class should be done as a separate ticket.
+  public static class CreateTimeRoutedAlias extends AsyncCollectionAdminRequest
+      implements RoutedAliasAdminRequest {
+    // TODO: This and other commands in this file seem to need to share some sort of constants class
+    // with core
+    // to allow this stuff not to be duplicated. (this is pasted from CreateAliasCmd.java), however
+    // I think
+    // a comprehensive cleanup of this for all the requests in this class should be done as a
+    // separate ticket.

Review comment:
       Fix this
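
A three-line TODO became six, with clauses like "with core" and "I think" stranded. A manual reflow within the limit (a sketch; the wording is otherwise unchanged):

    // TODO: This and other commands in this file seem to need to share some sort of constants
    // class with core to allow this stuff not to be duplicated (this is pasted from
    // CreateAliasCmd.java). However, I think a comprehensive cleanup of this for all the
    // requests in this class should be done as a separate ticket.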

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/beans/DocumentObjectBinder.java
##########
@@ -217,11 +220,13 @@ private void storeName(Field annotation) {
             name = setter.getName();
           }
         }
-      } else if (annotation.value().indexOf('*') >= 0) { //dynamic fields are annotated as @Field("categories_*")
-        //if the field was annotated as a dynamic field, convert the name into a pattern
-        //the wildcard (*) is supposed to be either a prefix or a suffix, hence the use of replaceFirst
+      } else if (annotation.value().indexOf('*')
+          >= 0) { // dynamic fields are annotated as @Field("categories_*")

Review comment:
       Fix this
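
Splitting the condition between indexOf('*') and >= 0 just to keep a trailing comment is the worst of both worlds. Moving the comment inside the branch restores the one-line condition (a sketch, not the committed fix):

      } else if (annotation.value().indexOf('*') >= 0) {
        // Dynamic fields are annotated as @Field("categories_*"). If the field was annotated as
        // a dynamic field, convert the name into a pattern; the wildcard (*) is supposed to be
        // either a prefix or a suffix, hence the use of replaceFirst.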

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
##########
@@ -416,7 +431,9 @@ private void addRunner() {
       Runner r = new Runner();
       runners.add(r);
       try {
-        scheduler.execute(r);  // this can throw an exception if the scheduler has been shutdown, but that should be fine.
+        scheduler.execute(
+            r); // this can throw an exception if the scheduler has been shutdown, but that should
+        // be fine.

Review comment:
       fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
##########
@@ -584,20 +604,22 @@ public synchronized void blockUntilFinished() throws IOException {
 
       synchronized (runners) {
 
-        // NOTE: if the executor is shut down, runners may never become empty (a scheduled task may never be run,
-        // which means it would never remove itself from the runners list. This is why we don't wait forever
+        // NOTE: if the executor is shut down, runners may never become empty (a scheduled task may
+        // never be run,
+        // which means it would never remove itself from the runners list. This is why we don't wait
+        // forever
         // and periodically check if the scheduler is shutting down.
         int loopCount = 0;

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java
##########
@@ -288,16 +308,17 @@ private StreamComparator parseComp(String sort, String fl) throws IOException {
 
     String[] fls = fl.split(",");
     HashSet<String> fieldSet = new HashSet<>();
-    for(String f : fls) {
-      fieldSet.add(f.trim()); //Handle spaces in the field list.
+    for (String f : fls) {
+      fieldSet.add(f.trim()); // Handle spaces in the field list.
     }
 
     String[] sorts = sort.split(",");
     StreamComparator[] comps = new StreamComparator[sorts.length];
-    for(int i=0; i<sorts.length; i++) {
+    for (int i = 0; i < sorts.length; i++) {
       String s = sorts[i];
 
-      String[] spec = s.trim().split("\\s+"); //This should take into account spaces in the sort spec.
+      String[] spec =

Review comment:
       Fix this
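
The hunk ends mid-statement, but the cause is visible: the trailing comment forced String[] spec = onto its own line. Hoisting the comment avoids the split (a sketch):

      // This should take into account spaces in the sort spec.
      String[] spec = s.trim().split("\\s+");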

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/HashJoinStream.java
##########
@@ -234,53 +259,53 @@ public void close() throws IOException {
   }
 
   public Tuple read() throws IOException {
-    
+
     findNextWorkingFullTuple:
-    while(null == workingFullTuple){
+    while (null == workingFullTuple) {
       Tuple fullTuple = fullStream.read();
-      
+
       // We're at the end of the line
-      if(fullTuple.EOF){
+      if (fullTuple.EOF) {
         return fullTuple;
       }
-      
-      // If fullTuple doesn't have a valid hash or if there is no doc to 
+
+      // If fullTuple doesn't have a valid hash or if there is no doc to
       // join with then retry loop - keep going until we find one
       String fullHash = computeHash(fullTuple, leftHashOn);
-      if(null == fullHash || !hashedTuples.containsKey(fullHash)){
+      if (null == fullHash || !hashedTuples.containsKey(fullHash)) {
         continue findNextWorkingFullTuple;
       }
-      
+
       workingFullTuple = fullTuple;
       workingFullHash = fullHash;
-      workngHashSetIdx = 0;      
+      workngHashSetIdx = 0;
     }
-    
+
     // At this point we know we have at least one doc to match on
-    // Due to the check at the end, before returning, we know we have at least one to match with left
+    // Due to the check at the end, before returning, we know we have at least one to match with
+    // left

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java
##########
@@ -99,17 +94,17 @@ public ModifiableSolrParams getParams() {
     return params;
   }
 
-  //================================================ Helper Methods ==================================================
+  // ================================================ Helper Methods
+  // ==================================================

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/request/FieldAnalysisRequest.java
##########
@@ -92,13 +87,13 @@ public String getRequestType() {
     return SolrRequestType.QUERY.toString();
   }
 
-  //================================================ Helper Methods ==================================================
+  // ================================================ Helper Methods
+  // ==================================================

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/request/HealthCheckRequest.java
##########
@@ -42,22 +41,23 @@ private HealthCheckRequest(METHOD m, String path) {
   }
 
   public void setMaxGenerationLag(int maxLagAllowed) {
-      this.maxLagAllowed = OptionalInt.of(maxLagAllowed);
+    this.maxLagAllowed = OptionalInt.of(maxLagAllowed);
   }
 
   @Override
   public SolrParams getParams() {
-      if (maxLagAllowed.isPresent()) {
-          ModifiableSolrParams params = new ModifiableSolrParams();
-          params.set(PARAM_MAX_GENERATION_LAG, maxLagAllowed.getAsInt());
-          return params;
-      }
-      return null;
+    if (maxLagAllowed.isPresent()) {
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.set(PARAM_MAX_GENERATION_LAG, maxLagAllowed.getAsInt());
+      return params;
+    }
+    return null;
   }
 
   @Override
   protected HealthCheckResponse createResponse(SolrClient client) {
-    // TODO: Accept requests w/ CloudSolrClient while ensuring that the request doesn't get routed to
+    // TODO: Accept requests w/ CloudSolrClient while ensuring that the request doesn't get routed
+    // to
     // an unintended recepient.

Review comment:
       Fix this
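
The wrap stranded the word "to" on its own line. A reflow that also fixes the "recepient" typo while there (a sketch, not the committed fix):

    // TODO: Accept requests w/ CloudSolrClient while ensuring that the request doesn't get
    // routed to an unintended recipient.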

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java
##########
@@ -121,14 +116,13 @@ String getXML(Writer writer) throws IOException {
     return (xml.length() > 0) ? xml : null;
   }
 
-
-  //============================================ Setter/Getter Methods ===============================================
+  // ============================================ Setter/Getter Methods
+  // ===============================================

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/ops/ReplaceOperation.java
##########
@@ -27,63 +26,69 @@
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 /**
- * Replaces some tuple value with another. The replacement value can be either a given value or the 
- * value of another field in the tuple. The expression for a replace operation can be of multiple forms:
- *  replace(fieldA, 0, withValue=100)  // for fieldA if equals 0 then set to 100
- *  replace(fieldA, null, withValue=0) // for fieldA if null then set to 0
- *  replace(fieldA, null, withField=fieldB) // for fieldA if null then set to the value of fieldB (if fieldB is null then fieldA will end up as null)
- *  replace(fieldA, 0, withField=fieldB) // for fieldA if 0 then set to the value of fieldB (if fieldB is 0 then fieldA will end up as 0)
- *  replace(fieldA, "Izzy and Kayden", withValue="my kids")
- *  
- * You can also construct these without the field name in the expression but that does require that you provide the field name during construction.
- * This is most useful during metric calculation because when calculating a metric you have already provided a field name in the metric so there
- * is no reason to have to provide the field name again in the operation
- *  sum(fieldA, replace(null, withValue=0)) // performs the replacement on fieldA
- *  
- * Equality is determined by the standard type .equals() functions.
+ * Replaces some tuple value with another. The replacement value can be either a given value or the
+ * value of another field in the tuple. The expression for a replace operation can be of multiple
+ * forms: replace(fieldA, 0, withValue=100) // for fieldA if equals 0 then set to 100
+ * replace(fieldA, null, withValue=0) // for fieldA if null then set to 0 replace(fieldA, null,
+ * withField=fieldB) // for fieldA if null then set to the value of fieldB (if fieldB is null then
+ * fieldA will end up as null) replace(fieldA, 0, withField=fieldB) // for fieldA if 0 then set to
+ * the value of fieldB (if fieldB is 0 then fieldA will end up as 0) replace(fieldA, "Izzy and
+ * Kayden", withValue="my kids")

Review comment:
       Fix this
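
google-java-format reflows Javadoc paragraphs, so the one-example-per-line layout was mashed into prose. Wrapping the examples in a <pre> block, which the formatter should leave untouched, preserves them (a sketch using only the examples already in the Javadoc):

 * Replaces some tuple value with another. The replacement value can be either a given value or
 * the value of another field in the tuple. The expression for a replace operation can be of
 * multiple forms:
 *
 * <pre>
 * replace(fieldA, 0, withValue=100)        // for fieldA if equals 0 then set to 100
 * replace(fieldA, null, withValue=0)       // for fieldA if null then set to 0
 * replace(fieldA, null, withField=fieldB)  // for fieldA if null then set to the value of fieldB
 * replace(fieldA, 0, withField=fieldB)     // for fieldA if 0 then set to the value of fieldB
 * replace(fieldA, "Izzy and Kayden", withValue="my kids")
 * </pre>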

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/GatherNodesStream.java
##########
@@ -397,51 +429,54 @@ private StreamExpression toExpression(StreamFactory factory, boolean includeStre
 
     return expression;
   }
-  
+
   @Override
   public Explanation toExplanation(StreamFactory factory) throws IOException {
 
     StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString());
-    
+
     explanation.setFunctionName(factory.getFunctionName(this.getClass()));
     explanation.setImplementingClass(this.getClass().getName());
     explanation.setExpressionType(ExpressionType.GRAPH_SOURCE);
     explanation.setExpression(toExpression(factory).toString());
-    
+
     // one child is a stream
     explanation.addChild(tupleStream.toExplanation(factory));
-    
+
     // one child is a datastore so add it at this point
     StreamExplanation child = new StreamExplanation(getStreamNodeId() + "-datastore");
     child.setFunctionName("solr (graph)");
     child.setImplementingClass("Solr/Lucene");
-    child.setExpressionType(ExpressionType.DATASTORE);    
-    child.setExpression(queryParams.entrySet().stream().map(e -> String.format(Locale.ROOT, "%s=%s", e.getKey(), e.getValue())).collect(Collectors.joining(",")));    
+    child.setExpressionType(ExpressionType.DATASTORE);
+    child.setExpression(
+        queryParams.entrySet().stream()
+            .map(e -> String.format(Locale.ROOT, "%s=%s", e.getKey(), e.getValue()))
+            .collect(Collectors.joining(",")));
     explanation.addChild(child);
-    
-    if(null != metrics){
-      for(Metric metric : metrics){
-          explanation.addHelper(metric.toExplanation(factory));
+
+    if (null != metrics) {
+      for (Metric metric : metrics) {
+        explanation.addHelper(metric.toExplanation(factory));
       }
     }
-    
+
     return explanation;
   }
 
-
   public void setStreamContext(StreamContext context) {
     this.traversal = (Traversal) context.get("traversal");
     if (traversal == null) {
-      //No traversal in the context. So create a new context and a new traversal.
-      //This ensures that two separate traversals in the same expression don't pollute each others traversal.
+      // No traversal in the context. So create a new context and a new traversal.
+      // This ensures that two separate traversals in the same expression don't pollute each others
+      // traversal.

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/XMLResponseParser.java
##########
@@ -225,77 +265,87 @@ public static KnownType get( String v )
 
     // just eat up the events...
     int depth = 0;
-    while( true )
-    {
+    while (true) {
       switch (parser.next()) {
-      case XMLStreamConstants.START_ELEMENT:
-        depth++;
-        builder.setLength( 0 ); // reset the text
-        type = KnownType.get( parser.getLocalName() );
-        if( type == null ) {
-          throw new RuntimeException( "this must be known type! not: "+parser.getLocalName() );
-        }
+        case XMLStreamConstants.START_ELEMENT:
+          depth++;
+          builder.setLength(0); // reset the text
+          type = KnownType.get(parser.getLocalName());
+          if (type == null) {
+            throw new RuntimeException("this must be known type! not: " + parser.getLocalName());
+          }
 
-        name = null;
-        int cnt = parser.getAttributeCount();
-        for( int i=0; i<cnt; i++ ) {
-          if( "name".equals( parser.getAttributeLocalName( i ) ) ) {
-            name = parser.getAttributeValue( i );
-            break;
+          name = null;
+          int cnt = parser.getAttributeCount();
+          for (int i = 0; i < cnt; i++) {
+            if ("name".equals(parser.getAttributeLocalName(i))) {
+              name = parser.getAttributeValue(i);
+              break;
+            }
           }
-        }
 
-        /** The name in a NamedList can actually be null
-        if( name == null ) {
-          throw new XMLStreamException( "requires 'name' attribute: "+parser.getLocalName(), parser.getLocation() );
-        }
-        **/
-
-        if( !type.isLeaf ) {
-          switch( type ) {
-          case LST:    nl.add( name, readNamedList( parser ) ); depth--; continue;
-          case ARR:    nl.add( name, readArray(     parser ) ); depth--; continue;
-          case RESULT: nl.add( name, readDocuments( parser ) ); depth--; continue;
-          case DOC:    nl.add( name, readDocument(  parser ) ); depth--; continue;
-          case BOOL:
-          case DATE:
-          case DOUBLE:
-          case FLOAT:
-          case INT:
-          case LONG:
-          case NULL:
-          case STR:
-          case RAW:
-            break;
+          /**
+           * The name in a NamedList can actually be null if( name == null ) { throw new
+           * XMLStreamException( "requires 'name' attribute: "+parser.getLocalName(),
+           * parser.getLocation() ); }
+           */
+          if (!type.isLeaf) {
+            switch (type) {
+              case LST:
+                nl.add(name, readNamedList(parser));
+                depth--;
+                continue;
+              case ARR:
+                nl.add(name, readArray(parser));
+                depth--;
+                continue;
+              case RESULT:
+                nl.add(name, readDocuments(parser));
+                depth--;
+                continue;
+              case DOC:
+                nl.add(name, readDocument(parser));
+                depth--;
+                continue;
+              case BOOL:
+              case DATE:
+              case DOUBLE:
+              case FLOAT:
+              case INT:
+              case LONG:
+              case NULL:
+              case STR:
+              case RAW:
+                break;
+            }
+            throw new XMLStreamException("branch element not handled!", parser.getLocation());
           }
-          throw new XMLStreamException( "branch element not handled!", parser.getLocation() );
-        }
-        break;
+          break;
 
-      case XMLStreamConstants.END_ELEMENT:
-        if( --depth < 0 ) {
-          return nl;
-        }
-        //System.out.println( "NL:ELEM:"+type+"::"+name+"::"+builder );
-        nl.add( name, type.read( builder.toString().trim() ) );
-        break;
-
-      case XMLStreamConstants.SPACE: // TODO?  should this be trimmed? make sure it only gets one/two space?
-      case XMLStreamConstants.CDATA:
-      case XMLStreamConstants.CHARACTERS:
-        builder.append( parser.getText() );
-        break;
+        case XMLStreamConstants.END_ELEMENT:
+          if (--depth < 0) {
+            return nl;
+          }
+          // System.out.println( "NL:ELEM:"+type+"::"+name+"::"+builder );
+          nl.add(name, type.read(builder.toString().trim()));
+          break;
+
+        case XMLStreamConstants
+            .SPACE: // TODO?  should this be trimmed? make sure it only gets one/two space?

Review comment:
       Fix this
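
Breaking XMLStreamConstants away from .SPACE to save a trailing TODO is hard to read. Moving the TODO above the case label keeps the label whole (a sketch, not the committed fix):

        // TODO? should this be trimmed? make sure it only gets one/two space?
        case XMLStreamConstants.SPACE:
        case XMLStreamConstants.CDATA:
        case XMLStreamConstants.CHARACTERS:
          builder.append(parser.getText());
          break;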

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/JoinStream.java
##########
@@ -131,81 +141,88 @@ public Explanation toExplanation(StreamFactory factory) throws IOException {
     explanation.setExpressionType(ExpressionType.STREAM_DECORATOR);
     explanation.setExpression(toExpression(factory, false).toString());
     explanation.addHelper(eq.toExplanation(factory));
-    
-    for(TupleStream stream : streams){
+
+    for (TupleStream stream : streams) {
       explanation.addChild(stream.toExplanation(factory));
     }
-    
-    return explanation;    
+
+    return explanation;
   }
-  
+
   public void setStreamContext(StreamContext context) {
     for (PushBackStream stream : streams) {
       stream.setStreamContext(context);
     }
   }
-  
+
   public void open() throws IOException {
     for (PushBackStream stream : streams) {
       stream.open();
     }
   }
-  
+
   public void close() throws IOException {
     for (PushBackStream stream : streams) {
       stream.close();
     }
   }
-  
+
   public List<TupleStream> children() {
     List<TupleStream> list = new ArrayList<TupleStream>();
     for (TupleStream stream : streams) {
       list.add(stream);
     }
     return list;
   }
-  
+
   public PushBackStream getStream(int idx) {
     if (streams.size() > idx) {
       return streams.get(idx);
     }
-    
-    throw new IllegalArgumentException(String.format(Locale.ROOT,"Stream idx=%d doesn't exist. Number of streams is %d", idx,
-        streams.size()));
+
+    throw new IllegalArgumentException(
+        String.format(
+            Locale.ROOT,
+            "Stream idx=%d doesn't exist. Number of streams is %d",
+            idx,
+            streams.size()));
   }
-  
+
   protected boolean isValidTupleOrder() {
-    // Validate that the equalitor is derivable from the comparator in each stream. If it is, then we know all stream

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/MergeStream.java
##########
@@ -132,118 +148,119 @@ public Explanation toExplanation(StreamFactory factory) throws IOException {
     explanation.setExpressionType(ExpressionType.STREAM_DECORATOR);
     explanation.setExpression(toExpression(factory, false).toString());
     explanation.addHelper(comp.toExplanation(factory));
-    
-    for(PushBackStream stream : streams){
+
+    for (PushBackStream stream : streams) {
       explanation.addChild(stream.toExplanation(factory));
     }
-    
-    return explanation;    
+
+    return explanation;
   }
 
   public void setStreamContext(StreamContext context) {
-    for(PushBackStream stream : streams){
+    for (PushBackStream stream : streams) {
       stream.setStreamContext(context);
     }
   }
 
   public List<TupleStream> children() {
-    List<TupleStream> l =  new ArrayList<TupleStream>();
-    for(PushBackStream stream : streams){
+    List<TupleStream> l = new ArrayList<TupleStream>();
+    for (PushBackStream stream : streams) {
       l.add(stream);
     }
     return l;
   }
 
   public void open() throws IOException {
-    for(PushBackStream stream : streams){
+    for (PushBackStream stream : streams) {
       stream.open();
     }
   }
 
   public void close() throws IOException {
-    for(PushBackStream stream : streams){
+    for (PushBackStream stream : streams) {
       stream.close();
     }
   }
 
   public Tuple read() throws IOException {
-    
-    // might be able to optimize this by sorting the streams based on the next to read tuple from each.
-    // if we can ensure the sort of the streams and update it in less than linear time then there would
-    // be some performance gain. But, assuming the # of streams is kinda small then this might not be
+
+    // might be able to optimize this by sorting the streams based on the next to read tuple from
+    // each.
+    // if we can ensure the sort of the streams and update it in less than linear time then there
+    // would
+    // be some performance gain. But, assuming the # of streams is kinda small then this might not
+    // be
     // worth it

Review comment:
       Fix this
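
Three fragments stranded in one comment ("each.", "would", "be"). A manual reflow within the limit (a sketch; wording unchanged):

    // might be able to optimize this by sorting the streams based on the next to read tuple
    // from each. if we can ensure the sort of the streams and update it in less than linear
    // time then there would be some performance gain. But, assuming the # of streams is kinda
    // small then this might not be worth it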

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/JDBCStream.java
##########
@@ -538,97 +636,108 @@ public Tuple read() throws IOException {
         // we do not have a record
         tuple.put(StreamParams.EOF, true);
       }
-      
+
       return tuple;
     } catch (SQLException e) {
-      throw new IOException(String.format(Locale.ROOT, "Failed to read next record with error '%s'", e.getMessage()), e);
+      throw new IOException(
+          String.format(Locale.ROOT, "Failed to read next record with error '%s'", e.getMessage()),
+          e);
     }
   }
 
   @Override
   public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
-    // functionName(collectionName, param1, param2, ..., paramN, sort="comp", [aliases="field=alias,..."])
-    
+    // functionName(collectionName, param1, param2, ..., paramN, sort="comp",
+    // [aliases="field=alias,..."])
+
     // function name
     StreamExpression expression = new StreamExpression(factory.getFunctionName(this.getClass()));
-    
+
     // connection url
     expression.addParameter(new StreamExpressionNamedParameter("connection", connectionUrl));
-    
+
     // sql
     expression.addParameter(new StreamExpressionNamedParameter("sql", sqlQuery));
 
     // fetchSize
-    expression.addParameter(new StreamExpressionNamedParameter("fetchSize", Integer.toString(fetchSize)));
+    expression.addParameter(
+        new StreamExpressionNamedParameter("fetchSize", Integer.toString(fetchSize)));
 
     // sort
-    expression.addParameter(new StreamExpressionNamedParameter(SORT, definedSort.toExpression(factory)));
-    
+    expression.addParameter(
+        new StreamExpressionNamedParameter(SORT, definedSort.toExpression(factory)));
+
     // driver class
-    if(null != driverClassName){
-      expression.addParameter(new StreamExpressionNamedParameter("driver", driverClassName));      
+    if (null != driverClassName) {
+      expression.addParameter(new StreamExpressionNamedParameter("driver", driverClassName));
     }
-    
+
     // connection properties
-    if(null != connectionProperties){
-      for(String propertyName : connectionProperties.stringPropertyNames()){
-        expression.addParameter(new StreamExpressionNamedParameter(propertyName, connectionProperties.getProperty(propertyName)));    
+    if (null != connectionProperties) {
+      for (String propertyName : connectionProperties.stringPropertyNames()) {
+        expression.addParameter(
+            new StreamExpressionNamedParameter(
+                propertyName, connectionProperties.getProperty(propertyName)));
       }
     }
-        
-    return expression;   
+
+    return expression;
   }
-  
+
   @Override
   public Explanation toExplanation(StreamFactory factory) throws IOException {
 
     StreamExplanation explanation = new StreamExplanation(getStreamNodeId().toString());
-    
+
     explanation.setFunctionName(factory.getFunctionName(this.getClass()));
     explanation.setImplementingClass(this.getClass().getName());
     explanation.setExpressionType(ExpressionType.STREAM_SOURCE);
-    
-    StreamExpression expression = (StreamExpression)toExpression(factory);
+
+    StreamExpression expression = (StreamExpression) toExpression(factory);
     explanation.setExpression(expression.toString());
-    
+
     String driverClassName = this.driverClassName;
-    if(null == driverClassName){
-      try{
+    if (null == driverClassName) {
+      try {
         driverClassName = DriverManager.getDriver(connectionUrl).getClass().getName();
-      }
-      catch(Exception e){
-        driverClassName = String.format(Locale.ROOT, "Failed to find driver for connectionUrl='%s'", connectionUrl);
+      } catch (Exception e) {
+        driverClassName =
+            String.format(
+                Locale.ROOT, "Failed to find driver for connectionUrl='%s'", connectionUrl);
       }
     }
-    
+
     // child is a datastore so add it at this point
     StreamExplanation child = new StreamExplanation(getStreamNodeId() + "-datastore");
     child.setFunctionName("jdbc-source");
     child.setImplementingClass(driverClassName);
-    child.setExpressionType(ExpressionType.DATASTORE);    
+    child.setExpressionType(ExpressionType.DATASTORE);
     child.setExpression(sqlQuery);
-    
+
     explanation.addChild(child);
-    
+
     return explanation;
   }
-  
+
   @Override
   public List<TupleStream> children() {
     return new ArrayList<>();
   }
 
   @Override
   public StreamComparator getStreamSort() {
-    // TODO: Need to somehow figure out the sort applied to the incoming data. This is not something you can ask a JDBC stream
-    // Possibly we can ask the creator to tell us the fields the data is sorted by. This would be duplicate information because
-    // it's already in the sqlQuery but there's no way we can reliably determine the sort from the query.
+    // TODO: Need to somehow figure out the sort applied to the incoming data. This is not something
+    // you can ask a JDBC stream
+    // Possibly we can ask the creator to tell us the fields the data is sorted by. This would be
+    // duplicate information because
+    // it's already in the sqlQuery but there's no way we can reliably determine the sort from the
+    // query.

Review comment:
       Fix this
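
The same TODO-paragraph mangling, with "something", "be", and "the" dangling. Reflowed within the limit (a sketch):

    // TODO: Need to somehow figure out the sort applied to the incoming data. This is not
    // something you can ask a JDBC stream. Possibly we can ask the creator to tell us the
    // fields the data is sorted by. This would be duplicate information, because it's already
    // in the sqlQuery, but there's no way we can reliably determine the sort from the query.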

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/UpdateStream.java
##########
@@ -42,25 +41,26 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
  * Sends tuples emitted by a wrapped {@link TupleStream} as updates to a SolrCloud collection.
+ *
  * @since 6.0.0
  */
 public class UpdateStream extends TupleStream implements Expressible {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  public static String BATCH_INDEXED_FIELD_NAME = "batchIndexed"; // field name in summary tuple for #docs updated in batch
+  public static String BATCH_INDEXED_FIELD_NAME =
+      "batchIndexed"; // field name in summary tuple for #docs updated in batch

Review comment:
       Fix this
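
Same constant-plus-trailing-comment split as elsewhere. Comment above the field (a sketch, not the committed fix):

  // Field name in the summary tuple for the number of docs updated in the batch.
  public static String BATCH_INDEXED_FIELD_NAME = "batchIndexed";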

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/HashRollupStream.java
##########
@@ -49,103 +48,122 @@
   private Bucket[] buckets;
   private Metric[] metrics;
 
-
   private Iterator<Tuple> tupleIterator;
 
-  public HashRollupStream(TupleStream tupleStream,
-                      Bucket[] buckets,
-                      Metric[] metrics) {
+  public HashRollupStream(TupleStream tupleStream, Bucket[] buckets, Metric[] metrics) {
     init(tupleStream, buckets, metrics);
   }
 
   public HashRollupStream(StreamExpression expression, StreamFactory factory) throws IOException {
     // grab all parameters out
-    List<StreamExpression> streamExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, TupleStream.class);
-    List<StreamExpression> metricExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, Metric.class);
+    List<StreamExpression> streamExpressions =
+        factory.getExpressionOperandsRepresentingTypes(
+            expression, Expressible.class, TupleStream.class);
+    List<StreamExpression> metricExpressions =
+        factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, Metric.class);
     StreamExpressionNamedParameter overExpression = factory.getNamedOperand(expression, "over");
 
     // validate expression contains only what we want.
-    if(expression.getParameters().size() != streamExpressions.size() + metricExpressions.size() + 1){
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - unknown operands found", expression));
+    if (expression.getParameters().size()
+        != streamExpressions.size() + metricExpressions.size() + 1) {
+      throw new IOException(
+          String.format(Locale.ROOT, "Invalid expression %s - unknown operands found", expression));
     }
 
-    if(1 != streamExpressions.size()){
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting a single stream but found %d",expression, streamExpressions.size()));
+    if (1 != streamExpressions.size()) {
+      throw new IOException(
+          String.format(
+              Locale.ROOT,
+              "Invalid expression %s - expecting a single stream but found %d",
+              expression,
+              streamExpressions.size()));
     }
 
-    if(null == overExpression || !(overExpression.getParameter() instanceof StreamExpressionValue)){
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting single 'over' parameter listing fields to rollup by but didn't find one",expression));
+    if (null == overExpression
+        || !(overExpression.getParameter() instanceof StreamExpressionValue)) {
+      throw new IOException(
+          String.format(
+              Locale.ROOT,
+              "Invalid expression %s - expecting single 'over' parameter listing fields to rollup by but didn't find one",
+              expression));
     }
 
     // Construct the metrics
     Metric[] metrics = new Metric[metricExpressions.size()];
-    for(int idx = 0; idx < metricExpressions.size(); ++idx){
+    for (int idx = 0; idx < metricExpressions.size(); ++idx) {
       metrics[idx] = factory.constructMetric(metricExpressions.get(idx));
     }
 
     // Construct the buckets.
-    // Buckets are nothing more than equalitors (I think). We can use equalitors as helpers for creating the buckets, but because
-    // I feel I'm missing something wrt buckets I don't want to change the use of buckets in this class to instead be equalitors.
-    StreamEqualitor streamEqualitor = factory.constructEqualitor(((StreamExpressionValue)overExpression.getParameter()).getValue(), FieldEqualitor.class);
+    // Buckets are nothing more than equalitors (I think). We can use equalitors as helpers for
+    // creating the buckets, but because
+    // I feel I'm missing something wrt buckets I don't want to change the use of buckets in this
+    // class to instead be equalitors.

Review comment:
       Fix this

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/OuterHashJoinStream.java
##########
@@ -18,116 +18,121 @@
 
       // If fullTuple doesn't have a valid hash or the hash cannot be found in the hashedTuples then
       // return the tuple from fullStream.
-      // This is an outer join so there is no requirement there be a matching value in the hashed stream
+      // This is an outer join so there is no requirement there be a matching value in the hashed
+      // stream

Review comment:
       Fix this: "in the hashed / stream" is split mid-phrase. Merge the three comment lines and rewrap.
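       A possible rewrap:

      // If fullTuple doesn't have a valid hash or the hash cannot be found in the
      // hashedTuples then return the tuple from fullStream. This is an outer join so there
      // is no requirement there be a matching value in the hashed stream.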

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrClient.java
##########
@@ -511,19 +523,27 @@ private HttpEntityEnclosingRequestBase fillContentStream(
       }
       postOrPut.setEntity(entity);
     } else {
-      //not using multipart
+      // not using multipart
       postOrPut.setEntity(new UrlEncodedFormEntity(postOrPutParams, StandardCharsets.UTF_8));
     }
     return postOrPut;
   }
 
-  private static final List<String> errPath = Arrays.asList("metadata", "error-class");//Utils.getObjectByPath(err, false,"metadata/error-class")
+  private static final List<String> errPath =
+      Arrays.asList(
+          "metadata", "error-class"); // Utils.getObjectByPath(err, false,"metadata/error-class")

Review comment:
       Fix this: the trailing comment now spills onto its own continuation line. Consider hoisting it above the declaration.
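       E.g.:

  // Utils.getObjectByPath(err, false, "metadata/error-class")
  private static final List<String> errPath = Arrays.asList("metadata", "error-class");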

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
##########
@@ -182,28 +191,33 @@ public StreamExpressionNamedParameter getNamedOperand(StreamExpression expressio
     return namedParameters;
   }
 
-  public List<StreamExpressionParameter> getOperandsOfType(StreamExpression expression, Class<?>... clazzes) {
+  public List<StreamExpressionParameter> getOperandsOfType(
+      StreamExpression expression, Class<?>... clazzes) {
     List<StreamExpressionParameter> parameters = new ArrayList<>();
-    
+
     parameterLoop:
-     for (StreamExpressionParameter parameter : expression.getParameters()) {
+    for (StreamExpressionParameter parameter : expression.getParameters()) {
       for (Class<?> clazz : clazzes) {
         if (!clazz.isAssignableFrom(parameter.getClass())) {
-          continue parameterLoop; // go to the next parameter since this parameter cannot be assigned to at least one of the classes
+          continue
+              parameterLoop; // go to the next parameter since this parameter cannot be assigned to
+          // at least one of the classes

Review comment:
       Fix this: the labeled continue is split across two lines and the comment trails off mid-sentence. Move the comment above the statement.
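       One option:

        if (!clazz.isAssignableFrom(parameter.getClass())) {
          // Go to the next parameter since this parameter cannot be assigned to at least
          // one of the classes.
          continue parameterLoop;
        }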

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldAnalysisResponse.java
##########
@@ -127,43 +122,38 @@ public Analysis getFieldNameAnalysis(String fieldName) {
     return analysisByFieldName.entrySet();
   }
 
-
-  //================================================= Inner Classes ==================================================
+  // ================================================= Inner Classes
+  // ==================================================

Review comment:
       Fix this: the "Inner Classes" banner is split across two comment lines. Shorten it so it fits on one line.
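       E.g.:

  // =============================== Inner Classes ===============================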

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
##########
@@ -251,49 +282,51 @@ public void close() throws IOException {
 
   public Tuple read() throws IOException {
     Tuple original = stream.read();
-    
-    if(original.EOF){
+
+    if (original.EOF) {
       return original;
     }
 
     // create a copy with the limited set of fields
     Tuple workingToReturn = new Tuple();
     Tuple workingForEvaluators = new Tuple();
 
-    //Clear the TupleContext before running the evaluators.
-    //The TupleContext allows evaluators to cache values within the scope of a single tuple.
-    //For example a LocalDateTime could be parsed by one evaluator and used by other evaluators within the scope of the tuple.
-    //This avoids the need to create multiple LocalDateTime instances for the same tuple to satisfy a select expression.
+    // Clear the TupleContext before running the evaluators.
+    // The TupleContext allows evaluators to cache values within the scope of a single tuple.
+    // For example a LocalDateTime could be parsed by one evaluator and used by other evaluators
+    // within the scope of the tuple.
+    // This avoids the need to create multiple LocalDateTime instances for the same tuple to satisfy
+    // a select expression.

Review comment:
       Fix this: "by other evaluators / within the scope" and "to satisfy / a select expression" are split mid-sentence. Rewrap the whole block.
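       A possible rewrap:

    // Clear the TupleContext before running the evaluators. The TupleContext allows
    // evaluators to cache values within the scope of a single tuple. For example a
    // LocalDateTime could be parsed by one evaluator and used by other evaluators within
    // the scope of the tuple. This avoids the need to create multiple LocalDateTime
    // instances for the same tuple to satisfy a select expression.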

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/CreateCorePayload.java
##########
@@ -17,67 +17,51 @@
 
 package org.apache.solr.client.solrj.request.beans;
 
-import org.apache.solr.common.annotation.JsonProperty;
-import org.apache.solr.common.util.ReflectMapWriter;
-
 import java.util.List;
 import java.util.Map;
+import org.apache.solr.common.annotation.JsonProperty;
+import org.apache.solr.common.util.ReflectMapWriter;
 
 public class CreateCorePayload implements ReflectMapWriter {
-    @JsonProperty(required = true)
-    public String name;
+  @JsonProperty(required = true)
+  public String name;
 
-    @JsonProperty
-    public String instanceDir;
+  @JsonProperty public String instanceDir;
 
-    @JsonProperty
-    public String dataDir;
+  @JsonProperty public String dataDir;
 
-    @JsonProperty
-    public String ulogDir;
+  @JsonProperty public String ulogDir;
 
-    @JsonProperty
-    public String schema;
+  @JsonProperty public String schema;
 
-    @JsonProperty
-    public String config;
+  @JsonProperty public String config;
 
-    @JsonProperty
-    public String configSet;
+  @JsonProperty public String configSet;
 
-    @JsonProperty
-    public Boolean loadOnStartup;
+  @JsonProperty public Boolean loadOnStartup;
 
-    // If our JsonProperty clone was more feature-rich here we could specify the property be called 'transient', but
-    // without that support it needs to be named something else to avoid conflicting with the 'transient' keyword in Java
-    @JsonProperty
-    public Boolean isTransient;
+  // If our JsonProperty clone was more feature-rich here we could specify the property be called
+  // 'transient', but
+  // without that support it needs to be named something else to avoid conflicting with the
+  // 'transient' keyword in Java

Review comment:
       Fix this: "the property be called / 'transient'" is split mid-sentence. Rejoin and rewrap.
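       A possible rewrap, keeping the existing wording:

  // If our JsonProperty clone was more feature-rich here we could specify the property be
  // called 'transient', but without that support it needs to be named something else to
  // avoid conflicting with the 'transient' keyword in Java.
  @JsonProperty public Boolean isTransient;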

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RollupStream.java
##########
@@ -48,127 +47,146 @@
   private PushBackStream tupleStream;
   private Bucket[] buckets;
   private Metric[] metrics;
-  
+
   private HashKey currentKey = new HashKey("-");
   private Metric[] currentMetrics;
   private boolean finished = false;
 
-  public RollupStream(TupleStream tupleStream,
-                      Bucket[] buckets,
-                      Metric[] metrics) {
+  public RollupStream(TupleStream tupleStream, Bucket[] buckets, Metric[] metrics) {
     init(tupleStream, buckets, metrics);
   }
-  
+
   public RollupStream(StreamExpression expression, StreamFactory factory) throws IOException {
     // grab all parameters out
-    List<StreamExpression> streamExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, TupleStream.class);
-    List<StreamExpression> metricExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, Metric.class);
+    List<StreamExpression> streamExpressions =
+        factory.getExpressionOperandsRepresentingTypes(
+            expression, Expressible.class, TupleStream.class);
+    List<StreamExpression> metricExpressions =
+        factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, Metric.class);
     StreamExpressionNamedParameter overExpression = factory.getNamedOperand(expression, "over");
-    
+
     // validate expression contains only what we want.
-    if(expression.getParameters().size() != streamExpressions.size() + metricExpressions.size() + 1){
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - unknown operands found", expression));
+    if (expression.getParameters().size()
+        != streamExpressions.size() + metricExpressions.size() + 1) {
+      throw new IOException(
+          String.format(Locale.ROOT, "Invalid expression %s - unknown operands found", expression));
     }
-    
-    if(1 != streamExpressions.size()){
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting a single stream but found %d",expression, streamExpressions.size()));
+
+    if (1 != streamExpressions.size()) {
+      throw new IOException(
+          String.format(
+              Locale.ROOT,
+              "Invalid expression %s - expecting a single stream but found %d",
+              expression,
+              streamExpressions.size()));
     }
 
-    if(null == overExpression || !(overExpression.getParameter() instanceof StreamExpressionValue)){
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting single 'over' parameter listing fields to rollup by but didn't find one",expression));
+    if (null == overExpression
+        || !(overExpression.getParameter() instanceof StreamExpressionValue)) {
+      throw new IOException(
+          String.format(
+              Locale.ROOT,
+              "Invalid expression %s - expecting single 'over' parameter listing fields to rollup by but didn't find one",
+              expression));
     }
-    
+
     // Construct the metrics
     Metric[] metrics = new Metric[metricExpressions.size()];
-    for(int idx = 0; idx < metricExpressions.size(); ++idx){
+    for (int idx = 0; idx < metricExpressions.size(); ++idx) {
       metrics[idx] = factory.constructMetric(metricExpressions.get(idx));
     }
-    
+
     // Construct the buckets.
-    // Buckets are nothing more than equalitors (I think). We can use equalitors as helpers for creating the buckets, but because
-    // I feel I'm missing something wrt buckets I don't want to change the use of buckets in this class to instead be equalitors.    
-    StreamEqualitor streamEqualitor = factory.constructEqualitor(((StreamExpressionValue)overExpression.getParameter()).getValue(), FieldEqualitor.class);
+    // Buckets are nothing more than equalitors (I think). We can use equalitors as helpers for
+    // creating the buckets, but because
+    // I feel I'm missing something wrt buckets I don't want to change the use of buckets in this
+    // class to instead be equalitors.

Review comment:
       Fix this: same mid-sentence split as flagged at the top of this review; rejoin the buckets comment into one wrapped block.

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/response/DocumentAnalysisResponse.java
##########
@@ -104,7 +101,8 @@ public DocumentAnalysis getDocumentAnalysis(String documentKey) {
     return documentAnalysisByKey.entrySet().iterator();
   }
 
-  //================================================= Inner Classes ==================================================
+  // ================================================= Inner Classes
+  // ==================================================

Review comment:
       Fix this: same split banner as in FieldAnalysisResponse; shorten it so it fits on one line.

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/Aliases.java
##########
@@ -27,29 +27,29 @@
 import java.util.function.BiConsumer;
 import java.util.function.UnaryOperator;
 import java.util.stream.Collectors;
-
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.common.util.Utils;
 
 /**
- * Holds collection aliases -- virtual collections that point to one or more other collections.
- * We might add other types of aliases here some day.
- * Immutable.
+ * Holds collection aliases -- virtual collections that point to one or more other collections. We
+ * might add other types of aliases here some day. Immutable.
  */
 public class Aliases {
 
   /**
-   * An empty, minimal Aliases primarily used to support the non-cloud solr use cases. Not normally useful
-   * in cloud situations where the version of the node needs to be tracked even if all aliases are removed.
-   * The -1 version makes it subordinate to any real version, and furthermore we never "set" this EMPTY instance
-   * into ZK.
+   * An empty, minimal Aliases primarily used to support the non-cloud solr use cases. Not normally
+   * useful in cloud situations where the version of the node needs to be tracked even if all
+   * aliases are removed. The -1 version makes it subordinate to any real version, and furthermore
+   * we never "set" this EMPTY instance into ZK.
    */
-  public static final Aliases EMPTY = new Aliases(Collections.emptyMap(), Collections.emptyMap(), -1);
+  public static final Aliases EMPTY =
+      new Aliases(Collections.emptyMap(), Collections.emptyMap(), -1);
 
   // These two constants correspond to the top level elements in aliases.json. The first one denotes
-  // a section containing a list of aliases and their attendant collections, the second contains a list of
+  // a section containing a list of aliases and their attendant collections, the second contains a
+  // list of
   // aliases and their attendant properties (metadata) They probably should be
   // named "aliases" and "alias_properties" but for back compat reasons, we cannot change them

Review comment:
       Fix this: "contains a / list of" is split mid-phrase. Rewrap the whole comment.
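       A possible rewrap (also adding the missing punctuation after "(metadata)"):

  // These two constants correspond to the top level elements in aliases.json. The first
  // one denotes a section containing a list of aliases and their attendant collections;
  // the second contains a list of aliases and their attendant properties (metadata). They
  // probably should be named "aliases" and "alias_properties" but for back compat reasons,
  // we cannot change them.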

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java
##########
@@ -231,9 +248,12 @@ void init(String collectionName, String zkHost, SolrParams params) throws IOExce
     this.collection = collectionName;
     this.params = new ModifiableSolrParams(params);
 
-    // If the comparator is null then it was not explicitly set so we will create one using the sort parameter
-    // of the query. While doing this we will also take into account any aliases such that if we are sorting on
-    // fieldA but fieldA is aliased to alias.fieldA then the comparator will be against alias.fieldA.
+    // If the comparator is null then it was not explicitly set so we will create one using the sort
+    // parameter
+    // of the query. While doing this we will also take into account any aliases such that if we are
+    // sorting on
+    // fieldA but fieldA is aliased to alias.fieldA then the comparator will be against
+    // alias.fieldA.

Review comment:
       Fix this: "the sort / parameter" and "we are / sorting on" are stranded mid-sentence. Rewrap the block.
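       A possible rewrap:

    // If the comparator is null then it was not explicitly set so we will create one using
    // the sort parameter of the query. While doing this we will also take into account any
    // aliases such that if we are sorting on fieldA but fieldA is aliased to alias.fieldA
    // then the comparator will be against alias.fieldA.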

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java
##########
@@ -154,10 +156,13 @@ public int compareTo(Range that) {
   public Range fromString(String range) {
     int middle = range.indexOf('-');
     String minS = range.substring(0, middle);
-    String maxS = range.substring(middle+1);
-    long min = Long.parseLong(minS, 16);  // use long to prevent the parsing routines from potentially worrying about overflow
+    String maxS = range.substring(middle + 1);
+    long min =
+        Long.parseLong(
+            minS, 16); // use long to prevent the parsing routines from potentially worrying about
+    // overflow

Review comment:
       Fix this: the trailing comment now dangles "// overflow" on its own line. Hoist the comment above the parse.
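       E.g.:

    // Use long to prevent the parsing routines from potentially worrying about overflow.
    long min = Long.parseLong(minS, 16);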

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ConnectionManager.java
##########
@@ -146,11 +172,14 @@ public void process(WatchedEvent event) {
       }
 
       do {
-        // This loop will break if a valid connection is made. If a connection is not made then it will repeat and
+        // This loop will break if a valid connection is made. If a connection is not made then it
+        // will repeat and
         // try again to create a new connection.

Review comment:
       Fix this: "then it / will repeat and" is split mid-sentence. Rewrap.
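       A possible rewrap:

        // This loop will break if a valid connection is made. If a connection is not made
        // then it will repeat and try again to create a new connection.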

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java
##########
@@ -129,60 +129,66 @@ public static State getState(String stateStr) {
 
   public static final String REPLICAS = "replicas";
   public static final String RANGE = "range";
-  public static final String LEADER = "leader";       // FUTURE: do we want to record the leader as a slice property in the JSON (as opposed to isLeader as a replica property?)
+  public static final String LEADER =
+      "leader"; // FUTURE: do we want to record the leader as a slice property in the JSON (as
+  // opposed to isLeader as a replica property?)
   public static final String PARENT = "parent";
 
   private final String name;
   private final DocRouter.Range range;
-  private final Integer replicationFactor;      // FUTURE: optional per-slice override of the collection replicationFactor
-  private final Map<String,Replica> replicas;
+  private final Integer
+      replicationFactor; // FUTURE: optional per-slice override of the collection replicationFactor

Review comment:
       Fix this: both FUTURE comments are now split awkwardly around their declarations. Hoist them above the fields.
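       E.g. (the two fields are not adjacent in the class; shown together here):

  // FUTURE: do we want to record the leader as a slice property in the JSON
  // (as opposed to isLeader as a replica property)?
  public static final String LEADER = "leader";

  // FUTURE: optional per-slice override of the collection replicationFactor
  private final Integer replicationFactor;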

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java
##########
@@ -142,13 +160,14 @@ public static void zkTransfer(SolrZkClient zkClient, String src, Boolean srcIsZk
       return;
     }
 
-    //local -> ZK copy
+    // local -> ZK copy
     if (dstIsZk) {
       uploadToZK(zkClient, Paths.get(src), dst, null);
       return;
     }
 
-    // Copying individual files from ZK requires special handling since downloadFromZK assumes the node has children.
+    // Copying individual files from ZK requires special handling since downloadFromZK assumes the
+    // node has children.
     // This is kind of a weak test for the notion of "directory" on Zookeeper.
     // ZK -> local copy where ZK is a parent node

Review comment:
       Fix this: "assumes the / node has children" is split mid-sentence. Rewrap.
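       A possible rewrap:

    // Copying individual files from ZK requires special handling since downloadFromZK
    // assumes the node has children. This is kind of a weak test for the notion of
    // "directory" on Zookeeper.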

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/VMParamsAllAndReadonlyDigestZkACLProvider.java
##########
@@ -80,46 +86,65 @@ public VMParamsAllAndReadonlyDigestZkACLProvider(String zkDigestAllUsernameVMPar
   protected List<ACL> createACLsToAdd(boolean includeReadOnly) {
     String digestAllUsername = credentialsProps.getProperty(zkDigestAllUsernameVMParamName);
     String digestAllPassword = credentialsProps.getProperty(zkDigestAllPasswordVMParamName);
-    String digestReadonlyUsername = credentialsProps.getProperty(zkDigestReadonlyUsernameVMParamName);
-    String digestReadonlyPassword = credentialsProps.getProperty(zkDigestReadonlyPasswordVMParamName);
+    String digestReadonlyUsername =
+        credentialsProps.getProperty(zkDigestReadonlyUsernameVMParamName);
+    String digestReadonlyPassword =
+        credentialsProps.getProperty(zkDigestReadonlyPasswordVMParamName);
 
-    return createACLsToAdd(includeReadOnly,
-        digestAllUsername, digestAllPassword,
-        digestReadonlyUsername, digestReadonlyPassword);
+    return createACLsToAdd(
+        includeReadOnly,
+        digestAllUsername,
+        digestAllPassword,
+        digestReadonlyUsername,
+        digestReadonlyPassword);
   }
 
-  /**
-   * Note: only used for tests
-   */
-  protected List<ACL> createACLsToAdd(boolean includeReadOnly,
-                                      String digestAllUsername, String digestAllPassword,
-                                      String digestReadonlyUsername, String digestReadonlyPassword) {
+  /** Note: only used for tests */
+  protected List<ACL> createACLsToAdd(
+      boolean includeReadOnly,
+      String digestAllUsername,
+      String digestAllPassword,
+      String digestReadonlyUsername,
+      String digestReadonlyPassword) {
 
-      try {
+    try {
       List<ACL> result = new ArrayList<>(2);
-  
-      // Not to have to provide too much credentials and ACL information to the process it is assumed that you want "ALL"-acls
-      // added to the user you are using to connect to ZK (if you are using VMParamsSingleSetCredentialsDigestZkCredentialsProvider)
+
+      // Not to have to provide too much credentials and ACL information to the process it is
+      // assumed that you want "ALL"-acls
+      // added to the user you are using to connect to ZK (if you are using
+      // VMParamsSingleSetCredentialsDigestZkCredentialsProvider)

Review comment:
       Fix this: "it is / assumed that you want "ALL"-acls" is split mid-sentence. Rewrap.
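       A possible rewrap (smoothing the opening clause as well):

      // To avoid having to provide too many credentials and too much ACL information to
      // the process, it is assumed that you want "ALL"-acls added to the user you are
      // using to connect to ZK (if you are using
      // VMParamsSingleSetCredentialsDigestZkCredentialsProvider).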

##########
File path: solr/solrj/src/java/org/apache/solr/client/solrj/response/json/BucketBasedJsonFacet.java
##########
@@ -63,33 +63,34 @@ public BucketBasedJsonFacet(NamedList<?> bucketBasedFacet) {
       } else if ("numBuckets".equals(key)) {
         numBuckets = ((Number) value).longValue();
       } else if ("allBuckets".equals(key)) {
-        allBuckets = ((Number) ((NamedList)value).get("count")).longValue();
+        allBuckets = ((Number) ((NamedList) value).get("count")).longValue();
       } else if ("before".equals(key)) {
-        beforeFirstBucketCount = ((Number) ((NamedList)value).get("count")).longValue();
+        beforeFirstBucketCount = ((Number) ((NamedList) value).get("count")).longValue();
       } else if ("after".equals(key)) {
-        afterLastBucketCount = ((Number) ((NamedList)value).get("count")).longValue();
+        afterLastBucketCount = ((Number) ((NamedList) value).get("count")).longValue();
       } else if ("between".equals(key)) {
-        betweenAllBucketsCount = ((Number) ((NamedList)value).get("count")).longValue();
+        betweenAllBucketsCount = ((Number) ((NamedList) value).get("count")).longValue();
       } else {
-        // We don't recognize the key.  Possible JSON faceting schema has changed without updating client.
-        // Silently ignore for now, though we may want to consider throwing an error if this proves problematic.
+        // We don't recognize the key.  Possible JSON faceting schema has changed without updating
+        // client.
+        // Silently ignore for now, though we may want to consider throwing an error if this proves
+        // problematic.

Review comment:
       Fix this: "without updating / client" and "if this proves / problematic" are split mid-sentence. Rewrap.
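       A possible rewrap (also smoothing "Possible JSON faceting schema"):

        // We don't recognize the key. Possibly the JSON faceting schema has changed
        // without the client being updated. Silently ignore for now, though we may want
        // to consider throwing an error if this proves problematic.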

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java
##########
@@ -356,13 +416,15 @@ private static int copyDataDown(SolrZkClient zkClient, String zkPath, Path file)
     return 0;
   }
 
-  public static void downloadFromZK(SolrZkClient zkClient, String zkPath, Path file) throws IOException {
+  public static void downloadFromZK(SolrZkClient zkClient, String zkPath, Path file)
+      throws IOException {
     try {
       List<String> children = zkClient.getChildren(zkPath, null, true);
       // If it has no children, it's a leaf node, write the associated data from the ZNode.
       // Otherwise, continue recursing, but write the associated data to a special file if any
       if (children.size() == 0) {
-        // If we didn't copy data down, then we also didn't create the file. But we still need a marker on the local
+        // If we didn't copy data down, then we also didn't create the file. But we still need a
+        // marker on the local
         // disk so create an empty file.

Review comment:
       Fix this: "need a / marker on the local / disk" is split mid-phrase. Rewrap.
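       A possible rewrap:

        // If we didn't copy data down, then we also didn't create the file. But we still
        // need a marker on the local disk, so create an empty file.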

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java
##########
@@ -179,7 +199,9 @@ private static String normalizeDest(String srcName, String dstName, boolean srcI
     String dstSeparator = (dstIsZk) ? "/" : File.separator;
     String srcSeparator = (srcIsZk) ? "/" : File.separator;
 
-    if (dstName.endsWith(dstSeparator)) { // Dest is a directory or non-leaf znode, append last element of the src path.
+    if (dstName.endsWith(
+        dstSeparator)) { // Dest is a directory or non-leaf znode, append last element of the src
+      // path.

Review comment:
       Fix this: the condition is split across lines and the trailing comment dangles "// path." on its own. Hoist the comment above the if.
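       E.g.:

    // Dest is a directory or non-leaf znode; append the last element of the src path.
    if (dstName.endsWith(dstSeparator)) {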

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkDynamicConfig.java
##########
@@ -23,60 +23,75 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.StringUtils;
 
 /**
- * Class holding the dynamic config of a Zookeeper ensemble as fetched from znode <code>/zookeeper/config</code>.
+ * Class holding the dynamic config of a Zookeeper ensemble as fetched from znode <code>
+ * /zookeeper/config</code>.
  */
 public class ZkDynamicConfig {
-  // server.<positive id> = <address1>:<port1>:<port2>[:role][|<address2>:<port2>...];[<client port address>:]<client port>
-  // TODO: Add support for handling multiple address specs per server line, how we simply ignore all but the first
-  public static final Pattern linePattern = Pattern.compile("server\\.(?<serverId>\\d+) ?= ?(?<address>[^:]+):(?<leaderPort>\\d+):(?<leaderElectionPort>\\d+)(:(?<role>.*?))?(\\|.*?)?(;((?<clientPortAddress>.*?):)?(?<clientPort>\\d+))?");
+  // server.<positive id> = <address1>:<port1>:<port2>[:role][|<address2>:<port2>...];[<client port
+  // address>:]<client port>
+  // TODO: Add support for handling multiple address specs per server line, how we simply ignore all
+  // but the first

Review comment:
       Fix this? The spec line may be inherently long, but "[<client port / address>:]" is now split mid-token; a manual break would read better.
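       One manual break that keeps the tokens intact ("how we simply ignore" also reads like a typo for "for now we simply ignore"):

  // server.<positive id> = <address1>:<port1>:<port2>[:role]
  //     [|<address2>:<port2>...];[<client port address>:]<client port>
  // TODO: Add support for handling multiple address specs per server line; for now we
  // simply ignore all but the first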

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java
##########
@@ -204,58 +227,71 @@ public static void moveZnode(SolrZkClient zkClient, String src, String dst) thro
     }
 
     // Insure all source znodes are present in dest before deleting the source.
-    // throws error if not all there so the source is left intact. Throws error if source and dest don't match.
+    // throws error if not all there so the source is left intact. Throws error if source and dest
+    // don't match.

Review comment:
       Fix this: "source and dest / don't match" is split mid-sentence. Rewrap (and "Insure" should be "Ensure").
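       A possible rewrap:

    // Ensure all source znodes are present in dest before deleting the source. Throws an
    // error if not all are there (so the source is left intact), or if source and dest
    // don't match.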

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
##########
@@ -1055,14 +1129,19 @@ private void loadClusterProperties() {
             vcp.cacheUntilNs = untilNs;
             watchedCollectionProps.put(collection, vcp);
           } else {
-            // we're synchronized on watchedCollectionProps and we can only get here if we have found an expired
-            // vprops above, so it is safe to remove the cached value and let the GC free up some mem a bit sooner.
+            // we're synchronized on watchedCollectionProps and we can only get here if we have
+            // found an expired
+            // vprops above, so it is safe to remove the cached value and let the GC free up some
+            // mem a bit sooner.

Review comment:
       Fix this: "found an expired / vprops" and "free up some / mem" are split mid-sentence. Rewrap.
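       A possible rewrap:

            // We're synchronized on watchedCollectionProps and we can only get here if we
            // have found an expired vprops above, so it is safe to remove the cached value
            // and let the GC free up some mem a bit sooner.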

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
##########
@@ -1102,13 +1182,17 @@ private VersionedCollectionProps fetchCollectionProperties(String collection, Wa
         Map<String, String> props = (Map<String, String>) Utils.fromJSON(data);
         return new VersionedCollectionProps(stat.getVersion(), props);
       } catch (ClassCastException e) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to parse collection properties for collection " + collection, e);
+        throw new SolrException(
+            ErrorCode.SERVER_ERROR,
+            "Unable to parse collection properties for collection " + collection,
+            e);
       } catch (KeeperException.NoNodeException e) {
         if (watcher != null) {
           // Leave an exists watch in place in case a collectionprops.json is created later.
           Stat exists = zkClient.exists(znodePath, watcher, true);
           if (exists != null) {
-            // Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists.
+            // Rare race condition, we tried to fetch the data and couldn't find it, then we found
+            // it exists.
             // Loop and try again.

Review comment:
       Fix this: "we found / it exists" is split mid-sentence. Rewrap.
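       A possible rewrap:

            // Rare race condition: we tried to fetch the data and couldn't find it, then
            // we found it exists. Loop and try again.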

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
##########
@@ -1449,7 +1561,8 @@ private DocCollection fetchCollectionState(String coll, Watcher watcher) throws
           // Leave an exists watch in place in case a state.json is created later.
           Stat exists = zkClient.exists(collectionPath, watcher, true);
           if (exists != null) {
-            // Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists.
+            // Rare race condition, we tried to fetch the data and couldn't find it, then we found
+            // it exists.
             // Loop and try again.

Review comment:
       Fix this: same "we found / it exists" split as in fetchCollectionProperties; rewrap the same way.

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
##########
@@ -1315,30 +1410,33 @@ void refreshAndWatch(boolean notifyWatchers) {
               watchedCollectionProps.remove(coll);
 
               // core ref counting not relevant here, don't need canRemove(), we just sent
-              // a notification of an empty set of properties, no reason to watch what doesn't exist.
+              // a notification of an empty set of properties, no reason to watch what doesn't
+              // exist.

Review comment:
       Fix this: "what doesn't / exist" is split mid-phrase. Rewrap.
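       A possible rewrap:

              // core ref counting not relevant here, don't need canRemove(); we just sent
              // a notification of an empty set of properties, no reason to watch what
              // doesn't exist.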

##########
File path: solr/solrj/src/java/org/apache/solr/common/params/GroupParams.java
##########
@@ -32,23 +30,27 @@
   /** the offset for the doclist of each group */
   public static final String GROUP_OFFSET = GROUP + ".offset";
 
-  /** treat the first group result as the main result.  true/false */
+  /** treat the first group result as the main result. true/false */
   public static final String GROUP_MAIN = GROUP + ".main";
 
-  /** treat the first group result as the main result.  true/false */
+  /** treat the first group result as the main result. true/false */
   public static final String GROUP_FORMAT = GROUP + ".format";
 
   /**
-   * Whether to cache the first pass search (doc ids and score) for the second pass search.
-   * Also defines the maximum size of the group cache relative to maxdoc in a percentage.
-   * Values can be a positive integer, from 0 till 100. A value of 0 will disable the group cache.
-   * The default is 0.*/
+   * Whether to cache the first pass search (doc ids and score) for the second pass search. Also
+   * defines the maximum size of the group cache relative to maxdoc in a percentage. Values can be a
+   * positive integer, from 0 till 100. A value of 0 will disable the group cache. The default is 0.
+   */
   public static final String GROUP_CACHE_PERCENTAGE = GROUP + ".cache.percent";
 
-  // Note: Since you can supply multiple fields to group on, but only have a facets for the whole result. It only makes
+  // Note: Since you can supply multiple fields to group on, but only have a facets for the whole
+  // result. It only makes
   // sense to me to support these parameters for the first group.

Review comment:
       Fix this: the note is split mid-sentence ("whole / result. It only makes"), and it was already a run-on; merge the two fragments into one sentence.
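       A possible merge (also dropping the stray "a" in "have a facets"):

  // Note: since you can supply multiple fields to group on, but only have facets for the
  // whole result, it only makes sense to support these parameters for the first group.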

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
##########
@@ -1315,30 +1410,33 @@ void refreshAndWatch(boolean notifyWatchers) {
               watchedCollectionProps.remove(coll);
 
               // core ref counting not relevant here, don't need canRemove(), we just sent
-              // a notification of an empty set of properties, no reason to watch what doesn't exist.
+              // a notification of an empty set of properties, no reason to watch what doesn't
+              // exist.
               collectionPropsObservers.remove(coll);
 
-              // This is the one time we know it's safe to throw this out. We just failed to set the watch
-              // due to an NoNodeException, so it isn't held by ZK and can't re-set itself due to an update.
+              // This is the one time we know it's safe to throw this out. We just failed to set the
+              // watch
+              // due to an NoNodeException, so it isn't held by ZK and can't re-set itself due to an
+              // update.

Review comment:
       Fix this: "set the / watch" and "due to an / update" are split mid-sentence. Rewrap.
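       A possible rewrap (also "an NoNodeException" should be "a NoNodeException"):

              // This is the one time we know it's safe to throw this out. We just failed
              // to set the watch due to a NoNodeException, so it isn't held by ZK and
              // can't re-set itself due to an update.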

##########
File path: solr/solrj/src/java/org/apache/solr/common/params/FacetParams.java
##########
@@ -147,324 +133,330 @@
   public static final String FACET_SORT_INDEX = "index";
   public static final String FACET_SORT_INDEX_LEGACY = "false";
 
-  /**
-   * Only return constraints of a facet field with the given prefix.
-   */
+  /** Only return constraints of a facet field with the given prefix. */
   public static final String FACET_PREFIX = FACET + ".prefix";
 
-  /**
-   * Only return constraints of a facet field containing the given string.
-   */
+  /** Only return constraints of a facet field containing the given string. */
   public static final String FACET_CONTAINS = FACET + ".contains";
 
-  /**
-   * Only return constraints of a facet field containing the given string.
-   */
+  /** Only return constraints of a facet field containing the given string. */
   public static final String FACET_MATCHES = FACET + ".matches";
 
-  /**
-   * If using facet contains, ignore case when comparing values.
-   */
+  /** If using facet contains, ignore case when comparing values. */
   public static final String FACET_CONTAINS_IGNORE_CASE = FACET_CONTAINS + ".ignoreCase";
 
-  /**
-   * Only return constraints of a facet field excluding the given string.
-   */
+  /** Only return constraints of a facet field excluding the given string. */
   public static final String FACET_EXCLUDETERMS = FACET + ".excludeTerms";
 
- /**
-   * When faceting by enumerating the terms in a field,
-   * only use the filterCache for terms with a df &gt;= to this parameter.
+  /**
+   * When faceting by enumerating the terms in a field, only use the filterCache for terms with a df
+   * &gt;= to this parameter.
    */
   public static final String FACET_ENUM_CACHE_MINDF = FACET + ".enum.cache.minDf";
-  
+
   /**
-   *  A boolean parameter that caps the facet counts at 1. 
-   *  With this set, a returned count will only be 0 or 1. 
-   *  For apps that don't need the count, this should be an optimization
+   * A boolean parameter that caps the facet counts at 1. With this set, a returned count will only
+   * be 0 or 1. For apps that don't need the count, this should be an optimization
    */
-  public static final String FACET_EXISTS = FACET+".exists";
-  
+  public static final String FACET_EXISTS = FACET + ".exists";
+
   /**
-   * Any field whose terms the user wants to enumerate over for
-   * Facet Contraint Counts (multi-value)
+   * Any field whose terms the user wants to enumerate over for Facet Contraint Counts (multi-value)
    */
   public static final String FACET_DATE = FACET + ".date";
   /**
-   * Date string indicating the starting point for a date facet range.
-   * Can be overridden on a per field basis.
+   * Date string indicating the starting point for a date facet range. Can be overridden on a per
+   * field basis.
    */
   public static final String FACET_DATE_START = FACET_DATE + ".start";
   /**
-   * Date string indicating the ending point for a date facet range.
-   * Can be overridden on a per field basis.
+   * Date string indicating the ending point for a date facet range. Can be overridden on a per
+   * field basis.
    */
   public static final String FACET_DATE_END = FACET_DATE + ".end";
   /**
-   * Date Math string indicating the interval of sub-ranges for a date
-   * facet range.
-   * Can be overridden on a per field basis.
+   * Date Math string indicating the interval of sub-ranges for a date facet range. Can be
+   * overridden on a per field basis.
    */
   public static final String FACET_DATE_GAP = FACET_DATE + ".gap";
   /**
-   * Boolean indicating how counts should be computed if the range
-   * between 'start' and 'end' is not evenly divisible by 'gap'.  If
-   * this value is true, then all counts of ranges involving the 'end'
-   * point will use the exact endpoint specified -- this includes the
-   * 'between' and 'after' counts as well as the last range computed
-   * using the 'gap'.  If the value is false, then 'gap' is used to
-   * compute the effective endpoint closest to the 'end' param which
-   * results in the range between 'start' and 'end' being evenly
-   * divisible by 'gap'.
-   * The default is false.
-   * Can be overridden on a per field basis.
+   * Boolean indicating how counts should be computed if the range between 'start' and 'end' is not
+   * evenly divisible by 'gap'. If this value is true, then all counts of ranges involving the 'end'
+   * point will use the exact endpoint specified -- this includes the 'between' and 'after' counts
+   * as well as the last range computed using the 'gap'. If the value is false, then 'gap' is used
+   * to compute the effective endpoint closest to the 'end' param which results in the range between
+   * 'start' and 'end' being evenly divisible by 'gap'. The default is false. Can be overridden on a
+   * per field basis.

Review comment:
       Fix this: the reflow runs several distinct sentences together; consider separating the default/override notes into their own paragraph.
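       Assuming the complaint is the run-together sentences, one option is a paragraph break before the trailing notes:

   * ... which results in the range between 'start' and 'end' being evenly divisible by
   * 'gap'.
   *
   * <p>The default is false. Can be overridden on a per field basis.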

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
##########
@@ -2030,32 +2153,42 @@ public void applyModificationAndExportToZk(UnaryOperator<Aliases> op) {
       final long deadlineNanos = System.nanoTime() + TimeUnit.SECONDS.toNanos(30);
       // note: triesLeft tuning is based on ConcurrentCreateRoutedAliasTest
       for (int triesLeft = 30; triesLeft > 0; triesLeft--) {
-        // we could synchronize on "this" but there doesn't seem to be a point; we have a retry loop.
+        // we could synchronize on "this" but there doesn't seem to be a point; we have a retry
+        // loop.
         Aliases curAliases = getAliases();
         Aliases modAliases = op.apply(curAliases);
         final byte[] modAliasesJson = modAliases.toJSON();
         if (curAliases == modAliases) {
-          log.debug("Current aliases has the desired modification; no further ZK interaction needed.");
+          log.debug(
+              "Current aliases has the desired modification; no further ZK interaction needed.");
           return;
         }
 
         try {
           try {
-            final Stat stat = getZkClient().setData(ALIASES, modAliasesJson, curAliases.getZNodeVersion(), true);
+            final Stat stat =
+                getZkClient().setData(ALIASES, modAliasesJson, curAliases.getZNodeVersion(), true);
             setIfNewer(Aliases.fromJSON(modAliasesJson, stat.getVersion()));
             return;
           } catch (KeeperException.BadVersionException e) {
             log.debug("{}", e, e);
-            log.warn("Couldn't save aliases due to race with another modification; will update and retry until timeout");
-            // considered a backoff here, but we really do want to compete strongly since the normal case is
-            // that we will do one update and succeed. This is left as a hot loop for limited tries intentionally.
-            // More failures than that here probably indicate a bug or a very strange high write frequency usage for
-            // aliases.json, timeouts mean zk is being very slow to respond, or this node is being crushed
+            log.warn(
+                "Couldn't save aliases due to race with another modification; will update and retry until timeout");
+            // considered a backoff here, but we really do want to compete strongly since the normal
+            // case is
+            // that we will do one update and succeed. This is left as a hot loop for limited tries
+            // intentionally.
+            // More failures than that here probably indicate a bug or a very strange high write
+            // frequency usage for
+            // aliases.json, timeouts mean zk is being very slow to respond, or this node is being
+            // crushed
             // by other processing and just can't find any cpu cycles at all.

Review comment:
       Fix this: "the normal / case is", "limited tries / intentionally", "high write / frequency usage for", and "being / crushed" are all split mid-sentence. Rewrap the block.
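       A possible rewrap:

            // Considered a backoff here, but we really do want to compete strongly since
            // the normal case is that we will do one update and succeed. This is left as a
            // hot loop for limited tries intentionally. More failures than that here
            // probably indicate a bug or a very strange high write frequency usage for
            // aliases.json; timeouts mean zk is being very slow to respond, or this node
            // is being crushed by other processing and just can't find any cpu cycles at
            // all.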

##########
File path: solr/solrj/src/java/org/apache/solr/common/params/FacetParams.java
##########
@@ -147,324 +133,330 @@
   public static final String FACET_SORT_INDEX = "index";
   public static final String FACET_SORT_INDEX_LEGACY = "false";
 
-  /**
-   * Only return constraints of a facet field with the given prefix.
-   */
+  /** Only return constraints of a facet field with the given prefix. */
   public static final String FACET_PREFIX = FACET + ".prefix";
 
-  /**
-   * Only return constraints of a facet field containing the given string.
-   */
+  /** Only return constraints of a facet field containing the given string. */
   public static final String FACET_CONTAINS = FACET + ".contains";
 
-  /**
-   * Only return constraints of a facet field containing the given string.
-   */
+  /** Only return constraints of a facet field containing the given string. */
   public static final String FACET_MATCHES = FACET + ".matches";
 
-  /**
-   * If using facet contains, ignore case when comparing values.
-   */
+  /** If using facet contains, ignore case when comparing values. */
   public static final String FACET_CONTAINS_IGNORE_CASE = FACET_CONTAINS + ".ignoreCase";
 
-  /**
-   * Only return constraints of a facet field excluding the given string.
-   */
+  /** Only return constraints of a facet field excluding the given string. */
   public static final String FACET_EXCLUDETERMS = FACET + ".excludeTerms";
 
- /**
-   * When faceting by enumerating the terms in a field,
-   * only use the filterCache for terms with a df &gt;= to this parameter.
+  /**
+   * When faceting by enumerating the terms in a field, only use the filterCache for terms with a df
+   * &gt;= to this parameter.
    */
   public static final String FACET_ENUM_CACHE_MINDF = FACET + ".enum.cache.minDf";
-  
+
   /**
-   *  A boolean parameter that caps the facet counts at 1. 
-   *  With this set, a returned count will only be 0 or 1. 
-   *  For apps that don't need the count, this should be an optimization
+   * A boolean parameter that caps the facet counts at 1. With this set, a returned count will only
+   * be 0 or 1. For apps that don't need the count, this should be an optimization
    */
-  public static final String FACET_EXISTS = FACET+".exists";
-  
+  public static final String FACET_EXISTS = FACET + ".exists";
+
   /**
-   * Any field whose terms the user wants to enumerate over for
-   * Facet Contraint Counts (multi-value)
+   * Any field whose terms the user wants to enumerate over for Facet Contraint Counts (multi-value)
    */
   public static final String FACET_DATE = FACET + ".date";
   /**
-   * Date string indicating the starting point for a date facet range.
-   * Can be overridden on a per field basis.
+   * Date string indicating the starting point for a date facet range. Can be overridden on a per
+   * field basis.
    */
   public static final String FACET_DATE_START = FACET_DATE + ".start";
   /**
-   * Date string indicating the ending point for a date facet range.
-   * Can be overridden on a per field basis.
+   * Date string indicating the ending point for a date facet range. Can be overridden on a per
+   * field basis.
    */
   public static final String FACET_DATE_END = FACET_DATE + ".end";
   /**
-   * Date Math string indicating the interval of sub-ranges for a date
-   * facet range.
-   * Can be overridden on a per field basis.
+   * Date Math string indicating the interval of sub-ranges for a date facet range. Can be
+   * overridden on a per field basis.
    */
   public static final String FACET_DATE_GAP = FACET_DATE + ".gap";
   /**
-   * Boolean indicating how counts should be computed if the range
-   * between 'start' and 'end' is not evenly divisible by 'gap'.  If
-   * this value is true, then all counts of ranges involving the 'end'
-   * point will use the exact endpoint specified -- this includes the
-   * 'between' and 'after' counts as well as the last range computed
-   * using the 'gap'.  If the value is false, then 'gap' is used to
-   * compute the effective endpoint closest to the 'end' param which
-   * results in the range between 'start' and 'end' being evenly
-   * divisible by 'gap'.
-   * The default is false.
-   * Can be overridden on a per field basis.
+   * Boolean indicating how counts should be computed if the range between 'start' and 'end' is not
+   * evenly divisible by 'gap'. If this value is true, then all counts of ranges involving the 'end'
+   * point will use the exact endpoint specified -- this includes the 'between' and 'after' counts
+   * as well as the last range computed using the 'gap'. If the value is false, then 'gap' is used
+   * to compute the effective endpoint closest to the 'end' param which results in the range between
+   * 'start' and 'end' being evenly divisible by 'gap'. The default is false. Can be overridden on a
+   * per field basis.
    */
   public static final String FACET_DATE_HARD_END = FACET_DATE + ".hardend";
   /**
-   * String indicating what "other" ranges should be computed for a
-   * date facet range (multi-value).
+   * String indicating what "other" ranges should be computed for a date facet range (multi-value).
    * Can be overridden on a per field basis.
+   *
    * @see FacetRangeOther
    */
   public static final String FACET_DATE_OTHER = FACET_DATE + ".other";
 
   /**
-   * <p>
-   * Multivalued string indicating what rules should be applied to determine 
-   * when the ranges generated for date faceting should be inclusive or 
-   * exclusive of their end points.
-   * </p>
-   * <p>
-   * The default value if none are specified is: [lower,upper,edge] <i>(NOTE: This is different then FACET_RANGE_INCLUDE)</i>
-   * </p>
-   * <p>
-   * Can be overridden on a per field basis.
-   * </p>
+   * Multivalued string indicating what rules should be applied to determine when the ranges
+   * generated for date faceting should be inclusive or exclusive of their end points.
+   *
+   * <p>The default value if none are specified is: [lower,upper,edge] <i>(NOTE: This is different
+   * then FACET_RANGE_INCLUDE)</i>
+   *
+   * <p>Can be overridden on a per field basis.
+   *
    * @see FacetRangeInclude
    * @see #FACET_RANGE_INCLUDE
    */
   public static final String FACET_DATE_INCLUDE = FACET_DATE + ".include";
 
   /**
-   * Any numerical field whose terms the user wants to enumerate over
-   * Facet Contraint Counts for selected ranges.
+   * Any numerical field whose terms the user wants to enumerate over Facet Contraint Counts for
+   * selected ranges.
    */
   public static final String FACET_RANGE = FACET + ".range";
   /**
-   * Number indicating the starting point for a numerical range facet.
-   * Can be overridden on a per field basis.
+   * Number indicating the starting point for a numerical range facet. Can be overridden on a per
+   * field basis.
    */
   public static final String FACET_RANGE_START = FACET_RANGE + ".start";
   /**
-   * Number indicating the ending point for a numerical range facet.
-   * Can be overridden on a per field basis.
+   * Number indicating the ending point for a numerical range facet. Can be overridden on a per
+   * field basis.
    */
   public static final String FACET_RANGE_END = FACET_RANGE + ".end";
   /**
-   * Number indicating the interval of sub-ranges for a numerical
-   * facet range.
-   * Can be overridden on a per field basis.
+   * Number indicating the interval of sub-ranges for a numerical facet range. Can be overridden on
+   * a per field basis.
    */
   public static final String FACET_RANGE_GAP = FACET_RANGE + ".gap";
   /**
-   * Boolean indicating how counts should be computed if the range
-   * between 'start' and 'end' is not evenly divisible by 'gap'.  If
-   * this value is true, then all counts of ranges involving the 'end'
-   * point will use the exact endpoint specified -- this includes the
-   * 'between' and 'after' counts as well as the last range computed
-   * using the 'gap'.  If the value is false, then 'gap' is used to
-   * compute the effective endpoint closest to the 'end' param which
-   * results in the range between 'start' and 'end' being evenly
-   * divisible by 'gap'.
-   * The default is false.
-   * Can be overridden on a per field basis.
+   * Boolean indicating how counts should be computed if the range between 'start' and 'end' is not
+   * evenly divisible by 'gap'. If this value is true, then all counts of ranges involving the 'end'
+   * point will use the exact endpoint specified -- this includes the 'between' and 'after' counts
+   * as well as the last range computed using the 'gap'. If the value is false, then 'gap' is used
+   * to compute the effective endpoint closest to the 'end' param which results in the range between
+   * 'start' and 'end' being evenly divisible by 'gap'. The default is false. Can be overridden on a
+   * per field basis.

Review comment:
       Fix this: same run-together javadoc as the facet.date.hardend block above; fix it the same way.

##########
File path: solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java
##########
@@ -120,62 +125,64 @@ public String toString() {
     };
   }
 
-  /** A {@link Stream} view over {@link #iterator()} -- for convenience.  Treat it as read-only. */
+  /** A {@link Stream} view over {@link #iterator()} -- for convenience. Treat it as read-only. */
   public Stream<Map.Entry<String, String[]>> stream() {
     return StreamSupport.stream(spliterator(), false);
   }
-  // Do we add Map.forEach equivalent too?  But it eager-fetches the value, and Iterable<Map.Entry> allows the user
+  // Do we add Map.forEach equivalent too?  But it eager-fetches the value, and Iterable<Map.Entry>
+  // allows the user
   //  to only get the value when needed.

Review comment:
       Fix this: "Iterable<Map.Entry> / allows the user / to only get" is split mid-sentence. Rewrap.
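       A possible rewrap:

  // Do we add a Map.forEach equivalent too? But it eager-fetches the value, and
  // Iterable<Map.Entry> allows the user to only get the value when needed.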

##########
File path: solr/solrj/src/java/org/apache/solr/common/params/SpatialParams.java
##########
@@ -15,25 +15,16 @@
  * limitations under the License.
  */
 package org.apache.solr.common.params;
-/**
- *
- *
- **/
+/** */
 public interface SpatialParams {
   public static final String POINT = "pt";
   public static final String DISTANCE = "d";
-  public static final String FIELD = "sfield";  // the field that contains the points we are measuring from "pt"
-  /**
-   * km - kilometers
-   * mi - miles
-   */
+  public static final String FIELD =
+      "sfield"; // the field that contains the points we are measuring from "pt"
+  /** km - kilometers mi - miles */
   public static final String UNITS = "units";

Review comment:
       Fix this: the empty "/** */" on the interface should be removed, the sfield comment now spills onto a continuation line, and the collapsed "km - kilometers mi - miles" javadoc lost its line break.
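       One option (rewording the units javadoc, since the one-line collapse lost the original list structure):

  // The field that contains the points we are measuring from "pt".
  public static final String FIELD = "sfield";

  /** Distance units: km = kilometers, mi = miles. */
  public static final String UNITS = "units";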

##########
File path: solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java
##########
@@ -584,13 +569,17 @@ public String toQueryString() {
   }
 
   /**
-   * Generates a local-params string of the form <pre>{! name=value name2=value2}</pre>.
+   * Generates a local-params string of the form
+   *
+   * <pre>{! name=value name2=value2}</pre>
+   *
+   * .

Review comment:
       Fix this: the javadoc now ends with a dangling "." paragraph after the <pre> block.
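       One option is inline <code> instead of <pre>, which keeps the period attached:

  /** Generates a local-params string of the form <code>{! name=value name2=value2}</code>. */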

##########
File path: solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
##########
@@ -211,14 +210,16 @@
 
   private Set<ClusterPropertiesListener> clusterPropertiesListeners = ConcurrentHashMap.newKeySet();
 
-  /**
-   * Used to submit notifications to Collection Properties watchers in order
-   **/
-  private final ExecutorService collectionPropsNotifications = ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrNamedThreadFactory("collectionPropsNotifications"));
+  /** Used to submit notifications to Collection Properties watchers in order */
+  private final ExecutorService collectionPropsNotifications =
+      ExecutorUtil.newMDCAwareSingleThreadExecutor(
+          new SolrNamedThreadFactory("collectionPropsNotifications"));
 
-  private static final long LAZY_CACHE_TIME = TimeUnit.NANOSECONDS.convert(STATE_UPDATE_DELAY, TimeUnit.MILLISECONDS);
+  private static final long LAZY_CACHE_TIME =
+      TimeUnit.NANOSECONDS.convert(STATE_UPDATE_DELAY, TimeUnit.MILLISECONDS);
 
-  private Future<?> collectionPropsCacheCleaner; // only kept to identify if the cleaner has already been started.
+  private Future<?>
+      collectionPropsCacheCleaner; // only kept to identify if the cleaner has already been started.

Review comment:
       Fix this: the declaration is split across two lines to make room for the trailing comment. Hoist the comment above the field.
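       E.g.:

  // Only kept to identify if the cleaner has already been started.
  private Future<?> collectionPropsCacheCleaner;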




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscribe@solr.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


