Posted to issues@lucene.apache.org by GitBox <gi...@apache.org> on 2020/05/25 00:28:05 UTC

[GitHub] [lucene-solr] dsmiley commented on a change in pull request #1535: SOLR-14474: Fix remaining auxilliary class warnings in Solr

dsmiley commented on a change in pull request #1535:
URL: https://github.com/apache/lucene-solr/pull/1535#discussion_r429689783



##########
File path: solr/core/src/java/org/apache/solr/search/facet/FacetParser.java
##########
@@ -16,18 +16,22 @@
  */
 package org.apache.solr.search.facet;
 
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.Optional;
+
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.search.FunctionQParser;

Review comment:
       Strange; I wonder why you (or your IDE) re-ordered FunctionQParser to a spot that is no longer alphabetical? I don't care much about this, so you can leave it, but it's curious.

##########
File path: solr/core/src/java/org/apache/solr/search/facet/FacetParser.java
##########
@@ -134,9 +138,9 @@ public Object parseFacetOrStat(String key, String type, Object args) throws Synt
     switch (type) {
       case "field":
       case "terms":
-        return new FacetRequest.FacetFieldParser(this, key).parse(args);
+        return new FacetFieldParser(this, key).parse(args);

Review comment:
       Nice; seeing this simple difference makes it clear to me we're now organizing it correctly.
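
For context, a rough sketch of the organization that makes the unqualified reference work, assuming FacetFieldParser now sits as a static nested class of FacetParser (the same pattern this PR applies to GraphTermsCollector below). The class names come from the diff; the constructor and parse() signatures here are simplified placeholders:

```java
// Simplified sketch; the real constructor and parse() signatures differ.
class FacetParser {

  Object parseFacetOrStat(String key, String type, Object args) {
    switch (type) {
      case "field":
      case "terms":
        // previously written as: new FacetRequest.FacetFieldParser(this, key).parse(args)
        return new FacetFieldParser(this, key).parse(args);
      default:
        throw new IllegalArgumentException("unknown facet type: " + type);
    }
  }

  // Living inside FacetParser, the class no longer needs a FacetRequest qualifier.
  static class FacetFieldParser extends FacetParser {
    private final FacetParser parent;
    private final String key;

    FacetFieldParser(FacetParser parent, String key) {
      this.parent = parent;
      this.key = key;
    }

    Object parse(Object args) {
      return args; // placeholder for the real request-building logic
    }
  }
}
```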

##########
File path: solr/core/src/java/org/apache/solr/search/join/GraphEdgeCollector.java
##########
@@ -74,8 +74,10 @@ public void setCollectDocs(FixedBitSet target) {
   }
 
   // the number of docs visited
-  public int getNumHits() { return numHits; }
-  
+  public int getNumHits() {

Review comment:
       I admit, I'm a fan of simple one-liners like it was before :-)
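
For reference, the two formattings being weighed, wrapped in a tiny stand-alone class so the snippet compiles (the wrapper class and second method name are made up; the getter and field come from the hunk above):

```java
class GetterStyleSketch { // hypothetical wrapper, only here so the snippet is self-contained
  private int numHits;

  // the compact one-liner the comment is advocating for
  public int getNumHits() { return numHits; }

  // the expanded form the change introduces
  public int getNumHitsExpanded() {
    return numHits;
  }
}
```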

##########
File path: solr/core/src/java/org/apache/solr/update/TransactionLog.java
##########
@@ -222,7 +222,8 @@ public boolean writePrimitive(Object val) throws IOException {
   }
 
   // for subclasses
-  protected TransactionLog() {}
+  protected TransactionLog() {

Review comment:
       I guess I failed to convince you :-)

##########
File path: solr/core/src/java/org/apache/solr/search/join/GraphEdgeCollector.java
##########
@@ -115,87 +117,90 @@ public void doSetNextReader(LeafReaderContext context) throws IOException {
   public ScoreMode scoreMode() {
     return ScoreMode.COMPLETE_NO_SCORES;
   }
-  
-}
 
-class GraphTermsCollector extends GraphEdgeCollector {
-  // all the collected terms
-  private BytesRefHash collectorTerms;
-  private SortedSetDocValues docTermOrds;
 
+  static class GraphTermsCollector extends GraphEdgeCollector {
+    // all the collected terms
+    private BytesRefHash collectorTerms;
+    private SortedSetDocValues docTermOrds;
 
-  GraphTermsCollector(SchemaField collectField, DocSet skipSet, DocSet leafNodes) {
-    super(collectField, skipSet, leafNodes);
-    this.collectorTerms =  new BytesRefHash();
-  }
 
-  @Override
-  public void doSetNextReader(LeafReaderContext context) throws IOException {
-    super.doSetNextReader(context);
-    // Grab the updated doc values.
-    docTermOrds = DocValues.getSortedSet(context.reader(), collectField.getName());
-  }
+    GraphTermsCollector(SchemaField collectField, DocSet skipSet, DocSet leafNodes) {
+      super(collectField, skipSet, leafNodes);
+      this.collectorTerms = new BytesRefHash();
+    }
 
-  @Override
-  void addEdgeIdsToResult(int doc) throws IOException {
-    // set the doc to pull the edges ids for.
-    if (doc > docTermOrds.docID()) {
-      docTermOrds.advance(doc);
+    @Override
+    public void doSetNextReader(LeafReaderContext context) throws IOException {
+      super.doSetNextReader(context);
+      // Grab the updated doc values.
+      docTermOrds = DocValues.getSortedSet(context.reader(), collectField.getName());
     }
-    if (doc == docTermOrds.docID()) {
-      BytesRef edgeValue = new BytesRef();
-      long ord;
-      while ((ord = docTermOrds.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
-        edgeValue = docTermOrds.lookupOrd(ord);
-        // add the edge id to the collector terms.
-        collectorTerms.add(edgeValue);
+
+    @Override
+    void addEdgeIdsToResult(int doc) throws IOException {
+      // set the doc to pull the edges ids for.
+      if (doc > docTermOrds.docID()) {
+        docTermOrds.advance(doc);
+      }
+      if (doc == docTermOrds.docID()) {
+        BytesRef edgeValue = new BytesRef();
+        long ord;
+        while ((ord = docTermOrds.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
+          edgeValue = docTermOrds.lookupOrd(ord);
+          // add the edge id to the collector terms.
+          collectorTerms.add(edgeValue);
+        }
       }
     }
-  }
 
-  @Override
-  public Query getResultQuery(SchemaField matchField, boolean useAutomaton) {
-    if (collectorTerms == null || collectorTerms.size() == 0) {
-      // return null if there are no terms (edges) to traverse.
-      return null;
-    } else {
-      // Create a query
-      Query q = null;
-
-      // TODO: see if we should dynamically select this based on the frontier size.
-      if (useAutomaton) {
-        // build an automaton based query for the frontier.
-        Automaton autn = buildAutomaton(collectorTerms);
-        AutomatonQuery autnQuery = new AutomatonQuery(new Term(matchField.getName()), autn);
-        q = autnQuery;
+    @Override
+    public Query getResultQuery(SchemaField matchField, boolean useAutomaton) {
+      if (collectorTerms == null || collectorTerms.size() == 0) {
+        // return null if there are no terms (edges) to traverse.
+        return null;
       } else {
-        List<BytesRef> termList = new ArrayList<>(collectorTerms.size());
-        for (int i = 0 ; i < collectorTerms.size(); i++) {
-          BytesRef ref = new BytesRef();
-          collectorTerms.get(i, ref);
-          termList.add(ref);
+        // Create a query
+        Query q = null;
+
+        // TODO: see if we should dynamically select this based on the frontier size.
+        if (useAutomaton) {
+          // build an automaton based query for the frontier.
+          Automaton autn = buildAutomaton(collectorTerms);
+          AutomatonQuery autnQuery = new AutomatonQuery(new Term(matchField.getName()), autn);
+          q = autnQuery;
+        } else {
+          List<BytesRef> termList = new ArrayList<>(collectorTerms.size());
+          for (int i = 0; i < collectorTerms.size(); i++) {
+            BytesRef ref = new BytesRef();
+            collectorTerms.get(i, ref);
+            termList.add(ref);
+          }
+          q = (matchField.hasDocValues() && !matchField.indexed())
+                  ? new DocValuesTermsQuery(matchField.getName(), termList)
+                  : new TermInSetQuery(matchField.getName(), termList);
         }
-        q = (matchField.hasDocValues() && !matchField.indexed())
-            ? new DocValuesTermsQuery(matchField.getName(), termList)
-            : new TermInSetQuery(matchField.getName(), termList);
-      }
 
-      return q;
+        return q;
+      }
     }
-  }
 
 
-  /** Build an automaton to represent the frontier query */
-  private Automaton buildAutomaton(BytesRefHash termBytesHash) {
-    // need top pass a sorted set of terms to the autn builder (maybe a better way to avoid this?)
-    final TreeSet<BytesRef> terms = new TreeSet<BytesRef>();
-    for (int i = 0 ; i < termBytesHash.size(); i++) {
-      BytesRef ref = new BytesRef();
-      termBytesHash.get(i, ref);
-      terms.add(ref);
+    /**

Review comment:
       Again, one-liner javadocs are nice & sweet :-)  (I configure IntelliJ to leave these be)

##########
File path: solr/core/src/java/org/apache/solr/search/facet/FacetRequest.java
##########
@@ -302,7 +298,7 @@ public Query createDomainQuery(FacetContext fcontext) throws IOException {
    */
   public static FacetRequest parse(SolrQueryRequest req, Map<String, Object> params) {
     @SuppressWarnings({"rawtypes"})
-    FacetParser parser = new FacetTopParser(req);
+    FacetParser parser = new FacetParser.FacetTopParser(req);

Review comment:
       Seeing this... I think FacetParser ought to have a static factory method (`newParser`) so that FacetTopParser isn't exposed outside the class. Feel free to leave it if you think that's out of scope, but IMO now's a great time.
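
A minimal sketch of the kind of factory being suggested, using the `newParser` name from the comment; the real FacetParser has generics and constructor details that are elided here, so treat the signatures as assumptions:

```java
import org.apache.solr.request.SolrQueryRequest;

// Simplified sketch; the real FacetParser/FacetTopParser signatures differ.
public abstract class FacetParser {

  /** Callers go through the base class; FacetTopParser stays an implementation detail. */
  public static FacetParser newParser(SolrQueryRequest req) {
    return new FacetTopParser(req);
  }

  // Could now be private (or package-private), since nothing outside needs to name it.
  private static class FacetTopParser extends FacetParser {
    private final SolrQueryRequest req;

    private FacetTopParser(SolrQueryRequest req) {
      this.req = req;
    }
  }
}
```

The call site in FacetRequest.parse would then read `FacetParser parser = FacetParser.newParser(req);`, and the nested class could drop out of the public surface entirely.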




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


