Posted to commits@lucene.apache.org by kr...@apache.org on 2017/01/12 16:51:18 UTC

[01/43] lucene-solr:jira/solr-8593: LUCENE-7614: ComplexPhraseQueryParser ignores quotes around single-term phrases

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-8593 4b17b82a9 -> 737194712


LUCENE-7614: ComplexPhraseQueryParser ignores quotes around single-term phrases


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/52f2a77b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/52f2a77b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/52f2a77b

Branch: refs/heads/jira/solr-8593
Commit: 52f2a77b78fc95bc98d664411cda63d58606df52
Parents: 024c403
Author: Mikhail Khludnev <mk...@apache.org>
Authored: Thu Jan 5 23:39:46 2017 +0300
Committer: Mikhail Khludnev <mk...@apache.org>
Committed: Sat Jan 7 00:42:04 2017 +0300

----------------------------------------------------------------------
 lucene/CHANGES.txt                                             | 3 +++
 .../queryparser/complexPhrase/ComplexPhraseQueryParser.java    | 4 +++-
 .../queryparser/complexPhrase/TestComplexPhraseQuery.java      | 6 ++++++
 3 files changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/52f2a77b/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 67d8ae5..b74056f 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -198,6 +198,9 @@ Improvements
 * LUCENE-7401: Changed the way BKD trees pick the split dimension in order to
   ensure all dimensions are indexed. (Adrien Grand)
 
+* LUCENE-7614: Complex Phrase Query parser ignores double quotes around single token 
+  prefix, wildcard, range queries (Mikhail Khludnev) 
+
 Optimizations
 
 * LUCENE-7568: Optimize merging when index sorting is used but the

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/52f2a77b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java
index 1a7e5e1..6e18960 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java
@@ -255,7 +255,9 @@ public class ComplexPhraseQueryParser extends QueryParser {
     public Query rewrite(IndexReader reader) throws IOException {
       final Query contents = this.contents[0];
       // ArrayList spanClauses = new ArrayList();
-      if (contents instanceof TermQuery) {
+      if (contents instanceof TermQuery 
+          || contents instanceof MultiTermQuery
+          ) {
         return contents;
       }
       // Build a sequence of Span clauses arranged in a SpanNear - child

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/52f2a77b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
index 66078b0..28b600b 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
@@ -72,6 +72,12 @@ public class TestComplexPhraseQuery extends LuceneTestCase {
     checkBadQuery("\"jo* \"smith\" \""); // phrases inside phrases is bad
   }
 
+  public void testSingleTermPhrase() throws Exception {
+    checkMatches("\"joh*\" \"tom\"", "1,2,3,4"); 
+    checkMatches("+\"j*\" +\"tom\"", "4"); 
+    checkMatches("\"jo*\" \"[sma TO smZ]\" ", "1,2,3");
+    checkMatches("+\"j*hn\" +\"sm*h\"", "1,3"); 
+  }
 
   public void testUnOrderedProximitySearches() throws Exception {
 

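For context, a minimal sketch of what this fix enables (not part of the commit; the field name is hypothetical): before LUCENE-7614, a quoted single-token wildcard or prefix such as "joh*" parsed but then failed at rewrite time, because the rewrite only short-circuited for TermQuery; with the MultiTermQuery check above, the single-term "phrase" simply rewrites to the underlying wildcard query.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryparser.complexPhrase.ComplexPhraseQueryParser;
    import org.apache.lucene.search.Query;

    public class SingleTermPhraseDemo {
      public static void main(String[] args) throws Exception {
        // "name" is a hypothetical field; any analyzed text field works.
        ComplexPhraseQueryParser parser =
            new ComplexPhraseQueryParser("name", new StandardAnalyzer());
        // Parsing succeeds both before and after the fix; the difference
        // shows up when the query is rewritten against an IndexReader
        // during search.
        Query q = parser.parse("\"joh*\"");
        System.out.println(q);
      }
    }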

[10/43] lucene-solr:jira/solr-8593: LUCENE-7610: Remove deprecated facet ValueSource methods

Posted by kr...@apache.org.
LUCENE-7610: Remove deprecated facet ValueSource methods


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ce8b678b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ce8b678b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ce8b678b

Branch: refs/heads/jira/solr-8593
Commit: ce8b678ba19a53c43033a235bdca54e5a68adcc8
Parents: e5f39f6
Author: Alan Woodward <ro...@apache.org>
Authored: Sat Jan 7 12:25:15 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Jan 7 13:07:13 2017 +0000

----------------------------------------------------------------------
 .../apache/lucene/facet/range/DoubleRange.java  |  9 ---
 .../facet/range/DoubleRangeFacetCounts.java     | 23 ------
 .../apache/lucene/facet/range/LongRange.java    | 10 ---
 .../facet/range/LongRangeFacetCounts.java       | 21 ------
 .../taxonomy/TaxonomyFacetSumValueSource.java   | 75 +-------------------
 .../facet/range/TestRangeFacetCounts.java       |  8 +--
 .../TestTaxonomyFacetSumValueSource.java        | 49 +++----------
 7 files changed, 13 insertions(+), 182 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ce8b678b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
index ce377f5..50e771d 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
@@ -21,7 +21,6 @@ import java.util.Objects;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.ConstantScoreScorer;
 import org.apache.lucene.search.ConstantScoreWeight;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -178,14 +177,6 @@ public final class DoubleRange extends Range {
   }
 
   /**
-   * @deprecated Use {@link #getQuery(Query, DoubleValuesSource)}
-   */
-  @Deprecated
-  public Query getQuery(final Query fastMatchQuery, final ValueSource valueSource) {
-    return new ValueSourceQuery(this, fastMatchQuery, valueSource.asDoubleValuesSource());
-  }
-
-  /**
    * Create a Query that matches documents in this range
    *
    * The query will check all documents that match the provided match query,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ce8b678b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
index 63fc935..5ed11a9 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
@@ -25,7 +25,6 @@ import org.apache.lucene.facet.FacetsCollector;
 import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.DoubleValues;
 import org.apache.lucene.search.DoubleValuesSource;
@@ -61,15 +60,6 @@ public class DoubleRangeFacetCounts extends RangeFacetCounts {
   }
 
   /**
-   * Create {@code RangeFacetCounts}, using the provided {@link ValueSource}.
-   *
-   * @deprecated Use {@link #DoubleRangeFacetCounts(String, DoubleValuesSource, FacetsCollector, DoubleRange...)}
-   * */
-  public DoubleRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, DoubleRange... ranges) throws IOException {
-    this(field, valueSource, hits, null, ranges);
-  }
-
-  /**
    * Create {@code RangeFacetCounts} using the provided {@link DoubleValuesSource}
    */
   public DoubleRangeFacetCounts(String field, DoubleValuesSource valueSource, FacetsCollector hits, DoubleRange... ranges) throws IOException {
@@ -78,19 +68,6 @@ public class DoubleRangeFacetCounts extends RangeFacetCounts {
 
   /**
    * Create {@code RangeFacetCounts}, using the provided
-   * {@link ValueSource}, and using the provided Query as
-   * a fastmatch: only documents matching the query are
-   * checked for the matching ranges.
-   *
-   * @deprecated Use ({@link #DoubleRangeFacetCounts(String, DoubleValuesSource, FacetsCollector, Query, DoubleRange...)}
-   */
-  @Deprecated
-  public DoubleRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, Query fastMatchQuery, DoubleRange... ranges) throws IOException {
-    this(field, valueSource.asDoubleValuesSource(), hits, fastMatchQuery, ranges);
-  }
-
-  /**
-   * Create {@code RangeFacetCounts}, using the provided
    * {@link DoubleValuesSource}, and using the provided Query as
    * a fastmatch: only documents matching the query are
    * checked for the matching ranges.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ce8b678b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java
index 20c408d..582d76f 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java
@@ -21,7 +21,6 @@ import java.util.Objects;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.ConstantScoreScorer;
 import org.apache.lucene.search.ConstantScoreWeight;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -169,15 +168,6 @@ public final class LongRange extends Range {
 
   }
 
-
-  /**
-   * @deprecated Use {@link #getQuery(Query, LongValuesSource)}
-   */
-  @Deprecated
-  public Query getQuery(final Query fastMatchQuery, final ValueSource valueSource) {
-    return new ValueSourceQuery(this, fastMatchQuery, valueSource.asLongValuesSource());
-  }
-
   /**
    * Create a Query that matches documents in this range
    *

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ce8b678b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
index a3cfc71..c9c42a3 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
@@ -51,33 +51,12 @@ public class LongRangeFacetCounts extends RangeFacetCounts {
     this(field, LongValuesSource.fromLongField(field), hits, ranges);
   }
 
-  /**
-   * Create {@code RangeFacetCounts}, using the provided {@link ValueSource}.
-   *
-   * @deprecated Use {@link #LongRangeFacetCounts(String, LongValuesSource, FacetsCollector, LongRange...)}
-   */
-  @Deprecated
-  public LongRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, LongRange... ranges) throws IOException {
-    this(field, valueSource.asLongValuesSource(), hits, null, ranges);
-  }
-
   /** Create {@code RangeFacetCounts}, using the provided
    *  {@link ValueSource}. */
   public LongRangeFacetCounts(String field, LongValuesSource valueSource, FacetsCollector hits, LongRange... ranges) throws IOException {
     this(field, valueSource, hits, null, ranges);
   }
 
-  /**
-   * Create {@code RangeFacetCounts}, using the provided {@link ValueSource}.
-   *
-   * @deprecated Use {@link #LongRangeFacetCounts(String, LongValuesSource, FacetsCollector, Query, LongRange...)}
-   */
-  @Deprecated
-  public LongRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, Query fastMatchQuery, LongRange... ranges) throws IOException {
-    this(field, valueSource.asLongValuesSource(), hits, fastMatchQuery, ranges);
-  }
-
-
   /** Create {@code RangeFacetCounts}, using the provided
    *  {@link ValueSource}, and using the provided Filter as
    *  a fastmatch: only documents passing the filter are

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ce8b678b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
index 0a73ae5..6bc77c2 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
@@ -18,19 +18,14 @@ package org.apache.lucene.facet.taxonomy;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.lucene.facet.FacetsCollector;
 import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.facet.FacetsConfig;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.DoubleValues;
 import org.apache.lucene.search.DoubleValuesSource;
-import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.IntsRef;
 
 /** Aggregates sum of values from {@link
@@ -42,21 +37,6 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
 
   /**
    * Aggreggates double facet values from the provided
-   *  {@link ValueSource}, pulling ordinals using {@link
-   *  DocValuesOrdinalsReader} against the default indexed
-   *  facet field {@link
-   *  FacetsConfig#DEFAULT_INDEX_FIELD_NAME}.
-   *
-   *  @deprecated {@link #TaxonomyFacetSumValueSource(TaxonomyReader, FacetsConfig, FacetsCollector, DoubleValuesSource)}
-   */
-  @Deprecated
-  public TaxonomyFacetSumValueSource(TaxonomyReader taxoReader, FacetsConfig config,
-                                     FacetsCollector fc, ValueSource valueSource) throws IOException {
-    this(new DocValuesOrdinalsReader(FacetsConfig.DEFAULT_INDEX_FIELD_NAME), taxoReader, config, fc, valueSource);
-  }
-
-  /**
-   * Aggreggates double facet values from the provided
    * {@link DoubleValuesSource}, pulling ordinals using {@link
    * DocValuesOrdinalsReader} against the default indexed
    * facet field {@link FacetsConfig#DEFAULT_INDEX_FIELD_NAME}.
@@ -64,29 +44,14 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
    public TaxonomyFacetSumValueSource(TaxonomyReader taxoReader, FacetsConfig config,
                                      FacetsCollector fc, DoubleValuesSource valueSource) throws IOException {
     this(new DocValuesOrdinalsReader(FacetsConfig.DEFAULT_INDEX_FIELD_NAME), taxoReader, config, fc, valueSource);
-  }
-
-  /**
-   * Aggreggates float facet values from the provided
-   *  {@link ValueSource}, and pulls ordinals from the
-   *  provided {@link OrdinalsReader}.
-   *
-   *  @deprecated use {@link #TaxonomyFacetSumValueSource(OrdinalsReader, TaxonomyReader, FacetsConfig, FacetsCollector, DoubleValuesSource)}
-   */
-  @Deprecated
-  public TaxonomyFacetSumValueSource(OrdinalsReader ordinalsReader, TaxonomyReader taxoReader,
-                                     FacetsConfig config, FacetsCollector fc, ValueSource valueSource) throws IOException {
-    super(ordinalsReader.getIndexFieldName(), taxoReader, config);
-    this.ordinalsReader = ordinalsReader;
-    sumValues(fc.getMatchingDocs(), fc.getKeepScores(), valueSource.asDoubleValuesSource());
-  }
+   }
 
   /**
    * Aggreggates float facet values from the provided
    *  {@link DoubleValuesSource}, and pulls ordinals from the
    *  provided {@link OrdinalsReader}.
    */
-   public TaxonomyFacetSumValueSource(OrdinalsReader ordinalsReader, TaxonomyReader taxoReader,
+  public TaxonomyFacetSumValueSource(OrdinalsReader ordinalsReader, TaxonomyReader taxoReader,
                                      FacetsConfig config, FacetsCollector fc, DoubleValuesSource vs) throws IOException {
     super(ordinalsReader.getIndexFieldName(), taxoReader, config);
     this.ordinalsReader = ordinalsReader;
@@ -134,41 +99,5 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
 
     rollup();
   }
-
-  /**
-   * {@link ValueSource} that returns the score for each
-   *  hit; use this to aggregate the sum of all hit scores
-   *  for each facet label.
-   *
-   * @deprecated Use {@link DoubleValuesSource#SCORES}
-   */
-  public static class ScoreValueSource extends ValueSource {
-
-    /** Sole constructor. */
-    public ScoreValueSource() {
-    }
-
-    @Override
-    public FunctionValues getValues(@SuppressWarnings("rawtypes") Map context, LeafReaderContext readerContext) throws IOException {
-      final Scorer scorer = (Scorer) context.get("scorer");
-      if (scorer == null) {
-        throw new IllegalStateException("scores are missing; be sure to pass keepScores=true to FacetsCollector");
-      }
-      return new DoubleDocValues(this) {
-        @Override
-        public double doubleVal(int document) {
-          try {
-            return scorer.score();
-          } catch (IOException exception) {
-            throw new RuntimeException(exception);
-          }
-        }
-      };
-    }
-    
-    @Override public boolean equals(Object o) { return o == this; }
-    @Override public int hashCode() { return System.identityHashCode(this); }
-    @Override public String description() { return "score()"; }
-  }
   
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ce8b678b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
index ff207d3..31f9e59 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
@@ -46,13 +46,11 @@ import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
-import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.search.DoubleValues;
 import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LongValuesSource;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
@@ -437,7 +435,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
       } else {
         fastMatchQuery = null;
       }
-      ValueSource vs = new LongFieldSource("field");
+      LongValuesSource vs = LongValuesSource.fromLongField("field");
       Facets facets = new LongRangeFacetCounts("field", vs, sfc, fastMatchQuery, ranges);
       FacetResult result = facets.getTopChildren(10, "field");
       assertEquals(numRange, result.labelValues.length);
@@ -580,7 +578,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
       } else {
         fastMatchFilter = null;
       }
-      ValueSource vs = new DoubleFieldSource("field");
+      DoubleValuesSource vs = DoubleValuesSource.fromDoubleField("field");
       Facets facets = new DoubleRangeFacetCounts("field", vs, sfc, fastMatchFilter, ranges);
       FacetResult result = facets.getTopChildren(10, "field");
       assertEquals(numRange, result.labelValues.length);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ce8b678b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
index 31bf6e1..2af9925 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.facet.taxonomy;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -37,18 +36,12 @@ import org.apache.lucene.facet.FacetsConfig;
 import org.apache.lucene.facet.LabelAndValue;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.FunctionQuery;
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
-import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
-import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
@@ -56,7 +49,6 @@ import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
@@ -121,7 +113,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
     // Facets.search utility methods:
     searcher.search(new MatchAllDocsQuery(), c);
 
-    TaxonomyFacetSumValueSource facets = new TaxonomyFacetSumValueSource(taxoReader, new FacetsConfig(), c, new IntFieldSource("num"));
+    TaxonomyFacetSumValueSource facets = new TaxonomyFacetSumValueSource(taxoReader, new FacetsConfig(), c, DoubleValuesSource.fromIntField("num"));
 
     // Retrieve & verify results:
     assertEquals("dim=Author path=[] value=145.0 childCount=4\n  Lisa (50.0)\n  Frank (45.0)\n  Susan (40.0)\n  Bob (10.0)\n", facets.getTopChildren(10, "Author").toString());
@@ -181,7 +173,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
     FacetsCollector c = new FacetsCollector();
     searcher.search(new MatchAllDocsQuery(), c);    
 
-    TaxonomyFacetSumValueSource facets = new TaxonomyFacetSumValueSource(taxoReader, new FacetsConfig(), c, new IntFieldSource("num"));
+    TaxonomyFacetSumValueSource facets = new TaxonomyFacetSumValueSource(taxoReader, new FacetsConfig(), c, DoubleValuesSource.fromIntField("num"));
 
     // Ask for top 10 labels for any dims that have counts:
     List<FacetResult> results = facets.getAllDims(10);
@@ -224,7 +216,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
     FacetsCollector c = new FacetsCollector();
     searcher.search(new MatchAllDocsQuery(), c);    
 
-    TaxonomyFacetSumValueSource facets = new TaxonomyFacetSumValueSource(taxoReader, config, c, new IntFieldSource("num"));
+    TaxonomyFacetSumValueSource facets = new TaxonomyFacetSumValueSource(taxoReader, config, c, DoubleValuesSource.fromIntField("num"));
 
     // Ask for top 10 labels for any dims that have counts:
     List<FacetResult> results = facets.getAllDims(10);
@@ -295,7 +287,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
 
     FacetsCollector sfc = new FacetsCollector();
     newSearcher(r).search(new MatchAllDocsQuery(), sfc);
-    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, sfc, new LongFieldSource("price"));
+    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, sfc, DoubleValuesSource.fromLongField("price"));
     assertEquals("dim=a path=[] value=10.0 childCount=2\n  1 (6.0)\n  0 (4.0)\n", facets.getTopChildren(10, "a").toString());
 
     iw.close();
@@ -319,34 +311,12 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
     
     DirectoryReader r = DirectoryReader.open(iw);
     DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
-
-    ValueSource valueSource = new ValueSource() {
-      @Override
-      public FunctionValues getValues(@SuppressWarnings("rawtypes") Map context, LeafReaderContext readerContext) throws IOException {
-        final Scorer scorer = (Scorer) context.get("scorer");
-        assert scorer != null;
-        return new DoubleDocValues(this) {
-          @Override
-          public double doubleVal(int document) {
-            try {
-              return scorer.score();
-            } catch (IOException exception) {
-              throw new RuntimeException(exception);
-            }
-          }
-        };
-      }
-
-      @Override public boolean equals(Object o) { return o == this; }
-      @Override public int hashCode() { return System.identityHashCode(this); }
-      @Override public String description() { return "score()"; }
-    };
     
     FacetsCollector fc = new FacetsCollector(true);
     // score documents by their 'price' field - makes asserting the correct counts for the categories easier
     Query q = new FunctionQuery(new LongFieldSource("price"));
     FacetsCollector.search(newSearcher(r), q, 10, fc);
-    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, valueSource);
+    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, DoubleValuesSource.SCORES);
     
     assertEquals("dim=a path=[] value=10.0 childCount=2\n  1 (6.0)\n  0 (4.0)\n", facets.getTopChildren(10, "a").toString());
 
@@ -374,10 +344,9 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
     DirectoryReader r = DirectoryReader.open(iw);
     DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
 
-    ValueSource valueSource = new LongFieldSource("price");
     FacetsCollector sfc = new FacetsCollector();
     newSearcher(r).search(new MatchAllDocsQuery(), sfc);
-    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, sfc, valueSource);
+    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, sfc, DoubleValuesSource.fromLongField("price"));
     
     assertEquals("dim=a path=[] value=10.0 childCount=2\n  1 (6.0)\n  0 (4.0)\n", facets.getTopChildren(10, "a").toString());
 
@@ -447,8 +416,6 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
     // NRT open
     TaxonomyReader tr = new DirectoryTaxonomyReader(tw);
 
-    ValueSource values = new FloatFieldSource("value");
-
     int iters = atLeast(100);
     for(int iter=0;iter<iters;iter++) {
       String searchToken = tokens[random().nextInt(tokens.length)];
@@ -457,7 +424,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
       }
       FacetsCollector fc = new FacetsCollector();
       FacetsCollector.search(searcher, new TermQuery(new Term("content", searchToken)), 10, fc);
-      Facets facets = new TaxonomyFacetSumValueSource(tr, config, fc, values);
+      Facets facets = new TaxonomyFacetSumValueSource(tr, config, fc, DoubleValuesSource.fromFloatField("value"));
 
       // Slow, yet hopefully bug-free, faceting:
       @SuppressWarnings({"rawtypes","unchecked"}) Map<String,Float>[] expectedValues = new HashMap[numDims];
@@ -473,7 +440,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
               if (v == null) {
                 expectedValues[j].put(doc.dims[j], doc.value);
               } else {
-                expectedValues[j].put(doc.dims[j], v.floatValue() + doc.value);
+                expectedValues[j].put(doc.dims[j], v + doc.value);
               }
             }
           }

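A hedged migration sketch for callers of the removed overloads (not from the commit; the field name and ranges are hypothetical): the deprecated ValueSource-based constructors are replaced by the DoubleValuesSource/LongValuesSource factory methods, matching the test changes above.

    import java.io.IOException;
    import org.apache.lucene.facet.Facets;
    import org.apache.lucene.facet.FacetsCollector;
    import org.apache.lucene.facet.range.DoubleRange;
    import org.apache.lucene.facet.range.DoubleRangeFacetCounts;
    import org.apache.lucene.search.DoubleValuesSource;

    public class RangeFacetMigration {
      static Facets countPriceRanges(FacetsCollector hits) throws IOException {
        // Removed: new DoubleRangeFacetCounts("price", new DoubleFieldSource("price"), hits, ...)
        DoubleValuesSource vs = DoubleValuesSource.fromDoubleField("price");
        return new DoubleRangeFacetCounts("price", vs, hits,
            new DoubleRange("cheap", 0.0, true, 10.0, false),
            new DoubleRange("pricey", 10.0, true, 100.0, true));
      }
    }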

[17/43] lucene-solr:jira/solr-8593: LUCENE-7588: the parallel search method was failing to pass on the user's requested sort when merge-sorting all hits

Posted by kr...@apache.org.
LUCENE-7588: the parallel search method was failing to pass on the user's requested sort when merge-sorting all hits


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1aa9c425
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1aa9c425
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1aa9c425

Branch: refs/heads/jira/solr-8593
Commit: 1aa9c4251289e71ab8e87b03797b20f4a8fda0a5
Parents: 570880d
Author: Mike McCandless <mi...@apache.org>
Authored: Sun Jan 8 06:24:03 2017 -0500
Committer: Mike McCandless <mi...@apache.org>
Committed: Sun Jan 8 06:24:03 2017 -0500

----------------------------------------------------------------------
 .../org/apache/lucene/facet/DrillSideways.java  | 37 ++++++++++----------
 1 file changed, 19 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1aa9c425/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java
index 61530bc..b2391f5 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java
@@ -16,6 +16,17 @@
  */
 package org.apache.lucene.facet;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
 import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetCounts;
 import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField;
 import org.apache.lucene.facet.sortedset.SortedSetDocValuesReaderState;
@@ -34,20 +45,10 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopFieldDocs;
 import org.apache.lucene.search.TopScoreDocCollector;
 import org.apache.lucene.util.ThreadInterruptedException;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
 /**
  * Computes drill down and sideways counts for the provided
  * {@link DrillDownQuery}.  Drill sideways counts include
@@ -234,8 +235,8 @@ public class DrillSideways {
 
       if (executor != null) { // We have an executor, let use the multi-threaded version
 
-        final CollectorManager<TopFieldCollector, TopDocs> collectorManager =
-                new CollectorManager<TopFieldCollector, TopDocs>() {
+        final CollectorManager<TopFieldCollector, TopFieldDocs> collectorManager =
+                new CollectorManager<TopFieldCollector, TopFieldDocs>() {
 
                   @Override
                   public TopFieldCollector newCollector() throws IOException {
@@ -243,16 +244,16 @@ public class DrillSideways {
                   }
 
                   @Override
-                  public TopDocs reduce(Collection<TopFieldCollector> collectors) throws IOException {
-                    final TopDocs[] topDocs = new TopDocs[collectors.size()];
+                  public TopFieldDocs reduce(Collection<TopFieldCollector> collectors) throws IOException {
+                    final TopFieldDocs[] topFieldDocs = new TopFieldDocs[collectors.size()];
                     int pos = 0;
                     for (TopFieldCollector collector : collectors)
-                      topDocs[pos++] = collector.topDocs();
-                    return TopDocs.merge(topN, topDocs);
+                      topFieldDocs[pos++] = collector.topDocs();
+                    return TopDocs.merge(sort, topN, topFieldDocs);
                   }
 
                 };
-        ConcurrentDrillSidewaysResult<TopDocs> r = search(query, collectorManager);
+        ConcurrentDrillSidewaysResult<TopFieldDocs> r = search(query, collectorManager);
         return new DrillSidewaysResult(r.facets, r.collectorResult);
 
       } else {

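The essence of the fix, as a standalone sketch (it mirrors the reduce() implementation above; nothing here is new API): per-collector TopFieldDocs must be merged with the Sort-taking overload of TopDocs.merge, since the score-only overload would discard the user's requested field sort.

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TopFieldCollector;
    import org.apache.lucene.search.TopFieldDocs;

    class SortedMerge {
      static TopFieldDocs mergeSorted(Sort sort, int topN,
          Collection<TopFieldCollector> collectors) throws IOException {
        TopFieldDocs[] perCollector = new TopFieldDocs[collectors.size()];
        int pos = 0;
        for (TopFieldCollector c : collectors) {
          perCollector[pos++] = c.topDocs();
        }
        // TopDocs.merge(topN, hits) merge-sorts by score; this overload
        // preserves the field sort the caller asked for.
        return TopDocs.merge(sort, topN, perCollector);
      }
    }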

[09/43] lucene-solr:jira/solr-8593: LUCENE-7611: Remove queries dependency from suggester module

Posted by kr...@apache.org.
LUCENE-7611: Remove queries dependency from suggester module


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8f4fee3a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8f4fee3a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8f4fee3a

Branch: refs/heads/jira/solr-8593
Commit: 8f4fee3ad1c0027587d0de96f59cf61b2df67bc8
Parents: ce8b678
Author: Alan Woodward <ro...@apache.org>
Authored: Sat Jan 7 12:32:17 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Jan 7 13:07:13 2017 +0000

----------------------------------------------------------------------
 dev-tools/idea/lucene/suggest/suggest.iml       |  1 -
 .../lucene/search/DoubleValuesSource.java       | 27 ++++++++++++
 lucene/suggest/build.xml                        |  3 +-
 .../suggest/DocumentValueSourceDictionary.java  | 45 --------------------
 .../DocumentValueSourceDictionaryTest.java      | 25 +++++------
 5 files changed, 38 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8f4fee3a/dev-tools/idea/lucene/suggest/suggest.iml
----------------------------------------------------------------------
diff --git a/dev-tools/idea/lucene/suggest/suggest.iml b/dev-tools/idea/lucene/suggest/suggest.iml
index 576ea8b..5e58bc2 100644
--- a/dev-tools/idea/lucene/suggest/suggest.iml
+++ b/dev-tools/idea/lucene/suggest/suggest.iml
@@ -13,7 +13,6 @@
     <orderEntry type="sourceFolder" forTests="false" />
     <orderEntry type="library" scope="TEST" name="JUnit" level="project" />
     <orderEntry type="module" scope="TEST" module-name="lucene-test-framework" />
-    <orderEntry type="module" module-name="queries" />
     <orderEntry type="module" module-name="analysis-common" />
     <orderEntry type="module" module-name="lucene-core" />
   </component>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8f4fee3a/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java b/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
index 4ac8fc1..d4be4e9 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
@@ -150,6 +150,33 @@ public abstract class DoubleValuesSource {
   };
 
   /**
+   * Creates a DoubleValuesSource that always returns a constant value
+   */
+  public static DoubleValuesSource constant(double value) {
+    return new DoubleValuesSource() {
+      @Override
+      public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
+        return new DoubleValues() {
+          @Override
+          public double doubleValue() throws IOException {
+            return value;
+          }
+
+          @Override
+          public boolean advanceExact(int doc) throws IOException {
+            return true;
+          }
+        };
+      }
+
+      @Override
+      public boolean needsScores() {
+        return false;
+      }
+    };
+  }
+
+  /**
    * Returns a DoubleValues instance that wraps scores returned by a Scorer
    */
   public static DoubleValues fromScorer(Scorer scorer) {
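A small usage sketch of the new factory (it assumes only the code added above): a constant source reports the same value for every document, which is what the updated suggester tests below rely on.

    import java.io.IOException;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.DoubleValues;
    import org.apache.lucene.search.DoubleValuesSource;

    class ConstantSourceDemo {
      static double readConstant(LeafReaderContext ctx) throws IOException {
        // needsScores() is false, so no scores source is required here.
        DoubleValues v = DoubleValuesSource.constant(10.0).getValues(ctx, null);
        v.advanceExact(0);      // always true: every document "has" the constant
        return v.doubleValue(); // 10.0 regardless of the document
      }
    }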

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8f4fee3a/lucene/suggest/build.xml
----------------------------------------------------------------------
diff --git a/lucene/suggest/build.xml b/lucene/suggest/build.xml
index 5babe06..bc4bed5 100644
--- a/lucene/suggest/build.xml
+++ b/lucene/suggest/build.xml
@@ -30,11 +30,10 @@
 
   <path id="classpath">
     <pathelement path="${analyzers-common.jar}"/>
-    <pathelement path="${queries.jar}"/>
     <path refid="base.classpath"/>
   </path>
 
-  <target name="javadocs" depends="javadocs-queries,compile-core,check-javadocs-uptodate"
+  <target name="javadocs" depends="compile-core,check-javadocs-uptodate"
           unless="javadocs-uptodate-${name}">
     <invoke-module-javadoc>
       <links>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8f4fee3a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
index 656dc04..2291ac9 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
@@ -23,7 +23,6 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.LongValues;
 import org.apache.lucene.search.LongValuesSource;
 
@@ -63,21 +62,6 @@ import org.apache.lucene.search.LongValuesSource;
 public class DocumentValueSourceDictionary extends DocumentDictionary {
   
   private final LongValuesSource weightsValueSource;
-  
-  /**
-   * Creates a new dictionary with the contents of the fields named <code>field</code>
-   * for the terms, <code>payload</code> for the corresponding payloads, <code>contexts</code>
-   * for the associated contexts and uses the <code>weightsValueSource</code> supplied 
-   * to determine the score.
-   *
-   * @deprecated Use {@link #DocumentValueSourceDictionary(IndexReader, String, LongValuesSource, String, String)}
-   */
-  @Deprecated
-  public DocumentValueSourceDictionary(IndexReader reader, String field,
-                                       ValueSource weightsValueSource, String payload, String contexts) {
-    super(reader, field, null, payload, contexts);
-    this.weightsValueSource = weightsValueSource.asLongValuesSource();
-  }
 
   /**
    * Creates a new dictionary with the contents of the fields named <code>field</code>
@@ -94,21 +78,6 @@ public class DocumentValueSourceDictionary extends DocumentDictionary {
   /**
    * Creates a new dictionary with the contents of the fields named <code>field</code>
    * for the terms, <code>payloadField</code> for the corresponding payloads
-   * and uses the <code>weightsValueSource</code> supplied to determine the 
-   * score.
-   *
-   * @deprecated Use {@link #DocumentValueSourceDictionary(IndexReader, String, LongValuesSource, String)}
-   */
-  @Deprecated
-  public DocumentValueSourceDictionary(IndexReader reader, String field,
-                                       ValueSource weightsValueSource, String payload) {
-    super(reader, field, null, payload);
-    this.weightsValueSource = weightsValueSource.asLongValuesSource();
-  }
-
-  /**
-   * Creates a new dictionary with the contents of the fields named <code>field</code>
-   * for the terms, <code>payloadField</code> for the corresponding payloads
    * and uses the <code>weightsValueSource</code> supplied to determine the
    * score.
    */
@@ -117,20 +86,6 @@ public class DocumentValueSourceDictionary extends DocumentDictionary {
     super(reader, field, null, payload);
     this.weightsValueSource = weightsValueSource;
   }
-  
-  /** 
-   * Creates a new dictionary with the contents of the fields named <code>field</code>
-   * for the terms and uses the <code>weightsValueSource</code> supplied to determine the 
-   * score.
-   *
-   * @deprecated Use {@link #DocumentValueSourceDictionary(IndexReader, String, LongValuesSource)}
-   */
-  @Deprecated
-  public DocumentValueSourceDictionary(IndexReader reader, String field,
-                                       ValueSource weightsValueSource) {
-    super(reader, field, null, null);
-    this.weightsValueSource = weightsValueSource.asLongValuesSource();
-  }
 
   /**
    * Creates a new dictionary with the contents of the fields named <code>field</code>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8f4fee3a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
index 92799cd..55970e4 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
@@ -39,10 +39,6 @@ import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource;
-import org.apache.lucene.queries.function.valuesource.LongFieldSource;
-import org.apache.lucene.queries.function.valuesource.SumFloatFunction;
 import org.apache.lucene.search.DoubleValues;
 import org.apache.lucene.search.LongValues;
 import org.apache.lucene.search.LongValuesSource;
@@ -73,7 +69,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     writer.commit();
     writer.close();
     IndexReader ir = DirectoryReader.open(dir);
-    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME,  new DoubleConstValueSource(10), PAYLOAD_FIELD_NAME);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME);
     InputIterator inputIterator = dictionary.getEntryIterator();
 
     assertNull(inputIterator.next());
@@ -119,8 +115,8 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     writer.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    ValueSource[] toAdd = new ValueSource[] {new LongFieldSource(WEIGHT_FIELD_NAME_1), new LongFieldSource(WEIGHT_FIELD_NAME_2), new LongFieldSource(WEIGHT_FIELD_NAME_3)};
-    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, new SumFloatFunction(toAdd), PAYLOAD_FIELD_NAME);
+    LongValuesSource s = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME);
     InputIterator inputIterator = dictionary.getEntryIterator();
     BytesRef f;
     while((f = inputIterator.next())!=null) {
@@ -227,8 +223,8 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     writer.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    ValueSource[] toAdd = new ValueSource[] {new LongFieldSource(WEIGHT_FIELD_NAME_1), new LongFieldSource(WEIGHT_FIELD_NAME_2), new LongFieldSource(WEIGHT_FIELD_NAME_3)};
-    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, new SumFloatFunction(toAdd), PAYLOAD_FIELD_NAME, CONTEXTS_FIELD_NAME);
+    LongValuesSource s = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME, CONTEXTS_FIELD_NAME);
     InputIterator inputIterator = dictionary.getEntryIterator();
     BytesRef f;
     while((f = inputIterator.next())!=null) {
@@ -305,8 +301,8 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     writer.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    ValueSource[] toAdd = new ValueSource[] {new LongFieldSource(WEIGHT_FIELD_NAME_1), new LongFieldSource(WEIGHT_FIELD_NAME_2), new LongFieldSource(WEIGHT_FIELD_NAME_3)};
-    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME,  new SumFloatFunction(toAdd));
+    LongValuesSource s = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, s);
     InputIterator inputIterator = dictionary.getEntryIterator();
     BytesRef f;
     while((f = inputIterator.next())!=null) {
@@ -390,9 +386,8 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     IndexReader ir = DirectoryReader.open(dir);
     assertTrue("NumDocs should be > 0 but was " + ir.numDocs(), ir.numDocs() > 0);
     assertEquals(ir.numDocs(), docs.size());
-    ValueSource[] toAdd = new ValueSource[] {new LongFieldSource(WEIGHT_FIELD_NAME_1), new LongFieldSource(WEIGHT_FIELD_NAME_2)};
-
-    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME,  new SumFloatFunction(toAdd), PAYLOAD_FIELD_NAME);
+    LongValuesSource s = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, s, PAYLOAD_FIELD_NAME);
     InputIterator inputIterator = dictionary.getEntryIterator();
     BytesRef f;
     while((f = inputIterator.next())!=null) {
@@ -478,7 +473,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     writer.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, new DoubleConstValueSource(10), PAYLOAD_FIELD_NAME);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME);
     InputIterator inputIterator = dictionary.getEntryIterator();
     BytesRef f;
     while((f = inputIterator.next())!=null) {

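For migrating callers, a hedged sketch (field names hypothetical) of the surviving, core-only API used by the updated tests:

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.LongValuesSource;
    import org.apache.lucene.search.spell.Dictionary;
    import org.apache.lucene.search.suggest.DocumentValueSourceDictionary;

    class SuggesterDictionaryDemo {
      // Removed: new DocumentValueSourceDictionary(reader, "title",
      //              new DoubleConstValueSource(10), "payload")
      static Dictionary constantWeightDictionary(IndexReader reader) {
        return new DocumentValueSourceDictionary(reader, "title",
            LongValuesSource.constant(10), "payload");
      }
    }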

[25/43] lucene-solr:jira/solr-8593: improve exception message

Posted by kr...@apache.org.
improve exception message


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d7beb0f1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d7beb0f1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d7beb0f1

Branch: refs/heads/jira/solr-8593
Commit: d7beb0f1479d2a9c4f37d780aa31edcd1012ac69
Parents: b9827bc
Author: Mike McCandless <mi...@apache.org>
Authored: Mon Jan 9 05:48:29 2017 -0500
Committer: Mike McCandless <mi...@apache.org>
Committed: Mon Jan 9 05:48:54 2017 -0500

----------------------------------------------------------------------
 lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d7beb0f1/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java b/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java
index 0487400..b994d0d 100644
--- a/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/MMapDirectory.java
@@ -386,7 +386,7 @@ public class MMapDirectory extends FSDirectory {
           " [Please grant at least the following permissions: RuntimePermission(\"accessClassInPackage.sun.misc\") " +
           " and ReflectPermission(\"suppressAccessChecks\")]";
     } catch (ReflectiveOperationException | RuntimeException e) {
-      return "Unmapping is not supported on this platform, because internal Java APIs are not compatible to this Lucene version: " + e; 
+      return "Unmapping is not supported on this platform, because internal Java APIs are not compatible with this Lucene version: " + e; 
     }
   }
   


[31/43] lucene-solr:jira/solr-8593: SOLR-9644: Fixed SimpleMLTQParser and CloudMLTQParser to handle boosts properly and CloudMLTQParser to only extract actual values from IndexableField type fields to the filtered document.

Posted by kr...@apache.org.
SOLR-9644: Fixed SimpleMLTQParser and CloudMLTQParser to handle boosts properly and CloudMLTQParser to only extract actual values from IndexableField type fields to the filtered document.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2b4e3dd9
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2b4e3dd9
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2b4e3dd9

Branch: refs/heads/jira/solr-8593
Commit: 2b4e3dd941a7a88274f2a86f18ea57a9d95e4364
Parents: b8383db
Author: anshum <an...@apache.org>
Authored: Mon Jan 9 13:05:21 2017 -0800
Committer: anshum <an...@apache.org>
Committed: Mon Jan 9 13:06:24 2017 -0800

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  4 ++
 .../apache/solr/search/mlt/CloudMLTQParser.java | 49 ++++++++++++--------
 .../solr/search/mlt/SimpleMLTQParser.java       | 30 ++++++------
 .../solr/search/mlt/CloudMLTQParserTest.java    | 23 ++++++++-
 .../solr/search/mlt/SimpleMLTQParserTest.java   | 33 +++++++++++--
 5 files changed, 102 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b4e3dd9/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c79b3c6..2b79f04 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -345,6 +345,10 @@ Bug Fixes
 * SOLR-9883: Example schemaless solr config files can lead to invalid tlog replays: when updates are buffered,
   update processors ordered before DistributedUpdateProcessor, e.g. field normalization, are never run. (Steve Rowe)
 
+* SOLR-9644: SimpleMLTQParser and CloudMLTQParser did not handle field boosts properly
+  and CloudMLTQParser included extra strings from the field definitions in the query.
+  (Ere Maijala via Anshum Gupta)
+
 Other Changes
 ----------------------
 
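A hedged request sketch of the fixed behavior (SolrJ; collection and field names are hypothetical): with boost=true the per-field boosts in qf are applied to the generated MLT query, and with boost=false (the default) they are ignored rather than leaking into it.

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.response.QueryResponse;

    class MltBoostDemo {
      static QueryResponse moreLikeDoc(SolrClient client) throws Exception {
        // Find documents similar to the one with id 17, weighting field2 higher.
        SolrQuery q = new SolrQuery("{!mlt qf=field1^1,field2^10 boost=true}17");
        return client.query("collection1", q);
      }
    }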

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b4e3dd9/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java b/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
index 0f46725..945047b 100644
--- a/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
@@ -22,6 +22,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.regex.Pattern;
 
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.mlt.MoreLikeThis;
@@ -64,73 +65,83 @@ public class CloudMLTQParser extends QParser {
           SolrException.ErrorCode.BAD_REQUEST, "Error completing MLT request. Could not fetch " +
           "document with id [" + id + "]");
     }
-    
+
     String[] qf = localParams.getParams("qf");
     Map<String,Float> boostFields = new HashMap<>();
     MoreLikeThis mlt = new MoreLikeThis(req.getSearcher().getIndexReader());
-    
-    mlt.setMinTermFreq(localParams.getInt("mintf", MoreLikeThis.DEFAULT_MIN_TERM_FREQ));
 
+    mlt.setMinTermFreq(localParams.getInt("mintf", MoreLikeThis.DEFAULT_MIN_TERM_FREQ));
     mlt.setMinDocFreq(localParams.getInt("mindf", 0));
-
     mlt.setMinWordLen(localParams.getInt("minwl", MoreLikeThis.DEFAULT_MIN_WORD_LENGTH));
-
     mlt.setMaxWordLen(localParams.getInt("maxwl", MoreLikeThis.DEFAULT_MAX_WORD_LENGTH));
-
     mlt.setMaxQueryTerms(localParams.getInt("maxqt", MoreLikeThis.DEFAULT_MAX_QUERY_TERMS));
-
     mlt.setMaxNumTokensParsed(localParams.getInt("maxntp", MoreLikeThis.DEFAULT_MAX_NUM_TOKENS_PARSED));
-    
     mlt.setMaxDocFreq(localParams.getInt("maxdf", MoreLikeThis.DEFAULT_MAX_DOC_FREQ));
 
-    if(localParams.get("boost") != null) {
-      mlt.setBoost(localParams.getBool("boost"));
-      boostFields = SolrPluginUtils.parseFieldBoosts(qf);
-    }
+    Boolean boost = localParams.getBool("boost", MoreLikeThis.DEFAULT_BOOST);
+    mlt.setBoost(boost);
 
     mlt.setAnalyzer(req.getSchema().getIndexAnalyzer());
 
     Map<String, Collection<Object>> filteredDocument = new HashMap<>();
-    ArrayList<String> fieldNames = new ArrayList<>();
+    String[] fieldNames;
 
     if (qf != null) {
+      ArrayList<String> fields = new ArrayList();
       for (String fieldName : qf) {
         if (!StringUtils.isEmpty(fieldName))  {
           String[] strings = splitList.split(fieldName);
           for (String string : strings) {
             if (!StringUtils.isEmpty(string)) {
-              fieldNames.add(string);
+              fields.add(string);
             }
           }
         }
       }
+      // Parse field names and boosts from the fields
+      boostFields = SolrPluginUtils.parseFieldBoosts(fields.toArray(new String[0]));
+      fieldNames = boostFields.keySet().toArray(new String[0]);
     } else {
+      ArrayList<String> fields = new ArrayList();
       for (String field : doc.getFieldNames()) {
         // Only use fields that are stored and have an explicit analyzer.
         // This makes sense as the query uses tf/idf/.. for query construction.
         // We might want to relook and change this in the future though.
         SchemaField f = req.getSchema().getFieldOrNull(field);
         if (f != null && f.stored() && f.getType().isExplicitAnalyzer()) {
-          fieldNames.add(field);
+          fields.add(field);
         }
       }
+      fieldNames = fields.toArray(new String[0]);
     }
 
-    if( fieldNames.size() < 1 ) {
+    if (fieldNames.length < 1) {
       throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
           "MoreLikeThis requires at least one similarity field: qf" );
     }
 
-    mlt.setFieldNames(fieldNames.toArray(new String[fieldNames.size()]));
+    mlt.setFieldNames(fieldNames);
     for (String field : fieldNames) {
-      filteredDocument.put(field, doc.getFieldValues(field));
+      Collection<Object> fieldValues = doc.getFieldValues(field);
+      if (fieldValues != null) {
+        Collection<Object> values = new ArrayList<>();
+        for (Object val : fieldValues) {
+          if (val instanceof IndexableField) {
+            values.add(((IndexableField)val).stringValue());
+          }
+          else {
+            values.add(val);
+          }
+        }
+        filteredDocument.put(field, values);
+      }
     }
 
     try {
       Query rawMLTQuery = mlt.like(filteredDocument);
       BooleanQuery boostedMLTQuery = (BooleanQuery) rawMLTQuery;
 
-      if (boostFields.size() > 0) {
+      if (boost && boostFields.size() > 0) {
         BooleanQuery.Builder newQ = new BooleanQuery.Builder();
         newQ.setMinimumNumberShouldMatch(boostedMLTQuery.getMinimumNumberShouldMatch());
 

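The net effect of the CloudMLTQParser change above: "boost" now defaults to
MoreLikeThis.DEFAULT_BOOST, qf is always run through parseFieldBoosts (so a
"field^10" entry no longer leaks the boost suffix into the similarity field
names), stored IndexableField values are unwrapped to their string form before
being handed to MoreLikeThis, and the parsed boosts are applied only when
boost=true. A minimal SolrJ sketch of the two modes (the collection name
"books", the qf fields and the document id are hypothetical, not from this
commit):

    // boost=false: the ^10/^1000 boosts are parsed out of qf but not applied
    SolrQuery unboosted = new SolrQuery(
        "{!mlt qf=title_txt^10,body_txt^1000 boost=false mintf=0 mindf=0}42");
    // boost=true: the parsed boosts scale the rewritten MLT clauses
    SolrQuery boosted = new SolrQuery(
        "{!mlt qf=title_txt^10,body_txt^1000 boost=true mintf=0 mindf=0}42");
    QueryResponse rsp = cloudSolrClient.query("books", boosted); // any existing CloudSolrClient
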
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b4e3dd9/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java b/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
index 50803df..de6eb58 100644
--- a/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
@@ -76,16 +76,13 @@ public class SimpleMLTQParser extends QParser {
       mlt.setMaxQueryTerms(localParams.getInt("maxqt", MoreLikeThis.DEFAULT_MAX_QUERY_TERMS));
       mlt.setMaxNumTokensParsed(localParams.getInt("maxntp", MoreLikeThis.DEFAULT_MAX_NUM_TOKENS_PARSED));
       mlt.setMaxDocFreq(localParams.getInt("maxdf", MoreLikeThis.DEFAULT_MAX_DOC_FREQ));
+      boolean boost = localParams.getBool("boost", MoreLikeThis.DEFAULT_BOOST);
+      mlt.setBoost(boost);
 
-      // what happens if value is explicitly set to false?
-      if(localParams.get("boost") != null) {
-        mlt.setBoost(localParams.getBool("boost", false));
-        boostFields = SolrPluginUtils.parseFieldBoosts(qf);
-      }
+      String[] fieldNames;
       
-      ArrayList<String> fields = new ArrayList<>();
-
       if (qf != null) {
+        ArrayList<String> fields = new ArrayList<>();
         for (String fieldName : qf) {
           if (!StringUtils.isEmpty(fieldName))  {
             String[] strings = splitList.split(fieldName);
@@ -96,26 +93,31 @@ public class SimpleMLTQParser extends QParser {
             }
           }
         }
+        // Parse field names and boosts from the fields
+        boostFields = SolrPluginUtils.parseFieldBoosts(fields.toArray(new String[0]));
+        fieldNames = boostFields.keySet().toArray(new String[0]);
       } else {
-        Map<String, SchemaField> fieldNames = req.getSearcher().getSchema().getFields();
-        for (String fieldName : fieldNames.keySet()) {
-          if (fieldNames.get(fieldName).indexed() && fieldNames.get(fieldName).stored())
-            if (fieldNames.get(fieldName).getType().getNumericType() == null)
+        Map<String, SchemaField> fieldDefinitions = req.getSearcher().getSchema().getFields();
+        ArrayList<String> fields = new ArrayList<>();
+        for (String fieldName : fieldDefinitions.keySet()) {
+          if (fieldDefinitions.get(fieldName).indexed() && fieldDefinitions.get(fieldName).stored())
+            if (fieldDefinitions.get(fieldName).getType().getNumericType() == null)
               fields.add(fieldName);
         }
+        fieldNames = fields.toArray(new String[0]);
       }
-      if( fields.size() < 1 ) {
+      if (fieldNames.length < 1) {
         throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
             "MoreLikeThis requires at least one similarity field: qf" );
       }
 
-      mlt.setFieldNames(fields.toArray(new String[fields.size()]));
+      mlt.setFieldNames(fieldNames);
       mlt.setAnalyzer(req.getSchema().getIndexAnalyzer());
 
       Query rawMLTQuery = mlt.like(scoreDocs[0].doc);
       BooleanQuery boostedMLTQuery = (BooleanQuery) rawMLTQuery;
 
-      if (boostFields.size() > 0) {
+      if (boost && boostFields.size() > 0) {
         BooleanQuery.Builder newQ = new BooleanQuery.Builder();
         newQ.setMinimumNumberShouldMatch(boostedMLTQuery.getMinimumNumberShouldMatch());
 

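Both parsers now funnel qf through the same helper, so the boost map and the
similarity field list cannot drift apart. A small sketch of what
SolrPluginUtils.parseFieldBoosts does with a qf value (field names here are
illustrative):

    String[] qf = {"lowerfilt^10", "lowerfilt1^1000", "title"};
    Map<String,Float> boostFields = SolrPluginUtils.parseFieldBoosts(qf);
    // boostFields -> {lowerfilt=10.0, lowerfilt1=1000.0, title=null}
    // (a null value means no explicit boost was given)
    String[] fieldNames = boostFields.keySet().toArray(new String[0]);
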
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b4e3dd9/solr/core/src/test/org/apache/solr/search/mlt/CloudMLTQParserTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/mlt/CloudMLTQParserTest.java b/solr/core/src/test/org/apache/solr/search/mlt/CloudMLTQParserTest.java
index ffcde2f..e3a8d7b 100644
--- a/solr/core/src/test/org/apache/solr/search/mlt/CloudMLTQParserTest.java
+++ b/solr/core/src/test/org/apache/solr/search/mlt/CloudMLTQParserTest.java
@@ -121,6 +121,25 @@ public class CloudMLTQParserTest extends SolrCloudTestCase {
     }
     assertArrayEquals(expectedIds, actualIds);
 
+    queryResponse = cluster.getSolrClient().query(COLLECTION, new SolrQuery("{!mlt qf=lowerfilt_u^10,lowerfilt1_u^1000 boost=false mintf=0 mindf=0}30"));
+    solrDocuments = queryResponse.getResults();
+    expectedIds = new int[]{31, 18, 23, 13, 14, 20, 22, 32, 19, 21};
+    actualIds = new int[solrDocuments.size()];
+    i = 0;
+    for (SolrDocument solrDocument : solrDocuments) {
+      actualIds[i++] = Integer.valueOf(String.valueOf(solrDocument.getFieldValue("id")));
+    }
+    System.out.println("DEBUG ACTUAL IDS 1: " + Arrays.toString(actualIds));
+    assertArrayEquals(expectedIds, actualIds);
+
+    queryResponse = cluster.getSolrClient().query(COLLECTION, new SolrQuery("{!mlt qf=lowerfilt_u^10,lowerfilt1_u^1000 boost=true mintf=0 mindf=0}30"));
+    solrDocuments = queryResponse.getResults();
+    expectedIds = new int[]{29, 31, 32, 18, 23, 13, 14, 20, 22, 19};
+    actualIds = new int[solrDocuments.size()];
+    i = 0;
+    for (SolrDocument solrDocument : solrDocuments) {
+      actualIds[i++] = Integer.valueOf(String.valueOf(solrDocument.getFieldValue("id")));
+    }
+    System.out.println("DEBUG ACTUAL IDS 2: " + Arrays.toString(actualIds));
+    assertArrayEquals(expectedIds, actualIds);
   }
 
   @Test
@@ -220,7 +241,7 @@ public class CloudMLTQParserTest extends SolrCloudTestCase {
     }
     assertArrayEquals(expectedIds, actualIds);
   }
-  
+
   public void testInvalidSourceDocument() throws IOException {
     SolrException e = expectThrows(SolrException.class, () -> {
       cluster.getSolrClient().query(COLLECTION, new SolrQuery("{!mlt qf=lowerfilt_u}999999"));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2b4e3dd9/solr/core/src/test/org/apache/solr/search/mlt/SimpleMLTQParserTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/mlt/SimpleMLTQParserTest.java b/solr/core/src/test/org/apache/solr/search/mlt/SimpleMLTQParserTest.java
index 6f3570f..026c594 100644
--- a/solr/core/src/test/org/apache/solr/search/mlt/SimpleMLTQParserTest.java
+++ b/solr/core/src/test/org/apache/solr/search/mlt/SimpleMLTQParserTest.java
@@ -108,8 +108,37 @@ public class SimpleMLTQParserTest extends SolrTestCaseJ4 {
     );
 
     params = new ModifiableSolrParams();
+    params.set(CommonParams.Q, "{!mlt qf=lowerfilt,lowerfilt1^1000 boost=false mintf=0 mindf=0}30");
+    assertQ(req(params),
+        "//result/doc[1]/int[@name='id'][.='31']",
+        "//result/doc[2]/int[@name='id'][.='13']",
+        "//result/doc[3]/int[@name='id'][.='14']",
+        "//result/doc[4]/int[@name='id'][.='18']",
+        "//result/doc[5]/int[@name='id'][.='20']",
+        "//result/doc[6]/int[@name='id'][.='22']",
+        "//result/doc[7]/int[@name='id'][.='23']",
+        "//result/doc[8]/int[@name='id'][.='32']",
+        "//result/doc[9]/int[@name='id'][.='15']",
+        "//result/doc[10]/int[@name='id'][.='16']"
+    );
+
+    params = new ModifiableSolrParams();
+    params.set(CommonParams.Q, "{!mlt qf=lowerfilt,lowerfilt1^1000 boost=true mintf=0 mindf=0}30");
+    assertQ(req(params),
+        "//result/doc[1]/int[@name='id'][.='29']",
+        "//result/doc[2]/int[@name='id'][.='31']",
+        "//result/doc[3]/int[@name='id'][.='32']",
+        "//result/doc[4]/int[@name='id'][.='13']",
+        "//result/doc[5]/int[@name='id'][.='14']",
+        "//result/doc[6]/int[@name='id'][.='18']",
+        "//result/doc[7]/int[@name='id'][.='20']",
+        "//result/doc[8]/int[@name='id'][.='22']",
+        "//result/doc[9]/int[@name='id'][.='23']",
+        "//result/doc[10]/int[@name='id'][.='15']"
+    );
+
+    params = new ModifiableSolrParams();
     params.set(CommonParams.Q, "{!mlt qf=lowerfilt mindf=0 mintf=1}26");
-    params.set(CommonParams.DEBUG, "true");
     assertQ(req(params),
         "//result/doc[1]/int[@name='id'][.='29']",
         "//result/doc[2]/int[@name='id'][.='27']",
@@ -118,14 +147,12 @@ public class SimpleMLTQParserTest extends SolrTestCaseJ4 {
 
     params = new ModifiableSolrParams();
     params.set(CommonParams.Q, "{!mlt qf=lowerfilt mindf=10 mintf=1}26");
-    params.set(CommonParams.DEBUG, "true");
     assertQ(req(params),
         "//result[@numFound='0']"
     );
 
     params = new ModifiableSolrParams();
     params.set(CommonParams.Q, "{!mlt qf=lowerfilt minwl=3 mintf=1 mindf=1}26");
-    params.set(CommonParams.DEBUG, "true");
     assertQ(req(params),
         "//result[@numFound='3']"
     );


[13/43] lucene-solr:jira/solr-8593: SOLR-9944: Map the nodes function name to the GatherNodesStream

Posted by kr...@apache.org.
SOLR-9944: Map the nodes function name to the GatherNodesStream


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/aae4217a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/aae4217a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/aae4217a

Branch: refs/heads/jira/solr-8593
Commit: aae4217abc09163837597bf761f21d8019091216
Parents: d817fd4
Author: Joel Bernstein <jb...@apache.org>
Authored: Sat Jan 7 22:17:17 2017 -0500
Committer: Joel Bernstein <jb...@apache.org>
Committed: Sat Jan 7 22:17:44 2017 -0500

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/handler/GraphHandler.java      | 1 +
 solr/core/src/java/org/apache/solr/handler/StreamHandler.java     | 1 +
 .../apache/solr/client/solrj/io/graph/GraphExpressionTest.java    | 3 ++-
 3 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aae4217a/solr/core/src/java/org/apache/solr/handler/GraphHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/GraphHandler.java b/solr/core/src/java/org/apache/solr/handler/GraphHandler.java
index 5e5934f..3b52154 100644
--- a/solr/core/src/java/org/apache/solr/handler/GraphHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/GraphHandler.java
@@ -117,6 +117,7 @@ public class GraphHandler extends RequestHandlerBase implements SolrCoreAware, P
         .withFunctionName("topic", TopicStream.class)
         .withFunctionName("shortestPath", ShortestPathStream.class)
         .withFunctionName("gatherNodes", GatherNodesStream.class)
+        .withFunctionName("nodes", GatherNodesStream.class)
         .withFunctionName("sort", SortStream.class)
         .withFunctionName("scoreNodes", ScoreNodesStream.class)
         .withFunctionName("random", RandomStream.class)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aae4217a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index 4b319f9..98486b8 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -142,6 +142,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
       .withFunctionName("daemon", DaemonStream.class)
       .withFunctionName("shortestPath", ShortestPathStream.class)
       .withFunctionName("gatherNodes", GatherNodesStream.class)
+      .withFunctionName("nodes", GatherNodesStream.class)
       .withFunctionName("select", SelectStream.class)
       .withFunctionName("scoreNodes", ScoreNodesStream.class)
       .withFunctionName("model", ModelStream.class)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aae4217a/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
index d6fc514..cf07058 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
@@ -270,6 +270,7 @@ public class GraphExpressionTest extends SolrCloudTestCase {
     StreamFactory factory = new StreamFactory()
         .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress())
         .withFunctionName("gatherNodes", GatherNodesStream.class)
+        .withFunctionName("nodes", GatherNodesStream.class)
         .withFunctionName("search", CloudSolrStream.class)
         .withFunctionName("count", CountMetric.class)
         .withFunctionName("avg", MeanMetric.class)
@@ -277,7 +278,7 @@ public class GraphExpressionTest extends SolrCloudTestCase {
         .withFunctionName("min", MinMetric.class)
         .withFunctionName("max", MaxMetric.class);
 
-    String expr = "gatherNodes(collection1, " +
+    String expr = "nodes(collection1, " +
         "walk=\"product1->product_s\"," +
         "gather=\"basket_s\")";
 

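Since both function names map to GatherNodesStream, an expression written with
the new "nodes" alias constructs exactly the same stream as one written with
"gatherNodes". A minimal sketch outside the test (zkHost and the walk/gather
fields are placeholders):

    StreamFactory factory = new StreamFactory()
        .withCollectionZkHost("collection1", zkHost)
        .withFunctionName("gatherNodes", GatherNodesStream.class)
        .withFunctionName("nodes", GatherNodesStream.class)
        .withFunctionName("search", CloudSolrStream.class);
    // identical streams, old and new spelling:
    TupleStream viaAlias = factory.constructStream(
        "nodes(collection1, walk=\"product1->product_s\", gather=\"basket_s\")");
    TupleStream viaOldName = factory.constructStream(
        "gatherNodes(collection1, walk=\"product1->product_s\", gather=\"basket_s\")");
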

[19/43] lucene-solr:jira/solr-8593: promote this test case to core

Posted by kr...@apache.org.
promote this test case to core


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f985fcaa
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f985fcaa
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f985fcaa

Branch: refs/heads/jira/solr-8593
Commit: f985fcaa23cb9ef96ed823e5bf7957049e0d9461
Parents: e64111c
Author: Mike McCandless <mi...@apache.org>
Authored: Sun Jan 8 06:38:37 2017 -0500
Committer: Mike McCandless <mi...@apache.org>
Committed: Sun Jan 8 06:38:37 2017 -0500

----------------------------------------------------------------------
 .../lucene/analysis/TestGraphTokenizers.java    | 600 +++++++++++++++++++
 .../lucene/analysis/TestGraphTokenizers.java    | 600 -------------------
 2 files changed, 600 insertions(+), 600 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f985fcaa/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java b/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
new file mode 100644
index 0000000..8899dd1
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
@@ -0,0 +1,600 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.util.automaton.Automata;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.Operations;
+
+import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
+
+public class TestGraphTokenizers extends BaseTokenStreamTestCase {
+
+  // Makes a graph TokenStream from the string; separate
+  // positions with single space, multiple tokens at the same
+  // position with /, and add optional position length with
+  // :.  EG "a b c" is a simple chain, "a/x b c" adds 'x'
+  // over 'a' at position 0 with posLen=1, "a/x:3 b c" adds
+  // 'x' over a with posLen=3.  Tokens are in normal-form!
+  // So, offsets are computed based on the first token at a
+  // given position.  NOTE: each token must be a single
+  // character!  We assume this when computing offsets...
+  
+  // NOTE: all input tokens must be length 1!!!  This means
+  // you cannot turn on MockCharFilter when random
+  // testing...
+
+  private static class GraphTokenizer extends Tokenizer {
+    private List<Token> tokens;
+    private int upto;
+    private int inputLength;
+
+    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+    private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+    private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+    private final PositionLengthAttribute posLengthAtt = addAttribute(PositionLengthAttribute.class);
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      tokens = null;
+      upto = 0;
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (tokens == null) {
+        fillTokens();
+      }
+      //System.out.println("graphTokenizer: incr upto=" + upto + " vs " + tokens.size());
+      if (upto == tokens.size()) {
+        //System.out.println("  END @ " + tokens.size());
+        return false;
+      } 
+      final Token t = tokens.get(upto++);
+      //System.out.println("  return token=" + t);
+      clearAttributes();
+      termAtt.append(t.toString());
+      offsetAtt.setOffset(t.startOffset(), t.endOffset());
+      posIncrAtt.setPositionIncrement(t.getPositionIncrement());
+      posLengthAtt.setPositionLength(t.getPositionLength());
+      return true;
+    }
+
+    @Override
+    public void end() throws IOException {
+      super.end();
+      // NOTE: somewhat... hackish, but we need this to
+      // satisfy BTSTC:
+      final int lastOffset;
+      if (tokens != null && !tokens.isEmpty()) {
+        lastOffset = tokens.get(tokens.size()-1).endOffset();
+      } else {
+        lastOffset = 0;
+      }
+      offsetAtt.setOffset(correctOffset(lastOffset),
+                          correctOffset(inputLength));
+    }
+
+    private void fillTokens() throws IOException {
+      final StringBuilder sb = new StringBuilder();
+      final char[] buffer = new char[256];
+      while (true) {
+        final int count = input.read(buffer);
+        if (count == -1) {
+          break;
+        }
+        sb.append(buffer, 0, count);
+        //System.out.println("got count=" + count);
+      }
+      //System.out.println("fillTokens: " + sb);
+
+      inputLength = sb.length();
+
+      final String[] parts = sb.toString().split(" ");
+
+      tokens = new ArrayList<>();
+      int pos = 0;
+      int maxPos = -1;
+      int offset = 0;
+      //System.out.println("again");
+      for(String part : parts) {
+        final String[] overlapped = part.split("/");
+        boolean firstAtPos = true;
+        int minPosLength = Integer.MAX_VALUE;
+        for(String part2 : overlapped) {
+          final int colonIndex = part2.indexOf(':');
+          final String token;
+          final int posLength;
+          if (colonIndex != -1) {
+            token = part2.substring(0, colonIndex);
+            posLength = Integer.parseInt(part2.substring(1+colonIndex));
+          } else {
+            token = part2;
+            posLength = 1;
+          }
+          maxPos = Math.max(maxPos, pos + posLength);
+          minPosLength = Math.min(minPosLength, posLength);
+          final Token t = new Token(token, offset, offset + 2*posLength - 1);
+          t.setPositionLength(posLength);
+          t.setPositionIncrement(firstAtPos ? 1:0);
+          firstAtPos = false;
+          //System.out.println("  add token=" + t + " startOff=" + t.startOffset() + " endOff=" + t.endOffset());
+          tokens.add(t);
+        }
+        pos += minPosLength;
+        offset = 2 * pos;
+      }
+      assert maxPos <= pos: "input string mal-formed: posLength>1 tokens hang over the end";
+    }
+  }
+
+  public void testMockGraphTokenFilterBasic() throws Exception {
+
+    for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      // Make new analyzer each time, because MGTF has fixed
+      // seed:
+      final Analyzer a = new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+            final TokenStream t2 = new MockGraphTokenFilter(random(), t);
+            return new TokenStreamComponents(t, t2);
+          }
+        };
+      
+      checkAnalysisConsistency(random(), a, false, "a b c d e f g h i j k");
+    }
+  }
+
+  public void testMockGraphTokenFilterOnGraphInput() throws Exception {
+    for(int iter=0;iter<100*RANDOM_MULTIPLIER;iter++) {
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      // Make new analyzer each time, because MGTF has fixed
+      // seed:
+      final Analyzer a = new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            final Tokenizer t = new GraphTokenizer();
+            final TokenStream t2 = new MockGraphTokenFilter(random(), t);
+            return new TokenStreamComponents(t, t2);
+          }
+        };
+      
+      checkAnalysisConsistency(random(), a, false, "a/x:3 c/y:2 d e f/z:4 g h i j k");
+    }
+  }
+
+  // Just deletes (leaving hole) token 'a':
+  private final static class RemoveATokens extends TokenFilter {
+    private int pendingPosInc;
+
+    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+    private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+
+    public RemoveATokens(TokenStream in) {
+      super(in);
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      pendingPosInc = 0;
+    }
+
+    @Override
+    public void end() throws IOException {
+      super.end();
+      posIncAtt.setPositionIncrement(pendingPosInc + posIncAtt.getPositionIncrement());
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      while (true) {
+        final boolean gotOne = input.incrementToken();
+        if (!gotOne) {
+          return false;
+        } else if (termAtt.toString().equals("a")) {
+          pendingPosInc += posIncAtt.getPositionIncrement();
+        } else {
+          posIncAtt.setPositionIncrement(pendingPosInc + posIncAtt.getPositionIncrement());
+          pendingPosInc = 0;
+          return true;
+        }
+      }
+    }
+  }
+
+  public void testMockGraphTokenFilterBeforeHoles() throws Exception {
+    for(int iter=0;iter<100*RANDOM_MULTIPLIER;iter++) {
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      // Make new analyzer each time, because MGTF has fixed
+      // seed:
+      final Analyzer a = new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+            final TokenStream t2 = new MockGraphTokenFilter(random(), t);
+            final TokenStream t3 = new RemoveATokens(t2);
+            return new TokenStreamComponents(t, t3);
+          }
+        };
+
+      Random random = random();
+      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k");
+      checkAnalysisConsistency(random, a, false, "x y a b c d e f g h i j k");
+      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a");
+      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a x y");
+    }
+  }
+
+  public void testMockGraphTokenFilterAfterHoles() throws Exception {
+    for(int iter=0;iter<100*RANDOM_MULTIPLIER;iter++) {
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      // Make new analyzer each time, because MGTF has fixed
+      // seed:
+      final Analyzer a = new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+            final TokenStream t2 = new RemoveATokens(t);
+            final TokenStream t3 = new MockGraphTokenFilter(random(), t2);
+            return new TokenStreamComponents(t, t3);
+          }
+        };
+
+      Random random = random();
+      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k");
+      checkAnalysisConsistency(random, a, false, "x y a b c d e f g h i j k");
+      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a");
+      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a x y");
+    }
+  }
+
+  public void testMockGraphTokenFilterRandom() throws Exception {
+    for(int iter=0;iter<3*RANDOM_MULTIPLIER;iter++) {
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      // Make new analyzer each time, because MGTF has fixed
+      // seed:
+      final Analyzer a = new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+            final TokenStream t2 = new MockGraphTokenFilter(random(), t);
+            return new TokenStreamComponents(t, t2);
+          }
+        };
+      
+      Random random = random();
+      checkRandomData(random, a, 5, atLeast(100));
+    }
+  }
+
+  // Two MockGraphTokenFilters
+  public void testDoubleMockGraphTokenFilterRandom() throws Exception {
+    for(int iter=0;iter<3*RANDOM_MULTIPLIER;iter++) {
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      // Make new analyzer each time, because MGTF has fixed
+      // seed:
+      final Analyzer a = new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+            final TokenStream t1 = new MockGraphTokenFilter(random(), t);
+            final TokenStream t2 = new MockGraphTokenFilter(random(), t1);
+            return new TokenStreamComponents(t, t2);
+          }
+        };
+      
+      Random random = random();
+      checkRandomData(random, a, 5, atLeast(100));
+    }
+  }
+
+  public void testMockGraphTokenFilterBeforeHolesRandom() throws Exception {
+    for(int iter=0;iter<3*RANDOM_MULTIPLIER;iter++) {
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      // Make new analyzer each time, because MGTF has fixed
+      // seed:
+      final Analyzer a = new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+            final TokenStream t1 = new MockGraphTokenFilter(random(), t);
+            final TokenStream t2 = new MockHoleInjectingTokenFilter(random(), t1);
+            return new TokenStreamComponents(t, t2);
+          }
+        };
+      
+      Random random = random();
+      checkRandomData(random, a, 5, atLeast(100));
+    }
+  }
+
+  public void testMockGraphTokenFilterAfterHolesRandom() throws Exception {
+    for(int iter=0;iter<3*RANDOM_MULTIPLIER;iter++) {
+
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
+
+      // Make new analyzer each time, because MGTF has fixed
+      // seed:
+      final Analyzer a = new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+            final TokenStream t1 = new MockHoleInjectingTokenFilter(random(), t);
+            final TokenStream t2 = new MockGraphTokenFilter(random(), t1);
+            return new TokenStreamComponents(t, t2);
+          }
+        };
+      
+      Random random = random();
+      checkRandomData(random, a, 5, atLeast(100));
+    }
+  }
+
+  private static Token token(String term, int posInc, int posLength) {
+    final Token t = new Token(term, 0, 0);
+    t.setPositionIncrement(posInc);
+    t.setPositionLength(posLength);
+    return t;
+  }
+
+  private static Token token(String term, int posInc, int posLength, int startOffset, int endOffset) {
+    final Token t = new Token(term, startOffset, endOffset);
+    t.setPositionIncrement(posInc);
+    t.setPositionLength(posLength);
+    return t;
+  }
+
+  public void testSingleToken() throws Exception {
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 1, 1),
+      });
+    assertSameLanguage(s2a("abc"), ts);
+  }
+
+  public void testMultipleHoles() throws Exception {
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("a", 1, 1),
+        token("b", 3, 1),
+      });
+    assertSameLanguage(join(s2a("a"), SEP_A, HOLE_A, SEP_A, HOLE_A, SEP_A, s2a("b")), ts);
+  }
+
+  public void testSynOverMultipleHoles() throws Exception {
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("a", 1, 1),
+        token("x", 0, 3),
+        token("b", 3, 1),
+      });
+    final Automaton a1 = join(s2a("a"), SEP_A, HOLE_A, SEP_A, HOLE_A, SEP_A, s2a("b")); 
+    final Automaton a2 = join(s2a("x"), SEP_A, s2a("b")); 
+    assertSameLanguage(Operations.union(a1, a2), ts);
+  }
+
+  // for debugging!
+  /*
+  private static void toDot(Automaton a) throws IOException {
+    final String s = a.toDot();
+    Writer w = new OutputStreamWriter(new FileOutputStream("/x/tmp/out.dot"));
+    w.write(s);
+    w.close();
+    System.out.println("TEST: saved to /x/tmp/out.dot");
+  }
+  */
+
+  private static final Automaton SEP_A = Automata.makeChar(TokenStreamToAutomaton.POS_SEP);
+  private static final Automaton HOLE_A = Automata.makeChar(TokenStreamToAutomaton.HOLE);
+
+  private Automaton join(String ... strings) {
+    List<Automaton> as = new ArrayList<>();
+    for(String s : strings) {
+      as.add(s2a(s));
+      as.add(SEP_A);
+    }
+    as.remove(as.size()-1);
+    return Operations.concatenate(as);
+  }
+
+  private Automaton join(Automaton ... as) {
+    return Operations.concatenate(Arrays.asList(as));
+  }
+
+  private Automaton s2a(String s) {
+    return Automata.makeString(s);
+  }
+
+  public void testTwoTokens() throws Exception {
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 1, 1),
+        token("def", 1, 1),
+      });
+    assertSameLanguage(join("abc", "def"), ts);
+  }
+
+  public void testHole() throws Exception {
+
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 1, 1),
+        token("def", 2, 1),
+      });
+    assertSameLanguage(join(s2a("abc"), SEP_A, HOLE_A, SEP_A, s2a("def")), ts);
+  }
+
+  public void testOverlappedTokensSausage() throws Exception {
+
+    // Two tokens on top of each other (sausage):
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 1, 1),
+        token("xyz", 0, 1)
+      });
+    final Automaton a1 = s2a("abc");
+    final Automaton a2 = s2a("xyz");
+    assertSameLanguage(Operations.union(a1, a2), ts);
+  }
+
+  public void testOverlappedTokensLattice() throws Exception {
+
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 1, 1),
+        token("xyz", 0, 2),
+        token("def", 1, 1),
+      });
+    final Automaton a1 = s2a("xyz");
+    final Automaton a2 = join("abc", "def");
+    assertSameLanguage(Operations.union(a1, a2), ts);
+  }
+
+  public void testSynOverHole() throws Exception {
+
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("a", 1, 1),
+        token("X", 0, 2),
+        token("b", 2, 1),
+      });
+    final Automaton a1 = Operations.union(join(s2a("a"), SEP_A, HOLE_A), s2a("X"));
+    final Automaton expected = Operations.concatenate(a1, join(SEP_A, s2a("b")));
+    assertSameLanguage(expected, ts);
+  }
+
+  public void testSynOverHole2() throws Exception {
+
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("xyz", 1, 1),
+        token("abc", 0, 3),
+        token("def", 2, 1),
+      });
+    final Automaton expected = Operations.union(
+      join(s2a("xyz"), SEP_A, HOLE_A, SEP_A, s2a("def")), s2a("abc"));
+    assertSameLanguage(expected, ts);
+  }
+
+  public void testOverlappedTokensLattice2() throws Exception {
+
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 1, 1),
+        token("xyz", 0, 3),
+        token("def", 1, 1),
+        token("ghi", 1, 1),
+      });
+    final Automaton a1 = s2a("xyz");
+    final Automaton a2 = join("abc", "def", "ghi");
+    assertSameLanguage(Operations.union(a1, a2), ts);
+  }
+
+  public void testToDot() throws Exception {
+    final TokenStream ts = new CannedTokenStream(new Token[] {token("abc", 1, 1, 0, 4)});
+    StringWriter w = new StringWriter();
+    new TokenStreamToDot("abcd", ts, new PrintWriter(w)).toDot();
+    assertTrue(w.toString().indexOf("abc / abcd") != -1);
+  }
+
+  public void testStartsWithHole() throws Exception {
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 2, 1),
+      });
+    assertSameLanguage(join(HOLE_A, SEP_A, s2a("abc")), ts);
+  }
+
+  // TODO: testEndsWithHole... but we need posInc to set in TS.end()
+
+  public void testSynHangingOverEnd() throws Exception {
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("a", 1, 1),
+        token("X", 0, 10),
+      });
+    assertSameLanguage(Operations.union(s2a("a"), s2a("X")), ts);
+  }
+
+  private void assertSameLanguage(Automaton expected, TokenStream ts) throws IOException {
+    assertSameLanguage(expected, new TokenStreamToAutomaton().toAutomaton(ts));
+  }
+
+  private void assertSameLanguage(Automaton expected, Automaton actual) {
+    assertTrue(Operations.sameLanguage(
+      Operations.determinize(Operations.removeDeadStates(expected), DEFAULT_MAX_DETERMINIZED_STATES),
+      Operations.determinize(Operations.removeDeadStates(actual), DEFAULT_MAX_DETERMINIZED_STATES)));
+  }
+
+  public void testTokenStreamGraphWithHoles() throws Exception {
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 1, 1),
+        token("xyz", 1, 8),
+        token("def", 1, 1),
+        token("ghi", 1, 1),
+      });
+    assertSameLanguage(Operations.union(join(s2a("abc"), SEP_A, s2a("xyz")),
+                                        join(s2a("abc"), SEP_A, HOLE_A, SEP_A, s2a("def"), SEP_A, s2a("ghi"))), ts);
+  }
+}

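For readers skimming the moved test: the mini-syntax documented at the top of
the file maps directly onto CannedTokenStream. For example, "a/x:3 b c"
corresponds to the canned stream below, built with the
token(term, posInc, posLength) helper defined in the test (a sketch, not part
of the commit):

    TokenStream ts = new CannedTokenStream(new Token[] {
        token("a", 1, 1),   // position 0
        token("x", 0, 3),   // same position as 'a' (posInc=0), spanning 3 positions
        token("b", 1, 1),   // position 1
        token("c", 1, 1),   // position 2
    });
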
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f985fcaa/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
deleted file mode 100644
index 8899dd1..0000000
--- a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
+++ /dev/null
@@ -1,600 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.analysis;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
-import org.apache.lucene.util.automaton.Automata;
-import org.apache.lucene.util.automaton.Automaton;
-import org.apache.lucene.util.automaton.Operations;
-
-import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
-
-public class TestGraphTokenizers extends BaseTokenStreamTestCase {
-
-  // Makes a graph TokenStream from the string; separate
-  // positions with single space, multiple tokens at the same
-  // position with /, and add optional position length with
-  // :.  EG "a b c" is a simple chain, "a/x b c" adds 'x'
-  // over 'a' at position 0 with posLen=1, "a/x:3 b c" adds
-  // 'x' over a with posLen=3.  Tokens are in normal-form!
-  // So, offsets are computed based on the first token at a
-  // given position.  NOTE: each token must be a single
-  // character!  We assume this when computing offsets...
-  
-  // NOTE: all input tokens must be length 1!!!  This means
-  // you cannot turn on MockCharFilter when random
-  // testing...
-
-  private static class GraphTokenizer extends Tokenizer {
-    private List<Token> tokens;
-    private int upto;
-    private int inputLength;
-
-    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-    private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
-    private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
-    private final PositionLengthAttribute posLengthAtt = addAttribute(PositionLengthAttribute.class);
-
-    @Override
-    public void reset() throws IOException {
-      super.reset();
-      tokens = null;
-      upto = 0;
-    }
-
-    @Override
-    public boolean incrementToken() throws IOException {
-      if (tokens == null) {
-        fillTokens();
-      }
-      //System.out.println("graphTokenizer: incr upto=" + upto + " vs " + tokens.size());
-      if (upto == tokens.size()) {
-        //System.out.println("  END @ " + tokens.size());
-        return false;
-      } 
-      final Token t = tokens.get(upto++);
-      //System.out.println("  return token=" + t);
-      clearAttributes();
-      termAtt.append(t.toString());
-      offsetAtt.setOffset(t.startOffset(), t.endOffset());
-      posIncrAtt.setPositionIncrement(t.getPositionIncrement());
-      posLengthAtt.setPositionLength(t.getPositionLength());
-      return true;
-    }
-
-    @Override
-    public void end() throws IOException {
-      super.end();
-      // NOTE: somewhat... hackish, but we need this to
-      // satisfy BTSTC:
-      final int lastOffset;
-      if (tokens != null && !tokens.isEmpty()) {
-        lastOffset = tokens.get(tokens.size()-1).endOffset();
-      } else {
-        lastOffset = 0;
-      }
-      offsetAtt.setOffset(correctOffset(lastOffset),
-                          correctOffset(inputLength));
-    }
-
-    private void fillTokens() throws IOException {
-      final StringBuilder sb = new StringBuilder();
-      final char[] buffer = new char[256];
-      while (true) {
-        final int count = input.read(buffer);
-        if (count == -1) {
-          break;
-        }
-        sb.append(buffer, 0, count);
-        //System.out.println("got count=" + count);
-      }
-      //System.out.println("fillTokens: " + sb);
-
-      inputLength = sb.length();
-
-      final String[] parts = sb.toString().split(" ");
-
-      tokens = new ArrayList<>();
-      int pos = 0;
-      int maxPos = -1;
-      int offset = 0;
-      //System.out.println("again");
-      for(String part : parts) {
-        final String[] overlapped = part.split("/");
-        boolean firstAtPos = true;
-        int minPosLength = Integer.MAX_VALUE;
-        for(String part2 : overlapped) {
-          final int colonIndex = part2.indexOf(':');
-          final String token;
-          final int posLength;
-          if (colonIndex != -1) {
-            token = part2.substring(0, colonIndex);
-            posLength = Integer.parseInt(part2.substring(1+colonIndex));
-          } else {
-            token = part2;
-            posLength = 1;
-          }
-          maxPos = Math.max(maxPos, pos + posLength);
-          minPosLength = Math.min(minPosLength, posLength);
-          final Token t = new Token(token, offset, offset + 2*posLength - 1);
-          t.setPositionLength(posLength);
-          t.setPositionIncrement(firstAtPos ? 1:0);
-          firstAtPos = false;
-          //System.out.println("  add token=" + t + " startOff=" + t.startOffset() + " endOff=" + t.endOffset());
-          tokens.add(t);
-        }
-        pos += minPosLength;
-        offset = 2 * pos;
-      }
-      assert maxPos <= pos: "input string mal-formed: posLength>1 tokens hang over the end";
-    }
-  }
-
-  public void testMockGraphTokenFilterBasic() throws Exception {
-
-    for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
-
-      if (VERBOSE) {
-        System.out.println("\nTEST: iter=" + iter);
-      }
-
-      // Make new analyzer each time, because MGTF has fixed
-      // seed:
-      final Analyzer a = new Analyzer() {
-          @Override
-          protected TokenStreamComponents createComponents(String fieldName) {
-            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-            final TokenStream t2 = new MockGraphTokenFilter(random(), t);
-            return new TokenStreamComponents(t, t2);
-          }
-        };
-      
-      checkAnalysisConsistency(random(), a, false, "a b c d e f g h i j k");
-    }
-  }
-
-  public void testMockGraphTokenFilterOnGraphInput() throws Exception {
-    for(int iter=0;iter<100*RANDOM_MULTIPLIER;iter++) {
-
-      if (VERBOSE) {
-        System.out.println("\nTEST: iter=" + iter);
-      }
-
-      // Make new analyzer each time, because MGTF has fixed
-      // seed:
-      final Analyzer a = new Analyzer() {
-          @Override
-          protected TokenStreamComponents createComponents(String fieldName) {
-            final Tokenizer t = new GraphTokenizer();
-            final TokenStream t2 = new MockGraphTokenFilter(random(), t);
-            return new TokenStreamComponents(t, t2);
-          }
-        };
-      
-      checkAnalysisConsistency(random(), a, false, "a/x:3 c/y:2 d e f/z:4 g h i j k");
-    }
-  }
-
-  // Just deletes (leaving hole) token 'a':
-  private final static class RemoveATokens extends TokenFilter {
-    private int pendingPosInc;
-
-    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-    private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
-
-    public RemoveATokens(TokenStream in) {
-      super(in);
-    }
-
-    @Override
-    public void reset() throws IOException {
-      super.reset();
-      pendingPosInc = 0;
-    }
-
-    @Override
-    public void end() throws IOException {
-      super.end();
-      posIncAtt.setPositionIncrement(pendingPosInc + posIncAtt.getPositionIncrement());
-    }
-
-    @Override
-    public boolean incrementToken() throws IOException {
-      while (true) {
-        final boolean gotOne = input.incrementToken();
-        if (!gotOne) {
-          return false;
-        } else if (termAtt.toString().equals("a")) {
-          pendingPosInc += posIncAtt.getPositionIncrement();
-        } else {
-          posIncAtt.setPositionIncrement(pendingPosInc + posIncAtt.getPositionIncrement());
-          pendingPosInc = 0;
-          return true;
-        }
-      }
-    }
-  }
-
-  public void testMockGraphTokenFilterBeforeHoles() throws Exception {
-    for(int iter=0;iter<100*RANDOM_MULTIPLIER;iter++) {
-
-      if (VERBOSE) {
-        System.out.println("\nTEST: iter=" + iter);
-      }
-
-      // Make new analyzer each time, because MGTF has fixed
-      // seed:
-      final Analyzer a = new Analyzer() {
-          @Override
-          protected TokenStreamComponents createComponents(String fieldName) {
-            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-            final TokenStream t2 = new MockGraphTokenFilter(random(), t);
-            final TokenStream t3 = new RemoveATokens(t2);
-            return new TokenStreamComponents(t, t3);
-          }
-        };
-
-      Random random = random();
-      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k");
-      checkAnalysisConsistency(random, a, false, "x y a b c d e f g h i j k");
-      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a");
-      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a x y");
-    }
-  }
-
-  public void testMockGraphTokenFilterAfterHoles() throws Exception {
-    for(int iter=0;iter<100*RANDOM_MULTIPLIER;iter++) {
-
-      if (VERBOSE) {
-        System.out.println("\nTEST: iter=" + iter);
-      }
-
-      // Make new analyzer each time, because MGTF has fixed
-      // seed:
-      final Analyzer a = new Analyzer() {
-          @Override
-          protected TokenStreamComponents createComponents(String fieldName) {
-            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-            final TokenStream t2 = new RemoveATokens(t);
-            final TokenStream t3 = new MockGraphTokenFilter(random(), t2);
-            return new TokenStreamComponents(t, t3);
-          }
-        };
-
-      Random random = random();
-      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k");
-      checkAnalysisConsistency(random, a, false, "x y a b c d e f g h i j k");
-      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a");
-      checkAnalysisConsistency(random, a, false, "a b c d e f g h i j k a x y");
-    }
-  }
-
-  public void testMockGraphTokenFilterRandom() throws Exception {
-    for(int iter=0;iter<3*RANDOM_MULTIPLIER;iter++) {
-
-      if (VERBOSE) {
-        System.out.println("\nTEST: iter=" + iter);
-      }
-
-      // Make new analyzer each time, because MGTF has fixed
-      // seed:
-      final Analyzer a = new Analyzer() {
-          @Override
-          protected TokenStreamComponents createComponents(String fieldName) {
-            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-            final TokenStream t2 = new MockGraphTokenFilter(random(), t);
-            return new TokenStreamComponents(t, t2);
-          }
-        };
-      
-      Random random = random();
-      checkRandomData(random, a, 5, atLeast(100));
-    }
-  }
-
-  // Two MockGraphTokenFilters
-  public void testDoubleMockGraphTokenFilterRandom() throws Exception {
-    for(int iter=0;iter<3*RANDOM_MULTIPLIER;iter++) {
-
-      if (VERBOSE) {
-        System.out.println("\nTEST: iter=" + iter);
-      }
-
-      // Make new analyzer each time, because MGTF has fixed
-      // seed:
-      final Analyzer a = new Analyzer() {
-          @Override
-          protected TokenStreamComponents createComponents(String fieldName) {
-            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-            final TokenStream t1 = new MockGraphTokenFilter(random(), t);
-            final TokenStream t2 = new MockGraphTokenFilter(random(), t1);
-            return new TokenStreamComponents(t, t2);
-          }
-        };
-      
-      Random random = random();
-      checkRandomData(random, a, 5, atLeast(100));
-    }
-  }
-
-  public void testMockGraphTokenFilterBeforeHolesRandom() throws Exception {
-    for(int iter=0;iter<3*RANDOM_MULTIPLIER;iter++) {
-
-      if (VERBOSE) {
-        System.out.println("\nTEST: iter=" + iter);
-      }
-
-      // Make new analyzer each time, because MGTF has fixed
-      // seed:
-      final Analyzer a = new Analyzer() {
-          @Override
-          protected TokenStreamComponents createComponents(String fieldName) {
-            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-            final TokenStream t1 = new MockGraphTokenFilter(random(), t);
-            final TokenStream t2 = new MockHoleInjectingTokenFilter(random(), t1);
-            return new TokenStreamComponents(t, t2);
-          }
-        };
-      
-      Random random = random();
-      checkRandomData(random, a, 5, atLeast(100));
-    }
-  }
-
-  public void testMockGraphTokenFilterAfterHolesRandom() throws Exception {
-    for(int iter=0;iter<3*RANDOM_MULTIPLIER;iter++) {
-
-      if (VERBOSE) {
-        System.out.println("\nTEST: iter=" + iter);
-      }
-
-      // Make new analyzer each time, because MGTF has fixed
-      // seed:
-      final Analyzer a = new Analyzer() {
-          @Override
-          protected TokenStreamComponents createComponents(String fieldName) {
-            final Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-            final TokenStream t1 = new MockHoleInjectingTokenFilter(random(), t);
-            final TokenStream t2 = new MockGraphTokenFilter(random(), t1);
-            return new TokenStreamComponents(t, t2);
-          }
-        };
-      
-      Random random = random();
-      checkRandomData(random, a, 5, atLeast(100));
-    }
-  }
-
-  private static Token token(String term, int posInc, int posLength) {
-    final Token t = new Token(term, 0, 0);
-    t.setPositionIncrement(posInc);
-    t.setPositionLength(posLength);
-    return t;
-  }
-
-  private static Token token(String term, int posInc, int posLength, int startOffset, int endOffset) {
-    final Token t = new Token(term, startOffset, endOffset);
-    t.setPositionIncrement(posInc);
-    t.setPositionLength(posLength);
-    return t;
-  }
-
-  public void testSingleToken() throws Exception {
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("abc", 1, 1),
-      });
-    assertSameLanguage(s2a("abc"), ts);
-  }
-
-  public void testMultipleHoles() throws Exception {
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("a", 1, 1),
-        token("b", 3, 1),
-      });
-    assertSameLanguage(join(s2a("a"), SEP_A, HOLE_A, SEP_A, HOLE_A, SEP_A, s2a("b")), ts);
-  }
-
-  public void testSynOverMultipleHoles() throws Exception {
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("a", 1, 1),
-        token("x", 0, 3),
-        token("b", 3, 1),
-      });
-    final Automaton a1 = join(s2a("a"), SEP_A, HOLE_A, SEP_A, HOLE_A, SEP_A, s2a("b")); 
-    final Automaton a2 = join(s2a("x"), SEP_A, s2a("b")); 
-    assertSameLanguage(Operations.union(a1, a2), ts);
-  }
-
-  // for debugging!
-  /*
-  private static void toDot(Automaton a) throws IOException {
-    final String s = a.toDot();
-    Writer w = new OutputStreamWriter(new FileOutputStream("/x/tmp/out.dot"));
-    w.write(s);
-    w.close();
-    System.out.println("TEST: saved to /x/tmp/out.dot");
-  }
-  */
-
-  private static final Automaton SEP_A = Automata.makeChar(TokenStreamToAutomaton.POS_SEP);
-  private static final Automaton HOLE_A = Automata.makeChar(TokenStreamToAutomaton.HOLE);
-
-  private Automaton join(String ... strings) {
-    List<Automaton> as = new ArrayList<>();
-    for(String s : strings) {
-      as.add(s2a(s));
-      as.add(SEP_A);
-    }
-    as.remove(as.size()-1);
-    return Operations.concatenate(as);
-  }
-
-  private Automaton join(Automaton ... as) {
-    return Operations.concatenate(Arrays.asList(as));
-  }
-
-  private Automaton s2a(String s) {
-    return Automata.makeString(s);
-  }
-
-  public void testTwoTokens() throws Exception {
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("abc", 1, 1),
-        token("def", 1, 1),
-      });
-    assertSameLanguage(join("abc", "def"), ts);
-  }
-
-  public void testHole() throws Exception {
-
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("abc", 1, 1),
-        token("def", 2, 1),
-      });
-    assertSameLanguage(join(s2a("abc"), SEP_A, HOLE_A, SEP_A, s2a("def")), ts);
-  }
-
-  public void testOverlappedTokensSausage() throws Exception {
-
-    // Two tokens on top of each other (sausage):
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("abc", 1, 1),
-        token("xyz", 0, 1)
-      });
-    final Automaton a1 = s2a("abc");
-    final Automaton a2 = s2a("xyz");
-    assertSameLanguage(Operations.union(a1, a2), ts);
-  }
-
-  public void testOverlappedTokensLattice() throws Exception {
-
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("abc", 1, 1),
-        token("xyz", 0, 2),
-        token("def", 1, 1),
-      });
-    final Automaton a1 = s2a("xyz");
-    final Automaton a2 = join("abc", "def");
-    assertSameLanguage(Operations.union(a1, a2), ts);
-  }
-
-  public void testSynOverHole() throws Exception {
-
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("a", 1, 1),
-        token("X", 0, 2),
-        token("b", 2, 1),
-      });
-    final Automaton a1 = Operations.union(join(s2a("a"), SEP_A, HOLE_A), s2a("X"));
-    final Automaton expected = Operations.concatenate(a1, join(SEP_A, s2a("b")));
-    assertSameLanguage(expected, ts);
-  }
-
-  public void testSynOverHole2() throws Exception {
-
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("xyz", 1, 1),
-        token("abc", 0, 3),
-        token("def", 2, 1),
-      });
-    final Automaton expected = Operations.union(
-      join(s2a("xyz"), SEP_A, HOLE_A, SEP_A, s2a("def")), s2a("abc"));
-    assertSameLanguage(expected, ts);
-  }
-
-  public void testOverlappedTokensLattice2() throws Exception {
-
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("abc", 1, 1),
-        token("xyz", 0, 3),
-        token("def", 1, 1),
-        token("ghi", 1, 1),
-      });
-    final Automaton a1 = s2a("xyz");
-    final Automaton a2 = join("abc", "def", "ghi");
-    assertSameLanguage(Operations.union(a1, a2), ts);
-  }
-
-  public void testToDot() throws Exception {
-    final TokenStream ts = new CannedTokenStream(new Token[] {token("abc", 1, 1, 0, 4)});
-    StringWriter w = new StringWriter();
-    new TokenStreamToDot("abcd", ts, new PrintWriter(w)).toDot();
-    assertTrue(w.toString().indexOf("abc / abcd") != -1);
-  }
-
-  public void testStartsWithHole() throws Exception {
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("abc", 2, 1),
-      });
-    assertSameLanguage(join(HOLE_A, SEP_A, s2a("abc")), ts);
-  }
-
-  // TODO: testEndsWithHole... but we need posInc to set in TS.end()
-
-  public void testSynHangingOverEnd() throws Exception {
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("a", 1, 1),
-        token("X", 0, 10),
-      });
-    assertSameLanguage(Operations.union(s2a("a"), s2a("X")), ts);
-  }
-
-  private void assertSameLanguage(Automaton expected, TokenStream ts) throws IOException {
-    assertSameLanguage(expected, new TokenStreamToAutomaton().toAutomaton(ts));
-  }
-
-  private void assertSameLanguage(Automaton expected, Automaton actual) {
-    assertTrue(Operations.sameLanguage(
-      Operations.determinize(Operations.removeDeadStates(expected), DEFAULT_MAX_DETERMINIZED_STATES),
-      Operations.determinize(Operations.removeDeadStates(actual), DEFAULT_MAX_DETERMINIZED_STATES)));
-  }
-
-  public void testTokenStreamGraphWithHoles() throws Exception {
-    final TokenStream ts = new CannedTokenStream(
-      new Token[] {
-        token("abc", 1, 1),
-        token("xyz", 1, 8),
-        token("def", 1, 1),
-        token("ghi", 1, 1),
-      });
-    assertSameLanguage(Operations.union(join(s2a("abc"), SEP_A, s2a("xyz")),
-                                        join(s2a("abc"), SEP_A, HOLE_A, SEP_A, s2a("def"), SEP_A, s2a("ghi"))), ts);
-  }
-}


[07/43] lucene-solr:jira/solr-8593: LUCENE-7611: Suggester uses LongValuesSource in place of ValueSource

Posted by kr...@apache.org.
LUCENE-7611: Suggester uses LongValuesSource in place of ValueSource


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1a95c5ac
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1a95c5ac
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1a95c5ac

Branch: refs/heads/jira/solr-8593
Commit: 1a95c5acd0f69efb1a24b2c980a289289e703758
Parents: 713b65d
Author: Alan Woodward <ro...@apache.org>
Authored: Fri Jan 6 11:03:09 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Jan 7 11:52:25 2017 +0000

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   3 +
 .../apache/lucene/search/LongValuesSource.java  |  27 ++
 .../suggest/DocumentValueSourceDictionary.java  |  72 ++++-
 .../DocumentValueSourceDictionaryTest.java      | 264 ++++++++++++++++++-
 .../DocumentExpressionDictionaryFactory.java    |   6 +-
 5 files changed, 352 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
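For callers, the practical effect of this change is a swap of weight-source APIs: dictionaries that previously took a ValueSource now take a LongValuesSource, and the old constructors are kept but deprecated. A minimal before/after sketch, assuming an open IndexReader and a long field named "popularity" (the reader and field names are illustrative, not part of this commit; LongFieldSource is org.apache.lucene.queries.function.valuesource.LongFieldSource):

    // Before (now deprecated): weight comes from the ValueSource API
    Dictionary before = new DocumentValueSourceDictionary(reader, "suggest_field",
        new LongFieldSource("popularity"), "payload_field");

    // After: weight comes from the LongValuesSource API
    Dictionary after = new DocumentValueSourceDictionary(reader, "suggest_field",
        LongValuesSource.fromLongField("popularity"), "payload_field");

The deprecated constructors delegate through ValueSource.asLongValuesSource(), so existing code keeps compiling while it migrates.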


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a95c5ac/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index d0bedb7..30c9ab0 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -82,6 +82,9 @@ API Changes
 * LUCENE-7610: The facets module now uses the DoubleValuesSource API, and
   methods that take ValueSource parameters are deprecated (Alan Woodward)
 
+* LUCENE-7611: DocumentValueSourceDictionary now takes a LongValuesSource
+  as a parameter, and the ValueSource equivalent is deprecated (Alan Woodward)
+
 New features
 
 * LUCENE-5867: Added BooleanSimilarity. (Robert Muir, Adrien Grand)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a95c5ac/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java b/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java
index 9d00355..524822c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java
+++ b/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java
@@ -74,6 +74,33 @@ public abstract class LongValuesSource {
     return fromLongField(field);
   }
 
+  /**
+   * Creates a LongValuesSource that always returns a constant value
+   */
+  public static LongValuesSource constant(long value) {
+    return new LongValuesSource() {
+      @Override
+      public LongValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
+        return new LongValues() {
+          @Override
+          public long longValue() throws IOException {
+            return value;
+          }
+
+          @Override
+          public boolean advanceExact(int doc) throws IOException {
+            return true;
+          }
+        };
+      }
+
+      @Override
+      public boolean needsScores() {
+        return false;
+      }
+    };
+  }
+
   private static class FieldValuesSource extends LongValuesSource {
 
     final String field;
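
A small sketch of the new constant() factory in use (the leaf context and doc id below are assumptions, not part of this commit):

    LongValuesSource ten = LongValuesSource.constant(10);
    LongValues values = ten.getValues(leafCtx, null); // scores may be null: needsScores() is false
    if (values.advanceExact(docID)) {                 // always true for a constant source
      long weight = values.longValue();               // always 10
    }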

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a95c5ac/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
index 2c0b8f4..656dc04 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
@@ -17,15 +17,15 @@
 package org.apache.lucene.search.suggest;
 
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.List;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.LongValues;
+import org.apache.lucene.search.LongValuesSource;
 
 
 /**
@@ -34,7 +34,7 @@ import org.apache.lucene.queries.function.ValueSource;
  * optionally contexts information
  * taken from stored fields in a Lucene index. Similar to 
  * {@link DocumentDictionary}, except it obtains the weight
- * of the terms in a document based on a {@link ValueSource}.
+ * of the terms in a document based on a {@link LongValuesSource}.
  * </p>
  * <b>NOTE:</b> 
  *  <ul>
@@ -46,44 +46,75 @@ import org.apache.lucene.queries.function.ValueSource;
  *    </li>
  *  </ul>
  *  <p>
- *  In practice the {@link ValueSource} will likely be obtained
+ *  In practice the {@link LongValuesSource} will likely be obtained
  *  using the lucene expression module. The following example shows
- *  how to create a {@link ValueSource} from a simple addition of two
+ *  how to create a {@link LongValuesSource} from a simple addition of two
  *  fields:
  *  <code>
  *    Expression expression = JavascriptCompiler.compile("f1 + f2");
  *    SimpleBindings bindings = new SimpleBindings();
  *    bindings.add(new SortField("f1", SortField.Type.LONG));
  *    bindings.add(new SortField("f2", SortField.Type.LONG));
- *    ValueSource valueSource = expression.getValueSource(bindings);
+ *    LongValuesSource valueSource = expression.getDoubleValuesSource(bindings).toLongValuesSource();
  *  </code>
  *  </p>
  *
  */
 public class DocumentValueSourceDictionary extends DocumentDictionary {
   
-  private final ValueSource weightsValueSource;
+  private final LongValuesSource weightsValueSource;
   
   /**
    * Creates a new dictionary with the contents of the fields named <code>field</code>
    * for the terms, <code>payload</code> for the corresponding payloads, <code>contexts</code>
    * for the associated contexts and uses the <code>weightsValueSource</code> supplied 
    * to determine the score.
+   *
+   * @deprecated Use {@link #DocumentValueSourceDictionary(IndexReader, String, LongValuesSource, String, String)}
    */
+  @Deprecated
   public DocumentValueSourceDictionary(IndexReader reader, String field,
                                        ValueSource weightsValueSource, String payload, String contexts) {
     super(reader, field, null, payload, contexts);
+    this.weightsValueSource = weightsValueSource.asLongValuesSource();
+  }
+
+  /**
+   * Creates a new dictionary with the contents of the fields named <code>field</code>
+   * for the terms, <code>payload</code> for the corresponding payloads, <code>contexts</code>
+   * for the associated contexts and uses the <code>weightsValueSource</code> supplied
+   * to determine the score.
+   */
+  public DocumentValueSourceDictionary(IndexReader reader, String field,
+                                       LongValuesSource weightsValueSource, String payload, String contexts) {
+    super(reader, field, null, payload, contexts);
     this.weightsValueSource = weightsValueSource;
   }
+
   /**
    * Creates a new dictionary with the contents of the fields named <code>field</code>
    * for the terms, <code>payloadField</code> for the corresponding payloads
    * and uses the <code>weightsValueSource</code> supplied to determine the 
    * score.
+   *
+   * @deprecated Use {@link #DocumentValueSourceDictionary(IndexReader, String, LongValuesSource, String)}
    */
+  @Deprecated
   public DocumentValueSourceDictionary(IndexReader reader, String field,
                                        ValueSource weightsValueSource, String payload) {
     super(reader, field, null, payload);
+    this.weightsValueSource = weightsValueSource.asLongValuesSource();
+  }
+
+  /**
+   * Creates a new dictionary with the contents of the fields named <code>field</code>
+   * for the terms, <code>payloadField</code> for the corresponding payloads
+   * and uses the <code>weightsValueSource</code> supplied to determine the
+   * score.
+   */
+  public DocumentValueSourceDictionary(IndexReader reader, String field,
+                                       LongValuesSource weightsValueSource, String payload) {
+    super(reader, field, null, payload);
     this.weightsValueSource = weightsValueSource;
   }
   
@@ -91,11 +122,25 @@ public class DocumentValueSourceDictionary extends DocumentDictionary {
    * Creates a new dictionary with the contents of the fields named <code>field</code>
    * for the terms and uses the <code>weightsValueSource</code> supplied to determine the 
    * score.
+   *
+   * @deprecated Use {@link #DocumentValueSourceDictionary(IndexReader, String, LongValuesSource)}
    */
+  @Deprecated
   public DocumentValueSourceDictionary(IndexReader reader, String field,
                                        ValueSource weightsValueSource) {
     super(reader, field, null, null);
-    this.weightsValueSource = weightsValueSource;  
+    this.weightsValueSource = weightsValueSource.asLongValuesSource();
+  }
+
+  /**
+   * Creates a new dictionary with the contents of the fields named <code>field</code>
+   * for the terms and uses the <code>weightsValueSource</code> supplied to determine the
+   * score.
+   */
+  public DocumentValueSourceDictionary(IndexReader reader, String field,
+                                       LongValuesSource weightsValueSource) {
+    super(reader, field, null, null);
+    this.weightsValueSource = weightsValueSource;
   }
   
   @Override
@@ -105,7 +150,7 @@ public class DocumentValueSourceDictionary extends DocumentDictionary {
   
   final class DocumentValueSourceInputIterator extends DocumentDictionary.DocumentInputIterator {
     
-    private FunctionValues currentWeightValues;
+    private LongValues currentWeightValues;
     /** leaves of the reader */
     private final List<LeafReaderContext> leaves;
     /** starting docIds of all the leaves */
@@ -123,7 +168,7 @@ public class DocumentValueSourceDictionary extends DocumentDictionary {
       }
       starts[leaves.size()] = reader.maxDoc();
       currentWeightValues = (leaves.size() > 0) 
-          ? weightsValueSource.getValues(new HashMap<String, Object>(), leaves.get(currentLeafIndex))
+          ? weightsValueSource.getValues(leaves.get(currentLeafIndex), null)
           : null;
     }
     
@@ -140,13 +185,16 @@ public class DocumentValueSourceDictionary extends DocumentDictionary {
       if (subIndex != currentLeafIndex) {
         currentLeafIndex = subIndex;
         try {
-          currentWeightValues = weightsValueSource.getValues(new HashMap<String, Object>(), leaves.get(currentLeafIndex));
+          currentWeightValues = weightsValueSource.getValues(leaves.get(currentLeafIndex), null);
         } catch (IOException e) {
           throw new RuntimeException(e);
         }
       }
       try {
-        return currentWeightValues.longVal(docId - starts[subIndex]);
+        if (currentWeightValues.advanceExact(docId - starts[subIndex]))
+          return currentWeightValues.longValue();
+        else
+          return 0;
       } catch (IOException e) {
         throw new RuntimeException(e);
       }
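
Putting the updated javadoc example to work end to end, a sketch that compiles a javascript expression over two long fields and feeds it to the dictionary (the reader and the suggest/payload field names are assumptions; f1/f2 follow the javadoc):

    // imports: org.apache.lucene.expressions.Expression,
    //          org.apache.lucene.expressions.SimpleBindings,
    //          org.apache.lucene.expressions.js.JavascriptCompiler,
    //          org.apache.lucene.search.LongValuesSource,
    //          org.apache.lucene.search.SortField
    Expression expression = JavascriptCompiler.compile("f1 + f2"); // throws ParseException
    SimpleBindings bindings = new SimpleBindings();
    bindings.add(new SortField("f1", SortField.Type.LONG));
    bindings.add(new SortField("f2", SortField.Type.LONG));
    LongValuesSource weights =
        expression.getDoubleValuesSource(bindings).toLongValuesSource();
    Dictionary dictionary = new DocumentValueSourceDictionary(
        reader, "suggest_field", weights, "payload_field");

Note also the behavioural detail in the iterator above: it now calls advanceExact() and falls back to a weight of 0 when a document has no value for the source.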

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a95c5ac/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
index 9e58a4e..92799cd 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
@@ -36,12 +36,16 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.queries.function.valuesource.SumFloatFunction;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.LongValues;
+import org.apache.lucene.search.LongValuesSource;
 import org.apache.lucene.search.spell.Dictionary;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -59,7 +63,7 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
   static final String CONTEXTS_FIELD_NAME = "c1";
   
   @Test
-  public void testEmptyReader() throws IOException {
+  public void testValueSourceEmptyReader() throws IOException {
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
@@ -78,9 +82,30 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
 
     IOUtils.close(ir, analyzer, dir);
   }
+
+  @Test
+  public void testLongValuesSourceEmptyReader() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random());
+    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
+    iwc.setMergePolicy(newLogMergePolicy());
+    // Make sure the index is created even when no documents are added
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
+    writer.commit();
+    writer.close();
+    IndexReader ir = DirectoryReader.open(dir);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME);
+    InputIterator inputIterator = dictionary.getEntryIterator();
+
+    assertNull(inputIterator.next());
+    assertEquals(inputIterator.weight(), 0);
+    assertNull(inputIterator.payload());
+
+    IOUtils.close(ir, analyzer, dir);
+  }
   
   @Test
-  public void testBasic() throws IOException {
+  public void testValueSourceBasic() throws IOException {
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
@@ -112,9 +137,83 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     assertTrue(docs.isEmpty());
     IOUtils.close(ir, analyzer, dir);
   }
+
+  private static LongValuesSource sum(String... fields) {
+    LongValuesSource[] sources = new LongValuesSource[fields.length];
+    for (int i = 0; i < fields.length; i++) {
+      sources[i] = LongValuesSource.fromLongField(fields[i]);
+    }
+    return new LongValuesSource() {
+      @Override
+      public LongValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
+        LongValues[] values = new LongValues[fields.length];
+        for (int i = 0; i < sources.length; i++) {
+          values[i] = sources[i].getValues(ctx, scores);
+        }
+        return new LongValues() {
+          @Override
+          public long longValue() throws IOException {
+            long v = 0;
+            for (LongValues value : values) {
+              v += value.longValue();
+            }
+            return v;
+          }
+
+          @Override
+          public boolean advanceExact(int doc) throws IOException {
+            boolean v = true;
+            for (LongValues value : values) {
+              v &= value.advanceExact(doc);
+            }
+            return v;
+          }
+        };
+      }
+
+      @Override
+      public boolean needsScores() {
+        return false;
+      }
+    };
+  }
+
+  @Test
+  public void testLongValuesSourceBasic() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random());
+    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
+    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    for(Document doc: docs.values()) {
+      writer.addDocument(doc);
+    }
+    writer.commit();
+    writer.close();
+
+    IndexReader ir = DirectoryReader.open(dir);
+    LongValuesSource sumValueSource = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValueSource, PAYLOAD_FIELD_NAME);
+    InputIterator inputIterator = dictionary.getEntryIterator();
+    BytesRef f;
+    while((f = inputIterator.next())!=null) {
+      Document doc = docs.remove(f.utf8ToString());
+      long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue();
+      long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue();
+      long w3 = doc.getField(WEIGHT_FIELD_NAME_3).numericValue().longValue();
+      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertEquals(inputIterator.weight(), (w1 + w2 + w3));
+      IndexableField payloadField = doc.getField(PAYLOAD_FIELD_NAME);
+      if (payloadField == null) assertTrue(inputIterator.payload().length == 0);
+      else assertEquals(inputIterator.payload(), payloadField.binaryValue());
+    }
+    assertTrue(docs.isEmpty());
+    IOUtils.close(ir, analyzer, dir);
+  }
   
   @Test
-  public void testWithContext() throws IOException {
+  public void testValueSourceWithContext() throws IOException {
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
@@ -153,7 +252,46 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
   }
 
   @Test
-  public void testWithoutPayload() throws IOException {
+  public void testLongValuesSourceWithContext() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random());
+    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
+    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    for(Document doc: docs.values()) {
+      writer.addDocument(doc);
+    }
+    writer.commit();
+    writer.close();
+
+    IndexReader ir = DirectoryReader.open(dir);
+    LongValuesSource sumValues = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValues, PAYLOAD_FIELD_NAME, CONTEXTS_FIELD_NAME);
+    InputIterator inputIterator = dictionary.getEntryIterator();
+    BytesRef f;
+    while((f = inputIterator.next())!=null) {
+      Document doc = docs.remove(f.utf8ToString());
+      long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue();
+      long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue();
+      long w3 = doc.getField(WEIGHT_FIELD_NAME_3).numericValue().longValue();
+      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertEquals(inputIterator.weight(), (w1 + w2 + w3));
+      IndexableField payloadField = doc.getField(PAYLOAD_FIELD_NAME);
+      if (payloadField == null) assertTrue(inputIterator.payload().length == 0);
+      else assertEquals(inputIterator.payload(), payloadField.binaryValue());
+      Set<BytesRef> originalCtxs = new HashSet<>();
+      for (IndexableField ctxf: doc.getFields(CONTEXTS_FIELD_NAME)) {
+        originalCtxs.add(ctxf.binaryValue());
+      }
+      assertEquals(originalCtxs, inputIterator.contexts());
+    }
+    assertTrue(docs.isEmpty());
+    IOUtils.close(ir, analyzer, dir);
+  }
+
+  @Test
+  public void testValueSourceWithoutPayload() throws IOException {
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
@@ -183,9 +321,41 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     assertTrue(docs.isEmpty());
     IOUtils.close(ir, analyzer, dir);
   }
+
+  @Test
+  public void testLongValuesSourceWithoutPayload() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random());
+    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
+    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    for(Document doc: docs.values()) {
+      writer.addDocument(doc);
+    }
+    writer.commit();
+    writer.close();
+
+    IndexReader ir = DirectoryReader.open(dir);
+    LongValuesSource sumValues = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2, WEIGHT_FIELD_NAME_3);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValues);
+    InputIterator inputIterator = dictionary.getEntryIterator();
+    BytesRef f;
+    while((f = inputIterator.next())!=null) {
+      Document doc = docs.remove(f.utf8ToString());
+      long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue();
+      long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue();
+      long w3 = doc.getField(WEIGHT_FIELD_NAME_3).numericValue().longValue();
+      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertEquals(inputIterator.weight(), (w1 + w2 + w3));
+      assertNull(inputIterator.payload());
+    }
+    assertTrue(docs.isEmpty());
+    IOUtils.close(ir, analyzer, dir);
+  }
   
   @Test
-  public void testWithDeletions() throws IOException {
+  public void testValueSourceWithDeletions() throws IOException {
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
@@ -238,6 +408,60 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     assertTrue(docs.isEmpty());
     IOUtils.close(ir, analyzer, dir);
   }
+
+  @Test
+  public void testLongValuesSourceWithDeletions() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random());
+    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
+    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    Random rand = random();
+    List<String> termsToDel = new ArrayList<>();
+    for(Document doc : docs.values()) {
+      if(rand.nextBoolean() && termsToDel.size() < docs.size()-1) {
+        termsToDel.add(doc.get(FIELD_NAME));
+      }
+      writer.addDocument(doc);
+    }
+    writer.commit();
+
+    Term[] delTerms = new Term[termsToDel.size()];
+    for(int i=0; i < termsToDel.size() ; i++) {
+      delTerms[i] = new Term(FIELD_NAME, termsToDel.get(i));
+    }
+
+    for(Term delTerm: delTerms) {
+      writer.deleteDocuments(delTerm);
+    }
+    writer.commit();
+    writer.close();
+
+    for(String termToDel: termsToDel) {
+      assertTrue(null!=docs.remove(termToDel));
+    }
+
+    IndexReader ir = DirectoryReader.open(dir);
+    assertTrue("NumDocs should be > 0 but was " + ir.numDocs(), ir.numDocs() > 0);
+    assertEquals(ir.numDocs(), docs.size());
+    LongValuesSource sumValues = sum(WEIGHT_FIELD_NAME_1, WEIGHT_FIELD_NAME_2);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, sumValues, PAYLOAD_FIELD_NAME);
+    InputIterator inputIterator = dictionary.getEntryIterator();
+    BytesRef f;
+    while((f = inputIterator.next())!=null) {
+      Document doc = docs.remove(f.utf8ToString());
+      long w1 = doc.getField(WEIGHT_FIELD_NAME_1).numericValue().longValue();
+      long w2 = doc.getField(WEIGHT_FIELD_NAME_2).numericValue().longValue();
+      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertEquals(inputIterator.weight(), w2+w1);
+      IndexableField payloadField = doc.getField(PAYLOAD_FIELD_NAME);
+      if (payloadField == null) assertTrue(inputIterator.payload().length == 0);
+      else assertEquals(inputIterator.payload(), payloadField.binaryValue());
+    }
+    assertTrue(docs.isEmpty());
+    IOUtils.close(ir, analyzer, dir);
+  }
   
   @Test
   public void testWithValueSource() throws IOException {
@@ -269,6 +493,36 @@ public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
     IOUtils.close(ir, analyzer, dir);
   }
 
+  @Test
+  public void testWithLongValuesSource() throws IOException {
+    Directory dir = newDirectory();
+    Analyzer analyzer = new MockAnalyzer(random());
+    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
+    Map<String, Document> docs = generateIndexDocuments(atLeast(100));
+    for(Document doc: docs.values()) {
+      writer.addDocument(doc);
+    }
+    writer.commit();
+    writer.close();
+
+    IndexReader ir = DirectoryReader.open(dir);
+    Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME);
+    InputIterator inputIterator = dictionary.getEntryIterator();
+    BytesRef f;
+    while((f = inputIterator.next())!=null) {
+      Document doc = docs.remove(f.utf8ToString());
+      assertTrue(f.equals(new BytesRef(doc.get(FIELD_NAME))));
+      assertEquals(inputIterator.weight(), 10);
+      IndexableField payloadField = doc.getField(PAYLOAD_FIELD_NAME);
+      if (payloadField == null) assertTrue(inputIterator.payload().length == 0);
+      else assertEquals(inputIterator.payload(), payloadField.binaryValue());
+    }
+    assertTrue(docs.isEmpty());
+    IOUtils.close(ir, analyzer, dir);
+  }
+
   private Map<String, Document> generateIndexDocuments(int ndocs) {
     Map<String, Document> docs = new HashMap<>();
     for(int i = 0; i < ndocs ; i++) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a95c5ac/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java b/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java
index 24f1553..b0d7007 100644
--- a/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java
+++ b/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java
@@ -23,7 +23,7 @@ import java.util.Set;
 import org.apache.lucene.expressions.Expression;
 import org.apache.lucene.expressions.SimpleBindings;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
-import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.LongValuesSource;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.spell.Dictionary;
 import org.apache.lucene.search.suggest.DocumentValueSourceDictionary;
@@ -93,7 +93,7 @@ public class DocumentExpressionDictionaryFactory extends DictionaryFactory {
         sortFields), payloadField);
   }
 
-  public ValueSource fromExpression(String weightExpression, Set<SortField> sortFields) {
+  public LongValuesSource fromExpression(String weightExpression, Set<SortField> sortFields) {
     Expression expression = null;
     try {
       expression = JavascriptCompiler.compile(weightExpression);
@@ -104,7 +104,7 @@ public class DocumentExpressionDictionaryFactory extends DictionaryFactory {
     for (SortField sortField : sortFields) {
       bindings.add(sortField);
     }
-    return expression.getValueSource(bindings);
+    return expression.getDoubleValuesSource(bindings).toLongValuesSource();
   }
   
   private SortField.Type getSortFieldType(SolrCore core, String sortFieldName) {
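
Caller code for fromExpression() is unchanged apart from the return type; the compiled expression is bridged from DoubleValuesSource to LongValuesSource. A hedged sketch (the factory instance, expression text and sort fields are illustrative):

    Set<SortField> sortFields = new HashSet<>();
    sortFields.add(new SortField("popularity", SortField.Type.LONG));
    LongValuesSource weights = factory.fromExpression("popularity * 2", sortFields);

Since toLongValuesSource() narrows the double-valued expression result to a long, any fractional part of a computed weight is dropped.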


[37/43] lucene-solr:jira/solr-8593: SOLR-9918: Add SkipExistingDocumentsProcessor that skips duplicate inserts and ignores updates to missing docs

Posted by kr...@apache.org.
SOLR-9918: Add SkipExistingDocumentsProcessor that skips duplicate inserts and ignores updates to missing docs


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d66bfba5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d66bfba5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d66bfba5

Branch: refs/heads/jira/solr-8593
Commit: d66bfba5dc1bd9154bd48898865f51d9715e8d0c
Parents: 118fc42
Author: koji <ko...@apache.org>
Authored: Wed Jan 11 11:48:33 2017 +0900
Committer: koji <ko...@apache.org>
Committed: Wed Jan 11 11:48:33 2017 +0900

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../SkipExistingDocumentsProcessorFactory.java  | 255 ++++++++++++++
 ...ipExistingDocumentsProcessorFactoryTest.java | 336 +++++++++++++++++++
 3 files changed, 594 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d66bfba5/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 0ee18ba..204ea26 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -126,6 +126,9 @@ Upgrade Notes
 
 New Features
 ----------------------
+* SOLR-9918: Add SkipExistingDocumentsProcessor that skips duplicate inserts and ignores updates to missing docs
+  (Tim Owen via koji)
+
 * SOLR-9293: Solrj client support for hierarchical clusters and other topics 
   marker. (Dawid Weiss)
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d66bfba5/solr/core/src/java/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactory.java
new file mode 100644
index 0000000..ec637a4
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactory.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.update.processor;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.handler.component.RealTimeGetComponent;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.search.SolrIndexSearcher;
+import org.apache.solr.update.AddUpdateCommand;
+import org.apache.solr.update.UpdateCommand;
+import org.apache.solr.util.RefCounted;
+import org.apache.solr.util.plugin.SolrCoreAware;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+
+import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
+/**
+ * <p>
+ *     This Factory generates an UpdateProcessor that will (by default) skip inserting new documents
+ *     if there already exists a document with the same uniqueKey value in the index. It will also
+ *     skip Atomic Updates to a document if that document does not already exist. This behaviour is applied
+ *     to each document in turn, so adding a batch of documents can result in some being added and some
+ *     ignored, depending on what is already in the index. If all of the documents are skipped, no changes
+ *     to the index will occur.
+ * </p>
+ * These two forms of skipping can be switched on or off independently, by using init params:
+ * <ul>
+ *     <li><code>skipInsertIfExists</code> - This boolean parameter defaults to
+ *          <code>true</code>, but if set to <code>false</code> then inserts (i.e. not Atomic Updates)
+ *          will be passed through unchanged even if the document already exists.</li>
+ *     <li><code>skipUpdateIfMissing</code> - This boolean parameter defaults to
+ *         <code>true</code>, but if set to <code>false</code> then Atomic Updates
+ *          will be passed through unchanged regardless of whether the document exists.</li>
+ * </ul>
+ * <p>
+ *     These params can also be specified per-request, to override the configured behaviour
+ *     for specific updates, e.g. <code>/update?skipUpdateIfMissing=true</code>
+ * </p>
+ * <p>
+ *     This implementation is a simpler alternative to {@link DocBasedVersionConstraintsProcessorFactory}
+ *     when you are not concerned with versioning, and just want to quietly ignore duplicate documents and/or
+ *     silently skip updates to non-existent documents (in the same way a database <code>UPDATE</code> would).
+ *
+ *     If your documents do have an explicit version field, and you want to ensure older versions are
+ *     skipped instead of replacing the indexed document, you should consider {@link DocBasedVersionConstraintsProcessorFactory}
+ *     instead.
+ * </p>
+ * <p>
+ *     An example chain configuration to use this for skipping duplicate inserts, but not skipping updates to
+ *     missing documents by default, is:
+ * </p>
+ * <pre class="prettyprint">
+ * &lt;updateRequestProcessorChain name="skipexisting"&gt;
+ *   &lt;processor class="solr.LogUpdateProcessorFactory" /&gt;
+ *   &lt;processor class="solr.SkipExistingDocumentsProcessorFactory"&gt;
+ *     &lt;bool name="skipInsertIfExists"&gt;true&lt;/bool&gt;
+ *     &lt;bool name="skipUpdateIfMissing"&gt;false&lt;/bool&gt; &lt;!-- Can override this per-request --&gt;
+ *   &lt;/processor&gt;
+ *   &lt;processor class="solr.DistributedUpdateProcessorFactory" /&gt;
+ *   &lt;processor class="solr.RunUpdateProcessorFactory" /&gt;
+ * &lt;/updateRequestProcessorChain&gt;
+ * </pre>
+ */
+public class SkipExistingDocumentsProcessorFactory extends UpdateRequestProcessorFactory implements SolrCoreAware, UpdateRequestProcessorFactory.RunAlways {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static final String PARAM_SKIP_INSERT_IF_EXISTS = "skipInsertIfExists";
+  private static final String PARAM_SKIP_UPDATE_IF_MISSING = "skipUpdateIfMissing";
+
+  private boolean skipInsertIfExists = true;
+  private boolean skipUpdateIfMissing = true;
+
+  @Override
+  public void init(NamedList args)  {
+    Object tmp = args.remove(PARAM_SKIP_INSERT_IF_EXISTS);
+    if (null != tmp) {
+      if (! (tmp instanceof Boolean) ) {
+        throw new SolrException(SERVER_ERROR, "'" + PARAM_SKIP_INSERT_IF_EXISTS + "' must be configured as a <bool>");
+      }
+      skipInsertIfExists = (Boolean)tmp;
+    }
+    tmp = args.remove(PARAM_SKIP_UPDATE_IF_MISSING);
+    if (null != tmp) {
+      if (! (tmp instanceof Boolean) ) {
+        throw new SolrException(SERVER_ERROR, "'" + PARAM_SKIP_UPDATE_IF_MISSING + "' must be configured as a <bool>");
+      }
+      skipUpdateIfMissing = (Boolean)tmp;
+    }
+
+    super.init(args);
+  }
+
+  @Override
+  public SkipExistingDocumentsUpdateProcessor getInstance(SolrQueryRequest req,
+                                                          SolrQueryResponse rsp,
+                                                          UpdateRequestProcessor next) {
+    // Ensure the parameters are forwarded to the leader
+    DistributedUpdateProcessorFactory.addParamToDistributedRequestWhitelist(req, PARAM_SKIP_INSERT_IF_EXISTS, PARAM_SKIP_UPDATE_IF_MISSING);
+
+    // Allow the particular request to override the plugin's configured behaviour
+    boolean skipInsertForRequest = req.getOriginalParams().getBool(PARAM_SKIP_INSERT_IF_EXISTS, this.skipInsertIfExists);
+    boolean skipUpdateForRequest = req.getOriginalParams().getBool(PARAM_SKIP_UPDATE_IF_MISSING, this.skipUpdateIfMissing);
+
+    return new SkipExistingDocumentsUpdateProcessor(req, next, skipInsertForRequest, skipUpdateForRequest);
+  }
+
+  @Override
+  public void inform(SolrCore core) {
+
+    if (core.getUpdateHandler().getUpdateLog() == null) {
+      throw new SolrException(SERVER_ERROR, "updateLog must be enabled.");
+    }
+
+    if (core.getLatestSchema().getUniqueKeyField() == null) {
+      throw new SolrException(SERVER_ERROR, "schema must have uniqueKey defined.");
+    }
+  }
+
+  static class SkipExistingDocumentsUpdateProcessor extends UpdateRequestProcessor {
+
+    private final boolean skipInsertIfExists;
+    private final boolean skipUpdateIfMissing;
+    private final SolrCore core;
+
+    private DistributedUpdateProcessor distribProc;  // the distributed update processor following us
+    private DistributedUpdateProcessor.DistribPhase phase;
+
+    SkipExistingDocumentsUpdateProcessor(SolrQueryRequest req,
+                                         UpdateRequestProcessor next,
+                                         boolean skipInsertIfExists,
+                                         boolean skipUpdateIfMissing) {
+      super(next);
+      this.skipInsertIfExists = skipInsertIfExists;
+      this.skipUpdateIfMissing = skipUpdateIfMissing;
+      this.core = req.getCore();
+
+      for (UpdateRequestProcessor proc = next; proc != null; proc = proc.next) {
+        if (proc instanceof DistributedUpdateProcessor) {
+          distribProc = (DistributedUpdateProcessor)proc;
+          break;
+        }
+      }
+
+      if (distribProc == null) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "DistributedUpdateProcessor must follow SkipExistingDocumentsUpdateProcessor");
+      }
+
+      phase = DistributedUpdateProcessor.DistribPhase.parseParam(req.getParams().get(DISTRIB_UPDATE_PARAM));
+    }
+
+    boolean isSkipInsertIfExists() {
+      return this.skipInsertIfExists;
+    }
+
+    boolean isSkipUpdateIfMissing() {
+      return this.skipUpdateIfMissing;
+    }
+
+    boolean doesDocumentExist(BytesRef indexedDocId) throws IOException {
+      assert null != indexedDocId;
+
+      SolrInputDocument oldDoc = RealTimeGetComponent.getInputDocumentFromTlog(core, indexedDocId);
+      if (oldDoc == RealTimeGetComponent.DELETED) {
+        return false;
+      }
+      if (oldDoc != null) {
+        return true;
+      }
+
+      // need to look up in index now...
+      RefCounted<SolrIndexSearcher> newestSearcher = core.getRealtimeSearcher();
+      try {
+        SolrIndexSearcher searcher = newestSearcher.get();
+        return searcher.lookupId(indexedDocId) >= 0L;
+      } catch (IOException e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error reading document from index", e);
+      } finally {
+        if (newestSearcher != null) {
+          newestSearcher.decref();
+        }
+      }
+    }
+
+    boolean isLeader(UpdateCommand cmd) {
+      if ((cmd.getFlags() & (UpdateCommand.REPLAY | UpdateCommand.PEER_SYNC)) != 0) {
+        return false;
+      }
+      if (phase == DistributedUpdateProcessor.DistribPhase.FROMLEADER) {
+        return false;
+      }
+      return distribProc.isLeader(cmd);
+    }
+
+    @Override
+    public void processAdd(AddUpdateCommand cmd) throws IOException {
+      BytesRef indexedDocId = cmd.getIndexedId();
+
+      boolean isUpdate = AtomicUpdateDocumentMerger.isAtomicUpdate(cmd);
+
+      // boolean existsByLookup = (RealTimeGetComponent.getInputDocument(core, indexedDocId) != null);
+      // if (docExists != existsByLookup) {
+      //   log.error("Found docExists {} but existsByLookup {} for doc {}", docExists, existsByLookup, indexedDocId.utf8ToString());
+      // }
+
+      if (log.isDebugEnabled()) {
+        log.debug("Document ID {} ... exists already? {} ... isAtomicUpdate? {} ... isLeader? {}",
+                  indexedDocId.utf8ToString(), doesDocumentExist(indexedDocId), isUpdate, isLeader(cmd));
+      }
+
+      if (skipInsertIfExists && !isUpdate && isLeader(cmd) && doesDocumentExist(indexedDocId)) {
+        if (log.isDebugEnabled()) {
+          log.debug("Skipping insert for pre-existing document ID {}", indexedDocId.utf8ToString());
+        }
+        return;
+      }
+
+      if (skipUpdateIfMissing && isUpdate && isLeader(cmd) && !doesDocumentExist(indexedDocId)) {
+        if (log.isDebugEnabled()) {
+          log.debug("Skipping update to non-existent document ID {}", indexedDocId.utf8ToString());
+        }
+        return;
+      }
+
+      if (log.isDebugEnabled()) {
+        log.debug("Passing on document ID {}", indexedDocId.utf8ToString());
+      }
+
+      super.processAdd(cmd);
+    }
+  }
+}
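
As the class javadoc above notes, both parameters can also be overridden per request. A SolrJ sketch against a collection wired with the "skipexisting" chain from the javadoc example (the client, collection and chain names are assumptions):

    // imports: org.apache.solr.client.solrj.request.UpdateRequest,
    //          org.apache.solr.common.SolrInputDocument
    UpdateRequest update = new UpdateRequest();
    update.setParam("update.chain", "skipexisting");
    update.setParam("skipInsertIfExists", "true");   // skip this add if the id already exists
    update.setParam("skipUpdateIfMissing", "false"); // pass atomic updates through even if missing
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "doc-1");
    update.add(doc);
    update.process(solrClient, "collection1");

Because getInstance() whitelists both params for distributed requests, the per-request override reaches the shard leader, where the existence check is applied.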

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d66bfba5/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java
new file mode 100644
index 0000000..2afe35c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java
@@ -0,0 +1,336 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.update.processor;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.util.BytesRef;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.request.LocalSolrQueryRequest;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.update.AddUpdateCommand;
+import org.apache.solr.update.processor.SkipExistingDocumentsProcessorFactory.SkipExistingDocumentsUpdateProcessor;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class SkipExistingDocumentsProcessorFactoryTest {
+
+  private BytesRef docId = new BytesRef();
+  private SolrQueryRequest defaultRequest = new LocalSolrQueryRequest(null, new NamedList());
+
+  // Tests for logic in the factory
+
+  @Test(expected=SolrException.class)
+  public void testExceptionIfSkipInsertParamNonBoolean() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    initArgs.add("skipInsertIfExists", "false");
+    factory.init(initArgs);
+  }
+
+  @Test(expected=SolrException.class)
+  public void testExceptionIfSkipUpdateParamNonBoolean() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    initArgs.add("skipUpdateIfMissing", 0);
+    factory.init(initArgs);
+  }
+
+  @Test(expected=SolrException.class)
+  public void testExceptionIfNextProcessorIsNull() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    factory.init(initArgs);
+
+    factory.getInstance(defaultRequest, new SolrQueryResponse(), null);
+  }
+
+  @Test(expected=SolrException.class)
+  public void testExceptionIfNextProcessorNotDistributed() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    factory.init(initArgs);
+    UpdateRequestProcessor next = new BufferingRequestProcessor(null);
+
+    factory.getInstance(defaultRequest, new SolrQueryResponse(), next);
+  }
+
+  @Test
+  public void testNoExceptionIfNextProcessorIsDistributed() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    factory.init(initArgs);
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+
+    factory.getInstance(defaultRequest, new SolrQueryResponse(), next);
+  }
+
+  @Test
+  public void testNoExceptionIfNextNextProcessorIsDistributed() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    factory.init(initArgs);
+    UpdateRequestProcessor distProcessor = Mockito.mock(DistributedUpdateProcessor.class);
+    UpdateRequestProcessor next = new BufferingRequestProcessor(distProcessor);
+
+    factory.getInstance(defaultRequest, new SolrQueryResponse(), next);
+  }
+
+  @Test
+  public void testSkipInsertsAndUpdatesDefaultToTrueIfNotConfigured() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    factory.init(initArgs);
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+
+    SkipExistingDocumentsUpdateProcessor processor = factory.getInstance(defaultRequest, new SolrQueryResponse(), next);
+    assertTrue("Expected skipInsertIfExists to be true", processor.isSkipInsertIfExists());
+    assertTrue("Expected skipUpdateIfMissing to be true", processor.isSkipUpdateIfMissing());
+  }
+
+  @Test
+  public void testSkipInsertsFalseIfInInitArgs() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    initArgs.add("skipInsertIfExists", false);
+    factory.init(initArgs);
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+
+    SkipExistingDocumentsUpdateProcessor processor = factory.getInstance(defaultRequest, new SolrQueryResponse(), next);
+    assertFalse("Expected skipInsertIfExists to be false", processor.isSkipInsertIfExists());
+    assertTrue("Expected skipUpdateIfMissing to be true", processor.isSkipUpdateIfMissing());
+  }
+
+  @Test
+  public void testSkipUpdatesFalseIfInInitArgs() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    initArgs.add("skipUpdateIfMissing", false);
+    factory.init(initArgs);
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+
+    SkipExistingDocumentsUpdateProcessor processor = factory.getInstance(defaultRequest, new SolrQueryResponse(), next);
+    assertTrue("Expected skipInsertIfExists to be true", processor.isSkipInsertIfExists());
+    assertFalse("Expected skipUpdateIfMissing to be false", processor.isSkipUpdateIfMissing());
+  }
+
+  @Test
+  public void testSkipBothFalseIfInInitArgs() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    initArgs.add("skipInsertIfExists", false);
+    initArgs.add("skipUpdateIfMissing", false);
+    factory.init(initArgs);
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+
+    SkipExistingDocumentsUpdateProcessor processor = factory.getInstance(defaultRequest, new SolrQueryResponse(), next);
+    assertFalse("Expected skipInsertIfExists to be false", processor.isSkipInsertIfExists());
+    assertFalse("Expected skipUpdateIfMissing to be false", processor.isSkipUpdateIfMissing());
+  }
+
+  @Test
+  public void testSkipInsertsFalseIfInitArgsTrueButFalseStringInRequest() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    initArgs.add("skipInsertIfExists", true);
+    factory.init(initArgs);
+    NamedList<String> requestArgs = new NamedList<>();
+    requestArgs.add("skipInsertIfExists", "false");
+    SolrQueryRequest req = new LocalSolrQueryRequest(null, requestArgs);
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+
+    SkipExistingDocumentsUpdateProcessor processor = factory.getInstance(req, new SolrQueryResponse(), next);
+    assertFalse("Expected skipInsertIfExists to be false", processor.isSkipInsertIfExists());
+    assertTrue("Expected skipUpdateIfMissing to be true", processor.isSkipUpdateIfMissing());
+  }
+
+  @Test
+  public void testSkipUpdatesFalseIfInitArgsTrueButFalseBooleanInRequest() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    initArgs.add("skipUpdateIfMissing", true);
+    factory.init(initArgs);
+    NamedList<Object> requestArgs = new NamedList<>();
+    requestArgs.add("skipUpdateIfMissing", false);
+    SolrQueryRequest req = new LocalSolrQueryRequest(null, requestArgs);
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+
+    SkipExistingDocumentsUpdateProcessor processor = factory.getInstance(req, new SolrQueryResponse(), next);
+    assertTrue("Expected skipInsertIfExists to be true", processor.isSkipInsertIfExists());
+    assertFalse("Expected skipUpdateIfMissing to be false", processor.isSkipUpdateIfMissing());
+  }
+
+  @Test
+  public void testSkipUpdatesTrueIfInitArgsFalseButTrueStringInRequest() {
+    SkipExistingDocumentsProcessorFactory factory = new SkipExistingDocumentsProcessorFactory();
+    NamedList<Object> initArgs = new NamedList<>();
+    initArgs.add("skipInsertIfExists", true);
+    initArgs.add("skipUpdateIfMissing", false);
+    factory.init(initArgs);
+    NamedList<Object> requestArgs = new NamedList<>();
+    requestArgs.add("skipUpdateIfMissing", "true");
+    SolrQueryRequest req = new LocalSolrQueryRequest(null, requestArgs);
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+
+    SkipExistingDocumentsUpdateProcessor processor = factory.getInstance(req, new SolrQueryResponse(), next);
+    assertTrue("Expected skipInsertIfExists to be true", processor.isSkipInsertIfExists());
+    assertTrue("Expected skipUpdateIfMissing to be true", processor.isSkipUpdateIfMissing());
+  }
+
+
+  // Tests for logic in the processor
+
+  @Test
+  public void testSkippableInsertIsNotSkippedIfNotLeader() throws IOException {
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+    SkipExistingDocumentsUpdateProcessor processor
+            = Mockito.spy(new SkipExistingDocumentsUpdateProcessor(defaultRequest, next, true, true));
+
+    AddUpdateCommand cmd = createInsertUpdateCmd(defaultRequest);
+    doReturn(false).when(processor).isLeader(cmd);
+    doReturn(true).when(processor).doesDocumentExist(docId);
+
+    processor.processAdd(cmd);
+    verify(next).processAdd(cmd);
+  }
+
+  @Test
+  public void testSkippableInsertIsNotSkippedIfSkipInsertsFalse() throws IOException {
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+    SkipExistingDocumentsUpdateProcessor processor
+            = Mockito.spy(new SkipExistingDocumentsUpdateProcessor(defaultRequest, next, false, false));
+
+    AddUpdateCommand cmd = createInsertUpdateCmd(defaultRequest);
+    doReturn(true).when(processor).isLeader(cmd);
+    doReturn(true).when(processor).doesDocumentExist(docId);
+
+    processor.processAdd(cmd);
+    verify(next).processAdd(cmd);
+  }
+
+  @Test
+  public void testSkippableInsertIsSkippedIfSkipInsertsTrue() throws IOException {
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+    SkipExistingDocumentsUpdateProcessor processor
+            = Mockito.spy(new SkipExistingDocumentsUpdateProcessor(defaultRequest, next, true, false));
+
+    AddUpdateCommand cmd = createInsertUpdateCmd(defaultRequest);
+    doReturn(true).when(processor).isLeader(cmd);
+    doReturn(true).when(processor).doesDocumentExist(docId);
+
+    processor.processAdd(cmd);
+    verify(next, never()).processAdd(cmd);
+  }
+
+  @Test
+  public void testNonSkippableInsertIsNotSkippedIfSkipInsertsTrue() throws IOException {
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+    SkipExistingDocumentsUpdateProcessor processor
+            = Mockito.spy(new SkipExistingDocumentsUpdateProcessor(defaultRequest, next, true, false));
+
+    AddUpdateCommand cmd = createInsertUpdateCmd(defaultRequest);
+    doReturn(true).when(processor).isLeader(cmd);
+    doReturn(false).when(processor).doesDocumentExist(docId);
+
+    processor.processAdd(cmd);
+    verify(next).processAdd(cmd);
+  }
+
+  @Test
+  public void testSkippableUpdateIsNotSkippedIfNotLeader() throws IOException {
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+    SkipExistingDocumentsUpdateProcessor processor
+            = Mockito.spy(new SkipExistingDocumentsUpdateProcessor(defaultRequest, next, true, true));
+
+    AddUpdateCommand cmd = createAtomicUpdateCmd(defaultRequest);
+    doReturn(false).when(processor).isLeader(cmd);
+    doReturn(false).when(processor).doesDocumentExist(docId);
+
+    processor.processAdd(cmd);
+    verify(next).processAdd(cmd);
+  }
+
+  @Test
+  public void testSkippableUpdateIsNotSkippedIfSkipUpdatesFalse() throws IOException {
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+    SkipExistingDocumentsUpdateProcessor processor
+            = Mockito.spy(new SkipExistingDocumentsUpdateProcessor(defaultRequest, next, false, false));
+
+    AddUpdateCommand cmd = createAtomicUpdateCmd(defaultRequest);
+    doReturn(true).when(processor).isLeader(cmd);
+    doReturn(false).when(processor).doesDocumentExist(docId);
+
+    processor.processAdd(cmd);
+    verify(next).processAdd(cmd);
+  }
+
+  @Test
+  public void testSkippableUpdateIsSkippedIfSkipUpdatesTrue() throws IOException {
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+    SkipExistingDocumentsUpdateProcessor processor
+            = Mockito.spy(new SkipExistingDocumentsUpdateProcessor(defaultRequest, next, false, true));
+
+    AddUpdateCommand cmd = createAtomicUpdateCmd(defaultRequest);
+    doReturn(true).when(processor).isLeader(cmd);
+    doReturn(false).when(processor).doesDocumentExist(docId);
+
+    processor.processAdd(cmd);
+    verify(next, never()).processAdd(cmd);
+  }
+
+  @Test
+  public void testNonSkippableUpdateIsNotSkippedIfSkipUpdatesTrue() throws IOException {
+    UpdateRequestProcessor next = Mockito.mock(DistributedUpdateProcessor.class);
+    SkipExistingDocumentsUpdateProcessor processor
+            = Mockito.spy(new SkipExistingDocumentsUpdateProcessor(defaultRequest, next, false, true));
+
+    AddUpdateCommand cmd = createAtomicUpdateCmd(defaultRequest);
+    doReturn(true).when(processor).isLeader(cmd);
+    doReturn(true).when(processor).doesDocumentExist(docId);
+
+    processor.processAdd(cmd);
+    verify(next).processAdd(cmd);
+  }
+
+  private AddUpdateCommand createInsertUpdateCmd(SolrQueryRequest req) {
+    AddUpdateCommand cmd = new AddUpdateCommand(req);
+    cmd.setIndexedId(docId);
+    cmd.solrDoc = new SolrInputDocument();
+    assertFalse(AtomicUpdateDocumentMerger.isAtomicUpdate(cmd));
+    return cmd;
+  }
+
+  private AddUpdateCommand createAtomicUpdateCmd(SolrQueryRequest req) {
+    AddUpdateCommand cmd = new AddUpdateCommand(req);
+    cmd.setIndexedId(docId);
+    cmd.solrDoc = new SolrInputDocument();
+    cmd.solrDoc.addField("last_name", ImmutableMap.of("set", "Smith"));
+    assertTrue(AtomicUpdateDocumentMerger.isAtomicUpdate(cmd));
+    return cmd;
+  }
+}
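
For reference, the factory exercised by these tests would normally be wired into an
update chain in solrconfig.xml. A minimal sketch (chain name and processor placement
are illustrative; the two boolean init args are the ones the tests cover):

  <updateRequestProcessorChain name="skip-existing">
    <processor class="solr.SkipExistingDocumentsProcessorFactory">
      <bool name="skipInsertIfExists">true</bool>
      <bool name="skipUpdateIfMissing">false</bool>
    </processor>
    <processor class="solr.DistributedUpdateProcessorFactory"/>
    <processor class="solr.RunUpdateProcessorFactory"/>
  </updateRequestProcessorChain>

As the request-parameter tests above show, both flags can also be overridden on a
per-request basis (e.g. passing skipUpdateIfMissing=true with the update request).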


[39/43] lucene-solr:jira/solr-8593: SOLR-9941: Clear deletes lists before log replay

Posted by kr...@apache.org.
SOLR-9941: Clear deletes lists before log replay


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/04f45aab
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/04f45aab
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/04f45aab

Branch: refs/heads/jira/solr-8593
Commit: 04f45aab768b053f32feece7343f994d25f0bb26
Parents: 2437204
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Wed Jan 11 21:51:04 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Wed Jan 11 21:51:04 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../java/org/apache/solr/update/UpdateLog.java  |   6 +
 .../org/apache/solr/search/TestRecovery.java    | 127 +++++++++++++++++++
 3 files changed, 136 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04f45aab/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 204ea26..afcd10b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -74,6 +74,9 @@ Optimizations
 * SOLR-9584: Support Solr being proxied with another endpoint than default /solr, by using relative links
   in AdminUI javascripts (Yun Jie Zhou via janhoy)
 
+* SOLR-9941: Clear the deletes lists at UpdateLog before replaying from the log. This prevents redundantly
+  pre-applying DBQs during log replay to every update in the log, as if the DBQs were out of order. (hossman, Ishan Chattopadhyaya)
+
 ==================  6.4.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04f45aab/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index c40eafd..626dcd6 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -888,6 +888,12 @@ public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer {
     versionInfo.blockUpdates();
     try {
       state = State.REPLAYING;
+
+      // The deleteByQueries and oldDeletes lists would've been populated by
+      // items from the logs themselves (which we will replay now), so let's
+      // clear them out here before the replay.
+      deleteByQueries.clear();
+      oldDeletes.clear();
     } finally {
       versionInfo.unblockUpdates();
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04f45aab/solr/core/src/test/org/apache/solr/search/TestRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecovery.java b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
index f3e98dd..cc3ca47 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
@@ -25,6 +25,7 @@ import com.codahale.metrics.Metric;
 import com.codahale.metrics.MetricRegistry;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.noggit.ObjectBuilder;
+import org.apache.lucene.util.TestUtil;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.update.DirectUpdateHandler2;
@@ -198,6 +199,132 @@ public class TestRecovery extends SolrTestCaseJ4 {
   }
 
   @Test
+  public void testNewDBQAndDocMatchingOldDBQDuringLogReplay() throws Exception {
+    try {
+
+      DirectUpdateHandler2.commitOnClose = false;
+      final Semaphore logReplay = new Semaphore(0);
+      final Semaphore logReplayFinish = new Semaphore(0);
+
+      UpdateLog.testing_logReplayHook = () -> {
+        try {
+          assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      };
+
+      UpdateLog.testing_logReplayFinishHook = () -> logReplayFinish.release();
+
+      clearIndex();
+      assertU(commit());
+
+      // because we're sending updates during log replay, we can't emulate replica logic -- we need to use
+      // normal updates like a leader / single-node instance would get.
+      //
+      // (In SolrCloud mode, when a replica runs recoverFromLog, the replica is in state = DOWN
+      // for that period, so it won't receive any updates.)
+      
+      updateJ(jsonAdd(sdoc("id","B0")),params());
+      updateJ(jsonAdd(sdoc("id","B1")),params()); // should be deleted by subsequent DBQ in tlog
+      updateJ(jsonAdd(sdoc("id","B2")),params()); // should be deleted by DBQ that arives during tlog replay
+      updateJ(jsonDelQ("id:B1 OR id:B3 OR id:B6"),params());
+      updateJ(jsonAdd(sdoc("id","B3")),params()); // should *NOT* be deleted by previous DBQ in tlog
+      updateJ(jsonAdd(sdoc("id","B4")),params()); // should be deleted by DBQ that arives during tlog replay
+      updateJ(jsonAdd(sdoc("id","B5")),params());
+      
+      // sanity check no updates have been applied yet (just in tlog)
+      assertJQ(req("q","*:*"),"/response/numFound==0");
+
+      h.close();
+      createCore(); // (Attempts to) kick off recovery (which is currently blocked by semaphore)
+
+      // verify that previous close didn't do a commit & that recovery should be blocked by our hook
+      assertJQ(req("q","*:*") ,"/response/numFound==0");
+
+      // begin recovery (first few items)
+      logReplay.release(TestUtil.nextInt(random(),1,6));
+      // ... but before recovery is completely unblocked/finished, have a *new* DBQ arrive
+      // that should delete some items we either have just replayed, or are about to replay (or maybe both)...
+      updateJ(jsonDelQ("id:B2 OR id:B4"),params());
+      // ...and re-add a doc that would have matched a DBQ already in the tlog
+      // (which may/may-not have been replayed yet)
+      updateJ(jsonAdd(sdoc("id","B6")),params()); // should *NOT* be deleted by DBQ from tlog
+      assertU(commit());
+
+      // now completely unblock recovery
+      logReplay.release(1000);
+
+      // wait until recovery has finished
+      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+
+      // verify only the expected docs are found, even with the out-of-order DBQ and the DBQ that arrived during recovery
+      assertJQ(req("q", "*:*", "fl", "id", "sort", "id asc")
+               , "/response/docs==[{'id':'B0'}, {'id':'B3'}, {'id':'B5'}, {'id':'B6'}]");
+      
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+    }
+
+  }
+
+  @Test
+  public void testLogReplayWithReorderedDBQ() throws Exception {
+    try {
+
+      DirectUpdateHandler2.commitOnClose = false;
+      final Semaphore logReplay = new Semaphore(0);
+      final Semaphore logReplayFinish = new Semaphore(0);
+
+      UpdateLog.testing_logReplayHook = () -> {
+        try {
+          assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      };
+
+      UpdateLog.testing_logReplayFinishHook = () -> logReplayFinish.release();
+
+
+      clearIndex();
+      assertU(commit());
+
+      updateJ(jsonAdd(sdoc("id","B1", "_version_","1010")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonDelQ("id:B2"), params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-1017")); // This should've arrived after the 1015th update
+      updateJ(jsonAdd(sdoc("id","B2", "_version_","1015")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","B3", "_version_","1020")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+
+      assertJQ(req("q","*:*"),"/response/numFound==0");
+
+      h.close();
+      createCore();
+      // Solr should kick this off now
+      // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
+
+      // verify that previous close didn't do a commit
+      // recovery should be blocked by our hook
+      assertJQ(req("q","*:*") ,"/response/numFound==0");
+
+      // unblock recovery
+      logReplay.release(1000);
+
+      // wait until recovery has finished
+      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+
+      assertJQ(req("q","*:*") ,"/response/numFound==2");
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+    }
+
+  }
+
+  @Test
   public void testBuffering() throws Exception {
 
     DirectUpdateHandler2.commitOnClose = false;
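
The jsonDelQ(...) calls above build the standard JSON update body for a
delete-by-query. Against a live core, the equivalent request would look roughly
like this (host, port and core name are illustrative):

  curl -X POST 'http://localhost:8983/solr/collection1/update?commit=true' \
       -H 'Content-Type: application/json' \
       -d '{"delete": {"query": "id:B2 OR id:B4"}}'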


[30/43] lucene-solr:jira/solr-8593: SOLR-9856 Collect metrics for shard replication and tlog replay on replicas.

Posted by kr...@apache.org.
SOLR-9856 Collect metrics for shard replication and tlog replay on replicas.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b8383db0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b8383db0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b8383db0

Branch: refs/heads/jira/solr-8593
Commit: b8383db06ee194b9195cd95f058dc820cb70baf8
Parents: 1d7379b
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Jan 9 21:00:00 2017 +0100
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Jan 9 21:00:00 2017 +0100

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../org/apache/solr/core/SolrInfoMBean.java     |  2 +-
 .../java/org/apache/solr/update/PeerSync.java   | 42 +++++++++++++--
 .../java/org/apache/solr/update/UpdateLog.java  | 54 +++++++++++++++++++-
 .../solr/cloud/PeerSyncReplicationTest.java     | 15 ++++++
 .../apache/solr/cloud/TestCloudRecovery.java    | 26 ++++++++++
 .../org/apache/solr/search/TestRecovery.java    | 40 ++++++++++++++-
 7 files changed, 174 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b8383db0/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index b28df9c..c79b3c6 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -228,6 +228,8 @@ New Features
 * SOLR-9935: Add hl.fragsize support when using the UnifiedHighlighter to avoid snippets/Passages that are too small.
   Defaults to 70. (David Smiley)
 
+* SOLR-9856: Collect metrics for shard replication and tlog replay on replicas (ab).
+
 Optimizations
 ----------------------
 * SOLR-9704: Facet Module / JSON Facet API: Optimize blockChildren facets that have

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b8383db0/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java b/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java
index 421a4ef..04c8395 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java
@@ -32,7 +32,7 @@ public interface SolrInfoMBean {
   /**
    * Category of {@link SolrCore} component.
    */
-  enum Category { CORE, QUERYHANDLER, UPDATEHANDLER, CACHE, HIGHLIGHTING, QUERYPARSER, SEARCHER, INDEX, DIRECTORY, HTTP, OTHER }
+  enum Category { CORE, QUERYHANDLER, UPDATEHANDLER, CACHE, HIGHLIGHTING, QUERYPARSER, SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, OTHER }
 
   /**
    * Top-level group of beans for a subsystem.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b8383db0/solr/core/src/java/org/apache/solr/update/PeerSync.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/PeerSync.java b/solr/core/src/java/org/apache/solr/update/PeerSync.java
index 12ba7ae..861cbf7 100644
--- a/solr/core/src/java/org/apache/solr/update/PeerSync.java
+++ b/solr/core/src/java/org/apache/solr/update/PeerSync.java
@@ -29,6 +29,8 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Timer;
 import org.apache.http.NoHttpResponseException;
 import org.apache.http.client.HttpClient;
 import org.apache.http.conn.ConnectTimeoutException;
@@ -40,12 +42,15 @@ import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrInfoMBean;
 import org.apache.solr.handler.component.HttpShardHandlerFactory;
 import org.apache.solr.handler.component.ShardHandler;
 import org.apache.solr.handler.component.ShardHandlerFactory;
 import org.apache.solr.handler.component.ShardRequest;
 import org.apache.solr.handler.component.ShardResponse;
 import org.apache.solr.logging.MDCLoggingContext;
+import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.metrics.SolrMetricProducer;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
@@ -58,7 +63,7 @@ import static org.apache.solr.update.processor.DistributedUpdateProcessor.Distri
 import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
 
 /** @lucene.experimental */
-public class PeerSync  {
+public class PeerSync implements SolrMetricProducer {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   private boolean debug = log.isDebugEnabled();
 
@@ -87,6 +92,11 @@ public class PeerSync  {
   private final boolean onlyIfActive;
   private SolrCore core;
 
+  // metrics
+  private Timer syncTime;
+  private Counter syncErrors;
+  private Counter syncSkipped;
+
   // comparator that sorts by absolute value, putting highest first
   public static Comparator<Long> absComparator = (o1, o2) -> {
     long l1 = Math.abs(o1);
@@ -112,7 +122,6 @@ public class PeerSync  {
     return 0;
   };
 
-
   private static class SyncShardRequest extends ShardRequest {
     List<Long> reportedVersions;
     IndexFingerprint fingerprint;
@@ -147,6 +156,15 @@ public class PeerSync  {
     // TODO: close
     shardHandlerFactory = (HttpShardHandlerFactory) core.getCoreDescriptor().getCoreContainer().getShardHandlerFactory();
     shardHandler = shardHandlerFactory.getShardHandler(client);
+
+    core.getCoreMetricManager().registerMetricProducer(SolrInfoMBean.Category.REPLICATION.toString(), this);
+  }
+
+  @Override
+  public void initializeMetrics(SolrMetricManager manager, String registry, String scope) {
+    syncTime = manager.timer(registry, "time", scope);
+    syncErrors = manager.counter(registry, "errors", scope);
+    syncSkipped = manager.counter(registry, "skipped", scope);
   }
 
   /** optional list of updates we had before possibly receiving new updates */
@@ -208,9 +226,11 @@ public class PeerSync  {
    */
   public PeerSyncResult sync() {
     if (ulog == null) {
+      syncErrors.inc();
       return PeerSyncResult.failure();
     }
     MDCLoggingContext.setCore(core);
+    Timer.Context timerContext = null;
     try {
       log.info(msg() + "START replicas=" + replicas + " nUpdates=" + nUpdates);
       
@@ -221,10 +241,13 @@ public class PeerSync  {
       }
       // check if we already in sync to begin with 
       if(doFingerprint && alreadyInSync()) {
+        syncSkipped.inc();
         return PeerSyncResult.success();
       }
-      
-      
+
+      // measure only when actual sync is performed
+      timerContext = syncTime.time();
+
       // Fire off the requests before getting our own recent updates (for better concurrency)
       // This also allows us to avoid getting updates we don't need... if we got our updates and then got their updates,
       // they would
@@ -242,6 +265,7 @@ public class PeerSync  {
       if (startingVersions != null) {
         if (startingVersions.size() == 0) {
           log.warn("no frame of reference to tell if we've missed updates");
+          syncErrors.inc();
           return PeerSyncResult.failure();
         }
         Collections.sort(startingVersions, absComparator);
@@ -257,6 +281,7 @@ public class PeerSync  {
         if (Math.abs(startingVersions.get(0)) < smallestNewUpdate) {
           log.warn(msg()
               + "too many updates received since start - startingUpdates no longer overlaps with our currentUpdates");
+          syncErrors.inc();
           return PeerSyncResult.failure();
         }
         
@@ -285,10 +310,12 @@ public class PeerSync  {
             if (srsp.getException() == null)  {
               List<Long> otherVersions = (List<Long>)srsp.getSolrResponse().getResponse().get("versions");
               if (otherVersions != null && !otherVersions.isEmpty())  {
+                syncErrors.inc();
                 return PeerSyncResult.failure(true);
               }
             }
           }
+          syncErrors.inc();
           return PeerSyncResult.failure(false);
         }
       }
@@ -304,6 +331,7 @@ public class PeerSync  {
         if (!success) {
           log.info(msg() + "DONE. sync failed");
           shardHandler.cancelAll();
+          syncErrors.inc();
           return PeerSyncResult.failure();
         }
       }
@@ -318,8 +346,14 @@ public class PeerSync  {
       }
 
       log.info(msg() + "DONE. sync " + (success ? "succeeded" : "failed"));
+      if (!success) {
+        syncErrors.inc();
+      }
       return success ?  PeerSyncResult.success() : PeerSyncResult.failure();
     } finally {
+      if (timerContext != null) {
+        timerContext.close();
+      }
       MDCLoggingContext.clear();
     }
   }
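
The timer handling above follows the usual Dropwizard Metrics pattern: Timer.Context
implements Closeable, so closing it in the finally block records elapsed time only
for syncs that were actually performed. A self-contained sketch of the same pattern
(registry and metric name are illustrative):

  import com.codahale.metrics.MetricRegistry;
  import com.codahale.metrics.Timer;

  public class SyncTimerSketch {
    public static void main(String[] args) throws Exception {
      MetricRegistry registry = new MetricRegistry();
      Timer syncTime = registry.timer("REPLICATION.time");
      Timer.Context ctx = syncTime.time(); // start measuring
      try {
        Thread.sleep(10);                  // stand-in for the actual sync work
      } finally {
        ctx.close();                       // Closeable shorthand for stop(); records the elapsed time
      }
      System.out.println(syncTime.getCount()); // prints 1
    }
  }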

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b8383db0/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index b79290d..c40eafd 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -40,6 +40,8 @@ import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Meter;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.common.SolrException;
@@ -50,6 +52,9 @@ import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.core.PluginInfo;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrInfoMBean;
+import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.metrics.SolrMetricProducer;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.request.SolrRequestInfo;
@@ -71,7 +76,7 @@ import static org.apache.solr.update.processor.DistributingUpdateProcessorFactor
 
 
 /** @lucene.experimental */
-public class UpdateLog implements PluginInfoInitialized {
+public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer {
   private static final long STATUS_TIME = TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);
   public static String LOG_FILENAME_PATTERN = "%s.%019d";
   public static String TLOG_NAME="tlog";
@@ -186,6 +191,14 @@ public class UpdateLog implements PluginInfoInitialized {
   List<Long> startingVersions;
   int startingOperation;  // last operation in the logs on startup
 
+  // metrics
+  protected Gauge<Integer> bufferedOpsGauge;
+  protected Gauge<Integer> replayLogsCountGauge;
+  protected Gauge<Long> replayBytesGauge;
+  protected Gauge<Integer> stateGauge;
+  protected Meter applyingBufferedOpsMeter;
+  protected Meter replayOpsMeter;
+
   public static class LogPtr {
     final long pointer;
     final long version;
@@ -333,7 +346,39 @@ public class UpdateLog implements PluginInfoInitialized {
       }
 
     }
+    core.getCoreMetricManager().registerMetricProducer(SolrInfoMBean.Category.TLOG.toString(), this);
+  }
 
+  @Override
+  public void initializeMetrics(SolrMetricManager manager, String registry, String scope) {
+    bufferedOpsGauge = () -> {
+      if (tlog == null) {
+        return 0;
+      } else if (state == State.APPLYING_BUFFERED) {
+        // numRecords counts header as a record
+        return tlog.numRecords() - 1 - recoveryInfo.adds - recoveryInfo.deleteByQuery - recoveryInfo.deletes - recoveryInfo.errors;
+      } else if (state == State.BUFFERING) {
+        // numRecords counts header as a record
+        return tlog.numRecords() - 1;
+      } else {
+        return 0;
+      }
+    };
+    replayLogsCountGauge = () -> logs.size();
+    replayBytesGauge = () -> {
+      if (state == State.REPLAYING) {
+        return getTotalLogsSize();
+      } else {
+        return 0L;
+      }
+    };
+    manager.register(registry, bufferedOpsGauge, true, "ops", scope, "buffered");
+    manager.register(registry, replayLogsCountGauge, true, "logs", scope, "replay", "remaining");
+    manager.register(registry, replayBytesGauge, true, "bytes", scope, "replay", "remaining");
+    applyingBufferedOpsMeter = manager.meter(registry, "ops", scope, "applying_buffered");
+    replayOpsMeter = manager.meter(registry, "ops", scope, "replay");
+    stateGauge = () -> state.ordinal();
+    manager.register(registry, stateGauge, true, "state", scope);
   }
 
   /**
@@ -1427,6 +1472,13 @@ public class UpdateLog implements PluginInfoInitialized {
               loglog.error("REPLAY_ERR: Exception replaying log", rsp.getException());
               throw rsp.getException();
             }
+            if (state == State.REPLAYING) {
+              replayOpsMeter.mark();
+            } else if (state == State.APPLYING_BUFFERED) {
+              applyingBufferedOpsMeter.mark();
+            } else {
+              // XXX should not happen?
+            }
           } catch (IOException ex) {
             recoveryInfo.errors++;
             loglog.warn("REPLAY_ERR: IOException reading log", ex);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b8383db0/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java b/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
index 4084ad7..57784b6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/PeerSyncReplicationTest.java
@@ -27,9 +27,14 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrQuery;
@@ -172,6 +177,16 @@ public class PeerSyncReplicationTest extends AbstractFullDistribZkTestBase {
 
       // make sure leader has not changed after bringing initial leader back
       assertEquals(nodePeerSynced, shardToLeaderJetty.get("shard1"));
+
+      // assert metrics
+      MetricRegistry registry = nodePeerSynced.jetty.getCoreContainer().getMetricManager().registry("solr.core.collection1");
+      Map<String, Metric> metrics = registry.getMetrics();
+      assertTrue("REPLICATION.time present", metrics.containsKey("REPLICATION.time"));
+      assertTrue("REPLICATION.errors present", metrics.containsKey("REPLICATION.errors"));
+      Timer timer = (Timer)metrics.get("REPLICATION.time");
+      assertEquals(1L, timer.getCount());
+      Counter counter = (Counter)metrics.get("REPLICATION.errors");
+      assertEquals(0L, counter.getCount());
       success = true;
     } finally {
       System.clearProperty("solr.disableFingerprint");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b8383db0/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
index e2f3bfd..164eeab 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudRecovery.java
@@ -23,9 +23,14 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
 
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.Timer;
 import org.apache.commons.io.IOUtils;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
@@ -35,6 +40,7 @@ import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.cloud.ClusterStateUtil;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.update.DirectUpdateHandler2;
 import org.apache.solr.update.UpdateLog;
 import org.apache.solr.util.TestInjection;
@@ -102,6 +108,26 @@ public class TestCloudRecovery extends SolrCloudTestCase {
     assertEquals(4, resp.getResults().getNumFound());
     // Make sure all nodes recovered from the tlog
     assertEquals(4, countReplayLog.get());
+
+    // check metrics
+    int replicationCount = 0;
+    int errorsCount = 0;
+    int skippedCount = 0;
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
+      List<String> registryNames = manager.registryNames().stream()
+          .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
+      for (String registry : registryNames) {
+        Map<String, Metric> metrics = manager.registry(registry).getMetrics();
+        Timer timer = (Timer)metrics.get("REPLICATION.time");
+        Counter counter = (Counter)metrics.get("REPLICATION.errors");
+        Counter skipped = (Counter)metrics.get("REPLICATION.skipped");
+        replicationCount += timer.getCount();
+        errorsCount += counter.getCount();
+        skippedCount += skipped.getCount();
+      }
+    }
+    assertEquals(2, replicationCount);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b8383db0/solr/core/src/test/org/apache/solr/search/TestRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecovery.java b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
index 9b88ec4..7bd0951 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
@@ -19,6 +19,11 @@ package org.apache.solr.search;
 
 import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
 
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Meter;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricRegistry;
+import org.apache.solr.metrics.SolrMetricManager;
 import org.noggit.ObjectBuilder;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.request.SolrQueryRequest;
@@ -55,7 +60,7 @@ public class TestRecovery extends SolrTestCaseJ4 {
 
   // TODO: fix this test to not require FSDirectory
   static String savedFactory;
-  
+
   @BeforeClass
   public static void beforeClass() throws Exception {
     savedFactory = System.getProperty("solr.DirectoryFactory");
@@ -72,6 +77,12 @@ public class TestRecovery extends SolrTestCaseJ4 {
     }
   }
 
+  private Map<String, Metric> getMetrics() {
+    SolrMetricManager manager = h.getCoreContainer().getMetricManager();
+    MetricRegistry registry = manager.registry(h.getCore().getCoreMetricManager().getRegistryName());
+    return registry.getMetrics();
+  }
+
   @Test
   public void testLogReplay() throws Exception {
     try {
@@ -107,6 +118,9 @@ public class TestRecovery extends SolrTestCaseJ4 {
 
       h.close();
       createCore();
+
+      Map<String, Metric> metrics = getMetrics(); // live map view
+
       // Solr should kick this off now
       // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
 
@@ -117,6 +131,15 @@ public class TestRecovery extends SolrTestCaseJ4 {
       // make sure we can still access versions after a restart
       assertJQ(req("qt","/get", "getVersions",""+versions.size()),"/versions==" + versions);
 
+      assertEquals(UpdateLog.State.REPLAYING, h.getCore().getUpdateHandler().getUpdateLog().getState());
+      // check metrics
+      Gauge<Integer> state = (Gauge<Integer>)metrics.get("TLOG.state");
+      assertEquals(UpdateLog.State.REPLAYING.ordinal(), state.getValue().intValue());
+      Gauge<Integer> replayingLogs = (Gauge<Integer>)metrics.get("TLOG.replay.remaining.logs");
+      assertTrue(replayingLogs.getValue().intValue() > 0);
+      Gauge<Long> replayingDocs = (Gauge<Long>)metrics.get("TLOG.replay.remaining.bytes");
+      assertTrue(replayingDocs.getValue().longValue() > 0);
+
       // unblock recovery
       logReplay.release(1000);
 
@@ -128,6 +151,10 @@ public class TestRecovery extends SolrTestCaseJ4 {
 
       assertJQ(req("q","*:*") ,"/response/numFound==3");
 
+      Meter replayDocs = (Meter)metrics.get("TLOG.replay.ops");
+      assertEquals(5L, replayDocs.getCount());
+      assertEquals(UpdateLog.State.ACTIVE.ordinal(), state.getValue().intValue());
+
       // make sure we can still access versions after recovery
       assertJQ(req("qt","/get", "getVersions",""+versions.size()) ,"/versions==" + versions);
 
@@ -195,15 +222,20 @@ public class TestRecovery extends SolrTestCaseJ4 {
       clearIndex();
       assertU(commit());
 
+      Map<String, Metric> metrics = getMetrics();
+
       assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
       ulog.bufferUpdates();
       assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+
       Future<UpdateLog.RecoveryInfo> rinfoFuture = ulog.applyBufferedUpdates();
       assertTrue(rinfoFuture == null);
       assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
 
       ulog.bufferUpdates();
       assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+      Gauge<Integer> state = (Gauge<Integer>)metrics.get("TLOG.state");
+      assertEquals(UpdateLog.State.BUFFERING.ordinal(), state.getValue().intValue());
 
       // simulate updates from a leader
       updateJ(jsonAdd(sdoc("id","B1", "_version_","1010")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
@@ -235,6 +267,8 @@ public class TestRecovery extends SolrTestCaseJ4 {
           ,"=={'doc':null}"
       );
 
+      Gauge<Integer> bufferedOps = (Gauge<Integer>)metrics.get("TLOG.buffered.ops");
+      assertEquals(6, bufferedOps.getValue().intValue());
 
       rinfoFuture = ulog.applyBufferedUpdates();
       assertTrue(rinfoFuture != null);
@@ -246,6 +280,8 @@ public class TestRecovery extends SolrTestCaseJ4 {
       UpdateLog.RecoveryInfo rinfo = rinfoFuture.get();
       assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
 
+      Meter applyingBuffered = (Meter)metrics.get("TLOG.applying_buffered.ops");
+      assertEquals(6L, applyingBuffered.getCount());
 
       assertJQ(req("qt","/get", "getVersions","6")
           ,"=={'versions':[-2010,1030,1020,-1017,1015,1010]}"
@@ -312,6 +348,8 @@ public class TestRecovery extends SolrTestCaseJ4 {
       assertEquals(1, recInfo.deleteByQuery);
 
       assertEquals(UpdateLog.State.ACTIVE, ulog.getState()); // leave each test method in a good state
+
+      assertEquals(0, bufferedOps.getValue().intValue());
     } finally {
       DirectUpdateHandler2.commitOnClose = true;
       UpdateLog.testing_logReplayHook = null;
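
Since these metrics land in the per-core registry, on a running node they should
also be visible through the metrics admin endpoint added in this release cycle,
roughly (host and port are illustrative):

  curl 'http://localhost:8983/solr/admin/metrics?group=core'

where the TLOG.* and REPLICATION.* entries asserted above appear under the
corresponding core's registry.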


[34/43] lucene-solr:jira/solr-8593: use relative URL path instead of absolute path starting from /solr

Posted by kr...@apache.org.
use relative URL path instead of absolute path starting from /solr


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e0b4cacc
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e0b4cacc
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e0b4cacc

Branch: refs/heads/jira/solr-8593
Commit: e0b4caccd3312b011cdfbb3951ea43812486ca98
Parents: 98422e0
Author: Yun Jie Zhou <yu...@cn.ibm.com>
Authored: Fri Sep 30 11:18:48 2016 -0400
Committer: Jan Høydahl <ja...@apache.org>
Committed: Tue Jan 10 14:18:31 2017 +0100

----------------------------------------------------------------------
 solr/webapp/web/js/angular/services.js | 46 ++++++++++++++---------------
 1 file changed, 23 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e0b4cacc/solr/webapp/web/js/angular/services.js
----------------------------------------------------------------------
diff --git a/solr/webapp/web/js/angular/services.js b/solr/webapp/web/js/angular/services.js
index 146939b..07043c3 100644
--- a/solr/webapp/web/js/angular/services.js
+++ b/solr/webapp/web/js/angular/services.js
@@ -19,11 +19,11 @@ var solrAdminServices = angular.module('solrAdminServices', ['ngResource']);
 
 solrAdminServices.factory('System',
   ['$resource', function($resource) {
-    return $resource('/solr/admin/info/system', {"wt":"json", "_":Date.now()});
+    return $resource('admin/info/system', {"wt":"json", "_":Date.now()});
   }])
 .factory('Collections',
   ['$resource', function($resource) {
-    return $resource('/solr/admin/collections',
+    return $resource('admin/collections',
     {'wt':'json', '_':Date.now()}, {
     "list": {params:{action: "LIST"}},
     "status": {params:{action: "CLUSTERSTATUS"}},
@@ -40,7 +40,7 @@ solrAdminServices.factory('System',
   }])
 .factory('Cores',
   ['$resource', function($resource) {
-    return $resource('/solr/admin/cores',
+    return $resource('admin/cores',
     {'wt':'json', '_':Date.now()}, {
     "query": {},
     "list": {params:{indexInfo: false}},
@@ -54,7 +54,7 @@ solrAdminServices.factory('System',
   }])
 .factory('Logging',
   ['$resource', function($resource) {
-    return $resource('/solr/admin/info/logging', {'wt':'json', '_':Date.now()}, {
+    return $resource('admin/info/logging', {'wt':'json', '_':Date.now()}, {
       "events": {params: {since:'0'}},
       "levels": {},
       "setLevel": {}
@@ -62,7 +62,7 @@ solrAdminServices.factory('System',
   }])
 .factory('Zookeeper',
   ['$resource', function($resource) {
-    return $resource('/solr/admin/zookeeper', {wt:'json', _:Date.now()}, {
+    return $resource('admin/zookeeper', {wt:'json', _:Date.now()}, {
       "simple": {},
       "dump": {params: {dump: "true"}},
       "liveNodes": {params: {path: '/live_nodes'}},
@@ -81,30 +81,30 @@ solrAdminServices.factory('System',
   }])
 .factory('Properties',
   ['$resource', function($resource) {
-    return $resource('/solr/admin/info/properties', {'wt':'json', '_':Date.now()});
+    return $resource('admin/info/properties', {'wt':'json', '_':Date.now()});
   }])
 .factory('Threads',
   ['$resource', function($resource) {
-    return $resource('/solr/admin/info/threads', {'wt':'json', '_':Date.now()});
+    return $resource('admin/info/threads', {'wt':'json', '_':Date.now()});
   }])
 .factory('Properties',
   ['$resource', function($resource) {
-    return $resource('/solr/admin/info/properties', {'wt':'json', '_':Date.now()});
+    return $resource('admin/info/properties', {'wt':'json', '_':Date.now()});
   }])
 .factory('Replication',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/replication', {'wt':'json', core: "@core", '_':Date.now()}, {
+    return $resource(':core/replication', {'wt':'json', core: "@core", '_':Date.now()}, {
       "details": {params: {command: "details"}},
       "command": {params: {}}
     });
   }])
 .factory('CoreSystem',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/admin/system', {wt:'json', core: "@core", _:Date.now()});
+    return $resource(':core/admin/system', {wt:'json', core: "@core", _:Date.now()});
   }])
 .factory('Update',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/:handler', {core: '@core', wt:'json', _:Date.now(), handler:'update'}, {
+    return $resource(':core/:handler', {core: '@core', wt:'json', _:Date.now(), handler:'update'}, {
       "optimize": {params: { optimize: "true"}},
       "commit": {params: {commit: "true"}},
       "post": {headers: {'Content-type': 'application/json'}, method: "POST", params: {handler: '@handler'}},
@@ -115,7 +115,7 @@ solrAdminServices.factory('System',
   }])
 .service('FileUpload', function ($http) {
     this.upload = function(params, file, success, error){
-        var url = "/solr/" + params.core + "/" + params.handler + "?";
+        var url = "" + params.core + "/" + params.handler + "?";
         raw = params.raw;
         delete params.core;
         delete params.handler;
@@ -135,7 +135,7 @@ solrAdminServices.factory('System',
 })
 .factory('Luke',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/admin/luke', {core: '@core', wt:'json', _:Date.now()}, {
+    return $resource(':core/admin/luke', {core: '@core', wt:'json', _:Date.now()}, {
       "index":  {params: {numTerms: 0, show: 'index'}},
       "raw": {params: {numTerms: 0}},
       "schema": {params: {show:'schema'}},
@@ -156,13 +156,13 @@ solrAdminServices.factory('System',
   }])
 .factory('Analysis',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/analysis/field', {core: '@core', wt:'json', _:Date.now()}, {
+    return $resource(':core/analysis/field', {core: '@core', wt:'json', _:Date.now()}, {
       "field": {params: {"analysis.showmatch": true}}
     });
   }])
 .factory('DataImport',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/:name', {core: '@core', name: '@name', indent:'on', wt:'json', _:Date.now()}, {
+    return $resource(':core/:name', {core: '@core', name: '@name', indent:'on', wt:'json', _:Date.now()}, {
       "config": {params: {command: "show-config"}, headers: {doNotIntercept: "true"},
                  transformResponse: function(data) {
                     return {config: data};
@@ -177,14 +177,14 @@ solrAdminServices.factory('System',
   }])
 .factory('Ping',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/admin/ping', {wt:'json', core: '@core', ts:Date.now(), _:Date.now()}, {
+    return $resource(':core/admin/ping', {wt:'json', core: '@core', ts:Date.now(), _:Date.now()}, {
      "ping": {},
      "status": {params:{action:"status"}, headers: {doNotIntercept: "true"}
     }});
   }])
 .factory('Mbeans',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/admin/mbeans', {'wt':'json', core: '@core', '_':Date.now()}, {
+    return $resource(':core/admin/mbeans', {'wt':'json', core: '@core', '_':Date.now()}, {
         stats: {params: {stats: true}},
         info: {},
         reference: {
@@ -203,7 +203,7 @@ solrAdminServices.factory('System',
   }])
 .factory('Files',
   ['$resource', function($resource) {
-    return $resource('/solr/:core/admin/file', {'wt':'json', core: '@core', '_':Date.now()}, {
+    return $resource(':core/admin/file', {'wt':'json', core: '@core', '_':Date.now()}, {
       "list": {},
       "get": {method: "GET", interceptor: {
           response: function(config) {return config;}
@@ -214,7 +214,7 @@ solrAdminServices.factory('System',
   }])
 .factory('Query',
     ['$resource', function($resource) {
-       var resource = $resource('/solr/:core/:handler', {core: '@core', handler: '@handler', '_':Date.now()}, {
+       var resource = $resource(':core/:handler', {core: '@core', handler: '@handler', '_':Date.now()}, {
            "query": {
              method: "GET",
              transformResponse: function (data) {
@@ -232,19 +232,19 @@ solrAdminServices.factory('System',
                    }
                }
            }
-           return "/solr/" + params.core + "/" + params.handler + "?" + qs.sort().join("&");
+           return "" + params.core + "/" + params.handler + "?" + qs.sort().join("&");
        }
        return resource;
 }])
 .factory('Segments',
    ['$resource', function($resource) {
-       return $resource('/solr/:core/admin/segments', {'wt':'json', core: '@core', _:Date.now()}, {
+       return $resource(':core/admin/segments', {'wt':'json', core: '@core', _:Date.now()}, {
            get: {}
        });
 }])
 .factory('Schema',
    ['$resource', function($resource) {
-     return $resource('/solr/:core/schema', {wt: 'json', core: '@core', _:Date.now()}, {
+     return $resource(':core/schema', {wt: 'json', core: '@core', _:Date.now()}, {
        get: {method: "GET"},
        check: {method: "GET", headers: {doNotIntercept: "true"}},
        post: {method: "POST"}
@@ -252,7 +252,7 @@ solrAdminServices.factory('System',
 }])
 .factory('Config',
    ['$resource', function($resource) {
-     return $resource('/solr/:core/config', {wt: 'json', core: '@core', _:Date.now()}, {
+     return $resource(':core/config', {wt: 'json', core: '@core', _:Date.now()}, {
        get: {method: "GET"}
      })
 }]);
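
The reason the relative form works: the browser resolves a $resource URL without a
leading slash against the page's base URL, so the Admin UI keeps functioning when
Solr is proxied under a context path other than /solr. A rough illustration (host
names and proxy path are hypothetical):

  // Admin UI loaded from http://host:8983/solr/index.html
  $resource('admin/cores')       // -> http://host:8983/solr/admin/cores
  // same UI proxied at http://gateway/search/
  $resource('admin/cores')       // -> http://gateway/search/admin/cores
  // the old absolute form always targeted /solr, breaking the proxied case
  $resource('/solr/admin/cores') // -> http://gateway/solr/admin/cores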


[26/43] lucene-solr:jira/solr-8593: SOLR-9932: add TestSolrCoreParser class

Posted by kr...@apache.org.
SOLR-9932: add TestSolrCoreParser class


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/5e9f9279
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/5e9f9279
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/5e9f9279

Branch: refs/heads/jira/solr-8593
Commit: 5e9f9279cccb49050c04f75e4551f64151d1001c
Parents: d7beb0f
Author: Christine Poerschke <cp...@apache.org>
Authored: Mon Jan 9 13:05:43 2017 +0000
Committer: Christine Poerschke <cp...@apache.org>
Committed: Mon Jan 9 13:05:43 2017 +0000

----------------------------------------------------------------------
 .../ApacheLuceneSolrNearQueryBuilder.java       |  51 +++++++++
 .../apache/solr/search/TestSolrCoreParser.java  | 110 +++++++++++++++++++
 2 files changed, 161 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5e9f9279/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java b/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java
new file mode 100644
index 0000000..135ec45
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.search;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryparser.xml.DOMUtils;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.queryparser.xml.QueryBuilder;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.solr.request.SolrQueryRequest;
+import org.w3c.dom.Element;
+
+public class ApacheLuceneSolrNearQueryBuilder extends SolrQueryBuilder {
+
+  public ApacheLuceneSolrNearQueryBuilder(String defaultField, Analyzer analyzer,
+      SolrQueryRequest req, QueryBuilder queryFactory) {
+    super(defaultField, analyzer, req, queryFactory);
+  }
+
+  @Override
+  public Query getQuery(Element e) throws ParserException {
+    final String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
+    final SpanQuery[] spanQueries = new SpanQuery[]{
+        new SpanTermQuery(new Term(fieldName, "Apache")),
+        new SpanTermQuery(new Term(fieldName, "Lucene")),
+        new SpanTermQuery(new Term(fieldName, "Solr"))
+    };
+    final int slop = 42;
+    final boolean inOrder = false;
+    return new SpanNearQuery(spanQueries, slop, inOrder);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/5e9f9279/solr/core/src/test/org/apache/solr/search/TestSolrCoreParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrCoreParser.java b/solr/core/src/test/org/apache/solr/search/TestSolrCoreParser.java
new file mode 100644
index 0000000..3ef96c3
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/search/TestSolrCoreParser.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.search;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenFilter;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.queryparser.xml.CoreParser;
+import org.apache.lucene.queryparser.xml.ParserException;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanNearQuery;
+import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.request.SolrQueryRequest;
+
+public class TestSolrCoreParser extends LuceneTestCase {
+
+  private SolrCoreParser solrCoreParser;
+
+  private CoreParser solrCoreParser() {
+    if (solrCoreParser == null) {
+      final String defaultField = "contents";
+      final Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
+      final SolrQueryRequest req = null;
+      solrCoreParser = new SolrCoreParser(defaultField, analyzer, req);
+      {
+        final NamedList<String> args = new NamedList<>();
+        args.add("HelloQuery", HelloQueryBuilder.class.getCanonicalName());
+        args.add("GoodbyeQuery", GoodbyeQueryBuilder.class.getCanonicalName());
+        args.add("HandyQuery", HandyQueryBuilder.class.getCanonicalName());
+        args.add("ApacheLuceneSolr", ApacheLuceneSolrNearQueryBuilder.class.getCanonicalName());
+        solrCoreParser.init(args);
+      }
+    }
+    return solrCoreParser;
+  }
+
+  private Query parseXmlString(String xml) throws IOException, ParserException {
+    final byte[] xmlBytes = xml.getBytes(StandardCharsets.UTF_8);
+    final InputStream xmlStream = new ByteArrayInputStream(xmlBytes);
+    return solrCoreParser().parse(xmlStream);
+  }
+
+  private Query parseHandyQuery(String lhsXml, String rhsXml) throws IOException, ParserException {
+    final String xml = "<HandyQuery>"
+        + "<Left>" + lhsXml + "</Left>"
+        + "<Right>" + rhsXml + "</Right>"
+        + "</HandyQuery>";
+    return parseXmlString(xml);
+  }
+
+  public void testHello() throws IOException, ParserException {
+    final Query query = parseXmlString("<HelloQuery/>");
+    assertTrue(query instanceof MatchAllDocsQuery);
+  }
+
+  public void testGoodbye() throws IOException, ParserException {
+    final Query query = parseXmlString("<GoodbyeQuery/>");
+    assertTrue(query instanceof MatchNoDocsQuery);
+  }
+
+  public void testApacheLuceneSolr() throws IOException, ParserException {
+    final String fieldName = "contents";
+    final Query query = parseXmlString("<ApacheLuceneSolr fieldName='"+fieldName+"'/>");
+    assertTrue(query instanceof SpanNearQuery);
+    final SpanNearQuery snq = (SpanNearQuery)query;
+    assertEquals(fieldName, snq.getField());
+    assertEquals(42, snq.getSlop());
+    assertFalse(snq.isInOrder());
+    assertEquals(3, snq.getClauses().length);
+    assertTrue(snq.getClauses()[0] instanceof SpanTermQuery);
+    assertTrue(snq.getClauses()[1] instanceof SpanTermQuery);
+    assertTrue(snq.getClauses()[2] instanceof SpanTermQuery);
+  }
+
+  public void testHandyQuery() throws IOException, ParserException {
+    final String lhsXml = "<HelloQuery/>";
+    final String rhsXml = "<GoodbyeQuery/>";
+    final Query query = parseHandyQuery(lhsXml, rhsXml);
+    assertTrue(query instanceof BooleanQuery);
+    final BooleanQuery bq = (BooleanQuery)query;
+    assertEquals(2, bq.clauses().size());
+    assertTrue(bq.clauses().get(0).getQuery() instanceof MatchAllDocsQuery);
+    assertTrue(bq.clauses().get(1).getQuery() instanceof MatchNoDocsQuery);
+  }
+
+}
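
Outside of tests, SolrCoreParser picks up the same name-to-class mappings from the
XML query parser's plugin configuration. A minimal solrconfig.xml sketch (the builder
entry mirrors the test's init args; the parser registration itself is illustrative):

  <queryParser name="xmlparser" class="solr.XmlQParserPlugin">
    <str name="ApacheLuceneSolr">org.apache.solr.search.ApacheLuceneSolrNearQueryBuilder</str>
  </queryParser>

A request such as q={!xmlparser}<ApacheLuceneSolr fieldName='contents'/> would then
be parsed by the custom builder.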


[40/43] lucene-solr:jira/solr-8593: LUCENE-7559: UH: Also expose OffsetsEnum, and test its exposure

Posted by kr...@apache.org.
LUCENE-7559: UH: Also expose OffsetsEnum, and test its exposure


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7435ab18
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7435ab18
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7435ab18

Branch: refs/heads/jira/solr-8593
Commit: 7435ab18786a43f9004c44713446380711137b79
Parents: 04f45aa
Author: David Smiley <ds...@apache.org>
Authored: Thu Jan 12 00:29:17 2017 -0500
Committer: David Smiley <ds...@apache.org>
Committed: Thu Jan 12 00:29:17 2017 -0500

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  4 +-
 .../search/uhighlight/FieldHighlighter.java     |  4 +-
 .../lucene/search/uhighlight/OffsetsEnum.java   | 45 +++++++++++-----
 .../TestUnifiedHighlighterExtensibility.java    | 57 +++++++++++++++++++-
 4 files changed, 92 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7435ab18/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 109a534..4912920 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -252,8 +252,8 @@ Other
 
 * LUCENE-7534: fix smokeTestRelease.py to run on Cygwin (Mikhail Khludnev)
 
-* LUCENE-7559: UnifiedHighlighter: Make Passage more exposed to allow passage creation to
-  be customized. (David Smiley)
+* LUCENE-7559: UnifiedHighlighter: Make Passage and OffsetsEnum more exposed to allow
+  passage creation to be customized. (David Smiley)
 
 * LUCENE-7599: Simplify TestRandomChains using Java's built-in Predicate and
   Function interfaces. (Ahmet Arslan via Adrien Grand)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7435ab18/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldHighlighter.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldHighlighter.java
index 1caa739..cc9f318 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldHighlighter.java
@@ -138,7 +138,7 @@ public class FieldHighlighter {
 
     PriorityQueue<OffsetsEnum> offsetsEnumQueue = new PriorityQueue<>(offsetsEnums.size() + 1);
     for (OffsetsEnum off : offsetsEnums) {
-      off.weight = scorer.weight(contentLength, off.postingsEnum.freq());
+      off.setWeight(scorer.weight(contentLength, off.freq()));
       off.nextPosition(); // go to first position
       offsetsEnumQueue.add(off);
     }
@@ -214,7 +214,7 @@ public class FieldHighlighter {
           break;
         }
       }
-      passage.setScore(passage.getScore() + off.weight * scorer.tf(tf, passage.getEndOffset() - passage.getStartOffset()));
+      passage.setScore(passage.getScore() + off.getWeight() * scorer.tf(tf, passage.getEndOffset() - passage.getStartOffset()));
     }
 
     Passage[] passages = passageQueue.toArray(new Passage[passageQueue.size()]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7435ab18/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/OffsetsEnum.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/OffsetsEnum.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/OffsetsEnum.java
index db1ea1f..708f5c3 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/OffsetsEnum.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/OffsetsEnum.java
@@ -18,24 +18,25 @@ package org.apache.lucene.search.uhighlight;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.List;
 import java.util.Objects;
 
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.util.BytesRef;
 
 /**
- * Holds the term &amp; PostingsEnum, and info for tracking the occurrences of a term within the text.
- * It is advanced with the underlying postings and is placed in a priority queue by highlightOffsetsEnums
- * based on the start offset.
+ * Holds the term ({@link BytesRef}), the {@link PostingsEnum}, and offset iteration tracking.
+ * It is advanced with the underlying postings and is placed in a priority queue by
+ * {@link FieldHighlighter#highlightOffsetsEnums(List)} based on the start offset.
  *
  * @lucene.internal
  */
 public class OffsetsEnum implements Comparable<OffsetsEnum>, Closeable {
   private final BytesRef term;
-  final PostingsEnum postingsEnum; // with offsets
+  private final PostingsEnum postingsEnum; // with offsets
 
-  float weight; // set once in highlightOffsetsEnums
-  private int pos = 0; // the occurrence counter of this term within the text being highlighted.
+  private float weight; // set once in highlightOffsetsEnums
+  private int posCounter = 0; // the occurrence counter of this term within the text being highlighted.
 
   public OffsetsEnum(BytesRef term, PostingsEnum postingsEnum) throws IOException {
     this.term = term; // can be null
@@ -65,29 +66,47 @@ public class OffsetsEnum implements Comparable<OffsetsEnum>, Closeable {
     }
   }
 
-  BytesRef getTerm() throws IOException {
+  /** The term at this position; usually the same at every position. This term is a reference that is safe to
+   * keep referring to, even after we move to the next position. */
+  public BytesRef getTerm() throws IOException {
     // TODO TokenStreamOffsetStrategy could override OffsetsEnum; then remove this hack here
     return term != null ? term : postingsEnum.getPayload(); // abusing payload like this is a total hack!
   }
 
-  boolean hasMorePositions() throws IOException {
-    return pos < postingsEnum.freq();
+  public PostingsEnum getPostingsEnum() {
+    return postingsEnum;
   }
 
-  void nextPosition() throws IOException {
+  public int freq() throws IOException {
+    return postingsEnum.freq();
+  }
+
+  public boolean hasMorePositions() throws IOException {
+    return posCounter < postingsEnum.freq();
+  }
+
+  public void nextPosition() throws IOException {
     assert hasMorePositions();
-    pos++;
+    posCounter++;
     postingsEnum.nextPosition();
   }
 
-  int startOffset() throws IOException {
+  public int startOffset() throws IOException {
     return postingsEnum.startOffset();
   }
 
-  int endOffset() throws IOException {
+  public int endOffset() throws IOException {
     return postingsEnum.endOffset();
   }
 
+  public float getWeight() {
+    return weight;
+  }
+
+  public void setWeight(float weight) {
+    this.weight = weight;
+  }
+
   @Override
   public void close() throws IOException {
     // TODO TokenStreamOffsetStrategy could override OffsetsEnum; then this base impl would be no-op.

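For orientation, a minimal sketch (not part of this commit) of consuming an OffsetsEnum through the accessors made public above; the helper name dumpOffsets and the printing are illustrative assumptions:

    import java.io.IOException;
    import org.apache.lucene.search.uhighlight.OffsetsEnum;

    // Sketch only: walk every position of an OffsetsEnum via the now-public API.
    static void dumpOffsets(OffsetsEnum oe) throws IOException {
      while (oe.hasMorePositions()) {
        oe.nextPosition(); // advances the underlying postings to the next position
        System.out.println(oe.getTerm().utf8ToString()
            + " [" + oe.startOffset() + ", " + oe.endOffset() + ")");
      }
    }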
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7435ab18/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java
index 10757a5..08055a2 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java
@@ -40,12 +40,16 @@ import org.apache.lucene.search.uhighlight.Passage;
 import org.apache.lucene.search.uhighlight.PassageFormatter;
 import org.apache.lucene.search.uhighlight.PassageScorer;
 import org.apache.lucene.search.uhighlight.PhraseHelper;
+import org.apache.lucene.search.uhighlight.SplittingBreakIterator;
 import org.apache.lucene.search.uhighlight.UnifiedHighlighter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.junit.Test;
 
+/**
+ * Helps us be aware of visibility/extensibility concerns.
+ */
 public class TestUnifiedHighlighterExtensibility extends LuceneTestCase {
 
   /**
@@ -144,7 +148,19 @@ public class TestUnifiedHighlighterExtensibility extends LuceneTestCase {
 
       @Override
       protected FieldHighlighter getFieldHighlighter(String field, Query query, Set<Term> allTerms, int maxPassages) {
-        return super.getFieldHighlighter(field, query, allTerms, maxPassages);
+        // THIS IS A COPY of the superclass impl, except that it returns a CustomFieldHighlighter
+        BytesRef[] terms = filterExtractedTerms(getFieldMatcher(field), allTerms);
+        Set<HighlightFlag> highlightFlags = getFlags(field);
+        PhraseHelper phraseHelper = getPhraseHelper(field, query, highlightFlags);
+        CharacterRunAutomaton[] automata = getAutomata(field, query, highlightFlags);
+        OffsetSource offsetSource = getOptimizedOffsetSource(field, terms, phraseHelper, automata);
+        return new CustomFieldHighlighter(field,
+            getOffsetStrategy(offsetSource, field, terms, phraseHelper, automata, highlightFlags),
+            new SplittingBreakIterator(getBreakIterator(field), UnifiedHighlighter.MULTIVAL_SEP_CHAR),
+            getScorer(field),
+            maxPassages,
+            getMaxNoHighlightPassages(field),
+            getFormatter(field));
       }
 
       @Override
@@ -185,4 +201,43 @@ public class TestUnifiedHighlighterExtensibility extends LuceneTestCase {
     assertEquals(fieldHighlighter.getField(), fieldName);
   }
 
+  /** Tests maintaining extensibility/visibility of {@link org.apache.lucene.search.uhighlight.FieldHighlighter} out of package. */
+  private class CustomFieldHighlighter extends FieldHighlighter {
+    CustomFieldHighlighter(String field, FieldOffsetStrategy fieldOffsetStrategy, BreakIterator breakIterator, PassageScorer passageScorer, int maxPassages, int maxNoHighlightPassages, PassageFormatter passageFormatter) {
+      super(field, fieldOffsetStrategy, breakIterator, passageScorer, maxPassages, maxNoHighlightPassages, passageFormatter);
+    }
+
+    @Override
+    public Object highlightFieldForDoc(IndexReader reader, int docId, String content) throws IOException {
+      return super.highlightFieldForDoc(reader, docId, content);
+    }
+
+    @Override
+    protected Passage[] highlightOffsetsEnums(List<OffsetsEnum> offsetsEnums) throws IOException {
+      // TEST OffsetsEnums & Passage visibility
+
+      // this code never runs; just for compilation
+      OffsetsEnum oe = new OffsetsEnum(null, EMPTY);
+      oe.getTerm();
+      oe.getPostingsEnum();
+      oe.freq();
+      oe.hasMorePositions();
+      oe.nextPosition();
+      oe.startOffset();
+      oe.endOffset();
+      oe.getWeight();
+      oe.setWeight(2f);
+
+      Passage p = new Passage();
+      p.setStartOffset(0);
+      p.setEndOffset(9);
+      p.setScore(1f);
+      p.addMatch(1, 2, new BytesRef());
+      p.reset();
+      p.sort();
+      //... getters are all exposed; custom PassageFormatter impls use them
+
+      return super.highlightOffsetsEnums(offsetsEnums);
+    }
+  }
 }


[28/43] lucene-solr:jira/solr-8593: LUCENE-7624: Remove deprecated TermsQuery

Posted by kr...@apache.org.
LUCENE-7624: Remove deprecated TermsQuery


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/17cd0f00
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/17cd0f00
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/17cd0f00

Branch: refs/heads/jira/solr-8593
Commit: 17cd0f00cc1a7bce647eedfe56c860a02aa22654
Parents: 22940f5
Author: Alan Woodward <ro...@apache.org>
Authored: Mon Jan 9 15:33:07 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Mon Jan 9 15:33:07 2017 +0000

----------------------------------------------------------------------
 .../org/apache/lucene/queries/TermsQuery.java   | 65 --------------------
 1 file changed, 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/17cd0f00/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java
deleted file mode 100644
index 5effa83..0000000
--- a/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.queries;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.TermInSetQuery;
-import org.apache.lucene.util.BytesRef;
-
-/**
- * @deprecated Use {@link org.apache.lucene.search.TermInSetQuery}
- */
-@Deprecated
-public class TermsQuery extends TermInSetQuery {
-
-  /**
-   * Creates a new {@link TermsQuery} from the given collection. It
-   * can contain duplicate terms and multiple fields.
-   */
-  public TermsQuery(Collection<Term> terms) {
-    super(terms);
-  }
-
-  /**
-   * Creates a new {@link TermsQuery} from the given collection for
-   * a single field. It can contain duplicate terms.
-   */
-  public TermsQuery(String field, Collection<BytesRef> terms) {
-    super(field, terms);
-  }
-
-  /**
-   * Creates a new {@link TermsQuery} from the given {@link BytesRef} array for
-   * a single field.
-   */
-  public TermsQuery(String field, BytesRef...terms) {
-    this(field, Arrays.asList(terms));
-  }
-
-  /**
-   * Creates a new {@link TermsQuery} from the given array. The array can
-   * contain duplicate terms and multiple fields.
-   */
-  public TermsQuery(final Term... terms) {
-    this(Arrays.asList(terms));
-  }
-
-
-}

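Callers migrating off the removed class can construct TermInSetQuery directly, as the deprecation javadoc advised. A hypothetical before/after (the field name and terms are made up):

    import java.util.Arrays;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermInSetQuery;
    import org.apache.lucene.util.BytesRef;

    // before (removed in this commit):
    //   Query q = new TermsQuery("body", new BytesRef("apache"), new BytesRef("lucene"));
    // after, using the direct superclass:
    Query q = new TermInSetQuery("body", Arrays.asList(new BytesRef("apache"), new BytesRef("lucene")));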

[03/43] lucene-solr:jira/solr-8593: LUCENE-7617: Grouping collector API cleanup

Posted by kr...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/Grouper.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/Grouper.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/Grouper.java
new file mode 100644
index 0000000..2ff79a1
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/Grouper.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.search.grouping;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.search.Sort;
+
+/**
+ * A factory object to create first and second-pass collectors, run by a {@link GroupingSearch}
+ * @param <T> the type of the group value
+ */
+public abstract class Grouper<T> {
+
+  /**
+   * Create a first-pass collector
+   * @param sort  the order in which groups should be returned
+   * @param count how many groups to return
+   */
+  public abstract FirstPassGroupingCollector<T> getFirstPassCollector(Sort sort, int count) throws IOException;
+
+  /**
+   * Create an {@link AllGroupsCollector}
+   */
+  public abstract AllGroupsCollector<T> getAllGroupsCollector();
+
+  /**
+   * Create an {@link AllGroupHeadsCollector}
+   * @param sort a within-group sort order to determine which doc is the group head
+   */
+  public abstract AllGroupHeadsCollector<T> getGroupHeadsCollector(Sort sort);
+
+  /**
+   * Create a second-pass collector
+   */
+  public abstract SecondPassGroupingCollector<T> getSecondPassCollector(
+      Collection<SearchGroup<T>> groups, Sort groupSort, Sort withinGroupSort,
+      int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) throws IOException;
+
+}

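To make the factory's role concrete, a sketch of the two passes a Grouper wires together (searcher, query, and the "author" field are assumptions; TermGrouper is the term-based implementation used by GroupingSearch below):

    // Sketch only: drive both grouping passes through the Grouper seam.
    Grouper<BytesRef> grouper = new TermGrouper("author", 128);
    FirstPassGroupingCollector<BytesRef> firstPass = grouper.getFirstPassCollector(Sort.RELEVANCE, 10);
    searcher.search(query, firstPass);
    Collection<SearchGroup<BytesRef>> topGroups = firstPass.getTopGroups(0, true);
    SecondPassGroupingCollector<BytesRef> secondPass =
        grouper.getSecondPassCollector(topGroups, Sort.RELEVANCE, Sort.RELEVANCE, 5, true, false, true);
    searcher.search(query, secondPass);
    TopGroups<BytesRef> result = secondPass.getTopGroups(0);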
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
index d0bab09..f4319d5 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
@@ -16,6 +16,11 @@
  */
 package org.apache.lucene.search.grouping;
 
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.CachingCollector;
 import org.apache.lucene.search.Collector;
@@ -25,25 +30,12 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.Weight;
-import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector;
-import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
-import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
-import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector;
-import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector;
-import org.apache.lucene.search.grouping.term.TermAllGroupsCollector;
-import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector;
-import org.apache.lucene.search.grouping.term.TermSecondPassGroupingCollector;
+import org.apache.lucene.search.grouping.function.FunctionGrouper;
+import org.apache.lucene.search.grouping.term.TermGrouper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.mutable.MutableValue;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
 /**
  * Convenience class to perform grouping in a non distributed environment.
  *
@@ -51,9 +43,7 @@ import java.util.Map;
  */
 public class GroupingSearch {
 
-  private final String groupField;
-  private final ValueSource groupFunction;
-  private final Map<?, ?> valueSourceContext;
+  private final Grouper grouper;
   private final Query groupEndDocs;
 
   private Sort groupSort = Sort.RELEVANCE;
@@ -70,7 +60,6 @@ public class GroupingSearch {
   private boolean cacheScores;
   private boolean allGroups;
   private boolean allGroupHeads;
-  private int initialSize = 128;
 
   private Collection<?> matchingGroups;
   private Bits matchingGroupHeads;
@@ -82,7 +71,11 @@ public class GroupingSearch {
    * @param groupField The name of the field to group by.
    */
   public GroupingSearch(String groupField) {
-    this(groupField, null, null, null);
+    this(new TermGrouper(groupField, 128), null);
+  }
+
+  public GroupingSearch(String groupField, int initialSize) {
+    this(new TermGrouper(groupField, initialSize), null);
   }
 
   /**
@@ -93,7 +86,7 @@ public class GroupingSearch {
    * @param valueSourceContext The context of the specified groupFunction
    */
   public GroupingSearch(ValueSource groupFunction, Map<?, ?> valueSourceContext) {
-    this(null, groupFunction, valueSourceContext, null);
+    this(new FunctionGrouper(groupFunction, valueSourceContext), null);
   }
 
   /**
@@ -103,13 +96,11 @@ public class GroupingSearch {
    * @param groupEndDocs The query that marks the last document in all doc blocks
    */
   public GroupingSearch(Query groupEndDocs) {
-    this(null, null, null, groupEndDocs);
+    this(null, groupEndDocs);
   }
 
-  private GroupingSearch(String groupField, ValueSource groupFunction, Map<?, ?> valueSourceContext, Query groupEndDocs) {
-    this.groupField = groupField;
-    this.groupFunction = groupFunction;
-    this.valueSourceContext = valueSourceContext;
+  private GroupingSearch(Grouper grouper, Query groupEndDocs) {
+    this.grouper = grouper;
     this.groupEndDocs = groupEndDocs;
   }
 
@@ -125,7 +116,7 @@ public class GroupingSearch {
    */
   @SuppressWarnings("unchecked")
   public <T> TopGroups<T> search(IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException {
-    if (groupField != null || groupFunction != null) {
+    if (grouper != null) {
       return groupByFieldOrFunction(searcher, query, groupOffset, groupLimit);
     } else if (groupEndDocs != null) {
       return (TopGroups<T>) groupByDocBlock(searcher, query, groupOffset, groupLimit);
@@ -137,49 +128,13 @@ public class GroupingSearch {
   @SuppressWarnings({"unchecked", "rawtypes"})
   protected TopGroups groupByFieldOrFunction(IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException {
     int topN = groupOffset + groupLimit;
-    final AbstractFirstPassGroupingCollector firstPassCollector;
-    final AbstractAllGroupsCollector allGroupsCollector;
-    final AbstractAllGroupHeadsCollector allGroupHeadsCollector;
-    if (groupFunction != null) {
-      firstPassCollector = new FunctionFirstPassGroupingCollector(groupFunction, valueSourceContext, groupSort, topN);
-      if (allGroups) {
-        allGroupsCollector = new FunctionAllGroupsCollector(groupFunction, valueSourceContext);
-      } else {
-        allGroupsCollector = null;
-      }
-      if (allGroupHeads) {
-        allGroupHeadsCollector = new FunctionAllGroupHeadsCollector(groupFunction, valueSourceContext, sortWithinGroup);
-      } else {
-        allGroupHeadsCollector = null;
-      }
-    } else {
-      firstPassCollector = new TermFirstPassGroupingCollector(groupField, groupSort, topN);
-      if (allGroups) {
-        allGroupsCollector = new TermAllGroupsCollector(groupField, initialSize);
-      } else {
-        allGroupsCollector = null;
-      }
-      if (allGroupHeads) {
-        allGroupHeadsCollector = TermAllGroupHeadsCollector.create(groupField, sortWithinGroup, initialSize);
-      } else {
-        allGroupHeadsCollector = null;
-      }
-    }
 
-    final Collector firstRound;
-    if (allGroupHeads || allGroups) {
-      List<Collector> collectors = new ArrayList<>();
-      collectors.add(firstPassCollector);
-      if (allGroups) {
-        collectors.add(allGroupsCollector);
-      }
-      if (allGroupHeads) {
-        collectors.add(allGroupHeadsCollector);
-      }
-      firstRound = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
-    } else {
-      firstRound = firstPassCollector;
-    }
+    final FirstPassGroupingCollector firstPassCollector = grouper.getFirstPassCollector(groupSort, topN);
+    final AllGroupsCollector allGroupsCollector = allGroups ? grouper.getAllGroupsCollector() : null;
+    final AllGroupHeadsCollector allGroupHeadsCollector
+        = allGroupHeads ? grouper.getGroupHeadsCollector(sortWithinGroup) : null;
+
+    final Collector firstRound = MultiCollector.wrap(firstPassCollector, allGroupsCollector, allGroupHeadsCollector);
 
     CachingCollector cachedCollector = null;
     if (maxCacheRAMMB != null || maxDocsToCache != null) {
@@ -193,16 +148,9 @@ public class GroupingSearch {
       searcher.search(query, firstRound);
     }
 
-    if (allGroups) {
-      matchingGroups = allGroupsCollector.getGroups();
-    } else {
-      matchingGroups = Collections.emptyList();
-    }
-    if (allGroupHeads) {
-      matchingGroupHeads = allGroupHeadsCollector.retrieveGroupHeads(searcher.getIndexReader().maxDoc());
-    } else {
-      matchingGroupHeads = new Bits.MatchNoBits(searcher.getIndexReader().maxDoc());
-    }
+    matchingGroups = allGroups ? allGroupsCollector.getGroups() : Collections.emptyList();
+    matchingGroupHeads = allGroupHeads ? allGroupHeadsCollector.retrieveGroupHeads(searcher.getIndexReader().maxDoc())
+        : new Bits.MatchNoBits(searcher.getIndexReader().maxDoc());
 
     Collection<SearchGroup> topSearchGroups = firstPassCollector.getTopGroups(groupOffset, fillSortFields);
     if (topSearchGroups == null) {
@@ -210,12 +158,9 @@ public class GroupingSearch {
     }
 
     int topNInsideGroup = groupDocsOffset + groupDocsLimit;
-    AbstractSecondPassGroupingCollector secondPassCollector;
-    if (groupFunction != null) {
-      secondPassCollector = new FunctionSecondPassGroupingCollector((Collection) topSearchGroups, groupSort, sortWithinGroup, topNInsideGroup, includeScores, includeMaxScore, fillSortFields, groupFunction, valueSourceContext);
-    } else {
-      secondPassCollector = new TermSecondPassGroupingCollector(groupField, (Collection) topSearchGroups, groupSort, sortWithinGroup, topNInsideGroup, includeScores, includeMaxScore, fillSortFields);
-    }
+    SecondPassGroupingCollector secondPassCollector
+        = grouper.getSecondPassCollector(topSearchGroups, groupSort, sortWithinGroup, topNInsideGroup,
+                                         includeScores, includeMaxScore, fillSortFields);
 
     if (cachedCollector != null && cachedCollector.isCached()) {
       cachedCollector.replay(secondPassCollector);
@@ -411,19 +356,4 @@ public class GroupingSearch {
     return matchingGroupHeads;
   }
 
-  /**
-   * Sets the initial size of some internal used data structures.
-   * This prevents growing data structures many times. This can improve the performance of the grouping at the cost of
-   * more initial RAM.
-   * <p>
-   * The {@link #setAllGroups} and {@link #setAllGroupHeads} features use this option.
-   * Defaults to 128.
-   *
-   * @param initialSize The initial size of some internal used data structures
-   * @return <code>this</code>
-   */
-  public GroupingSearch setInitialSize(int initialSize) {
-    this.initialSize = initialSize;
-    return this;
-  }
 }

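For the common case the convenience class hides both passes; note that the initial size formerly configured via the removed setInitialSize is now a constructor argument. A minimal usage sketch (searcher, query, and the field name are assumptions):

    // Sketch only: term grouping through GroupingSearch after this cleanup.
    GroupingSearch groupingSearch = new GroupingSearch("author", 256); // initialSize now passed here
    groupingSearch.setAllGroups(true);
    TopGroups<BytesRef> hits = groupingSearch.search(searcher, query, 0, 10);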
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
index b3a43cb..95a507c 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
@@ -16,28 +16,37 @@
  */
 package org.apache.lucene.search.grouping;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 
-import java.io.IOException;
-import java.util.*;
-
 /**
  * Represents a group that is found during the first pass search.
  *
  * @lucene.experimental
  */
-public class SearchGroup<GROUP_VALUE_TYPE> {
+public class SearchGroup<T> {
 
   /** The value that defines this group  */
-  public GROUP_VALUE_TYPE groupValue;
+  public T groupValue;
 
   /** The sort values used during sorting. These are the
    *  groupSort field values of the highest rank document
    *  (by the groupSort) within the group.  Can be
    * <code>null</code> if <code>fillFields=false</code> had
-   * been passed to {@link AbstractFirstPassGroupingCollector#getTopGroups} */
+   * been passed to {@link FirstPassGroupingCollector#getTopGroups} */
   public Object[] sortValues;
 
   @Override
@@ -327,7 +336,7 @@ public class SearchGroup<GROUP_VALUE_TYPE> {
    *  groupSort must match how the groups were sorted, and
    *  the provided SearchGroups must have been computed
    *  with fillFields=true passed to {@link
-   *  AbstractFirstPassGroupingCollector#getTopGroups}.
+   *  FirstPassGroupingCollector#getTopGroups}.
    *
    * <p>NOTE: this returns null if the topGroups is empty.
    */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java
new file mode 100644
index 0000000..f8feb75
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search.grouping;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
+
+/**
+ * SecondPassGroupingCollector is the second of two passes
+ * necessary to collect grouped docs.  This pass gathers the
+ * top N documents per top group computed from the
+ * first pass. Concrete subclasses define what a group is and how it
+ * is internally collected.
+ *
+ * <p>See {@link org.apache.lucene.search.grouping} for more
+ * details including a full code example.</p>
+ *
+ * @lucene.experimental
+ */
+public abstract class SecondPassGroupingCollector<T> extends SimpleCollector {
+
+  private final Collection<SearchGroup<T>> groups;
+  private final Sort groupSort;
+  private final Sort withinGroupSort;
+  private final int maxDocsPerGroup;
+  private final boolean needsScores;
+  protected final Map<T, SearchGroupDocs<T>> groupMap;
+
+  protected SearchGroupDocs<T>[] groupDocs;
+
+  private int totalHitCount;
+  private int totalGroupedHitCount;
+
+  public SecondPassGroupingCollector(Collection<SearchGroup<T>> groups, Sort groupSort, Sort withinGroupSort,
+                                     int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields)
+    throws IOException {
+
+    //System.out.println("SP init");
+    if (groups.isEmpty()) {
+      throw new IllegalArgumentException("no groups to collect (groups is empty)");
+    }
+
+    this.groups = Objects.requireNonNull(groups);
+    this.groupSort = Objects.requireNonNull(groupSort);
+    this.withinGroupSort = Objects.requireNonNull(withinGroupSort);
+    this.maxDocsPerGroup = maxDocsPerGroup;
+    this.needsScores = getScores || getMaxScores || withinGroupSort.needsScores();
+
+    this.groupMap = new HashMap<>(groups.size());
+    for (SearchGroup<T> group : groups) {
+      //System.out.println("  prep group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString()));
+      final TopDocsCollector<?> collector;
+      if (withinGroupSort.equals(Sort.RELEVANCE)) { // optimize to use TopScoreDocCollector
+        // Sort by score
+        collector = TopScoreDocCollector.create(maxDocsPerGroup);
+      } else {
+        // Sort by fields
+        collector = TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, fillSortFields, getScores, getMaxScores);
+      }
+      groupMap.put(group.groupValue, new SearchGroupDocs<>(group.groupValue, collector));
+    }
+  }
+
+  @Override
+  public boolean needsScores() {
+    return needsScores;
+  }
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+    for (SearchGroupDocs<T> group : groupMap.values()) {
+      group.leafCollector.setScorer(scorer);
+    }
+  }
+
+  @Override
+  public void collect(int doc) throws IOException {
+    totalHitCount++;
+    SearchGroupDocs<T> group = retrieveGroup(doc);
+    if (group != null) {
+      totalGroupedHitCount++;
+      group.leafCollector.collect(doc);
+    }
+  }
+
+  /**
+   * Returns the group the specified doc belongs to or <code>null</code> if no group could be retrieved.
+   *
+   * @param doc The specified doc
+   * @return the group the specified doc belongs to or <code>null</code> if no group could be retrieved
+   * @throws IOException If an I/O related error occurred
+   */
+  protected abstract SearchGroupDocs<T> retrieveGroup(int doc) throws IOException;
+
+  @Override
+  protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
+    //System.out.println("SP.setNextReader");
+    for (SearchGroupDocs<T> group : groupMap.values()) {
+      group.leafCollector = group.collector.getLeafCollector(readerContext);
+    }
+  }
+
+  public TopGroups<T> getTopGroups(int withinGroupOffset) {
+    @SuppressWarnings({"unchecked","rawtypes"})
+    final GroupDocs<T>[] groupDocsResult = (GroupDocs<T>[]) new GroupDocs[groups.size()];
+
+    int groupIDX = 0;
+    float maxScore = Float.MIN_VALUE;
+    for(SearchGroup<?> group : groups) {
+      final SearchGroupDocs<T> groupDocs = groupMap.get(group.groupValue);
+      final TopDocs topDocs = groupDocs.collector.topDocs(withinGroupOffset, maxDocsPerGroup);
+      groupDocsResult[groupIDX++] = new GroupDocs<>(Float.NaN,
+                                                                    topDocs.getMaxScore(),
+                                                                    topDocs.totalHits,
+                                                                    topDocs.scoreDocs,
+                                                                    groupDocs.groupValue,
+                                                                    group.sortValues);
+      maxScore = Math.max(maxScore, topDocs.getMaxScore());
+    }
+
+    return new TopGroups<>(groupSort.getSort(),
+                                           withinGroupSort.getSort(),
+                                           totalHitCount, totalGroupedHitCount, groupDocsResult,
+                                           maxScore);
+  }
+
+
+  // TODO: merge with SearchGroup or not?
+  // advantage: don't need to build a new hashmap
+  // disadvantage: blows up the size of SearchGroup if we need many of them, and couples implementations
+  public class SearchGroupDocs<T> {
+
+    public final T groupValue;
+    public final TopDocsCollector<?> collector;
+    public LeafCollector leafCollector;
+
+    public SearchGroupDocs(T groupValue, TopDocsCollector<?> collector) {
+      this.groupValue = groupValue;
+      this.collector = collector;
+    }
+  }
+}

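A concrete subclass only has to implement retrieveGroup(int); the per-group collectors, scoring, and the merge into TopGroups are handled here. An illustrative sketch, where the doc-to-group map is a stand-in assumption for what TermSecondPassGroupingCollector and FunctionSecondPassGroupingCollector derive from doc values:

    import java.io.IOException;
    import java.util.Collection;
    import java.util.Map;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.grouping.SearchGroup;
    import org.apache.lucene.search.grouping.SecondPassGroupingCollector;
    import org.apache.lucene.util.BytesRef;

    // Sketch only: a SecondPassGroupingCollector backed by a precomputed map.
    class MapBackedSecondPassCollector extends SecondPassGroupingCollector<BytesRef> {
      private final Map<Integer, BytesRef> docToGroup; // hypothetical doc -> group value mapping

      MapBackedSecondPassCollector(Collection<SearchGroup<BytesRef>> groups, Sort groupSort,
                                   Sort withinGroupSort, int maxDocsPerGroup,
                                   Map<Integer, BytesRef> docToGroup) throws IOException {
        super(groups, groupSort, withinGroupSort, maxDocsPerGroup, true, false, false);
        this.docToGroup = docToGroup;
      }

      @Override
      protected SearchGroupDocs<BytesRef> retrieveGroup(int doc) throws IOException {
        return groupMap.get(docToGroup.get(doc)); // null => doc belongs to no top group
      }
    }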
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
index a60c8f5..803482b 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
@@ -16,18 +16,18 @@
  */
 package org.apache.lucene.search.grouping;
 
+import java.io.IOException;
+
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopFieldDocs;
 
-import java.io.IOException;
-
 /** Represents result returned by a grouping search.
  *
  * @lucene.experimental */
-public class TopGroups<GROUP_VALUE_TYPE> {
+public class TopGroups<T> {
   /** Number of documents matching the search */
   public final int totalHitCount;
 
@@ -38,7 +38,7 @@ public class TopGroups<GROUP_VALUE_TYPE> {
   public final Integer totalGroupCount;
 
   /** Group results in groupSort order */
-  public final GroupDocs<GROUP_VALUE_TYPE>[] groups;
+  public final GroupDocs<T>[] groups;
 
   /** How groups are sorted against each other */
   public final SortField[] groupSort;
@@ -50,7 +50,7 @@ public class TopGroups<GROUP_VALUE_TYPE> {
    *  <code>Float.NaN</code> if scores were not computed. */
   public final float maxScore;
 
-  public TopGroups(SortField[] groupSort, SortField[] withinGroupSort, int totalHitCount, int totalGroupedHitCount, GroupDocs<GROUP_VALUE_TYPE>[] groups, float maxScore) {
+  public TopGroups(SortField[] groupSort, SortField[] withinGroupSort, int totalHitCount, int totalGroupedHitCount, GroupDocs<T>[] groups, float maxScore) {
     this.groupSort = groupSort;
     this.withinGroupSort = withinGroupSort;
     this.totalHitCount = totalHitCount;
@@ -60,7 +60,7 @@ public class TopGroups<GROUP_VALUE_TYPE> {
     this.maxScore = maxScore;
   }
 
-  public TopGroups(TopGroups<GROUP_VALUE_TYPE> oldTopGroups, Integer totalGroupCount) {
+  public TopGroups(TopGroups<T> oldTopGroups, Integer totalGroupCount) {
     this.groupSort = oldTopGroups.groupSort;
     this.withinGroupSort = oldTopGroups.withinGroupSort;
     this.totalHitCount = oldTopGroups.totalHitCount;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java
index 4c6071c..f4d4668 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupHeadsCollector.java
@@ -16,6 +16,11 @@
  */
 package org.apache.lucene.search.grouping.function;
 
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
@@ -24,25 +29,20 @@ import org.apache.lucene.search.LeafFieldComparator;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.AllGroupHeadsCollector;
 import org.apache.lucene.util.mutable.MutableValue;
 
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
 /**
- * An implementation of {@link AbstractAllGroupHeadsCollector} for retrieving the most relevant groups when grouping
+ * An implementation of {@link AllGroupHeadsCollector} for retrieving the most relevant groups when grouping
  * by {@link ValueSource}.
  *
  * @lucene.experimental
  */
-public class FunctionAllGroupHeadsCollector extends AbstractAllGroupHeadsCollector<FunctionAllGroupHeadsCollector.GroupHead> {
+public class FunctionAllGroupHeadsCollector extends AllGroupHeadsCollector<MutableValue> {
 
   private final ValueSource groupBy;
   private final Map<?, ?> vsContext;
-  private final Map<MutableValue, GroupHead> groups;
+  private final Map<MutableValue, FunctionGroupHead> groups;
   private final Sort sortWithinGroup;
 
   private FunctionValues.ValueFiller filler;
@@ -73,10 +73,10 @@ public class FunctionAllGroupHeadsCollector extends AbstractAllGroupHeadsCollect
   @Override
   protected void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException {
     filler.fillValue(doc);
-    GroupHead groupHead = groups.get(mval);
+    FunctionGroupHead groupHead = groups.get(mval);
     if (groupHead == null) {
       MutableValue groupValue = mval.duplicate();
-      groupHead = new GroupHead(groupValue, sortWithinGroup, doc);
+      groupHead = new FunctionGroupHead(groupValue, sortWithinGroup, doc);
       groups.put(groupValue, groupHead);
       temporalResult.stop = true;
     } else {
@@ -86,14 +86,14 @@ public class FunctionAllGroupHeadsCollector extends AbstractAllGroupHeadsCollect
   }
 
   @Override
-  protected Collection<GroupHead> getCollectedGroupHeads() {
+  protected Collection<FunctionGroupHead> getCollectedGroupHeads() {
     return groups.values();
   }
 
   @Override
   public void setScorer(Scorer scorer) throws IOException {
     this.scorer = scorer;
-    for (GroupHead groupHead : groups.values()) {
+    for (FunctionGroupHead groupHead : groups.values()) {
       for (LeafFieldComparator comparator : groupHead.leafComparators) {
         comparator.setScorer(scorer);
       }
@@ -107,7 +107,7 @@ public class FunctionAllGroupHeadsCollector extends AbstractAllGroupHeadsCollect
     filler = values.getValueFiller();
     mval = filler.getValue();
 
-    for (GroupHead groupHead : groups.values()) {
+    for (FunctionGroupHead groupHead : groups.values()) {
       for (int i = 0; i < groupHead.comparators.length; i++) {
         groupHead.leafComparators[i] = groupHead.comparators[i].getLeafComparator(context);
       }
@@ -117,13 +117,13 @@ public class FunctionAllGroupHeadsCollector extends AbstractAllGroupHeadsCollect
   /** Holds current head document for a single group.
    *
    * @lucene.experimental */
-  public class GroupHead extends AbstractAllGroupHeadsCollector.GroupHead<MutableValue> {
+  public class FunctionGroupHead extends AllGroupHeadsCollector.GroupHead<MutableValue> {
 
     final FieldComparator<?>[] comparators;
     final LeafFieldComparator[] leafComparators;
 
     @SuppressWarnings({"unchecked","rawtypes"})
-    private GroupHead(MutableValue groupValue, Sort sort, int doc) throws IOException {
+    private FunctionGroupHead(MutableValue groupValue, Sort sort, int doc) throws IOException {
       super(groupValue, doc + readerContext.docBase);
       final SortField[] sortFields = sort.getSort();
       comparators = new FieldComparator[sortFields.length];

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java
index 58418ed..1609d4d 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java
@@ -19,7 +19,7 @@ package org.apache.lucene.search.grouping.function;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
+import org.apache.lucene.search.grouping.AllGroupsCollector;
 import org.apache.lucene.util.mutable.MutableValue;
 
 import java.io.IOException;
@@ -39,7 +39,7 @@ import java.util.TreeSet;
  *
  * @lucene.experimental
  */
-public class FunctionAllGroupsCollector extends AbstractAllGroupsCollector<MutableValue> {
+public class FunctionAllGroupsCollector extends AllGroupsCollector<MutableValue> {
 
   private final Map<?, ?> vsContext;
   private final ValueSource groupBy;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java
index 32e9324..69ead07 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionDistinctValuesCollector.java
@@ -16,27 +16,31 @@
  */
 package org.apache.lucene.search.grouping.function;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.grouping.AbstractDistinctValuesCollector;
+import org.apache.lucene.search.grouping.DistinctValuesCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.util.mutable.MutableValue;
 
-import java.io.IOException;
-import java.util.*;
-
 /**
- * Function based implementation of {@link org.apache.lucene.search.grouping.AbstractDistinctValuesCollector}.
+ * Function based implementation of {@link DistinctValuesCollector}.
  *
  * @lucene.experimental
  */
-public class FunctionDistinctValuesCollector extends AbstractDistinctValuesCollector<FunctionDistinctValuesCollector.GroupCount> {
+public class FunctionDistinctValuesCollector extends DistinctValuesCollector<MutableValue> {
 
   private final Map<?, ?> vsContext;
   private final ValueSource groupSource;
   private final ValueSource countSource;
-  private final Map<MutableValue, GroupCount> groupMap;
+  private final Map<MutableValue, GroupCount<MutableValue>> groupMap;
 
   private FunctionValues.ValueFiller groupFiller;
   private FunctionValues.ValueFiller countFiller;
@@ -49,19 +53,19 @@ public class FunctionDistinctValuesCollector extends AbstractDistinctValuesColle
     this.countSource = countSource;
     groupMap = new LinkedHashMap<>();
     for (SearchGroup<MutableValue> group : groups) {
-      groupMap.put(group.groupValue, new GroupCount(group.groupValue));
+      groupMap.put(group.groupValue, new GroupCount<>(group.groupValue));
     }
   }
 
   @Override
-  public List<GroupCount> getGroups() {
+  public List<GroupCount<MutableValue>> getGroups() {
     return new ArrayList<>(groupMap.values());
   }
 
   @Override
   public void collect(int doc) throws IOException {
     groupFiller.fillValue(doc);
-    GroupCount groupCount = groupMap.get(groupMval);
+    GroupCount<MutableValue> groupCount = groupMap.get(groupMval);
     if (groupCount != null) {
       countFiller.fillValue(doc);
       groupCount.uniqueValues.add(countMval.duplicate());
@@ -78,15 +82,4 @@ public class FunctionDistinctValuesCollector extends AbstractDistinctValuesColle
     countMval = countFiller.getValue();
   }
 
-  /** Holds distinct values for a single group.
-   *
-   * @lucene.experimental */
-  public static class GroupCount extends AbstractDistinctValuesCollector.GroupCount<MutableValue> {
-
-    GroupCount(MutableValue groupValue) {
-      super(groupValue);
-    }
-
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java
index 0237e21..85376e6 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionFirstPassGroupingCollector.java
@@ -20,19 +20,19 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
+import org.apache.lucene.search.grouping.FirstPassGroupingCollector;
 import org.apache.lucene.util.mutable.MutableValue;
 
 import java.io.IOException;
 import java.util.Map;
 
 /**
- * Concrete implementation of {@link AbstractFirstPassGroupingCollector} that groups based on
+ * Concrete implementation of {@link FirstPassGroupingCollector} that groups based on
  * {@link ValueSource} instances.
  *
  * @lucene.experimental
  */
-public class FunctionFirstPassGroupingCollector extends AbstractFirstPassGroupingCollector<MutableValue> {
+public class FunctionFirstPassGroupingCollector extends FirstPassGroupingCollector<MutableValue> {
 
   private final ValueSource groupByVS;
   private final Map<?, ?> vsContext;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionGrouper.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionGrouper.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionGrouper.java
new file mode 100644
index 0000000..5204dc2
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionGrouper.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.search.grouping.function;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.grouping.AllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.AllGroupsCollector;
+import org.apache.lucene.search.grouping.FirstPassGroupingCollector;
+import org.apache.lucene.search.grouping.SecondPassGroupingCollector;
+import org.apache.lucene.search.grouping.Grouper;
+import org.apache.lucene.search.grouping.SearchGroup;
+import org.apache.lucene.util.mutable.MutableValue;
+
+/**
+ * Collector factory for grouping by ValueSource
+ */
+public class FunctionGrouper extends Grouper<MutableValue> {
+
+  private final ValueSource valueSource;
+  private final Map<?, ?> context;
+
+  /**
+   * Create a Grouper for the provided ValueSource and context
+   */
+  public FunctionGrouper(ValueSource valueSource, Map<?, ?> context) {
+    this.valueSource = valueSource;
+    this.context = context;
+  }
+
+  @Override
+  public FirstPassGroupingCollector<MutableValue> getFirstPassCollector(Sort sort, int count) throws IOException {
+    return new FunctionFirstPassGroupingCollector(valueSource, context, sort, count);
+  }
+
+  @Override
+  public AllGroupHeadsCollector<MutableValue> getGroupHeadsCollector(Sort sort) {
+    return new FunctionAllGroupHeadsCollector(valueSource, context, sort);
+  }
+
+  @Override
+  public AllGroupsCollector<MutableValue> getAllGroupsCollector() {
+    return new FunctionAllGroupsCollector(valueSource, context);
+  }
+
+  @Override
+  public SecondPassGroupingCollector<MutableValue> getSecondPassCollector(Collection<SearchGroup<MutableValue>> searchGroups, Sort groupSort, Sort withinGroupSort, int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) throws IOException {
+    return new FunctionSecondPassGroupingCollector(searchGroups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields, valueSource, context);
+  }
+}

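Grouping by a computed value goes through the same seam: GroupingSearch(ValueSource, Map) now simply wraps a FunctionGrouper. A short sketch (the "year" field, searcher, and query are assumptions; LongFieldSource is from the queries module):

    // Sketch only: function-based grouping via the new factory.
    GroupingSearch byYear = new GroupingSearch(new LongFieldSource("year"), Collections.emptyMap());
    TopGroups<MutableValue> groups = byYear.search(searcher, query, 0, 10);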
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java
index d363267..45f2b37 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionSecondPassGroupingCollector.java
@@ -20,7 +20,7 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
+import org.apache.lucene.search.grouping.SecondPassGroupingCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.search.grouping.TopGroups; //javadoc
@@ -30,12 +30,12 @@ import java.util.Collection;
 import java.util.Map;
 
 /**
- * Concrete implementation of {@link AbstractSecondPassGroupingCollector} that groups based on
+ * Concrete implementation of {@link SecondPassGroupingCollector} that groups based on
  * {@link ValueSource} instances.
  *
  * @lucene.experimental
  */
-public class FunctionSecondPassGroupingCollector extends AbstractSecondPassGroupingCollector<MutableValue> {
+public class FunctionSecondPassGroupingCollector extends SecondPassGroupingCollector<MutableValue> {
 
   private final ValueSource groupByVS;
   private final Map<?, ?> vsContext;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
index 14636ec..54e2399 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupHeadsCollector.java
@@ -31,19 +31,19 @@ import org.apache.lucene.search.LeafFieldComparator;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.AllGroupHeadsCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.SentinelIntSet;
 
 /**
- * A base implementation of {@link org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector} for retrieving the most relevant groups when grouping
+ * A base implementation of {@link AllGroupHeadsCollector} for retrieving the most relevant groups when grouping
  * on a string-based group field. More specifically, all concrete implementations of this
  * base class use {@link SortedDocValues}.
  *
  * @lucene.experimental
  */
-public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHeadsCollector.GroupHead<?>> extends AbstractAllGroupHeadsCollector<GH> {
+public abstract class TermAllGroupHeadsCollector extends AllGroupHeadsCollector<BytesRef> {
 
   private static final int DEFAULT_INITIAL_SIZE = 128;
 
@@ -67,7 +67,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
    * @param sortWithinGroup The sort within each group
    * @return an <code>AllGroupHeadsCollector</code> instance based on the supplied arguments
    */
-  public static AbstractAllGroupHeadsCollector<?> create(String groupField, Sort sortWithinGroup) {
+  public static AllGroupHeadsCollector<BytesRef> create(String groupField, Sort sortWithinGroup) {
     return create(groupField, sortWithinGroup, DEFAULT_INITIAL_SIZE);
   }
 
@@ -82,7 +82,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
    *                    4 bytes * initialSize.
    * @return an <code>AllGroupHeadsCollector</code> instance based on the supplied arguments
    */
-  public static AbstractAllGroupHeadsCollector<?> create(String groupField, Sort sortWithinGroup, int initialSize) {
+  public static AllGroupHeadsCollector<BytesRef> create(String groupField, Sort sortWithinGroup, int initialSize) {
     boolean sortAllScore = true;
     boolean sortAllFieldValue = true;
 
@@ -113,7 +113,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
   }
 
   // A general impl that works for any group sort.
-  static class GeneralAllGroupHeadsCollector extends TermAllGroupHeadsCollector<GeneralAllGroupHeadsCollector.GroupHead> {
+  static class GeneralAllGroupHeadsCollector extends TermAllGroupHeadsCollector {
 
     private final Sort sortWithinGroup;
     private final Map<BytesRef, GroupHead> groups;
@@ -199,7 +199,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
       }
     }
 
-    class GroupHead extends AbstractAllGroupHeadsCollector.GroupHead<BytesRef> {
+    class GroupHead extends AllGroupHeadsCollector.GroupHead<BytesRef> {
 
       @SuppressWarnings({"unchecked", "rawtypes"})
       final FieldComparator[] comparators;
@@ -239,7 +239,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
 
 
   // AbstractAllGroupHeadsCollector optimized for ord fields and scores.
-  static class OrdScoreAllGroupHeadsCollector extends TermAllGroupHeadsCollector<OrdScoreAllGroupHeadsCollector.GroupHead> {
+  static class OrdScoreAllGroupHeadsCollector extends TermAllGroupHeadsCollector {
 
     private final SentinelIntSet ordSet;
     private final List<GroupHead> collectedGroups;
@@ -365,7 +365,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
       }
     }
 
-    class GroupHead extends AbstractAllGroupHeadsCollector.GroupHead<BytesRef> {
+    class GroupHead extends AllGroupHeadsCollector.GroupHead<BytesRef> {
 
       BytesRefBuilder[] sortValues;
       int[] sortOrds;
@@ -452,7 +452,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
 
 
   // AbstractAllGroupHeadsCollector optimized for ord fields.
-  static class OrdAllGroupHeadsCollector extends TermAllGroupHeadsCollector<OrdAllGroupHeadsCollector.GroupHead> {
+  static class OrdAllGroupHeadsCollector extends TermAllGroupHeadsCollector {
 
     private final SentinelIntSet ordSet;
     private final List<GroupHead> collectedGroups;
@@ -566,7 +566,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
       }
     }
 
-    class GroupHead extends AbstractAllGroupHeadsCollector.GroupHead<BytesRef> {
+    class GroupHead extends AllGroupHeadsCollector.GroupHead<BytesRef> {
 
       BytesRefBuilder[] sortValues;
       int[] sortOrds;
@@ -635,7 +635,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
 
 
   // AbstractAllGroupHeadsCollector optimized for scores.
-  static class ScoreAllGroupHeadsCollector extends TermAllGroupHeadsCollector<ScoreAllGroupHeadsCollector.GroupHead> {
+  static class ScoreAllGroupHeadsCollector extends TermAllGroupHeadsCollector {
 
     final SentinelIntSet ordSet;
     final List<GroupHead> collectedGroups;
@@ -727,7 +727,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
       }
     }
 
-    class GroupHead extends AbstractAllGroupHeadsCollector.GroupHead<BytesRef> {
+    class GroupHead extends AllGroupHeadsCollector.GroupHead<BytesRef> {
 
       float[] scores;
 

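Usage of the de-generified collector is unchanged apart from the names; a sketch that mirrors the test further below, assuming an IndexSearcher searcher and a Query query:

    // Collect the single most relevant doc ("group head") per group value (searcher/query assumed).
    Sort sortWithinGroup = new Sort(new SortField("id_1", SortField.Type.INT, true));
    AllGroupHeadsCollector<BytesRef> heads = TermAllGroupHeadsCollector.create("group", sortWithinGroup);
    searcher.search(query, heads);
    int[] headDocs = heads.retrieveGroupHeads();                                  // absolute doc ids
    Bits headBits = heads.retrieveGroupHeads(searcher.getIndexReader().maxDoc()); // same, as a Bits
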
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
index 0009c36..125555a 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermAllGroupsCollector.java
@@ -24,7 +24,7 @@ import java.util.List;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
+import org.apache.lucene.search.grouping.AllGroupsCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.SentinelIntSet;
 
@@ -42,7 +42,7 @@ import org.apache.lucene.util.SentinelIntSet;
  *
  * @lucene.experimental
  */
-public class TermAllGroupsCollector extends AbstractAllGroupsCollector<BytesRef> {
+public class TermAllGroupsCollector extends AllGroupsCollector<BytesRef> {
 
   private static final int DEFAULT_INITIAL_SIZE = 128;
 
@@ -53,7 +53,7 @@ public class TermAllGroupsCollector extends AbstractAllGroupsCollector<BytesRef>
   private SortedDocValues index;
 
   /**
-   * Expert: Constructs a {@link AbstractAllGroupsCollector}
+   * Expert: Constructs an {@link AllGroupsCollector}
    *
    * @param groupField  The field to group by
    * @param initialSize The initial allocation size of the
@@ -69,7 +69,7 @@ public class TermAllGroupsCollector extends AbstractAllGroupsCollector<BytesRef>
   }
 
   /**
-   * Constructs a {@link AbstractAllGroupsCollector}. This sets the
+   * Constructs an {@link AllGroupsCollector}. This sets the
    * initial allocation size for the internal int set and group
    * list to 128.
    *

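A sketch of the renamed collector in use, mirroring the test below (searcher assumed):

    // Count the distinct group values among the documents matching a query (searcher assumed).
    AllGroupsCollector<BytesRef> allGroups = new TermAllGroupsCollector("group");
    searcher.search(new TermQuery(new Term("content", "random")), allGroups);
    int distinctGroups = allGroups.getGroupCount();
    Collection<BytesRef> groupValues = allGroups.getGroups();
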
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
index 7980c48..e5356a3 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermDistinctValuesCollector.java
@@ -25,24 +25,24 @@ import java.util.List;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.search.grouping.AbstractDistinctValuesCollector;
+import org.apache.lucene.search.grouping.DistinctValuesCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.SentinelIntSet;
 
 /**
- * A term based implementation of {@link org.apache.lucene.search.grouping.AbstractDistinctValuesCollector} that relies
+ * A term based implementation of {@link DistinctValuesCollector} that relies
  * on {@link SortedDocValues} to count the distinct values per group.
  *
  * @lucene.experimental
  */
-public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector<TermDistinctValuesCollector.GroupCount> {
+public class TermDistinctValuesCollector extends DistinctValuesCollector<BytesRef> {
 
   private final String groupField;
   private final String countField;
-  private final List<GroupCount> groups;
+  private final List<TermGroupCount> groups;
   private final SentinelIntSet ordSet;
-  private final GroupCount groupCounts[];
+  private final TermGroupCount groupCounts[];
 
   private SortedDocValues groupFieldTermIndex;
   private SortedDocValues countFieldTermIndex;
@@ -59,10 +59,10 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
     this.countField = countField;
     this.groups = new ArrayList<>(groups.size());
     for (SearchGroup<BytesRef> group : groups) {
-      this.groups.add(new GroupCount(group.groupValue));
+      this.groups.add(new TermGroupCount(group.groupValue));
     }
     ordSet = new SentinelIntSet(groups.size(), -2);
-    groupCounts = new GroupCount[ordSet.keys.length];
+    groupCounts = new TermGroupCount[ordSet.keys.length];
   }
 
   @Override
@@ -81,7 +81,7 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
       return;
     }
 
-    GroupCount gc = groupCounts[slot];
+    TermGroupCount gc = groupCounts[slot];
     if (doc > countFieldTermIndex.docID()) {
       countFieldTermIndex.advance(doc);
     }
@@ -119,8 +119,8 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
   }
 
   @Override
-  public List<GroupCount> getGroups() {
-    return groups;
+  public List<GroupCount<BytesRef>> getGroups() {
+    return new ArrayList<>(groups);
   }
 
   @Override
@@ -128,7 +128,7 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
     groupFieldTermIndex = DocValues.getSorted(context.reader(), groupField);
     countFieldTermIndex = DocValues.getSorted(context.reader(), countField);
     ordSet.clear();
-    for (GroupCount group : groups) {
+    for (TermGroupCount group : groups) {
       int groupOrd = group.groupValue == null ? -1 : groupFieldTermIndex.lookupTerm(group.groupValue);
       if (group.groupValue != null && groupOrd < 0) {
         continue;
@@ -150,11 +150,11 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
   /** Holds distinct values for a single group.
    *
    * @lucene.experimental */
-  public static class GroupCount extends AbstractDistinctValuesCollector.GroupCount<BytesRef> {
+  public static class TermGroupCount extends DistinctValuesCollector.GroupCount<BytesRef> {
 
     int[] ords;
 
-    GroupCount(BytesRef groupValue) {
+    TermGroupCount(BytesRef groupValue) {
       super(groupValue);
     }
   }

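The new getGroups() override copies because Java generics are invariant: even though TermGroupCount extends DistinctValuesCollector.GroupCount<BytesRef>, a List<TermGroupCount> is not a List<GroupCount<BytesRef>>. Copying re-wraps each element under the supertype:

    List<TermGroupCount> groups = new ArrayList<>();          // element type is the subclass
    // List<GroupCount<BytesRef>> bad = groups;               // does not compile: invariance
    List<GroupCount<BytesRef>> ok = new ArrayList<>(groups);  // fine: each element widens
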
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
index 063521f..3c35fa8 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermFirstPassGroupingCollector.java
@@ -22,18 +22,18 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
+import org.apache.lucene.search.grouping.FirstPassGroupingCollector;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 
 /**
- * Concrete implementation of {@link org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector} that groups based on
+ * Concrete implementation of {@link FirstPassGroupingCollector} that groups based on
  * field values and more specifically uses {@link SortedDocValues}
  * to collect groups.
  *
  * @lucene.experimental
  */
-public class TermFirstPassGroupingCollector extends AbstractFirstPassGroupingCollector<BytesRef> {
+public class TermFirstPassGroupingCollector extends FirstPassGroupingCollector<BytesRef> {
 
   private SortedDocValues index;
 

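A sketch of the first pass on its own (searcher and query assumed):

    // Select the top 10 groups, ordered by the relevance of each group's best document.
    FirstPassGroupingCollector<BytesRef> first =
        new TermFirstPassGroupingCollector("group", Sort.RELEVANCE, 10);
    searcher.search(query, first);                              // searcher/query assumed
    Collection<SearchGroup<BytesRef>> topGroups = first.getTopGroups(0, true); // null if nothing matched
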
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
index efa75d6..cee327c 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGroupFacetCollector.java
@@ -25,19 +25,19 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.grouping.AbstractGroupFacetCollector;
+import org.apache.lucene.search.grouping.GroupFacetCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.SentinelIntSet;
 import org.apache.lucene.util.UnicodeUtil;
 
 /**
- * An implementation of {@link AbstractGroupFacetCollector} that computes grouped facets based on the indexed terms
+ * An implementation of {@link GroupFacetCollector} that computes grouped facets based on the indexed terms
  * from DocValues.
  *
  * @lucene.experimental
  */
-public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollector {
+public abstract class TermGroupFacetCollector extends GroupFacetCollector {
 
   final List<GroupedFacetHit> groupedFacetHits;
   final SentinelIntSet segmentGroupedFacetHits;
@@ -190,7 +190,7 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
       return new SegmentResult(segmentFacetCounts, segmentTotalCount, facetFieldTermsIndex.termsEnum(), startFacetOrd, endFacetOrd);
     }
 
-    private static class SegmentResult extends AbstractGroupFacetCollector.SegmentResult {
+    private static class SegmentResult extends GroupFacetCollector.SegmentResult {
 
       final TermsEnum tenum;
 
@@ -380,7 +380,7 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
       return new SegmentResult(segmentFacetCounts, segmentTotalCount, facetFieldNumTerms, facetOrdTermsEnum, startFacetOrd, endFacetOrd);
     }
 
-    private static class SegmentResult extends AbstractGroupFacetCollector.SegmentResult {
+    private static class SegmentResult extends GroupFacetCollector.SegmentResult {
 
       final TermsEnum tenum;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGrouper.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGrouper.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGrouper.java
new file mode 100644
index 0000000..246ee78
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermGrouper.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.search.grouping.term;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.grouping.AllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.AllGroupsCollector;
+import org.apache.lucene.search.grouping.FirstPassGroupingCollector;
+import org.apache.lucene.search.grouping.SecondPassGroupingCollector;
+import org.apache.lucene.search.grouping.Grouper;
+import org.apache.lucene.search.grouping.SearchGroup;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Collector factory for grouping by term
+ */
+public class TermGrouper extends Grouper<BytesRef> {
+
+  private final String field;
+  private final int initialSize;
+
+  /**
+   * Create a new TermGrouper
+   * @param field the field to group on
+   */
+  public TermGrouper(String field) {
+    this(field, 128);
+  }
+
+  /**
+   * Create a new TermGrouper
+   * @param field       the field to group on
+   * @param initialSize the initial size of various internal data structures
+   */
+  public TermGrouper(String field, int initialSize) {
+    this.field = field;
+    this.initialSize = initialSize;
+  }
+
+  @Override
+  public FirstPassGroupingCollector<BytesRef> getFirstPassCollector(Sort sort, int count) throws IOException {
+    return new TermFirstPassGroupingCollector(field, sort, count);
+  }
+
+  @Override
+  public AllGroupHeadsCollector<BytesRef> getGroupHeadsCollector(Sort sort) {
+    return TermAllGroupHeadsCollector.create(field, sort, initialSize);
+  }
+
+  @Override
+  public AllGroupsCollector<BytesRef> getAllGroupsCollector() {
+    return new TermAllGroupsCollector(field, initialSize);
+  }
+
+  @Override
+  public SecondPassGroupingCollector<BytesRef> getSecondPassCollector(
+      Collection<SearchGroup<BytesRef>> groups, Sort groupSort, Sort withinGroupSort,
+      int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) throws IOException {
+    return new TermSecondPassGroupingCollector(field, groups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
+  }
+
+
+}

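A sketch of the factory in use; each getter hands back one of the term-based collectors above:

    Grouper<BytesRef> grouper = new TermGrouper("group", 128);
    FirstPassGroupingCollector<BytesRef> first = grouper.getFirstPassCollector(new Sort(), 10);
    AllGroupsCollector<BytesRef> all = grouper.getAllGroupsCollector();
    AllGroupHeadsCollector<BytesRef> heads = grouper.getGroupHeadsCollector(new Sort());
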
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
index c7b16e9..75d2210 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/term/TermSecondPassGroupingCollector.java
@@ -23,19 +23,19 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
+import org.apache.lucene.search.grouping.SecondPassGroupingCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.SentinelIntSet;
 
 /**
- * Concrete implementation of {@link org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector} that groups based on
+ * Concrete implementation of {@link SecondPassGroupingCollector} that groups based on
  * field values and more specifically uses {@link SortedDocValues}
  * to collect grouped docs.
  *
  * @lucene.experimental
  */
-public class TermSecondPassGroupingCollector extends AbstractSecondPassGroupingCollector<BytesRef> {
+public class TermSecondPassGroupingCollector extends SecondPassGroupingCollector<BytesRef> {
 
   private final String groupField;
   private final SentinelIntSet ordSet;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
index 316676b..0c99456 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
@@ -52,6 +52,7 @@ import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
@@ -138,7 +139,7 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
     int maxDoc = reader.maxDoc();
 
     Sort sortWithinGroup = new Sort(new SortField("id_1", SortField.Type.INT, true));
-    AbstractAllGroupHeadsCollector<?> allGroupHeadsCollector = createRandomCollector(groupField, sortWithinGroup);
+    AllGroupHeadsCollector<?> allGroupHeadsCollector = createRandomCollector(groupField, sortWithinGroup);
     indexSearcher.search(new TermQuery(new Term("content", "random")), allGroupHeadsCollector);
     assertTrue(arrayContains(new int[]{2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads()));
     assertTrue(openBitSetContains(new int[]{2, 3, 5, 7}, allGroupHeadsCollector.retrieveGroupHeads(maxDoc), maxDoc));
@@ -326,7 +327,7 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
         final String searchTerm = "real" + random().nextInt(3);
         boolean sortByScoreOnly = random().nextBoolean();
         Sort sortWithinGroup = getRandomSort(sortByScoreOnly);
-        AbstractAllGroupHeadsCollector<?> allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup);
+        AllGroupHeadsCollector<?> allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup);
         s.search(new TermQuery(new Term("content", searchTerm)), allGroupHeadsCollector);
         int[] expectedGroupHeads = createExpectedGroupHeads(searchTerm, groupDocs, sortWithinGroup, sortByScoreOnly, fieldIdToDocID);
         int[] actualGroupHeads = allGroupHeadsCollector.retrieveGroupHeads();
@@ -402,8 +403,9 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
     return true;
   }
 
-  private boolean openBitSetContains(int[] expectedDocs, FixedBitSet actual, int maxDoc) throws IOException {
-    if (expectedDocs.length != actual.cardinality()) {
+  private boolean openBitSetContains(int[] expectedDocs, Bits actual, int maxDoc) throws IOException {
+    assert actual instanceof FixedBitSet;
+    if (expectedDocs.length != ((FixedBitSet)actual).cardinality()) {
       return false;
     }
 
@@ -510,8 +512,8 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
-  private AbstractAllGroupHeadsCollector<?> createRandomCollector(String groupField, Sort sortWithinGroup) {
-    AbstractAllGroupHeadsCollector<? extends AbstractAllGroupHeadsCollector.GroupHead> collector;
+  private AllGroupHeadsCollector<?> createRandomCollector(String groupField, Sort sortWithinGroup) {
+    AllGroupHeadsCollector<?> collector;
     if (random().nextBoolean()) {
       ValueSource vs = new BytesRefFieldSource(groupField);
       collector =  new FunctionAllGroupHeadsCollector(vs, new HashMap<>(), sortWithinGroup);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
index 30a147e..ab70fad 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
@@ -102,7 +102,7 @@ public class AllGroupsCollectorTest extends LuceneTestCase {
     IndexSearcher indexSearcher = newSearcher(w.getReader());
     w.close();
 
-    AbstractAllGroupsCollector<?> allGroupsCollector = createRandomCollector(groupField);
+    AllGroupsCollector<?> allGroupsCollector = createRandomCollector(groupField);
     indexSearcher.search(new TermQuery(new Term("content", "random")), allGroupsCollector);
     assertEquals(4, allGroupsCollector.getGroupCount());
 
@@ -123,8 +123,8 @@ public class AllGroupsCollectorTest extends LuceneTestCase {
     doc.add(new SortedDocValuesField(groupField, new BytesRef(value)));
   }
 
-  private AbstractAllGroupsCollector<?> createRandomCollector(String groupField) {
-    AbstractAllGroupsCollector<?> selected;
+  private AllGroupsCollector<?> createRandomCollector(String groupField) {
+    AllGroupsCollector<?> selected;
     if (random().nextBoolean()) {
       selected = new TermAllGroupsCollector(groupField);
     } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
index 313e92f..ba43ca8 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
@@ -126,10 +126,10 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     IndexSearcher indexSearcher = newSearcher(w.getReader());
     w.close();
 
-    Comparator<AbstractDistinctValuesCollector.GroupCount<Comparable<Object>>> cmp = new Comparator<AbstractDistinctValuesCollector.GroupCount<Comparable<Object>>>() {
+    Comparator<DistinctValuesCollector.GroupCount<Comparable<Object>>> cmp = new Comparator<DistinctValuesCollector.GroupCount<Comparable<Object>>>() {
 
       @Override
-      public int compare(AbstractDistinctValuesCollector.GroupCount<Comparable<Object>> groupCount1, AbstractDistinctValuesCollector.GroupCount<Comparable<Object>> groupCount2) {
+      public int compare(DistinctValuesCollector.GroupCount<Comparable<Object>> groupCount1, DistinctValuesCollector.GroupCount<Comparable<Object>> groupCount2) {
         if (groupCount1.groupValue == null) {
           if (groupCount2.groupValue == null) {
             return 0;
@@ -145,13 +145,13 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     };
 
     // === Search for content:random
-    AbstractFirstPassGroupingCollector<Comparable<Object>> firstCollector = createRandomFirstPassCollector(new Sort(), groupField, 10);
+    FirstPassGroupingCollector<Comparable<Object>> firstCollector = createRandomFirstPassCollector(new Sort(), groupField, 10);
     indexSearcher.search(new TermQuery(new Term("content", "random")), firstCollector);
-    AbstractDistinctValuesCollector<? extends AbstractDistinctValuesCollector.GroupCount<Comparable<Object>>> distinctValuesCollector
+    DistinctValuesCollector<Comparable<Object>> distinctValuesCollector
         = createDistinctCountCollector(firstCollector, groupField, countField);
     indexSearcher.search(new TermQuery(new Term("content", "random")), distinctValuesCollector);
 
-    List<? extends AbstractDistinctValuesCollector.GroupCount<Comparable<Object>>> gcs = distinctValuesCollector.getGroups();
+    List<DistinctValuesCollector.GroupCount<Comparable<Object>>> gcs = distinctValuesCollector.getGroups();
     Collections.sort(gcs, cmp);
     assertEquals(4, gcs.size());
 
@@ -240,15 +240,15 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
         Sort groupSort = new Sort(new SortField("id", SortField.Type.STRING));
         int topN = 1 + random.nextInt(10);
 
-        List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> expectedResult = createExpectedResult(context, term, groupSort, topN);
+        List<DistinctValuesCollector.GroupCount<Comparable<?>>> expectedResult = createExpectedResult(context, term, groupSort, topN);
 
-        AbstractFirstPassGroupingCollector<Comparable<?>> firstCollector = createRandomFirstPassCollector(groupSort, groupField, topN);
+        FirstPassGroupingCollector<Comparable<?>> firstCollector = createRandomFirstPassCollector(groupSort, groupField, topN);
         searcher.search(new TermQuery(new Term("content", term)), firstCollector);
-        AbstractDistinctValuesCollector<? extends AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> distinctValuesCollector
+        DistinctValuesCollector<Comparable<?>> distinctValuesCollector
             = createDistinctCountCollector(firstCollector, groupField, countField);
         searcher.search(new TermQuery(new Term("content", term)), distinctValuesCollector);
         @SuppressWarnings("unchecked")
-        List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> actualResult = (List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>>) distinctValuesCollector.getGroups();
+        List<DistinctValuesCollector.GroupCount<Comparable<?>>> actualResult = distinctValuesCollector.getGroups();
 
         if (VERBOSE) {
           System.out.println("Index iter=" + indexIter);
@@ -265,8 +265,8 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
 
         assertEquals(expectedResult.size(), actualResult.size());
         for (int i = 0; i < expectedResult.size(); i++) {
-          AbstractDistinctValuesCollector.GroupCount<Comparable<?>> expected = expectedResult.get(i);
-          AbstractDistinctValuesCollector.GroupCount<Comparable<?>> actual = actualResult.get(i);
+          DistinctValuesCollector.GroupCount<Comparable<?>> expected = expectedResult.get(i);
+          DistinctValuesCollector.GroupCount<Comparable<?>> actual = actualResult.get(i);
           assertValues(expected.groupValue, actual.groupValue);
           assertEquals(expected.uniqueValues.size(), actual.uniqueValues.size());
           List<Comparable<?>> expectedUniqueValues = new ArrayList<>(expected.uniqueValues);
@@ -283,9 +283,9 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     }
   }
 
-  private void printGroups(List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> results) {
+  private void printGroups(List<? extends DistinctValuesCollector.GroupCount<Comparable<?>>> results) {
     for(int i=0;i<results.size();i++) {
-      AbstractDistinctValuesCollector.GroupCount<Comparable<?>> group = results.get(i);
+      DistinctValuesCollector.GroupCount<Comparable<?>> group = results.get(i);
       Object gv = group.groupValue;
       if (gv instanceof BytesRef) {
         System.out.println(i + ": groupValue=" + ((BytesRef) gv).utf8ToString());
@@ -350,31 +350,31 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
-  private <T extends Comparable> AbstractDistinctValuesCollector<AbstractDistinctValuesCollector.GroupCount<T>> createDistinctCountCollector(AbstractFirstPassGroupingCollector<T> firstPassGroupingCollector,
-                                                                      String groupField,
-                                                                      String countField) throws IOException {
+  private <T extends Comparable> DistinctValuesCollector<T> createDistinctCountCollector(FirstPassGroupingCollector<T> firstPassGroupingCollector,
+                                                                                         String groupField,
+                                                                                         String countField) throws IOException {
     Random random = random();
     Collection<SearchGroup<T>> searchGroups = firstPassGroupingCollector.getTopGroups(0, false);
     if (FunctionFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
-      return (AbstractDistinctValuesCollector) new FunctionDistinctValuesCollector(new HashMap<>(), new BytesRefFieldSource(groupField), new BytesRefFieldSource(countField), (Collection) searchGroups);
+      return (DistinctValuesCollector) new FunctionDistinctValuesCollector(new HashMap<>(), new BytesRefFieldSource(groupField), new BytesRefFieldSource(countField), (Collection) searchGroups);
     } else {
-      return (AbstractDistinctValuesCollector) new TermDistinctValuesCollector(groupField, countField, (Collection) searchGroups);
+      return (DistinctValuesCollector) new TermDistinctValuesCollector(groupField, countField, (Collection) searchGroups);
     }
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
-  private <T> AbstractFirstPassGroupingCollector<T> createRandomFirstPassCollector(Sort groupSort, String groupField, int topNGroups) throws IOException {
+  private <T> FirstPassGroupingCollector<T> createRandomFirstPassCollector(Sort groupSort, String groupField, int topNGroups) throws IOException {
     Random random = random();
     if (random.nextBoolean()) {
-      return (AbstractFirstPassGroupingCollector<T>) new FunctionFirstPassGroupingCollector(new BytesRefFieldSource(groupField), new HashMap<>(), groupSort, topNGroups);
+      return (FirstPassGroupingCollector<T>) new FunctionFirstPassGroupingCollector(new BytesRefFieldSource(groupField), new HashMap<>(), groupSort, topNGroups);
     } else {
-      return (AbstractFirstPassGroupingCollector<T>) new TermFirstPassGroupingCollector(groupField, groupSort, topNGroups);
+      return (FirstPassGroupingCollector<T>) new TermFirstPassGroupingCollector(groupField, groupSort, topNGroups);
     }
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
-  private List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> createExpectedResult(IndexContext context,  String term, Sort groupSort, int topN) {
-    class GroupCount extends AbstractDistinctValuesCollector.GroupCount<BytesRef> {
+  private List<DistinctValuesCollector.GroupCount<Comparable<?>>> createExpectedResult(IndexContext context, String term, Sort groupSort, int topN) {
+    class GroupCount extends DistinctValuesCollector.GroupCount<BytesRef> {
       GroupCount(BytesRef groupValue, Collection<BytesRef> uniqueValues) {
         super(groupValue);
         this.uniqueValues.addAll(uniqueValues);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
index 82bbd39..46141bc 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
@@ -103,7 +103,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
     IndexSearcher indexSearcher = newSearcher(w.getReader());
 
     List<TermGroupFacetCollector.FacetEntry> entries;
-    AbstractGroupFacetCollector groupedAirportFacetCollector;
+    GroupFacetCollector groupedAirportFacetCollector;
     TermGroupFacetCollector.GroupedFacetResult airportResult;
     
     for (int limit : new int[] { 2, 10, 100, Integer.MAX_VALUE }) {
@@ -136,7 +136,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
       assertEquals(1, entries.get(0).getCount());
     }
 
-    AbstractGroupFacetCollector groupedDurationFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", null, false);
+    GroupFacetCollector groupedDurationFacetCollector = createRandomCollector(useDv ? "hotel_dv" : "hotel", useDv ? "duration_dv" : "duration", null, false);
     indexSearcher.search(new MatchAllDocsQuery(), groupedDurationFacetCollector);
     TermGroupFacetCollector.GroupedFacetResult durationResult = groupedDurationFacetCollector.mergeSegmentResults(10, 0, false);
     assertEquals(4, durationResult.getTotalCount());
@@ -344,7 +344,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
 
     w.close();
     IndexSearcher indexSearcher = newSearcher(DirectoryReader.open(dir));
-    AbstractGroupFacetCollector groupedAirportFacetCollector = createRandomCollector(groupField + "_dv", "airport", null, true);
+    GroupFacetCollector groupedAirportFacetCollector = createRandomCollector(groupField + "_dv", "airport", null, true);
     indexSearcher.search(new MatchAllDocsQuery(), groupedAirportFacetCollector);
     TermGroupFacetCollector.GroupedFacetResult airportResult = groupedAirportFacetCollector.mergeSegmentResults(10, 0, false);
     assertEquals(3, airportResult.getTotalCount());
@@ -404,7 +404,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
         }
 
         GroupedFacetResult expectedFacetResult = createExpectedFacetResult(searchTerm, context, offset, limit, minCount, orderByCount, facetPrefix);
-        AbstractGroupFacetCollector groupFacetCollector = createRandomCollector("group", "facet", facetPrefix, multipleFacetsPerDocument);
+        GroupFacetCollector groupFacetCollector = createRandomCollector("group", "facet", facetPrefix, multipleFacetsPerDocument);
         searcher.search(new TermQuery(new Term("content", searchTerm)), groupFacetCollector);
         TermGroupFacetCollector.GroupedFacetResult actualFacetResult = groupFacetCollector.mergeSegmentResults(size, minCount, orderByCount);
 
@@ -704,7 +704,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
     return new GroupedFacetResult(totalCount, totalMissCount, entriesResult);
   }
 
-  private AbstractGroupFacetCollector createRandomCollector(String groupField, String facetField, String facetPrefix, boolean multipleFacetsPerDocument) {
+  private GroupFacetCollector createRandomCollector(String groupField, String facetField, String facetPrefix, boolean multipleFacetsPerDocument) {
     BytesRef facetPrefixBR = facetPrefix == null ? null : new BytesRef(facetPrefix);
     return TermGroupFacetCollector.createTermGroupFacetCollector(groupField, facetField, multipleFacetsPerDocument, facetPrefixBR, random().nextInt(1024));
   }

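For reference, the grouped-faceting path exercised above, stripped to its essentials (searcher assumed; field names follow the test; getFacetEntries offsets are illustrative):

    // Grouped facet counts for "airport", grouped by "hotel" (searcher assumed).
    GroupFacetCollector collector = TermGroupFacetCollector.createTermGroupFacetCollector(
        "hotel", "airport", false, null, 128);
    searcher.search(new MatchAllDocsQuery(), collector);
    TermGroupFacetCollector.GroupedFacetResult result = collector.mergeSegmentResults(10, 0, false);
    int totalCount = result.getTotalCount();
    List<TermGroupFacetCollector.FacetEntry> entries = result.getFacetEntries(0, 10);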

[16/43] lucene-solr:jira/solr-8593: SOLR-9935: Add hl.fragsize support when using the UnifiedHighlighter

Posted by kr...@apache.org.
SOLR-9935: Add hl.fragsize support when using the UnifiedHighlighter


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/570880d3
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/570880d3
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/570880d3

Branch: refs/heads/jira/solr-8593
Commit: 570880d3acb45c925e8dc77172e0725ab8ba07b8
Parents: ea49989
Author: David Smiley <ds...@apache.org>
Authored: Sat Jan 7 23:32:37 2017 -0500
Committer: David Smiley <ds...@apache.org>
Committed: Sat Jan 7 23:32:37 2017 -0500

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  5 +++-
 .../solr/highlight/UnifiedSolrHighlighter.java  | 12 +++++++++-
 .../highlight/TestUnifiedSolrHighlighter.java   | 24 ++++++++++++++++----
 .../solr/common/params/HighlightParams.java     |  2 +-
 4 files changed, 35 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/570880d3/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 899dcd3..c18381e 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -109,7 +109,7 @@ Upgrade Notes
 
 * SOLR-9708: You are encouraged to try out the UnifiedHighlighter by setting hl.method=unified and report feedback. It
   might become the default in 7.0. It's more efficient/faster than the other highlighters, especially compared to the
-  original Highlighter. That said, some options aren't supported yet, notably hl.fragsize.
+  original Highlighter. That said, some options aren't supported yet.
   It will get more features in time, especially with your input. See HighlightParams.java
   for a listing of highlight parameters annotated with which highlighters use them.
   hl.useFastVectorHighlighter is now considered deprecated in lieu of hl.method=fastVector.
@@ -225,6 +225,9 @@ New Features
 * SOLR-7466: Enable leading wildcard in complexphrase query parser, optimize it with  ReversedWildcardFilterFactory 
   when it's provided (Mikhail Khludnev)
 
+* SOLR-9935: Add hl.fragsize support when using the UnifiedHighlighter to avoid snippets/Passages that are too small.
+  Defaults to 70. (David Smiley)
+
 Optimizations
 ----------------------
 * SOLR-9704: Facet Module / JSON Facet API: Optimize blockChildren facets that have

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/570880d3/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java
index 910fa2b..5b59b85 100644
--- a/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java
@@ -30,6 +30,7 @@ import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.postingshighlight.WholeBreakIterator;
 import org.apache.lucene.search.uhighlight.DefaultPassageFormatter;
+import org.apache.lucene.search.uhighlight.LengthGoalBreakIterator;
 import org.apache.lucene.search.uhighlight.PassageFormatter;
 import org.apache.lucene.search.uhighlight.PassageScorer;
 import org.apache.lucene.search.uhighlight.UnifiedHighlighter;
@@ -299,7 +300,16 @@ public class UnifiedSolrHighlighter extends SolrHighlighter implements PluginInf
       String variant = params.getFieldParam(field, HighlightParams.BS_VARIANT);
       Locale locale = parseLocale(language, country, variant);
       String type = params.getFieldParam(field, HighlightParams.BS_TYPE);
-      return parseBreakIterator(type, locale);
+      BreakIterator baseBI = parseBreakIterator(type, locale);
+
+      // Use a default fragsize the same as the regex Fragmenter (original Highlighter) since we're
+      //  both likely shooting for sentence-like patterns.
+      int fragsize = params.getFieldInt(field, HighlightParams.FRAGSIZE, LuceneRegexFragmenter.DEFAULT_FRAGMENT_SIZE);
+      if (fragsize <= 1 || baseBI instanceof WholeBreakIterator) { // no real minimum size
+        return baseBI;
+      }
+      return LengthGoalBreakIterator.createMinLength(baseBI, fragsize);
+      // TODO option for using createClosestToLength()
     }
 
     /**

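The effect of the new lines: the user-selected break iterator still decides what a natural break is, but accepted breaks are pushed out until fragments reach the length goal. A standalone sketch of the same wrapping:

    // Wrap a sentence iterator so fragments grow to at least ~70 chars before breaking.
    BreakIterator base = BreakIterator.getSentenceInstance(Locale.ROOT);
    BreakIterator bi = LengthGoalBreakIterator.createMinLength(base, 70);
    // hl.fragsize <= 1, or hl.bs.type=WHOLE, leaves the base iterator unwrapped (see above).
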
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/570880d3/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java b/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java
index e2511be..2eb4ba3 100644
--- a/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java
+++ b/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java
@@ -78,7 +78,8 @@ public class TestUnifiedSolrHighlighter extends SolrTestCaseJ4 {
         "text2", "document one", "text3", "crappy document", "id", "101"));
     assertU(commit());
     assertQ("multiple snippets test",
-        req("q", "text:document", "sort", "id asc", "hl", "true", "hl.snippets", "2", "hl.bs.type", "SENTENCE"),
+        req("q", "text:document", "sort", "id asc", "hl", "true", "hl.snippets", "2", "hl.bs.type", "SENTENCE",
+            "hl.fragsize", "0"),
         "count(//lst[@name='highlighting']/lst[@name='101']/arr[@name='text']/*)=2",
         "//lst[@name='highlighting']/lst[@name='101']/arr/str[1]='<em>Document</em> snippet one. '",
         "//lst[@name='highlighting']/lst[@name='101']/arr/str[2]='<em>Document</em> snippet two.'");
@@ -202,21 +203,34 @@ public class TestUnifiedSolrHighlighter extends SolrTestCaseJ4 {
         "//lst[@name='highlighting']/lst[@name='102']/arr[@name='text3']/str='crappier [document]'");
   }
   
-  public void testBreakIterator() {
+  public void testBreakIteratorWord() {
     assertQ("different breakiterator", 
-        req("q", "text:document", "sort", "id asc", "hl", "true", "hl.bs.type", "WORD"),
+        req("q", "text:document", "sort", "id asc", "hl", "true", "hl.bs.type", "WORD", "hl.fragsize", "-1"),
         "count(//lst[@name='highlighting']/*)=2",
         "//lst[@name='highlighting']/lst[@name='101']/arr[@name='text']/str='<em>document</em>'",
         "//lst[@name='highlighting']/lst[@name='102']/arr[@name='text']/str='<em>document</em>'");
   }
   
-  public void testBreakIterator2() {
+  public void testBreakIteratorWhole() {
     assertU(adoc("text", "Document one has a first sentence. Document two has a second sentence.", "id", "103"));
     assertU(commit());
     assertQ("different breakiterator", 
-        req("q", "text:document", "sort", "id asc", "hl", "true", "hl.bs.type", "WHOLE"),
+        req("q", "text:document", "sort", "id asc", "hl", "true", "hl.bs.type", "WHOLE", "hl.fragsize", "-1"),
         "//lst[@name='highlighting']/lst[@name='103']/arr[@name='text']/str='<em>Document</em> one has a first sentence. <em>Document</em> two has a second sentence.'");
   }
+
+  public void testFragsize() {
+    // the default fragsize is 70, so make the first sentence a bit shorter (closer to 70 than to the end of the text)
+    clearIndex();
+    assertU(adoc("id", "10", "text", "This is a sentence just under seventy chars in length blah blah. Next sentence is here."));
+    assertU(commit());
+    assertQ("default fragsize",
+        req("q", "text:seventy", "hl", "true"),
+        "//lst[@name='highlighting']/lst[@name='10']/arr[@name='text']/str='This is a sentence just under <em>seventy</em> chars in length blah blah. Next sentence is here.'");
+    assertQ("smaller fragsize",
+        req("q", "text:seventy", "hl", "true", "hl.fragsize", "60"), // a bit smaller
+        "//lst[@name='highlighting']/lst[@name='10']/arr[@name='text']/str='This is a sentence just under <em>seventy</em> chars in length blah blah. '");
+  }
   
   public void testEncoder() {
     assertU(adoc("text", "Document one has a first <i>sentence</i>.", "id", "103"));

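From the client side the new parameter is just another highlight param; a SolrJ sketch (the client variable and collection name are placeholders, not part of this commit):

    SolrQuery q = new SolrQuery("text:seventy");
    q.set("hl", "true");
    q.set("hl.method", "unified");
    q.set("hl.fragsize", "70");       // 0 or -1 switches the minimum-length goal off
    QueryResponse rsp = client.query("collection1", q); // client: a SolrClient; names are placeholders
    Map<String, Map<String, List<String>>> highlighting = rsp.getHighlighting();
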
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/570880d3/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java b/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java
index 917e9f5..997fc7e 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java
@@ -49,7 +49,7 @@ public interface HighlightParams {
   public static final String HIGHLIGHT_ALTERNATE = HIGHLIGHT+".highlightAlternate"; // OH, FVH
 
   // sizing
-  public static final String FRAGSIZE    = HIGHLIGHT+".fragsize"; // OH, FVH
+  public static final String FRAGSIZE    = HIGHLIGHT+".fragsize"; // OH, FVH, UH
   public static final String FRAGMENTER  = HIGHLIGHT+".fragmenter"; // OH
   public static final String INCREMENT   = HIGHLIGHT+".increment"; // OH
   public static final String REGEX       = "regex"; // OH


[27/43] lucene-solr:jira/solr-8593: LUCENE-7624: Move TermsQuery into core as TermInSetQuery

Posted by kr...@apache.org.
LUCENE-7624: Move TermsQuery into core as TermInSetQuery


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/22940f5c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/22940f5c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/22940f5c

Branch: refs/heads/jira/solr-8593
Commit: 22940f5c49297b606d710c6775309d67ff064f2f
Parents: 5e9f927
Author: Alan Woodward <ro...@apache.org>
Authored: Mon Jan 9 14:01:33 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Mon Jan 9 14:25:05 2017 +0000

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   3 +
 .../apache/lucene/search/TermInSetQuery.java    | 369 +++++++++++++++++++
 .../search/UsageTrackingQueryCachingPolicy.java |   5 +-
 .../lucene/search/TermInSetQueryTest.java       | 328 +++++++++++++++++
 .../apache/lucene/facet/MultiFacetQuery.java    |  12 +-
 .../org/apache/lucene/queries/TermsQuery.java   | 332 +----------------
 .../apache/lucene/queries/TermsQueryTest.java   | 339 -----------------
 .../prefix/TermQueryPrefixTreeStrategy.java     |  10 +-
 .../spatial/prefix/NumberRangeFacetsTest.java   |   6 +-
 .../solr/handler/component/ExpandComponent.java |  25 +-
 .../java/org/apache/solr/schema/FieldType.java  |   4 +-
 .../apache/solr/search/TermsQParserPlugin.java  |   4 +-
 .../org/apache/solr/search/join/GraphQuery.java |   4 +-
 .../apache/solr/search/TestSolrQueryParser.java |  12 +-
 14 files changed, 748 insertions(+), 705 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 4bbf9ee..109a534 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -85,6 +85,9 @@ API Changes
 * LUCENE-7611: DocumentValueSourceDictionary now takes a LongValuesSource
   as a parameter, and the ValueSource equivalent is deprecated (Alan Woodward)
 
+* LUCENE-7624: TermsQuery has been renamed as TermInSetQuery and moved to core.
+  (Alan Woodward)
+
 New features
 
 * LUCENE-5867: Added BooleanSimilarity. (Robert Muir, Adrien Grand)

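Migration is a rename plus an import change; a sketch using the constructors added below:

    // Before: org.apache.lucene.queries.TermsQuery (queries module)
    // After:  org.apache.lucene.search.TermInSetQuery (core)
    Query q = new TermInSetQuery("color", new BytesRef("red"), new BytesRef("blue"));
    // Scores like a ConstantScoreQuery over SHOULD TermQuery clauses; above 16 terms the
    // rewrite keeps the term set and matches via a bit set instead of a boolean query.
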
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
new file mode 100644
index 0000000..e1a1575
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.SortedSet;
+
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.PrefixCodedTerms;
+import org.apache.lucene.index.PrefixCodedTerms.TermIterator;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.index.TermState;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.DocIdSetBuilder;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/**
+ * Specialization for a disjunction over many terms that behaves like a
+ * {@link ConstantScoreQuery} over a {@link BooleanQuery} containing only
+ * {@link org.apache.lucene.search.BooleanClause.Occur#SHOULD} clauses.
+ * <p>For instance, in the following example, both {@code q1} and {@code q2}
+ * would yield the same scores:
+ * <pre class="prettyprint">
+ * Query q1 = new TermInSetQuery(new Term("field", "foo"), new Term("field", "bar"));
+ *
+ * BooleanQuery bq = new BooleanQuery();
+ * bq.add(new TermQuery(new Term("field", "foo")), Occur.SHOULD);
+ * bq.add(new TermQuery(new Term("field", "bar")), Occur.SHOULD);
+ * Query q2 = new ConstantScoreQuery(bq);
+ * </pre>
+ * <p>When there are few terms, this query executes like a regular disjunction.
+ * However, when there are many terms, instead of merging iterators on the fly,
+ * it will populate a bit set with matching docs and return a {@link Scorer}
+ * over this bit set.
+ * <p>NOTE: This query produces scores that are equal to its boost.
+ */
+public class TermInSetQuery extends Query implements Accountable {
+
+  private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TermInSetQuery.class);
+  // Same threshold as MultiTermQueryConstantScoreWrapper
+  static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;
+
+  private final boolean singleField; // whether all terms are from the same field
+  private final PrefixCodedTerms termData;
+  private final int termDataHashCode; // cached hashcode of termData
+
+  /**
+   * Creates a new {@link TermInSetQuery} from the given collection. It
+   * can contain duplicate terms and multiple fields.
+   */
+  public TermInSetQuery(Collection<Term> terms) {
+    Term[] sortedTerms = terms.toArray(new Term[terms.size()]);
+    // already sorted if we are a SortedSet with natural order
+    boolean sorted = terms instanceof SortedSet && ((SortedSet<Term>)terms).comparator() == null;
+    if (!sorted) {
+      ArrayUtil.timSort(sortedTerms);
+    }
+    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
+    Set<String> fields = new HashSet<>();
+    Term previous = null;
+    for (Term term : sortedTerms) {
+      if (term.equals(previous) == false) {
+        fields.add(term.field());
+        builder.add(term);
+      }
+      previous = term;
+    }
+    singleField = fields.size() == 1;
+    termData = builder.finish();
+    termDataHashCode = termData.hashCode();
+  }
+
+  /**
+   * Creates a new {@link TermInSetQuery} from the given collection for
+   * a single field. It can contain duplicate terms.
+   */
+  public TermInSetQuery(String field, Collection<BytesRef> terms) {
+    BytesRef[] sortedTerms = terms.toArray(new BytesRef[terms.size()]);
+    // already sorted if we are a SortedSet with natural order
+    boolean sorted = terms instanceof SortedSet && ((SortedSet<BytesRef>)terms).comparator() == null;
+    if (!sorted) {
+      ArrayUtil.timSort(sortedTerms);
+    }
+    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
+    BytesRefBuilder previous = null;
+    for (BytesRef term : sortedTerms) {
+      if (previous == null) {
+        previous = new BytesRefBuilder();
+      } else if (previous.get().equals(term)) {
+        continue; // deduplicate
+      }
+      builder.add(field, term);
+      previous.copyBytes(term);
+    }
+    singleField = true;
+    termData = builder.finish();
+    termDataHashCode = termData.hashCode();
+  }
+
+  /**
+   * Creates a new {@link TermInSetQuery} from the given {@link BytesRef} array for
+   * a single field.
+   */
+  public TermInSetQuery(String field, BytesRef...terms) {
+    this(field, Arrays.asList(terms));
+  }
+
+  /**
+   * Creates a new {@link TermInSetQuery} from the given array. The array can
+   * contain duplicate terms and multiple fields.
+   */
+  public TermInSetQuery(final Term... terms) {
+    this(Arrays.asList(terms));
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    final int threshold = Math.min(BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD, BooleanQuery.getMaxClauseCount());
+    if (termData.size() <= threshold) {
+      BooleanQuery.Builder bq = new BooleanQuery.Builder();
+      TermIterator iterator = termData.iterator();
+      for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
+        bq.add(new TermQuery(new Term(iterator.field(), BytesRef.deepCopyOf(term))), Occur.SHOULD);
+      }
+      return new ConstantScoreQuery(bq.build());
+    }
+    return super.rewrite(reader);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    return sameClassAs(other) &&
+        equalsTo(getClass().cast(other));
+  }
+
+  private boolean equalsTo(TermInSetQuery other) {
+    // termData might be heavy to compare so check the hash code first
+    return termDataHashCode == other.termDataHashCode &&
+        termData.equals(other.termData);
+  }
+
+  @Override
+  public int hashCode() {
+    return 31 * classHash() + termDataHashCode;
+  }
+
+  /** Returns the terms wrapped in a PrefixCodedTerms. */
+  public PrefixCodedTerms getTermData() {
+    return termData;
+  }
+
+  @Override
+  public String toString(String defaultField) {
+    StringBuilder builder = new StringBuilder();
+    boolean first = true;
+    TermIterator iterator = termData.iterator();
+    for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
+      if (!first) {
+        builder.append(' ');
+      }
+      first = false;
+      builder.append(new Term(iterator.field(), term).toString());
+    }
+
+    return builder.toString();
+  }
+
+  @Override
+  public long ramBytesUsed() {
+    return BASE_RAM_BYTES_USED + termData.ramBytesUsed();
+  }
+
+  @Override
+  public Collection<Accountable> getChildResources() {
+    return Collections.emptyList();
+  }
+
+  private static class TermAndState {
+    final String field;
+    final TermsEnum termsEnum;
+    final BytesRef term;
+    final TermState state;
+    final int docFreq;
+    final long totalTermFreq;
+
+    TermAndState(String field, TermsEnum termsEnum) throws IOException {
+      this.field = field;
+      this.termsEnum = termsEnum;
+      this.term = BytesRef.deepCopyOf(termsEnum.term());
+      this.state = termsEnum.termState();
+      this.docFreq = termsEnum.docFreq();
+      this.totalTermFreq = termsEnum.totalTermFreq();
+    }
+  }
+
+  private static class WeightOrDocIdSet {
+    final Weight weight;
+    final DocIdSet set;
+
+    WeightOrDocIdSet(Weight weight) {
+      this.weight = Objects.requireNonNull(weight);
+      this.set = null;
+    }
+
+    WeightOrDocIdSet(DocIdSet bitset) {
+      this.set = bitset;
+      this.weight = null;
+    }
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+    return new ConstantScoreWeight(this, boost) {
+
+      @Override
+      public void extractTerms(Set<Term> terms) {
+        // no-op
+        // This query is for abuse cases when the number of terms is too high to
+        // run efficiently as a BooleanQuery, so we likewise hide its terms in
+        // order to protect highlighters.
+      }
+
+      /**
+       * On the given leaf context, try to either rewrite to a disjunction if
+       * there are few matching terms, or build a bitset containing matching docs.
+       */
+      private WeightOrDocIdSet rewrite(LeafReaderContext context) throws IOException {
+        final LeafReader reader = context.reader();
+
+        // We will first try to collect up to 'threshold' terms into 'matchingTerms';
+        // if there are too many terms, we will fall back to building the 'builder'.
+        final int threshold = Math.min(BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD, BooleanQuery.getMaxClauseCount());
+        assert termData.size() > threshold : "Query should have been rewritten";
+        List<TermAndState> matchingTerms = new ArrayList<>(threshold);
+        DocIdSetBuilder builder = null;
+
+        final Fields fields = reader.fields();
+        String lastField = null;
+        Terms terms = null;
+        TermsEnum termsEnum = null;
+        PostingsEnum docs = null;
+        TermIterator iterator = termData.iterator();
+        for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
+          String field = iterator.field();
+          // comparing references is fine here
+          if (field != lastField) {
+            terms = fields.terms(field);
+            if (terms == null) {
+              termsEnum = null;
+            } else {
+              termsEnum = terms.iterator();
+            }
+            lastField = field;
+          }
+          if (termsEnum != null && termsEnum.seekExact(term)) {
+            if (matchingTerms == null) {
+              docs = termsEnum.postings(docs, PostingsEnum.NONE);
+              builder.add(docs);
+            } else if (matchingTerms.size() < threshold) {
+              matchingTerms.add(new TermAndState(field, termsEnum));
+            } else {
+              assert matchingTerms.size() == threshold;
+              if (singleField) {
+                // common case: all terms are in the same field
+                // use an optimized builder that leverages terms stats to be more efficient
+                builder = new DocIdSetBuilder(reader.maxDoc(), terms);
+              } else {
+                // corner case: different fields
+                // don't make assumptions about the docs we will get
+                builder = new DocIdSetBuilder(reader.maxDoc());
+              }
+              docs = termsEnum.postings(docs, PostingsEnum.NONE);
+              builder.add(docs);
+              for (TermAndState t : matchingTerms) {
+                t.termsEnum.seekExact(t.term, t.state);
+                docs = t.termsEnum.postings(docs, PostingsEnum.NONE);
+                builder.add(docs);
+              }
+              matchingTerms = null;
+            }
+          }
+        }
+        if (matchingTerms != null) {
+          assert builder == null;
+          BooleanQuery.Builder bq = new BooleanQuery.Builder();
+          for (TermAndState t : matchingTerms) {
+            final TermContext termContext = new TermContext(searcher.getTopReaderContext());
+            termContext.register(t.state, context.ord, t.docFreq, t.totalTermFreq);
+            bq.add(new TermQuery(new Term(t.field, t.term), termContext), Occur.SHOULD);
+          }
+          Query q = new ConstantScoreQuery(bq.build());
+          final Weight weight = searcher.rewrite(q).createWeight(searcher, needsScores, score());
+          return new WeightOrDocIdSet(weight);
+        } else {
+          assert builder != null;
+          return new WeightOrDocIdSet(builder.build());
+        }
+      }
+
+      private Scorer scorer(DocIdSet set) throws IOException {
+        if (set == null) {
+          return null;
+        }
+        final DocIdSetIterator disi = set.iterator();
+        if (disi == null) {
+          return null;
+        }
+        return new ConstantScoreScorer(this, score(), disi);
+      }
+
+      @Override
+      public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
+        final WeightOrDocIdSet weightOrBitSet = rewrite(context);
+        if (weightOrBitSet.weight != null) {
+          return weightOrBitSet.weight.bulkScorer(context);
+        } else {
+          final Scorer scorer = scorer(weightOrBitSet.set);
+          if (scorer == null) {
+            return null;
+          }
+          return new DefaultBulkScorer(scorer);
+        }
+      }
+
+      @Override
+      public Scorer scorer(LeafReaderContext context) throws IOException {
+        final WeightOrDocIdSet weightOrBitSet = rewrite(context);
+        if (weightOrBitSet.weight != null) {
+          return weightOrBitSet.weight.scorer(context);
+        } else {
+          return scorer(weightOrBitSet.set);
+        }
+      }
+    };
+  }
+}
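
For context, a minimal end-to-end sketch of the new query. The class name, field
name, and values below are illustrative only, and it assumes the
lucene-analyzers-common module is on the classpath for KeywordAnalyzer:

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;

  import org.apache.lucene.analysis.core.KeywordAnalyzer;
  import org.apache.lucene.document.Document;
  import org.apache.lucene.document.Field.Store;
  import org.apache.lucene.document.StringField;
  import org.apache.lucene.index.DirectoryReader;
  import org.apache.lucene.index.IndexWriter;
  import org.apache.lucene.index.IndexWriterConfig;
  import org.apache.lucene.search.IndexSearcher;
  import org.apache.lucene.search.Query;
  import org.apache.lucene.search.TermInSetQuery;
  import org.apache.lucene.store.Directory;
  import org.apache.lucene.store.RAMDirectory;
  import org.apache.lucene.util.BytesRef;

  public class TermInSetQueryDemo {
    public static void main(String[] args) throws IOException {
      Directory dir = new RAMDirectory();
      try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer()))) {
        for (String id : new String[] {"a", "b", "c", "d"}) {
          Document doc = new Document();
          doc.add(new StringField("id", id, Store.YES)); // StringField is not analyzed
          w.addDocument(doc);
        }
      }
      // Single-field form: duplicates are allowed and deduplicated on construction.
      List<BytesRef> ids = new ArrayList<>();
      ids.add(new BytesRef("a"));
      ids.add(new BytesRef("c"));
      ids.add(new BytesRef("c"));
      Query q = new TermInSetQuery("id", ids);
      try (DirectoryReader reader = DirectoryReader.open(dir)) {
        System.out.println(new IndexSearcher(reader).count(q)); // prints 2
      }
      dir.close();
    }
  }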

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/core/src/java/org/apache/lucene/search/UsageTrackingQueryCachingPolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/UsageTrackingQueryCachingPolicy.java b/lucene/core/src/java/org/apache/lucene/search/UsageTrackingQueryCachingPolicy.java
index ab68eeb..035947f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/UsageTrackingQueryCachingPolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/search/UsageTrackingQueryCachingPolicy.java
@@ -50,9 +50,8 @@ public final class UsageTrackingQueryCachingPolicy implements QueryCachingPolicy
     // DocIdSet in the first place
     return query instanceof MultiTermQuery ||
         query instanceof MultiTermQueryConstantScoreWrapper ||
-        isPointQuery(query) ||
-        // can't refer to TermsQuery directly as it is in another module
-        "TermsQuery".equals(query.getClass().getSimpleName());
+        query instanceof TermInSetQuery ||
+        isPointQuery(query);
   }
 
   static boolean isCheap(Query query) {
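
The practical effect of this hunk -- TermInSetQuery is now considered costly to
build and is therefore cached after only two usages -- is asserted by the new
test added below. In condensed form, mirroring that test:

  TermInSetQuery query = new TermInSetQuery(new Term("foo", "bar"), new Term("foo", "baz"));
  UsageTrackingQueryCachingPolicy policy = new UsageTrackingQueryCachingPolicy();
  policy.onUse(query);
  policy.onUse(query);
  boolean shouldCache = policy.shouldCache(query); // true: cached after two uses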

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java b/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
new file mode 100644
index 0000000..e694d97
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.index.FilterLeafReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.RamUsageTester;
+import org.apache.lucene.util.TestUtil;
+
+public class TermInSetQueryTest extends LuceneTestCase {
+
+  public void testDuel() throws IOException {
+    final int iters = atLeast(2);
+    for (int iter = 0; iter < iters; ++iter) {
+      final List<Term> allTerms = new ArrayList<>();
+      final int numTerms = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 10));
+      for (int i = 0; i < numTerms; ++i) {
+        final String field = usually() ? "f" : "g";
+        final String value = TestUtil.randomAnalysisString(random(), 10, true);
+        allTerms.add(new Term(field, value));
+      }
+      Directory dir = newDirectory();
+      RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+      final int numDocs = atLeast(100);
+      for (int i = 0; i < numDocs; ++i) {
+        Document doc = new Document();
+        final Term term = allTerms.get(random().nextInt(allTerms.size()));
+        doc.add(new StringField(term.field(), term.text(), Store.NO));
+        iw.addDocument(doc);
+      }
+      if (numTerms > 1 && random().nextBoolean()) {
+        iw.deleteDocuments(new TermQuery(allTerms.get(0)));
+      }
+      iw.commit();
+      final IndexReader reader = iw.getReader();
+      final IndexSearcher searcher = newSearcher(reader);
+      iw.close();
+
+      if (reader.numDocs() == 0) {
+        // may occasionally happen if all documents got the same term
+        IOUtils.close(reader, dir);
+        continue;
+      }
+
+      for (int i = 0; i < 100; ++i) {
+        final float boost = random().nextFloat() * 10;
+        final int numQueryTerms = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8));
+        List<Term> queryTerms = new ArrayList<>();
+        for (int j = 0; j < numQueryTerms; ++j) {
+          queryTerms.add(allTerms.get(random().nextInt(allTerms.size())));
+        }
+        final BooleanQuery.Builder bq = new BooleanQuery.Builder();
+        for (Term t : queryTerms) {
+          bq.add(new TermQuery(t), Occur.SHOULD);
+        }
+        final Query q1 = new ConstantScoreQuery(bq.build());
+        final Query q2 = new TermInSetQuery(queryTerms);
+        assertSameMatches(searcher, new BoostQuery(q1, boost), new BoostQuery(q2, boost), true);
+      }
+
+      reader.close();
+      dir.close();
+    }
+  }
+
+  private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException {
+    final int maxDoc = searcher.getIndexReader().maxDoc();
+    final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
+    final TopDocs td2 = searcher.search(q2, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
+    assertEquals(td1.totalHits, td2.totalHits);
+    for (int i = 0; i < td1.scoreDocs.length; ++i) {
+      assertEquals(td1.scoreDocs[i].doc, td2.scoreDocs[i].doc);
+      if (scores) {
+        assertEquals(td1.scoreDocs[i].score, td2.scoreDocs[i].score, 10e-7);
+      }
+    }
+  }
+
+  private TermInSetQuery termsQuery(boolean singleField, Term...terms) {
+    return termsQuery(singleField, Arrays.asList(terms));
+  }
+
+  private TermInSetQuery termsQuery(boolean singleField, Collection<Term> termList) {
+    if (!singleField) {
+      return new TermInSetQuery(new ArrayList<>(termList));
+    }
+    final TermInSetQuery filter;
+    List<BytesRef> bytes = new ArrayList<>();
+    String field = null;
+    for (Term term : termList) {
+        bytes.add(term.bytes());
+        if (field != null) {
+          assertEquals(term.field(), field);
+        }
+        field = term.field();
+    }
+    assertNotNull(field);
+    filter = new TermInSetQuery(field, bytes);
+    return filter;
+  }
+
+  public void testHashCodeAndEquals() {
+    int num = atLeast(100);
+    final boolean singleField = random().nextBoolean();
+    List<Term> terms = new ArrayList<>();
+    Set<Term> uniqueTerms = new HashSet<>();
+    for (int i = 0; i < num; i++) {
+      String field = "field" + (singleField ? "1" : random().nextInt(100));
+      String string = TestUtil.randomRealisticUnicodeString(random());
+      terms.add(new Term(field, string));
+      uniqueTerms.add(new Term(field, string));
+      TermInSetQuery left = termsQuery(singleField ? random().nextBoolean() : false, uniqueTerms);
+      Collections.shuffle(terms, random());
+      TermInSetQuery right = termsQuery(singleField ? random().nextBoolean() : false, terms);
+      assertEquals(right, left);
+      assertEquals(right.hashCode(), left.hashCode());
+      if (uniqueTerms.size() > 1) {
+        List<Term> asList = new ArrayList<>(uniqueTerms);
+        asList.remove(0);
+        TermInSetQuery notEqual = termsQuery(singleField ? random().nextBoolean() : false, asList);
+        assertFalse(left.equals(notEqual));
+        assertFalse(right.equals(notEqual));
+      }
+    }
+
+    TermInSetQuery tq1 = new TermInSetQuery(new Term("thing", "apple"));
+    TermInSetQuery tq2 = new TermInSetQuery(new Term("thing", "orange"));
+    assertFalse(tq1.hashCode() == tq2.hashCode());
+
+    // different fields with the same term should have differing hashcodes
+    tq1 = new TermInSetQuery(new Term("thing1", "apple"));
+    tq2 = new TermInSetQuery(new Term("thing2", "apple"));
+    assertFalse(tq1.hashCode() == tq2.hashCode());
+  }
+
+  public void testSingleFieldEquals() {
+    // Two terms with the same hash code
+    assertEquals("AaAaBB".hashCode(), "BBBBBB".hashCode());
+    TermInSetQuery left = termsQuery(true, new Term("id", "AaAaAa"), new Term("id", "AaAaBB"));
+    TermInSetQuery right = termsQuery(true, new Term("id", "AaAaAa"), new Term("id", "BBBBBB"));
+    assertFalse(left.equals(right));
+  }
+
+  public void testToString() {
+    TermInSetQuery termsQuery = new TermInSetQuery(new Term("field1", "a"),
+                                              new Term("field1", "b"),
+                                              new Term("field1", "c"));
+    assertEquals("field1:a field1:b field1:c", termsQuery.toString());
+  }
+
+  public void testDedup() {
+    Query query1 = new TermInSetQuery(new Term("foo", "bar"));
+    Query query2 = new TermInSetQuery(new Term("foo", "bar"), new Term("foo", "bar"));
+    QueryUtils.checkEqual(query1, query2);
+  }
+
+  public void testOrderDoesNotMatter() {
+    // order of terms is different
+    Query query1 = new TermInSetQuery(new Term("foo", "bar"), new Term("foo", "baz"));
+    Query query2 = new TermInSetQuery(new Term("foo", "baz"), new Term("foo", "bar"));
+    QueryUtils.checkEqual(query1, query2);
+
+    // order of fields is different
+    query1 = new TermInSetQuery(new Term("foo", "bar"), new Term("bar", "bar"));
+    query2 = new TermInSetQuery(new Term("bar", "bar"), new Term("foo", "bar"));
+    QueryUtils.checkEqual(query1, query2);
+  }
+
+  public void testRamBytesUsed() {
+    List<Term> terms = new ArrayList<>();
+    final int numTerms = 1000 + random().nextInt(1000);
+    for (int i = 0; i < numTerms; ++i) {
+      terms.add(new Term("f", RandomStrings.randomUnicodeOfLength(random(), 10)));
+    }
+    TermInSetQuery query = new TermInSetQuery(terms);
+    final long actualRamBytesUsed = RamUsageTester.sizeOf(query);
+    final long expectedRamBytesUsed = query.ramBytesUsed();
+    // error margin within 5%
+    assertEquals(actualRamBytesUsed, expectedRamBytesUsed, actualRamBytesUsed / 20);
+  }
+
+  private static class TermsCountingDirectoryReaderWrapper extends FilterDirectoryReader {
+
+    private final AtomicInteger counter;
+    
+    public TermsCountingDirectoryReaderWrapper(DirectoryReader in, AtomicInteger counter) throws IOException {
+      super(in, new TermsCountingSubReaderWrapper(counter));
+      this.counter = counter;
+    }
+
+    private static class TermsCountingSubReaderWrapper extends SubReaderWrapper {
+      private final AtomicInteger counter;
+
+      public TermsCountingSubReaderWrapper(AtomicInteger counter) {
+        this.counter = counter;
+      }
+
+      @Override
+      public LeafReader wrap(LeafReader reader) {
+        return new TermsCountingLeafReaderWrapper(reader, counter);
+      }
+    }
+
+    private static class TermsCountingLeafReaderWrapper extends FilterLeafReader {
+
+      private final AtomicInteger counter;
+
+      public TermsCountingLeafReaderWrapper(LeafReader in, AtomicInteger counter) {
+        super(in);
+        this.counter = counter;
+      }
+
+      @Override
+      public Fields fields() throws IOException {
+        return new FilterFields(in.fields()) {
+          @Override
+          public Terms terms(String field) throws IOException {
+            final Terms in = this.in.terms(field);
+            if (in == null) {
+              return null;
+            }
+            return new FilterTerms(in) {
+              @Override
+              public TermsEnum iterator() throws IOException {
+                counter.incrementAndGet();
+                return super.iterator();
+              }
+            };
+          }
+        };
+      }
+      
+    }
+
+    @Override
+    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
+      return new TermsCountingDirectoryReaderWrapper(in, counter);
+    }
+
+  }
+
+  public void testPullOneTermsEnumPerField() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(new StringField("foo", "1", Store.NO));
+    doc.add(new StringField("bar", "2", Store.NO));
+    doc.add(new StringField("baz", "3", Store.NO));
+    w.addDocument(doc);
+    DirectoryReader reader = w.getReader();
+    w.close();
+    final AtomicInteger counter = new AtomicInteger();
+    DirectoryReader wrapped = new TermsCountingDirectoryReaderWrapper(reader, counter);
+
+    final List<Term> terms = new ArrayList<>();
+    final Set<String> fields = new HashSet<>();
+    // enough terms to avoid the rewrite
+    final int numTerms = TestUtil.nextInt(random(), TermInSetQuery.BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD + 1, 100);
+    for (int i = 0; i < numTerms; ++i) {
+      final String field = RandomPicks.randomFrom(random(), new String[] {"foo", "bar", "baz"});
+      final BytesRef term = new BytesRef(RandomStrings.randomUnicodeOfCodepointLength(random(), 10));
+      fields.add(field);
+      terms.add(new Term(field, term));
+    }
+
+    new IndexSearcher(wrapped).count(new TermInSetQuery(terms));
+    assertEquals(fields.size(), counter.get());
+    wrapped.close();
+    dir.close();
+  }
+  
+  public void testBinaryToString() {
+    TermInSetQuery query = new TermInSetQuery(new Term("field", new BytesRef(new byte[] { (byte) 0xff, (byte) 0xfe })));
+    assertEquals("field:[ff fe]", query.toString());
+  }
+
+  public void testIsConsideredCostlyByQueryCache() throws IOException {
+    TermInSetQuery query = new TermInSetQuery(new Term("foo", "bar"), new Term("foo", "baz"));
+    UsageTrackingQueryCachingPolicy policy = new UsageTrackingQueryCachingPolicy();
+    assertFalse(policy.shouldCache(query));
+    policy.onUse(query);
+    policy.onUse(query);
+    // cached after two uses
+    assertTrue(policy.shouldCache(query));
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java
index dd212c6..a010709 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java
@@ -16,13 +16,13 @@
  */
 package org.apache.lucene.facet;
 
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
-import org.apache.lucene.search.Query;
-
 import java.util.ArrayList;
 import java.util.Collection;
 
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
+
 /**
  * A multi-terms {@link Query} over a {@link FacetField}.
  * <p>
@@ -30,9 +30,9 @@ import java.util.Collection;
  * especially in cases where you don't intend to use {@link DrillSideways}
  *
  * @lucene.experimental
- * @see org.apache.lucene.queries.TermsQuery
+ * @see org.apache.lucene.search.TermInSetQuery
  */
-public class MultiFacetQuery extends TermsQuery {
+public class MultiFacetQuery extends TermInSetQuery {
 
   /**
    * Creates a new {@code MultiFacetQuery} filtering the query on the given dimension.
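
Since MultiFacetQuery keeps its public constructors (not shown in this hunk),
callers are unaffected by the new superclass. A hedged usage sketch -- the
String[]-varargs constructor signature is assumed from the existing class, not
taken from this diff, and the dimension and paths are illustrative:

  // Drill down on documents whose "Author" facet path is Bob or Lisa.
  Query q = new MultiFacetQuery("Author", new String[] {"Bob"}, new String[] {"Lisa"});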

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java
index 7b7f094..5effa83 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/TermsQuery.java
@@ -16,130 +16,33 @@
  */
 package org.apache.lucene.queries;
 
-import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Objects;
-import java.util.Set;
-import java.util.SortedSet;
 
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.PrefixCodedTerms;
-import org.apache.lucene.index.PrefixCodedTerms.TermIterator;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BulkScorer;
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.ConstantScoreScorer;
-import org.apache.lucene.search.ConstantScoreWeight;
-import org.apache.lucene.search.DocIdSet;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.Weight;
-import org.apache.lucene.util.Accountable;
-import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.DocIdSetBuilder;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.RamUsageEstimator;
 
 /**
- * Specialization for a disjunction over many terms that behaves like a
- * {@link ConstantScoreQuery} over a {@link BooleanQuery} containing only
- * {@link org.apache.lucene.search.BooleanClause.Occur#SHOULD} clauses.
- * <p>For instance in the following example, both @{code q1} and {@code q2}
- * would yield the same scores:
- * <pre class="prettyprint">
- * Query q1 = new TermsQuery(new Term("field", "foo"), new Term("field", "bar"));
- *
- * BooleanQuery bq = new BooleanQuery();
- * bq.add(new TermQuery(new Term("field", "foo")), Occur.SHOULD);
- * bq.add(new TermQuery(new Term("field", "bar")), Occur.SHOULD);
- * Query q2 = new ConstantScoreQuery(bq);
- * </pre>
- * <p>When there are few terms, this query executes like a regular disjunction.
- * However, when there are many terms, instead of merging iterators on the fly,
- * it will populate a bit set with matching docs and return a {@link Scorer}
- * over this bit set.
- * <p>NOTE: This query produces scores that are equal to its boost
+ * @deprecated Use {@link org.apache.lucene.search.TermInSetQuery}
  */
-public class TermsQuery extends Query implements Accountable {
-
-  private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TermsQuery.class);
-  // Same threshold as MultiTermQueryConstantScoreWrapper
-  static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;
-
-  private final boolean singleField; // whether all terms are from the same field
-  private final PrefixCodedTerms termData;
-  private final int termDataHashCode; // cached hashcode of termData
+@Deprecated
+public class TermsQuery extends TermInSetQuery {
 
   /**
    * Creates a new {@link TermsQuery} from the given collection. It
    * can contain duplicate terms and multiple fields.
    */
   public TermsQuery(Collection<Term> terms) {
-    Term[] sortedTerms = terms.toArray(new Term[terms.size()]);
-    // already sorted if we are a SortedSet with natural order
-    boolean sorted = terms instanceof SortedSet && ((SortedSet<Term>)terms).comparator() == null;
-    if (!sorted) {
-      ArrayUtil.timSort(sortedTerms);
-    }
-    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
-    Set<String> fields = new HashSet<>();
-    Term previous = null;
-    for (Term term : sortedTerms) {
-      if (term.equals(previous) == false) {
-        fields.add(term.field());
-        builder.add(term);
-      }
-      previous = term;
-    }
-    singleField = fields.size() == 1;
-    termData = builder.finish();
-    termDataHashCode = termData.hashCode();
+    super(terms);
   }
-  
+
   /**
    * Creates a new {@link TermsQuery} from the given collection for
    * a single field. It can contain duplicate terms.
    */
   public TermsQuery(String field, Collection<BytesRef> terms) {
-    BytesRef[] sortedTerms = terms.toArray(new BytesRef[terms.size()]);
-    // already sorted if we are a SortedSet with natural order
-    boolean sorted = terms instanceof SortedSet && ((SortedSet<BytesRef>)terms).comparator() == null;
-    if (!sorted) {
-      ArrayUtil.timSort(sortedTerms);
-    }
-    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
-    BytesRefBuilder previous = null;
-    for (BytesRef term : sortedTerms) {
-      if (previous == null) {
-        previous = new BytesRefBuilder();
-      } else if (previous.get().equals(term)) {
-        continue; // deduplicate
-      }
-      builder.add(field, term);
-      previous.copyBytes(term);
-    }
-    singleField = true;
-    termData = builder.finish();
-    termDataHashCode = termData.hashCode();
+    super(field, terms);
   }
 
   /**
@@ -147,7 +50,7 @@ public class TermsQuery extends Query implements Accountable {
    * a single field.
    */
   public TermsQuery(String field, BytesRef...terms) {
-   this(field, Arrays.asList(terms));
+    this(field, Arrays.asList(terms));
   }
 
   /**
@@ -158,224 +61,5 @@ public class TermsQuery extends Query implements Accountable {
     this(Arrays.asList(terms));
   }
 
-  @Override
-  public Query rewrite(IndexReader reader) throws IOException {
-    final int threshold = Math.min(BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD, BooleanQuery.getMaxClauseCount());
-    if (termData.size() <= threshold) {
-      BooleanQuery.Builder bq = new BooleanQuery.Builder();
-      TermIterator iterator = termData.iterator();
-      for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
-        bq.add(new TermQuery(new Term(iterator.field(), BytesRef.deepCopyOf(term))), Occur.SHOULD);
-      }
-      return new ConstantScoreQuery(bq.build());
-    }
-    return super.rewrite(reader);
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    return sameClassAs(other) &&
-           equalsTo(getClass().cast(other));
-  }
-
-  private boolean equalsTo(TermsQuery other) {
-    // termData might be heavy to compare so check the hash code first
-    return termDataHashCode == other.termDataHashCode && 
-           termData.equals(other.termData);
-  }
-
-  @Override
-  public int hashCode() {
-    return 31 * classHash() + termDataHashCode;
-  }
-
-  /** Returns the terms wrapped in a PrefixCodedTerms. */
-  public PrefixCodedTerms getTermData() {
-    return termData;
-  }
-
-  @Override
-  public String toString(String defaultField) {
-    StringBuilder builder = new StringBuilder();
-    boolean first = true;
-    TermIterator iterator = termData.iterator();
-    for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
-      if (!first) {
-        builder.append(' ');
-      }
-      first = false;
-      builder.append(new Term(iterator.field(), term).toString());
-    }
-
-    return builder.toString();
-  }
-
-  @Override
-  public long ramBytesUsed() {
-    return BASE_RAM_BYTES_USED + termData.ramBytesUsed();
-  }
 
-  @Override
-  public Collection<Accountable> getChildResources() {
-    return Collections.emptyList();
-  }
-
-  private static class TermAndState {
-    final String field;
-    final TermsEnum termsEnum;
-    final BytesRef term;
-    final TermState state;
-    final int docFreq;
-    final long totalTermFreq;
-
-    TermAndState(String field, TermsEnum termsEnum) throws IOException {
-      this.field = field;
-      this.termsEnum = termsEnum;
-      this.term = BytesRef.deepCopyOf(termsEnum.term());
-      this.state = termsEnum.termState();
-      this.docFreq = termsEnum.docFreq();
-      this.totalTermFreq = termsEnum.totalTermFreq();
-    }
-  }
-
-  private static class WeightOrDocIdSet {
-    final Weight weight;
-    final DocIdSet set;
-
-    WeightOrDocIdSet(Weight weight) {
-      this.weight = Objects.requireNonNull(weight);
-      this.set = null;
-    }
-
-    WeightOrDocIdSet(DocIdSet bitset) {
-      this.set = bitset;
-      this.weight = null;
-    }
-  }
-
-  @Override
-  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
-    return new ConstantScoreWeight(this, boost) {
-
-      @Override
-      public void extractTerms(Set<Term> terms) {
-        // no-op
-        // This query is for abuse cases when the number of terms is too high to
-        // run efficiently as a BooleanQuery. So likewise we hide its terms in
-        // order to protect highlighters
-      }
-
-      /**
-       * On the given leaf context, try to either rewrite to a disjunction if
-       * there are few matching terms, or build a bitset containing matching docs.
-       */
-      private WeightOrDocIdSet rewrite(LeafReaderContext context) throws IOException {
-        final LeafReader reader = context.reader();
-
-        // We will first try to collect up to 'threshold' terms into 'matchingTerms'
-        // if there are two many terms, we will fall back to building the 'builder'
-        final int threshold = Math.min(BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD, BooleanQuery.getMaxClauseCount());
-        assert termData.size() > threshold : "Query should have been rewritten";
-        List<TermAndState> matchingTerms = new ArrayList<>(threshold);
-        DocIdSetBuilder builder = null;
-
-        final Fields fields = reader.fields();
-        String lastField = null;
-        Terms terms = null;
-        TermsEnum termsEnum = null;
-        PostingsEnum docs = null;
-        TermIterator iterator = termData.iterator();
-        for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
-          String field = iterator.field();
-          // comparing references is fine here
-          if (field != lastField) {
-            terms = fields.terms(field);
-            if (terms == null) {
-              termsEnum = null;
-            } else {
-              termsEnum = terms.iterator();
-            }
-            lastField = field;
-          }
-          if (termsEnum != null && termsEnum.seekExact(term)) {
-            if (matchingTerms == null) {
-              docs = termsEnum.postings(docs, PostingsEnum.NONE);
-              builder.add(docs);
-            } else if (matchingTerms.size() < threshold) {
-              matchingTerms.add(new TermAndState(field, termsEnum));
-            } else {
-              assert matchingTerms.size() == threshold;
-              if (singleField) {
-                // common case: all terms are in the same field
-                // use an optimized builder that leverages terms stats to be more efficient
-                builder = new DocIdSetBuilder(reader.maxDoc(), terms);
-              } else {
-                // corner case: different fields
-                // don't make assumptions about the docs we will get
-                builder = new DocIdSetBuilder(reader.maxDoc());
-              }
-              docs = termsEnum.postings(docs, PostingsEnum.NONE);
-              builder.add(docs);
-              for (TermAndState t : matchingTerms) {
-                t.termsEnum.seekExact(t.term, t.state);
-                docs = t.termsEnum.postings(docs, PostingsEnum.NONE);
-                builder.add(docs);
-              }
-              matchingTerms = null;
-            }
-          }
-        }
-        if (matchingTerms != null) {
-          assert builder == null;
-          BooleanQuery.Builder bq = new BooleanQuery.Builder();
-          for (TermAndState t : matchingTerms) {
-            final TermContext termContext = new TermContext(searcher.getTopReaderContext());
-            termContext.register(t.state, context.ord, t.docFreq, t.totalTermFreq);
-            bq.add(new TermQuery(new Term(t.field, t.term), termContext), Occur.SHOULD);
-          }
-          Query q = new ConstantScoreQuery(bq.build());
-          final Weight weight = searcher.rewrite(q).createWeight(searcher, needsScores, score());
-          return new WeightOrDocIdSet(weight);
-        } else {
-          assert builder != null;
-          return new WeightOrDocIdSet(builder.build());
-        }
-      }
-
-      private Scorer scorer(DocIdSet set) throws IOException {
-        if (set == null) {
-          return null;
-        }
-        final DocIdSetIterator disi = set.iterator();
-        if (disi == null) {
-          return null;
-        }
-        return new ConstantScoreScorer(this, score(), disi);
-      }
-
-      @Override
-      public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
-        final WeightOrDocIdSet weightOrBitSet = rewrite(context);
-        if (weightOrBitSet.weight != null) {
-          return weightOrBitSet.weight.bulkScorer(context);
-        } else {
-          final Scorer scorer = scorer(weightOrBitSet.set);
-          if (scorer == null) {
-            return null;
-          }
-          return new DefaultBulkScorer(scorer);
-        }
-      }
-
-      @Override
-      public Scorer scorer(LeafReaderContext context) throws IOException {
-        final WeightOrDocIdSet weightOrBitSet = rewrite(context);
-        if (weightOrBitSet.weight != null) {
-          return weightOrBitSet.weight.scorer(context);
-        } else {
-          return scorer(weightOrBitSet.set);
-        }
-      }
-    };
-  }
 }
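
Migration for callers is mechanical -- only the class name and package change,
as a sketch:

  // Before (now deprecated, lucene-queries module):
  Query q1 = new org.apache.lucene.queries.TermsQuery(new Term("f", "foo"), new Term("f", "bar"));
  // After (lucene-core):
  Query q2 = new org.apache.lucene.search.TermInSetQuery(new Term("f", "foo"), new Term("f", "bar"));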

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/queries/src/test/org/apache/lucene/queries/TermsQueryTest.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TermsQueryTest.java b/lucene/queries/src/test/org/apache/lucene/queries/TermsQueryTest.java
deleted file mode 100644
index f8b10ef..0000000
--- a/lucene/queries/src/test/org/apache/lucene/queries/TermsQueryTest.java
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.queries;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.FilterDirectoryReader;
-import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BoostQuery;
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryUtils;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.RamUsageTester;
-import org.apache.lucene.util.TestUtil;
-
-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-import com.carrotsearch.randomizedtesting.generators.RandomStrings;
-
-public class TermsQueryTest extends LuceneTestCase {
-
-  public void testDuel() throws IOException {
-    final int iters = atLeast(2);
-    for (int iter = 0; iter < iters; ++iter) {
-      final List<Term> allTerms = new ArrayList<>();
-      final int numTerms = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 10));
-      for (int i = 0; i < numTerms; ++i) {
-        final String field = usually() ? "f" : "g";
-        final String value = TestUtil.randomAnalysisString(random(), 10, true);
-        allTerms.add(new Term(field, value));
-      }
-      Directory dir = newDirectory();
-      RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-      final int numDocs = atLeast(100);
-      for (int i = 0; i < numDocs; ++i) {
-        Document doc = new Document();
-        final Term term = allTerms.get(random().nextInt(allTerms.size()));
-        doc.add(new StringField(term.field(), term.text(), Store.NO));
-        iw.addDocument(doc);
-      }
-      if (numTerms > 1 && random().nextBoolean()) {
-        iw.deleteDocuments(new TermQuery(allTerms.get(0)));
-      }
-      iw.commit();
-      final IndexReader reader = iw.getReader();
-      final IndexSearcher searcher = newSearcher(reader);
-      iw.close();
-
-      if (reader.numDocs() == 0) {
-        // may occasionally happen if all documents got the same term
-        IOUtils.close(reader, dir);
-        continue;
-      }
-
-      for (int i = 0; i < 100; ++i) {
-        final float boost = random().nextFloat() * 10;
-        final int numQueryTerms = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8));
-        List<Term> queryTerms = new ArrayList<>();
-        for (int j = 0; j < numQueryTerms; ++j) {
-          queryTerms.add(allTerms.get(random().nextInt(allTerms.size())));
-        }
-        final BooleanQuery.Builder bq = new BooleanQuery.Builder();
-        for (Term t : queryTerms) {
-          bq.add(new TermQuery(t), Occur.SHOULD);
-        }
-        final Query q1 = new ConstantScoreQuery(bq.build());
-        final Query q2 = new TermsQuery(queryTerms);
-        assertSameMatches(searcher, new BoostQuery(q1, boost), new BoostQuery(q2, boost), true);
-      }
-
-      reader.close();
-      dir.close();
-    }
-  }
-
-  private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException {
-    final int maxDoc = searcher.getIndexReader().maxDoc();
-    final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
-    final TopDocs td2 = searcher.search(q2, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
-    assertEquals(td1.totalHits, td2.totalHits);
-    for (int i = 0; i < td1.scoreDocs.length; ++i) {
-      assertEquals(td1.scoreDocs[i].doc, td2.scoreDocs[i].doc);
-      if (scores) {
-        assertEquals(td1.scoreDocs[i].score, td2.scoreDocs[i].score, 10e-7);
-      }
-    }
-  }
-
-  private TermsQuery termsQuery(boolean singleField, Term...terms) {
-    return termsQuery(singleField, Arrays.asList(terms));
-  }
-
-  private TermsQuery termsQuery(boolean singleField, Collection<Term> termList) {
-    if (!singleField) {
-      return new TermsQuery(new ArrayList<>(termList));
-    }
-    final TermsQuery filter;
-    List<BytesRef> bytes = new ArrayList<>();
-    String field = null;
-    for (Term term : termList) {
-        bytes.add(term.bytes());
-        if (field != null) {
-          assertEquals(term.field(), field);
-        }
-        field = term.field();
-    }
-    assertNotNull(field);
-    filter = new TermsQuery(field, bytes);
-    return filter;
-  }
-
-  public void testHashCodeAndEquals() {
-    int num = atLeast(100);
-    final boolean singleField = random().nextBoolean();
-    List<Term> terms = new ArrayList<>();
-    Set<Term> uniqueTerms = new HashSet<>();
-    for (int i = 0; i < num; i++) {
-      String field = "field" + (singleField ? "1" : random().nextInt(100));
-      String string = TestUtil.randomRealisticUnicodeString(random());
-      terms.add(new Term(field, string));
-      uniqueTerms.add(new Term(field, string));
-      TermsQuery left = termsQuery(singleField ? random().nextBoolean() : false, uniqueTerms);
-      Collections.shuffle(terms, random());
-      TermsQuery right = termsQuery(singleField ? random().nextBoolean() : false, terms);
-      assertEquals(right, left);
-      assertEquals(right.hashCode(), left.hashCode());
-      if (uniqueTerms.size() > 1) {
-        List<Term> asList = new ArrayList<>(uniqueTerms);
-        asList.remove(0);
-        TermsQuery notEqual = termsQuery(singleField ? random().nextBoolean() : false, asList);
-        assertFalse(left.equals(notEqual));
-        assertFalse(right.equals(notEqual));
-      }
-    }
-
-    TermsQuery tq1 = new TermsQuery(new Term("thing", "apple"));
-    TermsQuery tq2 = new TermsQuery(new Term("thing", "orange"));
-    assertFalse(tq1.hashCode() == tq2.hashCode());
-
-    // different fields with the same term should have differing hashcodes
-    tq1 = new TermsQuery(new Term("thing1", "apple"));
-    tq2 = new TermsQuery(new Term("thing2", "apple"));
-    assertFalse(tq1.hashCode() == tq2.hashCode());
-  }
-
-  public void testSingleFieldEquals() {
-    // Two terms with the same hash code
-    assertEquals("AaAaBB".hashCode(), "BBBBBB".hashCode());
-    TermsQuery left = termsQuery(true, new Term("id", "AaAaAa"), new Term("id", "AaAaBB"));
-    TermsQuery right = termsQuery(true, new Term("id", "AaAaAa"), new Term("id", "BBBBBB"));
-    assertFalse(left.equals(right));
-  }
-
-  public void testToString() {
-    TermsQuery termsQuery = new TermsQuery(new Term("field1", "a"),
-                                              new Term("field1", "b"),
-                                              new Term("field1", "c"));
-    assertEquals("field1:a field1:b field1:c", termsQuery.toString());
-  }
-
-  public void testDedup() {
-    Query query1 = new TermsQuery(new Term("foo", "bar"));
-    Query query2 = new TermsQuery(new Term("foo", "bar"), new Term("foo", "bar"));
-    QueryUtils.checkEqual(query1, query2);
-  }
-
-  public void testOrderDoesNotMatter() {
-    // order of terms if different
-    Query query1 = new TermsQuery(new Term("foo", "bar"), new Term("foo", "baz"));
-    Query query2 = new TermsQuery(new Term("foo", "baz"), new Term("foo", "bar"));
-    QueryUtils.checkEqual(query1, query2);
-
-    // order of fields is different
-    query1 = new TermsQuery(new Term("foo", "bar"), new Term("bar", "bar"));
-    query2 = new TermsQuery(new Term("bar", "bar"), new Term("foo", "bar"));
-    QueryUtils.checkEqual(query1, query2);
-  }
-
-  public void testRamBytesUsed() {
-    List<Term> terms = new ArrayList<>();
-    final int numTerms = 1000 + random().nextInt(1000);
-    for (int i = 0; i < numTerms; ++i) {
-      terms.add(new Term("f", RandomStrings.randomUnicodeOfLength(random(), 10)));
-    }
-    TermsQuery query = new TermsQuery(terms);
-    final long actualRamBytesUsed = RamUsageTester.sizeOf(query);
-    final long expectedRamBytesUsed = query.ramBytesUsed();
-    // error margin within 5%
-    assertEquals(actualRamBytesUsed, expectedRamBytesUsed, actualRamBytesUsed / 20);
-  }
-
-  private static class TermsCountingDirectoryReaderWrapper extends FilterDirectoryReader {
-
-    private final AtomicInteger counter;
-    
-    public TermsCountingDirectoryReaderWrapper(DirectoryReader in, AtomicInteger counter) throws IOException {
-      super(in, new TermsCountingSubReaderWrapper(counter));
-      this.counter = counter;
-    }
-
-    private static class TermsCountingSubReaderWrapper extends SubReaderWrapper {
-      private final AtomicInteger counter;
-
-      public TermsCountingSubReaderWrapper(AtomicInteger counter) {
-        this.counter = counter;
-      }
-
-      @Override
-      public LeafReader wrap(LeafReader reader) {
-        return new TermsCountingLeafReaderWrapper(reader, counter);
-      }
-    }
-
-    private static class TermsCountingLeafReaderWrapper extends FilterLeafReader {
-
-      private final AtomicInteger counter;
-
-      public TermsCountingLeafReaderWrapper(LeafReader in, AtomicInteger counter) {
-        super(in);
-        this.counter = counter;
-      }
-
-      @Override
-      public Fields fields() throws IOException {
-        return new FilterFields(in.fields()) {
-          @Override
-          public Terms terms(String field) throws IOException {
-            final Terms in = this.in.terms(field);
-            if (in == null) {
-              return null;
-            }
-            return new FilterTerms(in) {
-              @Override
-              public TermsEnum iterator() throws IOException {
-                counter.incrementAndGet();
-                return super.iterator();
-              }
-            };
-          }
-        };
-      }
-      
-    }
-
-    @Override
-    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
-      return new TermsCountingDirectoryReaderWrapper(in, counter);
-    }
-
-  }
-
-  public void testPullOneTermsEnumPerField() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new StringField("foo", "1", Store.NO));
-    doc.add(new StringField("bar", "2", Store.NO));
-    doc.add(new StringField("baz", "3", Store.NO));
-    w.addDocument(doc);
-    DirectoryReader reader = w.getReader();
-    w.close();
-    final AtomicInteger counter = new AtomicInteger();
-    DirectoryReader wrapped = new TermsCountingDirectoryReaderWrapper(reader, counter);
-
-    final List<Term> terms = new ArrayList<>();
-    final Set<String> fields = new HashSet<>();
-    // enough terms to avoid the rewrite
-    final int numTerms = TestUtil.nextInt(random(), TermsQuery.BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD + 1, 100);
-    for (int i = 0; i < numTerms; ++i) {
-      final String field = RandomPicks.randomFrom(random(), new String[] {"foo", "bar", "baz"});
-      final BytesRef term = new BytesRef(RandomStrings.randomUnicodeOfCodepointLength(random(), 10));
-      fields.add(field);
-      terms.add(new Term(field, term));
-    }
-
-    new IndexSearcher(wrapped).count(new TermsQuery(terms));
-    assertEquals(fields.size(), counter.get());
-    wrapped.close();
-    dir.close();
-  }
-  
-  public void testBinaryToString() {
-    TermsQuery query = new TermsQuery(new Term("field", new BytesRef(new byte[] { (byte) 0xff, (byte) 0xfe })));
-    assertEquals("field:[ff fe]", query.toString());
-  }
-
-  public void testIsConsideredCostlyByQueryCache() throws IOException {
-    TermsQuery query = new TermsQuery(new Term("foo", "bar"), new Term("foo", "baz"));
-    UsageTrackingQueryCachingPolicy policy = new UsageTrackingQueryCachingPolicy();
-    assertFalse(policy.shouldCache(query));
-    policy.onUse(query);
-    policy.onUse(query);
-    // cached after two uses
-    assertTrue(policy.shouldCache(query));
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
index 0273466..4e37f5d 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
@@ -19,10 +19,8 @@ package org.apache.lucene.spatial.prefix;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.locationtech.spatial4j.shape.Point;
-import org.locationtech.spatial4j.shape.Shape;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.spatial.prefix.tree.Cell;
 import org.apache.lucene.spatial.prefix.tree.CellIterator;
 import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
@@ -31,10 +29,12 @@ import org.apache.lucene.spatial.query.SpatialOperation;
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
 
 /**
  * A basic implementation of {@link PrefixTreeStrategy} using a large
- * {@link TermsQuery} of all the cells from
+ * {@link TermInSetQuery} of all the cells from
  * {@link SpatialPrefixTree#getTreeCellIterator(org.locationtech.spatial4j.shape.Shape, int)}.
  * It only supports the search of indexed Point shapes.
  * <p>
@@ -105,7 +105,7 @@ public class TermQueryPrefixTreeStrategy extends PrefixTreeStrategy {
     }
     //unfortunately TermsQuery will needlessly sort & dedupe
     //TODO an automatonQuery might be faster?
-    return new TermsQuery(getFieldName(), terms);
+    return new TermInSetQuery(getFieldName(), terms);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java
index 514c18e..bb26a2e 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java
@@ -23,12 +23,11 @@ import java.util.Collections;
 import java.util.List;
 
 import com.carrotsearch.randomizedtesting.annotations.Repeat;
-import org.locationtech.spatial4j.shape.Shape;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.spatial.StrategyTestCase;
 import org.apache.lucene.spatial.prefix.NumberRangePrefixTreeStrategy.Facets;
 import org.apache.lucene.spatial.prefix.tree.Cell;
@@ -40,6 +39,7 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
 import org.junit.Before;
 import org.junit.Test;
+import org.locationtech.spatial4j.shape.Shape;
 
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt;
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
@@ -132,7 +132,7 @@ public class NumberRangeFacetsTest extends StrategyTestCase {
             terms.add(new Term("id", acceptDocId.toString()));
           }
 
-          topAcceptDocs = searchForDocBits(new TermsQuery(terms));
+          topAcceptDocs = searchForDocBits(new TermInSetQuery(terms));
         }
       }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index 366c4a9..a5cbee2 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -26,6 +26,15 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.IntObjectHashMap;
+import com.carrotsearch.hppc.LongHashSet;
+import com.carrotsearch.hppc.LongObjectHashMap;
+import com.carrotsearch.hppc.LongObjectMap;
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
+import com.carrotsearch.hppc.cursors.LongCursor;
+import com.carrotsearch.hppc.cursors.LongObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
@@ -36,7 +45,6 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Collector;
@@ -46,6 +54,7 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopDocsCollector;
 import org.apache.lucene.search.TopFieldCollector;
@@ -83,16 +92,6 @@ import org.apache.solr.uninverting.UninvertingReader;
 import org.apache.solr.util.plugin.PluginInfoInitialized;
 import org.apache.solr.util.plugin.SolrCoreAware;
 
-import com.carrotsearch.hppc.IntHashSet;
-import com.carrotsearch.hppc.IntObjectHashMap;
-import com.carrotsearch.hppc.LongHashSet;
-import com.carrotsearch.hppc.LongObjectHashMap;
-import com.carrotsearch.hppc.LongObjectMap;
-import com.carrotsearch.hppc.cursors.IntObjectCursor;
-import com.carrotsearch.hppc.cursors.LongCursor;
-import com.carrotsearch.hppc.cursors.LongObjectCursor;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
-
 /**
  * The ExpandComponent is designed to work with the CollapsingPostFilter.
  * The CollapsingPostFilter collapses a result set on a field.
@@ -720,7 +719,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
       bytesRefs[++index] = term.toBytesRef();
     }
 
-    return new SolrConstantScoreQuery(new QueryWrapperFilter(new TermsQuery(fname, bytesRefs)));
+    return new SolrConstantScoreQuery(new QueryWrapperFilter(new TermInSetQuery(fname, bytesRefs)));
   }
 
   private Query getGroupQuery(String fname,
@@ -733,7 +732,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
       IntObjectCursor<BytesRef> cursor = it.next();
       bytesRefs[++index] = cursor.value;
     }
-    return new SolrConstantScoreQuery(new QueryWrapperFilter(new TermsQuery(fname, bytesRefs)));
+    return new SolrConstantScoreQuery(new QueryWrapperFilter(new TermInSetQuery(fname, bytesRefs)));
   }
 
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/solr/core/src/java/org/apache/solr/schema/FieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index b67f88d..a5c898a 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -39,7 +39,6 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.legacy.LegacyNumericType;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -51,6 +50,7 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedNumericSelector;
 import org.apache.lucene.search.SortedSetSelector;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.util.BytesRef;
@@ -765,7 +765,7 @@ public abstract class FieldType extends FieldProperties {
       readableToIndexed(externalVal, br);
       lst.add( br.toBytesRef() );
     }
-    return new TermsQuery(field.getName() , lst);
+    return new TermInSetQuery(field.getName() , lst);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java
index d53dcbf..3a60149 100644
--- a/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java
@@ -20,13 +20,13 @@ import java.util.Arrays;
 import java.util.regex.Pattern;
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.AutomatonQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.DocValuesTermsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -60,7 +60,7 @@ public class TermsQParserPlugin extends QParserPlugin {
     termsFilter {
       @Override
       Filter makeFilter(String fname, BytesRef[] bytesRefs) {
-        return new QueryWrapperFilter(new TermsQuery(fname, bytesRefs));
+        return new QueryWrapperFilter(new TermInSetQuery(fname, bytesRefs));
       }
     },
     booleanQuery {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java b/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java
index 8cfcf79..3f762e3 100644
--- a/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java
+++ b/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java
@@ -25,7 +25,6 @@ import java.util.TreeSet;
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.AutomatonQuery;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
@@ -35,6 +34,7 @@ import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.WildcardQuery;
 import org.apache.lucene.util.BytesRef;
@@ -281,7 +281,7 @@ public class GraphQuery extends Query {
             collectorTerms.get(i, ref);
             termList.add(ref);
           }
-          q = new TermsQuery(fromField, termList);
+          q = new TermInSetQuery(fromField, termList);
         }
         
         // If there is a filter to be used while crawling the graph, add that.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/22940f5c/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
index d3e6a7f..76b441b 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
@@ -19,12 +19,12 @@ package org.apache.solr.search;
 import java.util.Locale;
 import java.util.Random;
 
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.core.SolrInfoMBean;
@@ -224,13 +224,13 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
     qParser = QParser.getParser("foo_s:(a b c d e f g h i j k l m n o p q r s t u v w x y z)", req);
     qParser.setIsFilter(true); // this may change in the future
     q = qParser.getQuery();
-    assertEquals(26, ((TermsQuery)q).getTermData().size());
+    assertEquals(26, ((TermInSetQuery)q).getTermData().size());
 
     // large numeric filter query should use TermsQuery (for trie fields)
     qParser = QParser.getParser("foo_i:(1 2 3 4 5 6 7 8 9 10 20 19 18 17 16 15 14 13 12 11)", req);
     qParser.setIsFilter(true); // this may change in the future
     q = qParser.getQuery();
-    assertEquals(20, ((TermsQuery)q).getTermData().size());
+    assertEquals(20, ((TermInSetQuery)q).getTermData().size());
 
     // a filter() clause inside a relevancy query should be able to use a TermsQuery
     qParser = QParser.getParser("foo_s:aaa filter(foo_s:(a b c d e f g h i j k l m n o p q r s t u v w x y z))", req);
@@ -245,7 +245,7 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
       qq = ((FilterQuery)qq).getQuery();
     }
 
-    assertEquals(26, ((TermsQuery)qq).getTermData().size());
+    assertEquals(26, ((TermInSetQuery)qq).getTermData().size());
 
     // test mixed boolean query, including quotes (which shouldn't matter)
     qParser = QParser.getParser("foo_s:(a +aaa b -bbb c d e f bar_s:(qqq www) g h i j k l m n o p q r s t u v w x y z)", req);
@@ -255,9 +255,9 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
     qq = null;
     for (BooleanClause clause : ((BooleanQuery)q).clauses()) {
       qq = clause.getQuery();
-      if (qq instanceof TermsQuery) break;
+      if (qq instanceof TermInSetQuery) break;
     }
-    assertEquals(26, ((TermsQuery)qq).getTermData().size());
+    assertEquals(26, ((TermInSetQuery)qq).getTermData().size());
 
     req.close();
   }
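
The hunks above replace org.apache.lucene.queries.TermsQuery with the core
org.apache.lucene.search.TermInSetQuery throughout Solr and spatial-extras.
Both call shapes used in the patch survive the rename, as the minimal sketch
below shows; the field "id" and its values are placeholder data, not taken
from the patch:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermInSetQuery;
    import org.apache.lucene.util.BytesRef;

    public class TermInSetQueryMigration {
      public static void main(String[] args) {
        // Field-plus-values form, as in ExpandComponent, FieldType and
        // TermsQParserPlugin above.
        Query byValues = new TermInSetQuery("id",
            new BytesRef("1"), new BytesRef("2"), new BytesRef("3"));

        // Term-collection form, as in NumberRangeFacetsTest above; all
        // terms should target the same field.
        List<Term> terms = Arrays.asList(new Term("id", "1"), new Term("id", "2"));
        Query byTerms = new TermInSetQuery(terms);

        System.out.println(byValues + "  /  " + byTerms);
      }
    }

Since both constructors appear verbatim in the hunks above, the migration is
a mechanical import-and-name swap.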


[35/43] lucene-solr:jira/solr-8593: SOLR-9584: Support Solr being proxied with another endpoint than default /solr. This closes #86 - see original commit e0b4caccd3312b011cdfbb3951ea43812486ca98

Posted by kr...@apache.org.
SOLR-9584: Support Solr being proxied with another endpoint than default /solr
This closes #86 - see original commit e0b4caccd3312b011cdfbb3951ea43812486ca98


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f99c9676
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f99c9676
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f99c9676

Branch: refs/heads/jira/solr-8593
Commit: f99c9676325c1749e570b9337a8c67a089d1fb28
Parents: e0b4cac
Author: Jan Høydahl <ja...@apache.org>
Authored: Tue Jan 10 14:32:38 2017 +0100
Committer: Jan Høydahl <ja...@apache.org>
Committed: Tue Jan 10 14:32:38 2017 +0100

----------------------------------------------------------------------
 solr/CHANGES.txt | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f99c9676/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2a5d5bb..0131b7b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -71,6 +71,8 @@ Optimizations
   string fields from the FieldCache, resulting in up to 56% better throughput for those cases.
   (yonik)
 
+* SOLR-9584: Support Solr being proxied with another endpoint than default /solr, by using relative links
+  in AdminUI javascripts (Yun Jie Zhou via janhoy)
 
 ==================  6.4.0 ==================
 


[41/43] lucene-solr:jira/solr-8593: SOLR-9948: Add a way to configure granularity of metrics for http connections

Posted by kr...@apache.org.
SOLR-9948: Add a way to configure granularity of metrics for http connections


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d2664b10
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d2664b10
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d2664b10

Branch: refs/heads/jira/solr-8593
Commit: d2664b100463ada22162d53aad1c6d306d2cc9c1
Parents: 7435ab1
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Thu Jan 12 13:00:00 2017 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Thu Jan 12 13:00:00 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  4 +-
 .../org/apache/solr/core/SolrXmlConfig.java     |  9 ++-
 .../component/HttpShardHandlerFactory.java      | 15 +++-
 .../apache/solr/update/UpdateShardHandler.java  | 14 +++-
 .../solr/update/UpdateShardHandlerConfig.java   | 14 +++-
 .../stats/HttpClientMetricNameStrategy.java     | 28 +++++++
 .../stats/InstrumentedHttpRequestExecutor.java  | 81 +++++++++++++++-----
 .../java/org/apache/solr/util/TestHarness.java  |  3 +-
 8 files changed, 143 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2664b10/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index afcd10b..0cf50d4 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -209,7 +209,9 @@ New Features
 
 * SOLR-9725: Substitute properties into JdbcDataSource configuration ( Jamie Jackson, Yuri Sashevsky via Mikhail Khludnev)
 
-* SOLR-9877: SOLR-9923: Use instrumented http client and connection pool. (shalin)
+* SOLR-9877: SOLR-9923: SOLR-9948: Use instrumented http client and connection pool in HttpShardHandler and
+  UpdateShardHandler. The metrics are collected per query-less URL and method by default but it can be configured
+  to host/method and per-method as well. (shalin)
 
 * SOLR-9880: Add Ganglia, Graphite and SLF4J metrics reporters. (ab)
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2664b10/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java b/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
index 49d9ae5..e41cd8d 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrXmlConfig.java
@@ -282,6 +282,7 @@ public class SolrXmlConfig {
     int maxUpdateConnectionsPerHost = UpdateShardHandlerConfig.DEFAULT_MAXUPDATECONNECTIONSPERHOST;
     int distributedSocketTimeout = UpdateShardHandlerConfig.DEFAULT_DISTRIBUPDATESOTIMEOUT;
     int distributedConnectionTimeout = UpdateShardHandlerConfig.DEFAULT_DISTRIBUPDATECONNTIMEOUT;
+    String metricNameStrategy = UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY;
 
     Object muc = nl.remove("maxUpdateConnections");
     if (muc != null) {
@@ -307,10 +308,16 @@ public class SolrXmlConfig {
       defined = true;
     }
 
+    Object mns = nl.remove("metricNameStrategy");
+    if (mns != null)  {
+      metricNameStrategy = mns.toString();
+      defined = true;
+    }
+
     if (!defined && !alwaysDefine)
       return null;
 
-    return new UpdateShardHandlerConfig(maxUpdateConnections, maxUpdateConnectionsPerHost, distributedSocketTimeout, distributedConnectionTimeout);
+    return new UpdateShardHandlerConfig(maxUpdateConnections, maxUpdateConnectionsPerHost, distributedSocketTimeout, distributedConnectionTimeout, metricNameStrategy);
 
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2664b10/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
index 258be97..be6e763 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
@@ -25,6 +25,7 @@ import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
 import org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -41,6 +42,7 @@ import org.apache.solr.metrics.SolrMetricProducer;
 import org.apache.solr.update.UpdateShardHandlerConfig;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.util.DefaultSolrThreadFactory;
+import org.apache.solr.util.stats.HttpClientMetricNameStrategy;
 import org.apache.solr.util.stats.InstrumentedHttpRequestExecutor;
 import org.apache.solr.util.stats.InstrumentedPoolingHttpClientConnectionManager;
 import org.apache.solr.util.stats.MetricUtils;
@@ -61,6 +63,8 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.solr.util.stats.InstrumentedHttpRequestExecutor.KNOWN_METRIC_NAME_STRATEGIES;
+
 
 public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.apache.solr.util.plugin.PluginInfoInitialized, SolrMetricProducer {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -97,6 +101,8 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
 
   private String scheme = null;
 
+  private HttpClientMetricNameStrategy metricNameStrategy;
+
   private final Random r = new Random();
 
   private final ReplicaListTransformer shufflingReplicaListTransformer = new ShufflingReplicaListTransformer(r);
@@ -148,6 +154,13 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
       this.scheme = StringUtils.removeEnd(this.scheme, "://");
     }
 
+    String strategy = getParameter(args, "metricNameStrategy", UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY, sb);
+    this.metricNameStrategy = KNOWN_METRIC_NAME_STRATEGIES.get(strategy);
+    if (this.metricNameStrategy == null)  {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "Unknown metricNameStrategy: " + strategy + " found. Must be one of: " + KNOWN_METRIC_NAME_STRATEGIES.keySet());
+    }
+
     this.connectionTimeout = getParameter(args, HttpClientUtil.PROP_CONNECTION_TIMEOUT, connectionTimeout, sb);
     this.maxConnectionsPerHost = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, maxConnectionsPerHost,sb);
     this.maxConnections = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS, maxConnections,sb);
@@ -177,7 +190,7 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
     );
 
     ModifiableSolrParams clientParams = getClientParams();
-    httpRequestExecutor = new InstrumentedHttpRequestExecutor();
+    httpRequestExecutor = new InstrumentedHttpRequestExecutor(this.metricNameStrategy);
     clientConnectionManager = new InstrumentedPoolingHttpClientConnectionManager(HttpClientUtil.getSchemaRegisteryProvider().getSchemaRegistry());
     this.defaultClient = HttpClientUtil.createClient(clientParams, clientConnectionManager, false, httpRequestExecutor);
     this.loadbalancer = createLoadbalancer(defaultClient);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2664b10/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
index f13cfb5..67447a3 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
@@ -34,11 +34,14 @@ import org.apache.solr.common.util.SolrjNamedThreadFactory;
 import org.apache.solr.core.SolrInfoMBean;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricProducer;
+import org.apache.solr.util.stats.HttpClientMetricNameStrategy;
 import org.apache.solr.util.stats.InstrumentedHttpRequestExecutor;
 import org.apache.solr.util.stats.InstrumentedPoolingHttpClientConnectionManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.solr.util.stats.InstrumentedHttpRequestExecutor.KNOWN_METRIC_NAME_STRATEGIES;
+
 public class UpdateShardHandler implements SolrMetricProducer, SolrInfoMBean {
   
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -74,7 +77,16 @@ public class UpdateShardHandler implements SolrMetricProducer, SolrInfoMBean {
       clientParams.set(HttpClientUtil.PROP_SO_TIMEOUT, cfg.getDistributedSocketTimeout());
       clientParams.set(HttpClientUtil.PROP_CONNECTION_TIMEOUT, cfg.getDistributedConnectionTimeout());
     }
-    httpRequestExecutor = new InstrumentedHttpRequestExecutor();
+    HttpClientMetricNameStrategy metricNameStrategy = KNOWN_METRIC_NAME_STRATEGIES.get(UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY);
+    if (cfg != null)  {
+      metricNameStrategy = KNOWN_METRIC_NAME_STRATEGIES.get(cfg.getMetricNameStrategy());
+      if (metricNameStrategy == null) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+            "Unknown metricNameStrategy: " + cfg.getMetricNameStrategy() + " found. Must be one of: " + KNOWN_METRIC_NAME_STRATEGIES.keySet());
+      }
+    }
+
+    httpRequestExecutor = new InstrumentedHttpRequestExecutor(metricNameStrategy);
     client = HttpClientUtil.createClient(clientParams, clientConnectionManager, false, httpRequestExecutor);
 
     // following is done only for logging complete configuration.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2664b10/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java b/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java
index ffb06c4..d31ce50 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateShardHandlerConfig.java
@@ -22,10 +22,12 @@ public class UpdateShardHandlerConfig {
   public static final int DEFAULT_DISTRIBUPDATESOTIMEOUT = 600000;
   public static final int DEFAULT_MAXUPDATECONNECTIONS = 100000;
   public static final int DEFAULT_MAXUPDATECONNECTIONSPERHOST = 100000;
+  public static final String DEFAULT_METRICNAMESTRATEGY = "queryLessURLAndMethod";
 
   public static final UpdateShardHandlerConfig DEFAULT
       = new UpdateShardHandlerConfig(DEFAULT_MAXUPDATECONNECTIONS, DEFAULT_MAXUPDATECONNECTIONSPERHOST,
-                                     DEFAULT_DISTRIBUPDATESOTIMEOUT, DEFAULT_DISTRIBUPDATECONNTIMEOUT);
+                                     DEFAULT_DISTRIBUPDATESOTIMEOUT, DEFAULT_DISTRIBUPDATECONNTIMEOUT,
+                                      DEFAULT_METRICNAMESTRATEGY);
 
   private final int maxUpdateConnections;
 
@@ -35,11 +37,15 @@ public class UpdateShardHandlerConfig {
 
   private final int distributedConnectionTimeout;
 
-  public UpdateShardHandlerConfig(int maxUpdateConnections, int maxUpdateConnectionsPerHost, int distributedSocketTimeout, int distributedConnectionTimeout) {
+  private final String metricNameStrategy;
+
+  public UpdateShardHandlerConfig(int maxUpdateConnections, int maxUpdateConnectionsPerHost, int distributedSocketTimeout, int distributedConnectionTimeout,
+                                  String metricNameStrategy) {
     this.maxUpdateConnections = maxUpdateConnections;
     this.maxUpdateConnectionsPerHost = maxUpdateConnectionsPerHost;
     this.distributedSocketTimeout = distributedSocketTimeout;
     this.distributedConnectionTimeout = distributedConnectionTimeout;
+    this.metricNameStrategy = metricNameStrategy;
   }
 
   public int getMaxUpdateConnectionsPerHost() {
@@ -57,4 +63,8 @@ public class UpdateShardHandlerConfig {
   public int getDistributedConnectionTimeout() {
     return distributedConnectionTimeout;
   }
+
+  public String getMetricNameStrategy() {
+    return metricNameStrategy;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2664b10/solr/core/src/java/org/apache/solr/util/stats/HttpClientMetricNameStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/stats/HttpClientMetricNameStrategy.java b/solr/core/src/java/org/apache/solr/util/stats/HttpClientMetricNameStrategy.java
new file mode 100644
index 0000000..930c5f7
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/util/stats/HttpClientMetricNameStrategy.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.util.stats;
+
+import org.apache.http.HttpRequest;
+
+/**
+ * Strategy for creating metric names for HttpClient
+ * Copied from metrics-httpclient library
+ */
+public interface HttpClientMetricNameStrategy {
+  String getNameFor(String scope, HttpRequest request);
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2664b10/solr/core/src/java/org/apache/solr/util/stats/InstrumentedHttpRequestExecutor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/stats/InstrumentedHttpRequestExecutor.java b/solr/core/src/java/org/apache/solr/util/stats/InstrumentedHttpRequestExecutor.java
index 0caa2d1..bd8d368 100644
--- a/solr/core/src/java/org/apache/solr/util/stats/InstrumentedHttpRequestExecutor.java
+++ b/solr/core/src/java/org/apache/solr/util/stats/InstrumentedHttpRequestExecutor.java
@@ -19,7 +19,9 @@ package org.apache.solr.util.stats;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.util.HashMap;
 import java.util.Locale;
+import java.util.Map;
 
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
@@ -35,13 +37,72 @@ import org.apache.http.protocol.HttpRequestExecutor;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricProducer;
 
+import static org.apache.solr.metrics.SolrMetricManager.mkName;
+
 /**
  * Sub-class of HttpRequestExecutor which tracks metrics interesting to solr
  * Inspired and partially copied from dropwizard httpclient library
  */
 public class InstrumentedHttpRequestExecutor extends HttpRequestExecutor implements SolrMetricProducer {
+  public static final HttpClientMetricNameStrategy QUERYLESS_URL_AND_METHOD =
+      (scope, request) -> {
+        try {
+          final RequestLine requestLine = request.getRequestLine();
+          String schemeHostPort = null;
+          if (request instanceof HttpRequestWrapper) {
+            HttpRequestWrapper wrapper = (HttpRequestWrapper) request;
+            if (wrapper.getTarget() != null) {
+              schemeHostPort = wrapper.getTarget().getSchemeName() + "://" + wrapper.getTarget().getHostName() + ":" + wrapper.getTarget().getPort();
+            }
+          }
+          final URIBuilder url = new URIBuilder(requestLine.getUri());
+          return mkName((schemeHostPort != null ? schemeHostPort : "") + url.removeQuery().build().toString() + "." + methodNameString(request), scope);
+        } catch (URISyntaxException e) {
+          throw new IllegalArgumentException(e);
+        }
+      };
+
+  public static final HttpClientMetricNameStrategy METHOD_ONLY =
+      (scope, request) -> mkName(methodNameString(request), scope);
+
+  public static final HttpClientMetricNameStrategy HOST_AND_METHOD =
+      (scope, request) -> {
+        try {
+          final RequestLine requestLine = request.getRequestLine();
+          String schemeHostPort = null;
+          if (request instanceof HttpRequestWrapper) {
+            HttpRequestWrapper wrapper = (HttpRequestWrapper) request;
+            if (wrapper.getTarget() != null) {
+              schemeHostPort = wrapper.getTarget().getSchemeName() + "://" + wrapper.getTarget().getHostName() + ":" + wrapper.getTarget().getPort();
+            }
+          }
+          final URIBuilder url = new URIBuilder(requestLine.getUri());
+          return mkName((schemeHostPort != null ? schemeHostPort : "") + "." + methodNameString(request), scope);
+        } catch (URISyntaxException e) {
+          throw new IllegalArgumentException(e);
+        }
+      };
+
+  public static final Map<String, HttpClientMetricNameStrategy> KNOWN_METRIC_NAME_STRATEGIES = new HashMap<>(3);
+
+  static  {
+    KNOWN_METRIC_NAME_STRATEGIES.put("queryLessURLAndMethod", QUERYLESS_URL_AND_METHOD);
+    KNOWN_METRIC_NAME_STRATEGIES.put("hostAndMethod", HOST_AND_METHOD);
+    KNOWN_METRIC_NAME_STRATEGIES.put("methodOnly", METHOD_ONLY);
+  }
+
   protected MetricRegistry metricsRegistry;
   protected String scope;
+  protected HttpClientMetricNameStrategy nameStrategy;
+
+  public InstrumentedHttpRequestExecutor(int waitForContinue, HttpClientMetricNameStrategy nameStrategy) {
+    super(waitForContinue);
+    this.nameStrategy = nameStrategy;
+  }
+
+  public InstrumentedHttpRequestExecutor(HttpClientMetricNameStrategy nameStrategy) {
+    this.nameStrategy = nameStrategy;
+  }
 
   private static String methodNameString(HttpRequest request) {
     return request.getRequestLine().getMethod().toLowerCase(Locale.ROOT) + ".requests";
@@ -50,7 +111,7 @@ public class InstrumentedHttpRequestExecutor extends HttpRequestExecutor impleme
   @Override
   public HttpResponse execute(HttpRequest request, HttpClientConnection conn, HttpContext context) throws IOException, HttpException {
     Timer.Context timerContext = null;
-    if (metricsRegistry != null)  {
+    if (metricsRegistry != null) {
       timerContext = timer(request).time();
     }
     try {
@@ -63,7 +124,7 @@ public class InstrumentedHttpRequestExecutor extends HttpRequestExecutor impleme
   }
 
   private Timer timer(HttpRequest request) {
-    return metricsRegistry.timer(getNameFor(request));
+    return metricsRegistry.timer(nameStrategy.getNameFor(scope, request));
   }
 
   @Override
@@ -72,20 +133,4 @@ public class InstrumentedHttpRequestExecutor extends HttpRequestExecutor impleme
     this.scope = scope;
   }
 
-  private String getNameFor(HttpRequest request) {
-    try {
-      final RequestLine requestLine = request.getRequestLine();
-      String schemeHostPort = null;
-      if (request instanceof HttpRequestWrapper) {
-        HttpRequestWrapper wrapper = (HttpRequestWrapper) request;
-        if (wrapper.getTarget() != null)  {
-          schemeHostPort = wrapper.getTarget().getSchemeName() + "://" + wrapper.getTarget().getHostName() + ":" +  wrapper.getTarget().getPort();
-        }
-      }
-      final URIBuilder url = new URIBuilder(requestLine.getUri());
-      return SolrMetricManager.mkName((schemeHostPort != null ? schemeHostPort : "") + url.removeQuery().build().toString() + "." + methodNameString(request), scope);
-    } catch (URISyntaxException e) {
-      throw new IllegalArgumentException(e);
-    }
-  }
 }
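
A minimal sketch of wiring a strategy by hand, using only the classes added
in this patch; "hostAndMethod" is one of the three keys registered in the
static block above (the others being the default "queryLessURLAndMethod" and
"methodOnly"):

    import org.apache.solr.util.stats.HttpClientMetricNameStrategy;
    import org.apache.solr.util.stats.InstrumentedHttpRequestExecutor;

    import static org.apache.solr.util.stats.InstrumentedHttpRequestExecutor.KNOWN_METRIC_NAME_STRATEGIES;

    public class MetricNameStrategyDemo {
      public static void main(String[] args) {
        // Look the strategy up exactly as HttpShardHandlerFactory and
        // UpdateShardHandler do; there an unknown key yields null and is
        // reported as a SERVER_ERROR.
        HttpClientMetricNameStrategy strategy =
            KNOWN_METRIC_NAME_STRATEGIES.get("hostAndMethod");
        if (strategy == null) {
          throw new IllegalArgumentException("unknown metricNameStrategy");
        }
        // The executor is then handed to HttpClientUtil.createClient(...),
        // as in the factory hunks above.
        InstrumentedHttpRequestExecutor executor =
            new InstrumentedHttpRequestExecutor(strategy);
        System.out.println(executor.getClass().getSimpleName() + " ready");
      }
    }

In a deployment the same keys are read from the metricNameStrategy element in
solr.xml (see the SolrXmlConfig hunk above) rather than being set in code.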

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d2664b10/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
index be8a24c..2386681 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
@@ -190,7 +190,8 @@ public class TestHarness extends BaseTestHarness {
     UpdateShardHandlerConfig updateShardHandlerConfig
         = new UpdateShardHandlerConfig(UpdateShardHandlerConfig.DEFAULT_MAXUPDATECONNECTIONS,
                                        UpdateShardHandlerConfig.DEFAULT_MAXUPDATECONNECTIONSPERHOST,
-                                       30000, 30000);
+                                       30000, 30000,
+                                        UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY);
     // universal default metric reporter
     Map<String,String> attributes = new HashMap<>();
     attributes.put("name", "default");


[04/43] lucene-solr:jira/solr-8593: LUCENE-7617: Grouping collector API cleanup

Posted by kr...@apache.org.
LUCENE-7617: Grouping collector API cleanup


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/da30f21f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/da30f21f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/da30f21f

Branch: refs/heads/jira/solr-8593
Commit: da30f21f5d2c90a4e3d4fae87a297adfd4bbb3cb
Parents: 52f2a77
Author: Alan Woodward <ro...@apache.org>
Authored: Tue Jan 3 11:00:47 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Jan 7 09:58:47 2017 +0000

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   6 +
 .../AbstractAllGroupHeadsCollector.java         | 176 ---------
 .../grouping/AbstractAllGroupsCollector.java    |  67 ----
 .../AbstractDistinctValuesCollector.java        |  59 ---
 .../AbstractFirstPassGroupingCollector.java     | 354 ------------------
 .../grouping/AbstractGroupFacetCollector.java   | 319 ----------------
 .../AbstractSecondPassGroupingCollector.java    | 162 ---------
 .../search/grouping/AllGroupHeadsCollector.java | 176 +++++++++
 .../search/grouping/AllGroupsCollector.java     |  67 ++++
 .../search/grouping/CollectedSearchGroup.java   |   2 +-
 .../grouping/DistinctValuesCollector.java       |  59 +++
 .../grouping/FirstPassGroupingCollector.java    | 363 +++++++++++++++++++
 .../lucene/search/grouping/GroupDocs.java       |   8 +-
 .../search/grouping/GroupFacetCollector.java    | 324 +++++++++++++++++
 .../apache/lucene/search/grouping/Grouper.java  |  56 +++
 .../lucene/search/grouping/GroupingSearch.java  | 130 ++-----
 .../lucene/search/grouping/SearchGroup.java     |  23 +-
 .../grouping/SecondPassGroupingCollector.java   | 169 +++++++++
 .../lucene/search/grouping/TopGroups.java       |  12 +-
 .../FunctionAllGroupHeadsCollector.java         |  32 +-
 .../function/FunctionAllGroupsCollector.java    |   4 +-
 .../FunctionDistinctValuesCollector.java        |  35 +-
 .../FunctionFirstPassGroupingCollector.java     |   6 +-
 .../grouping/function/FunctionGrouper.java      |  69 ++++
 .../FunctionSecondPassGroupingCollector.java    |   6 +-
 .../term/TermAllGroupHeadsCollector.java        |  26 +-
 .../grouping/term/TermAllGroupsCollector.java   |   8 +-
 .../term/TermDistinctValuesCollector.java       |  26 +-
 .../term/TermFirstPassGroupingCollector.java    |   6 +-
 .../grouping/term/TermGroupFacetCollector.java  |  10 +-
 .../search/grouping/term/TermGrouper.java       |  81 +++++
 .../term/TermSecondPassGroupingCollector.java   |   6 +-
 .../grouping/AllGroupHeadsCollectorTest.java    |  14 +-
 .../search/grouping/AllGroupsCollectorTest.java |   6 +-
 .../grouping/DistinctValuesCollectorTest.java   |  46 +--
 .../grouping/GroupFacetCollectorTest.java       |  10 +-
 .../lucene/search/grouping/TestGrouping.java    |  70 ++--
 .../org/apache/solr/request/SimpleFacets.java   |   4 +-
 .../java/org/apache/solr/search/Grouping.java   |  14 +-
 .../solr/search/grouping/CommandHandler.java    |   4 +-
 .../command/SearchGroupsFieldCommand.java       |   8 +-
 .../command/TopGroupsFieldCommand.java          |   4 +-
 42 files changed, 1597 insertions(+), 1430 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index b74056f..fa5cc1c 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -68,6 +68,12 @@ API Changes
 * LUCENE-7607: LeafFieldComparator.setScorer and SimpleFieldComparator.setScorer
   are declared as throwing IOException (Alan Woodward)
 
+* LUCENE-7617: Collector construction for two-pass grouping queries is
+  abstracted into a new Grouper class, which can be passed as a constructor
+  parameter to GroupingSearch.  The abstract base classes for the different
+  grouping Collectors are renamed to remove the Abstract* prefix.
+  (Alan Woodward, Martijn van Groningen)
+
 New features
 
 * LUCENE-5867: Added BooleanSimilarity. (Robert Muir, Adrien Grand)
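
For callers the cleanup is largely a rename: the two-pass collectors lose
the Abstract* prefix, and per the entry above the new Grouper abstraction
(not exercised here) can be handed to GroupingSearch directly. A minimal
caller-side sketch, assuming an already-open IndexSearcher and a placeholder
grouping field "author":

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.grouping.GroupingSearch;
    import org.apache.lucene.search.grouping.TopGroups;
    import org.apache.lucene.util.BytesRef;

    public class GroupingSearchSketch {
      // Returns the top 10 groups for a match-all query, grouped on "author".
      static TopGroups<BytesRef> topGroups(IndexSearcher searcher) throws Exception {
        GroupingSearch groupingSearch = new GroupingSearch("author");
        groupingSearch.setGroupSort(Sort.RELEVANCE);
        return groupingSearch.search(searcher, new MatchAllDocsQuery(), 0, 10);
      }
    }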

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java
deleted file mode 100644
index 7108762..0000000
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupHeadsCollector.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.grouping;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.SimpleCollector;
-import org.apache.lucene.util.FixedBitSet;
-
-/**
- * This collector specializes in collecting the most relevant document (group head) for each group that match the query.
- *
- * @lucene.experimental
- */
-@SuppressWarnings({"unchecked","rawtypes"})
-public abstract class AbstractAllGroupHeadsCollector<GH extends AbstractAllGroupHeadsCollector.GroupHead> extends SimpleCollector {
-
-  protected final int[] reversed;
-  protected final int compIDXEnd;
-  protected final TemporalResult temporalResult;
-
-  protected AbstractAllGroupHeadsCollector(int numberOfSorts) {
-    this.reversed = new int[numberOfSorts];
-    this.compIDXEnd = numberOfSorts - 1;
-    temporalResult = new TemporalResult();
-  }
-
-  /**
-   * @param maxDoc The maxDoc of the top level {@link IndexReader}.
-   * @return a {@link FixedBitSet} containing all group heads.
-   */
-  public FixedBitSet retrieveGroupHeads(int maxDoc) {
-    FixedBitSet bitSet = new FixedBitSet(maxDoc);
-
-    Collection<GH> groupHeads = getCollectedGroupHeads();
-    for (GroupHead groupHead : groupHeads) {
-      bitSet.set(groupHead.doc);
-    }
-
-    return bitSet;
-  }
-
-  /**
-   * @return an int array containing all group heads. The size of the array is equal to number of collected unique groups.
-   */
-  public int[] retrieveGroupHeads() {
-    Collection<GH> groupHeads = getCollectedGroupHeads();
-    int[] docHeads = new int[groupHeads.size()];
-
-    int i = 0;
-    for (GroupHead groupHead : groupHeads) {
-      docHeads[i++] = groupHead.doc;
-    }
-
-    return docHeads;
-  }
-
-  /**
-   * @return the number of group heads found for a query.
-   */
-  public int groupHeadsSize() {
-    return getCollectedGroupHeads().size();
-  }
-
-  /**
-   * Returns the group head and puts it into {@link #temporalResult}.
-   * If the group head wasn't encountered before then it will be added to the collected group heads.
-   * <p>
-   * The {@link TemporalResult#stop} property will be <code>true</code> if the group head wasn't encountered before
-   * otherwise <code>false</code>.
-   *
-   * @param doc The document to retrieve the group head for.
-   * @throws IOException If I/O related errors occur
-   */
-  protected abstract void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException;
-
-  /**
-   * Returns the collected group heads.
-   * Subsequent calls should return the same group heads.
-   *
-   * @return the collected group heads
-   */
-  protected abstract Collection<GH> getCollectedGroupHeads();
-
-  @Override
-  public void collect(int doc) throws IOException {
-    retrieveGroupHeadAndAddIfNotExist(doc);
-    if (temporalResult.stop) {
-      return;
-    }
-    GH groupHead = temporalResult.groupHead;
-
-    // Ok now we need to check if the current doc is more relevant then current doc for this group
-    for (int compIDX = 0; ; compIDX++) {
-      final int c = reversed[compIDX] * groupHead.compare(compIDX, doc);
-      if (c < 0) {
-        // Definitely not competitive. So don't even bother to continue
-        return;
-      } else if (c > 0) {
-        // Definitely competitive.
-        break;
-      } else if (compIDX == compIDXEnd) {
-        // Here c=0. If we're at the last comparator, this doc is not
-        // competitive, since docs are visited in doc Id order, which means
-        // this doc cannot compete with any other document in the queue.
-        return;
-      }
-    }
-    groupHead.updateDocHead(doc);
-  }
-
-  /**
-   * Contains the result of group head retrieval.
-   * To prevent new object creations of this class for every collect.
-   */
-  protected class TemporalResult {
-
-    public GH groupHead;
-    public boolean stop;
-
-  }
-
-  /**
-   * Represents a group head. A group head is the most relevant document for a particular group.
-   * The relevancy is based is usually based on the sort.
-   *
-   * The group head contains a group value with its associated most relevant document id.
-   */
-  public static abstract class GroupHead<GROUP_VALUE_TYPE> {
-
-    public final GROUP_VALUE_TYPE groupValue;
-    public int doc;
-
-    protected GroupHead(GROUP_VALUE_TYPE groupValue, int doc) {
-      this.groupValue = groupValue;
-      this.doc = doc;
-    }
-
-    /**
-     * Compares the specified document for a specified comparator against the current most relevant document.
-     *
-     * @param compIDX The comparator index of the specified comparator.
-     * @param doc The specified document.
-     * @return -1 if the specified document wasn't competitive against the current most relevant document, 1 if the
-     *         specified document was competitive against the current most relevant document. Otherwise 0.
-     * @throws IOException If I/O related errors occur
-     */
-    protected abstract int compare(int compIDX, int doc) throws IOException;
-
-    /**
-     * Updates the current most relevant document with the specified document.
-     *
-     * @param doc The specified document
-     * @throws IOException If I/O related errors occur
-     */
-    protected abstract void updateDocHead(int doc) throws IOException;
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java
deleted file mode 100644
index 954f9e0..0000000
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.grouping;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.SimpleCollector;
-import org.apache.lucene.util.BytesRef;
-
-/**
- * A collector that collects all groups that match the
- * query. Only the group value is collected, and the order
- * is undefined.  This collector does not determine
- * the most relevant document of a group.
- * <p>
- * This is an abstract version. Concrete implementations define
- * what a group actually is and how it is internally collected.
- *
- * @lucene.experimental
- */
-public abstract class AbstractAllGroupsCollector<GROUP_VALUE_TYPE> extends SimpleCollector {
-
-  /**
-   * Returns the total number of groups for the executed search.
-   * This is a convenience method. The following code snippet has the same effect: <pre>getGroups().size()</pre>
-   *
-   * @return The total number of groups for the executed search
-   */
-  public int getGroupCount() {
-    return getGroups().size();
-  }
-
-  /**
-   * Returns the group values
-   * <p>
-   * This is an unordered collections of group values. For each group that matched the query there is a {@link BytesRef}
-   * representing a group value.
-   *
-   * @return the group values
-   */
-  public abstract Collection<GROUP_VALUE_TYPE> getGroups();
-
-  // Empty not necessary
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {}
-
-  @Override
-  public boolean needsScores() {
-    return false; // the result is unaffected by relevancy
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractDistinctValuesCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractDistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractDistinctValuesCollector.java
deleted file mode 100644
index b2181e4..0000000
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractDistinctValuesCollector.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.grouping;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.lucene.search.SimpleCollector;
-
-/**
- * A second pass grouping collector that keeps track of distinct values for a specified field for the top N group.
- *
- * @lucene.experimental
- */
-public abstract class AbstractDistinctValuesCollector<GC extends AbstractDistinctValuesCollector.GroupCount<?>> extends SimpleCollector {
-
-  /**
-   * Returns all unique values for each top N group.
-   *
-   * @return all unique values for each top N group
-   */
-  public abstract List<GC> getGroups();
-
-  /**
-   * Returned by {@link AbstractDistinctValuesCollector#getGroups()},
-   * representing the value and set of distinct values for the group.
-   */
-  public abstract static class GroupCount<GROUP_VALUE_TYPE> {
-
-    public final GROUP_VALUE_TYPE groupValue;
-    public final Set<GROUP_VALUE_TYPE> uniqueValues;
-
-    public GroupCount(GROUP_VALUE_TYPE groupValue) {
-      this.groupValue = groupValue;
-      this.uniqueValues = new HashSet<>();
-    }
-  }
-
-  @Override
-  public boolean needsScores() {
-    return false; // not needed to fetch all values
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
deleted file mode 100644
index 4de04f0..0000000
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.grouping;
-
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.*;
-
-import java.io.IOException;
-import java.util.*;
-
-/** FirstPassGroupingCollector is the first of two passes necessary
- *  to collect grouped hits.  This pass gathers the top N sorted
- *  groups. Concrete subclasses define what a group is and how it
- *  is internally collected.
- *
- *  <p>See {@link org.apache.lucene.search.grouping} for more
- *  details including a full code example.</p>
- *
- * @lucene.experimental
- */
-abstract public class AbstractFirstPassGroupingCollector<GROUP_VALUE_TYPE> extends SimpleCollector {
-
-  private final FieldComparator<?>[] comparators;
-  private final LeafFieldComparator[] leafComparators;
-  private final int[] reversed;
-  private final int topNGroups;
-  private final boolean needsScores;
-  private final HashMap<GROUP_VALUE_TYPE, CollectedSearchGroup<GROUP_VALUE_TYPE>> groupMap;
-  private final int compIDXEnd;
-
-  // Set once we reach topNGroups unique groups:
-  /** @lucene.internal */
-  protected TreeSet<CollectedSearchGroup<GROUP_VALUE_TYPE>> orderedGroups;
-  private int docBase;
-  private int spareSlot;
-
-  /**
-   * Create the first pass collector.
-   *
-   *  @param groupSort The {@link Sort} used to sort the
-   *    groups.  The top sorted document within each group
-   *    according to groupSort, determines how that group
-   *    sorts against other groups.  This must be non-null,
-   *    ie, if you want to groupSort by relevance use
-   *    Sort.RELEVANCE.
-   *  @param topNGroups How many top groups to keep.
-   *  @throws IOException If I/O related errors occur
-   */
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  public AbstractFirstPassGroupingCollector(Sort groupSort, int topNGroups) throws IOException {
-    if (topNGroups < 1) {
-      throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")");
-    }
-
-    // TODO: allow null groupSort to mean "by relevance",
-    // and specialize it?
-
-    this.topNGroups = topNGroups;
-    this.needsScores = groupSort.needsScores();
-    final SortField[] sortFields = groupSort.getSort();
-    comparators = new FieldComparator[sortFields.length];
-    leafComparators = new LeafFieldComparator[sortFields.length];
-    compIDXEnd = comparators.length - 1;
-    reversed = new int[sortFields.length];
-    for (int i = 0; i < sortFields.length; i++) {
-      final SortField sortField = sortFields[i];
-
-      // use topNGroups + 1 so we have a spare slot to use for comparing (tracked by this.spareSlot):
-      comparators[i] = sortField.getComparator(topNGroups + 1, i);
-      reversed[i] = sortField.getReverse() ? -1 : 1;
-    }
-
-    spareSlot = topNGroups;
-    groupMap = new HashMap<>(topNGroups);
-  }
-
-  @Override
-  public boolean needsScores() {
-    return needsScores;
-  }
-
-  /**
-   * Returns top groups, starting from offset.  This may
-   * return null, if no groups were collected, or if the
-   * number of unique groups collected is &lt;= offset.
-   *
-   * @param groupOffset The offset in the collected groups
-   * @param fillFields Whether to fill to {@link SearchGroup#sortValues}
-   * @return top groups, starting from offset
-   */
-  public Collection<SearchGroup<GROUP_VALUE_TYPE>> getTopGroups(int groupOffset, boolean fillFields) throws IOException {
-
-    //System.out.println("FP.getTopGroups groupOffset=" + groupOffset + " fillFields=" + fillFields + " groupMap.size()=" + groupMap.size());
-
-    if (groupOffset < 0) {
-      throw new IllegalArgumentException("groupOffset must be >= 0 (got " + groupOffset + ")");
-    }
-
-    if (groupMap.size() <= groupOffset) {
-      return null;
-    }
-
-    if (orderedGroups == null) {
-      buildSortedSet();
-    }
-
-    final Collection<SearchGroup<GROUP_VALUE_TYPE>> result = new ArrayList<>();
-    int upto = 0;
-    final int sortFieldCount = comparators.length;
-    for(CollectedSearchGroup<GROUP_VALUE_TYPE> group : orderedGroups) {
-      if (upto++ < groupOffset) {
-        continue;
-      }
-      //System.out.println("  group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString()));
-      SearchGroup<GROUP_VALUE_TYPE> searchGroup = new SearchGroup<>();
-      searchGroup.groupValue = group.groupValue;
-      if (fillFields) {
-        searchGroup.sortValues = new Object[sortFieldCount];
-        for(int sortFieldIDX=0;sortFieldIDX<sortFieldCount;sortFieldIDX++) {
-          searchGroup.sortValues[sortFieldIDX] = comparators[sortFieldIDX].value(group.comparatorSlot);
-        }
-      }
-      result.add(searchGroup);
-    }
-    //System.out.println("  return " + result.size() + " groups");
-    return result;
-  }
-
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    for (LeafFieldComparator comparator : leafComparators) {
-      comparator.setScorer(scorer);
-    }
-  }
-
-  @Override
-  public void collect(int doc) throws IOException {
-    //System.out.println("FP.collect doc=" + doc);
-
-    // If orderedGroups != null we already have collected N groups and
-    // can short circuit by comparing this document to the bottom group,
-    // without having to find what group this document belongs to.
-    
-    // Even if this document belongs to a group in the top N, we'll know that
-    // we don't have to update that group.
-
-    // Downside: if the number of unique groups is very low, this is
-    // wasted effort as we will most likely be updating an existing group.
-    if (orderedGroups != null) {
-      for (int compIDX = 0;; compIDX++) {
-        final int c = reversed[compIDX] * leafComparators[compIDX].compareBottom(doc);
-        if (c < 0) {
-          // Definitely not competitive. So don't even bother to continue
-          return;
-        } else if (c > 0) {
-          // Definitely competitive.
-          break;
-        } else if (compIDX == compIDXEnd) {
-          // Here c=0. If we're at the last comparator, this doc is not
-          // competitive, since docs are visited in doc Id order, which means
-          // this doc cannot compete with any other document in the queue.
-          return;
-        }
-      }
-    }
-
-    // TODO: should we add option to mean "ignore docs that
-    // don't have the group field" (instead of stuffing them
-    // under null group)?
-    final GROUP_VALUE_TYPE groupValue = getDocGroupValue(doc);
-
-    final CollectedSearchGroup<GROUP_VALUE_TYPE> group = groupMap.get(groupValue);
-
-    if (group == null) {
-
-      // First time we are seeing this group, or, we've seen
-      // it before but it fell out of the top N and is now
-      // coming back
-
-      if (groupMap.size() < topNGroups) {
-
-        // Still in startup transient: we have not
-        // seen enough unique groups to start pruning them;
-        // just keep collecting them
-
-        // Add a new CollectedSearchGroup:
-        CollectedSearchGroup<GROUP_VALUE_TYPE> sg = new CollectedSearchGroup<>();
-        sg.groupValue = copyDocGroupValue(groupValue, null);
-        sg.comparatorSlot = groupMap.size();
-        sg.topDoc = docBase + doc;
-        for (LeafFieldComparator fc : leafComparators) {
-          fc.copy(sg.comparatorSlot, doc);
-        }
-        groupMap.put(sg.groupValue, sg);
-
-        if (groupMap.size() == topNGroups) {
-          // End of startup transient: we now have max
-          // number of groups; from here on we will drop
-          // bottom group when we insert new one:
-          buildSortedSet();
-        }
-
-        return;
-      }
-
-      // We already tested that the document is competitive, so replace
-      // the bottom group with this new group.
-      final CollectedSearchGroup<GROUP_VALUE_TYPE> bottomGroup = orderedGroups.pollLast();
-      assert orderedGroups.size() == topNGroups -1;
-
-      groupMap.remove(bottomGroup.groupValue);
-
-      // reuse the removed CollectedSearchGroup
-      bottomGroup.groupValue = copyDocGroupValue(groupValue, bottomGroup.groupValue);
-      bottomGroup.topDoc = docBase + doc;
-
-      for (LeafFieldComparator fc : leafComparators) {
-        fc.copy(bottomGroup.comparatorSlot, doc);
-      }
-
-      groupMap.put(bottomGroup.groupValue, bottomGroup);
-      orderedGroups.add(bottomGroup);
-      assert orderedGroups.size() == topNGroups;
-
-      final int lastComparatorSlot = orderedGroups.last().comparatorSlot;
-      for (LeafFieldComparator fc : leafComparators) {
-        fc.setBottom(lastComparatorSlot);
-      }
-
-      return;
-    }
-
-    // Update existing group:
-    for (int compIDX = 0;; compIDX++) {
-      leafComparators[compIDX].copy(spareSlot, doc);
-
-      final int c = reversed[compIDX] * comparators[compIDX].compare(group.comparatorSlot, spareSlot);
-      if (c < 0) {
-        // Definitely not competitive.
-        return;
-      } else if (c > 0) {
-        // Definitely competitive; set remaining comparators:
-        for (int compIDX2=compIDX+1; compIDX2<comparators.length; compIDX2++) {
-          leafComparators[compIDX2].copy(spareSlot, doc);
-        }
-        break;
-      } else if (compIDX == compIDXEnd) {
-        // Here c=0. If we're at the last comparator, this doc is not
-        // competitive, since docs are visited in doc Id order, which means
-        // this doc cannot compete with any other document in the queue.
-        return;
-      }
-    }
-
-    // Remove before updating the group since lookup is done via comparators
-    // TODO: optimize this
-
-    final CollectedSearchGroup<GROUP_VALUE_TYPE> prevLast;
-    if (orderedGroups != null) {
-      prevLast = orderedGroups.last();
-      orderedGroups.remove(group);
-      assert orderedGroups.size() == topNGroups-1;
-    } else {
-      prevLast = null;
-    }
-
-    group.topDoc = docBase + doc;
-
-    // Swap slots
-    final int tmp = spareSlot;
-    spareSlot = group.comparatorSlot;
-    group.comparatorSlot = tmp;
-
-    // Re-add the changed group
-    if (orderedGroups != null) {
-      orderedGroups.add(group);
-      assert orderedGroups.size() == topNGroups;
-      final CollectedSearchGroup<?> newLast = orderedGroups.last();
-      // If we changed the value of the last group, or changed which group was last, then update bottom:
-      if (group == newLast || prevLast != newLast) {
-        for (LeafFieldComparator fc : leafComparators) {
-          fc.setBottom(newLast.comparatorSlot);
-        }
-      }
-    }
-  }
-
-  private void buildSortedSet() throws IOException {
-    final Comparator<CollectedSearchGroup<?>> comparator = new Comparator<CollectedSearchGroup<?>>() {
-      @Override
-      public int compare(CollectedSearchGroup<?> o1, CollectedSearchGroup<?> o2) {
-        for (int compIDX = 0;; compIDX++) {
-          FieldComparator<?> fc = comparators[compIDX];
-          final int c = reversed[compIDX] * fc.compare(o1.comparatorSlot, o2.comparatorSlot);
-          if (c != 0) {
-            return c;
-          } else if (compIDX == compIDXEnd) {
-            return o1.topDoc - o2.topDoc;
-          }
-        }
-      }
-    };
-
-    orderedGroups = new TreeSet<>(comparator);
-    orderedGroups.addAll(groupMap.values());
-    assert orderedGroups.size() > 0;
-
-    for (LeafFieldComparator fc : leafComparators) {
-      fc.setBottom(orderedGroups.last().comparatorSlot);
-    }
-  }
-
-  @Override
-  protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
-    docBase = readerContext.docBase;
-    for (int i=0; i<comparators.length; i++) {
-      leafComparators[i] = comparators[i].getLeafComparator(readerContext);
-    }
-  }
-
-  /**
-   * Returns the group value for the specified doc.
-   *
-   * @param doc The specified doc
-   * @return the group value for the specified doc
-   */
-  protected abstract GROUP_VALUE_TYPE getDocGroupValue(int doc) throws IOException;
-
-  /**
-   * Returns a copy of the specified group value by creating a new instance and copying the value from the specified
-   * groupValue in the new instance. Or optionally the reuse argument can be used to copy the group value in.
-   *
-   * @param groupValue The group value to copy
-   * @param reuse Optionally a reuse instance to prevent a new instance creation
-   * @return a copy of the specified group value
-   */
-  protected abstract GROUP_VALUE_TYPE copyDocGroupValue(GROUP_VALUE_TYPE groupValue, GROUP_VALUE_TYPE reuse);
-
-}
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractGroupFacetCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractGroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractGroupFacetCollector.java
deleted file mode 100644
index 6824684..0000000
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractGroupFacetCollector.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.grouping;
-
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.SimpleCollector;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.PriorityQueue;
-
-import java.io.IOException;
-import java.util.*;
-
-/**
- * Base class for computing grouped facets.
- *
- * @lucene.experimental
- */
-public abstract class AbstractGroupFacetCollector extends SimpleCollector {
-
-  protected final String groupField;
-  protected final String facetField;
-  protected final BytesRef facetPrefix;
-  protected final List<SegmentResult> segmentResults;
-
-  protected int[] segmentFacetCounts;
-  protected int segmentTotalCount;
-  protected int startFacetOrd;
-  protected int endFacetOrd;
-
-  protected AbstractGroupFacetCollector(String groupField, String facetField, BytesRef facetPrefix) {
-    this.groupField = groupField;
-    this.facetField = facetField;
-    this.facetPrefix = facetPrefix;
-    segmentResults = new ArrayList<>();
-  }
-
-  /**
-   * Returns grouped facet results that were computed over zero or more segments.
-   * Grouped facet counts are merged from zero or more segment results.
-   *
-   * @param size The total number of facets to include. This is typically offset + limit
-   * @param minCount The minimum count a facet entry should have to be included in the grouped facet result
-   * @param orderByCount Whether to sort the facet entries by facet entry count. If <code>false</code> then the facets
-   *                     are sorted lexicographically in ascending order.
-   * @return grouped facet results
-   * @throws IOException If I/O related errors occur during merging segment grouped facet counts.
-   */
-  public GroupedFacetResult mergeSegmentResults(int size, int minCount, boolean orderByCount) throws IOException {
-    if (segmentFacetCounts != null) {
-      segmentResults.add(createSegmentResult());
-      segmentFacetCounts = null; // reset
-    }
-
-    int totalCount = 0;
-    int missingCount = 0;
-    SegmentResultPriorityQueue segments = new SegmentResultPriorityQueue(segmentResults.size());
-    for (SegmentResult segmentResult : segmentResults) {
-      missingCount += segmentResult.missing;
-      if (segmentResult.mergePos >= segmentResult.maxTermPos) {
-        continue;
-      }
-      totalCount += segmentResult.total;
-      segments.add(segmentResult);
-    }
-
-    GroupedFacetResult facetResult = new GroupedFacetResult(size, minCount, orderByCount, totalCount, missingCount);
-    while (segments.size() > 0) {
-      SegmentResult segmentResult = segments.top();
-      BytesRef currentFacetValue = BytesRef.deepCopyOf(segmentResult.mergeTerm);
-      int count = 0;
-
-      do {
-        count += segmentResult.counts[segmentResult.mergePos++];
-        if (segmentResult.mergePos < segmentResult.maxTermPos) {
-          segmentResult.nextTerm();
-          segmentResult = segments.updateTop();
-        } else {
-          segments.pop();
-          segmentResult = segments.top();
-          if (segmentResult == null) {
-            break;
-          }
-        }
-      } while (currentFacetValue.equals(segmentResult.mergeTerm));
-      facetResult.addFacetCount(currentFacetValue, count);
-    }
-    return facetResult;
-  }
-
-  protected abstract SegmentResult createSegmentResult() throws IOException;
-
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-  }
-
-  @Override
-  public boolean needsScores() {
-    return false;
-  }
-
-  /**
-   * The grouped facet result. Containing grouped facet entries, total count and total missing count.
-   */
-  public static class GroupedFacetResult {
-
-    private final static Comparator<FacetEntry> orderByCountAndValue = new Comparator<FacetEntry>() {
-
-      @Override
-      public int compare(FacetEntry a, FacetEntry b) {
-        int cmp = b.count - a.count; // Highest count first!
-        if (cmp != 0) {
-          return cmp;
-        }
-        return a.value.compareTo(b.value);
-      }
-
-    };
-
-    private final static Comparator<FacetEntry> orderByValue = new Comparator<FacetEntry>() {
-
-      @Override
-      public int compare(FacetEntry a, FacetEntry b) {
-        return a.value.compareTo(b.value);
-      }
-
-    };
-
-    private final int maxSize;
-    private final NavigableSet<FacetEntry> facetEntries;
-    private final int totalMissingCount;
-    private final int totalCount;
-
-    private int currentMin;
-
-    public GroupedFacetResult(int size, int minCount, boolean orderByCount, int totalCount, int totalMissingCount) {
-      this.facetEntries = new TreeSet<>(orderByCount ? orderByCountAndValue : orderByValue);
-      this.totalMissingCount = totalMissingCount;
-      this.totalCount = totalCount;
-      maxSize = size;
-      currentMin = minCount;
-    }
-
-    public void addFacetCount(BytesRef facetValue, int count) {
-      if (count < currentMin) {
-        return;
-      }
-
-      FacetEntry facetEntry = new FacetEntry(facetValue, count);
-      if (facetEntries.size() == maxSize) {
-        if (facetEntries.higher(facetEntry) == null) {
-          return;
-        }
-        facetEntries.pollLast();
-      }
-      facetEntries.add(facetEntry);
-
-      if (facetEntries.size() == maxSize) {
-        currentMin = facetEntries.last().count;
-      }
-    }
-
-    /**
-     * Returns a list of facet entries to be rendered based on the specified offset and limit.
-     * The facet entries are retrieved from the facet entries collected during merging.
-     *
-     * @param offset The offset in the collected facet entries during merging
-     * @param limit The number of facets to return starting from the offset.
-     * @return a list of facet entries to be rendered based on the specified offset and limit
-     */
-    public List<FacetEntry> getFacetEntries(int offset, int limit) {
-      List<FacetEntry> entries = new LinkedList<>();
-
-      int skipped = 0;
-      int included = 0;
-      for (FacetEntry facetEntry : facetEntries) {
-        if (skipped < offset) {
-          skipped++;
-          continue;
-        }
-        if (included++ >= limit) {
-          break;
-        }
-        entries.add(facetEntry);
-      }
-      return entries;
-    }
-
-    /**
-     * Returns the sum of all facet entries counts.
-     *
-     * @return the sum of all facet entries counts
-     */
-    public int getTotalCount() {
-      return totalCount;
-    }
-
-    /**
-     * Returns the number of groups that didn't have a facet value.
-     *
-     * @return the number of groups that didn't have a facet value
-     */
-    public int getTotalMissingCount() {
-      return totalMissingCount;
-    }
-  }
-
-  /**
-   * Represents a facet entry with a value and a count.
-   */
-  public static class FacetEntry {
-
-    private final BytesRef value;
-    private final int count;
-
-    public FacetEntry(BytesRef value, int count) {
-      this.value = value;
-      this.count = count;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) return true;
-      if (o == null || getClass() != o.getClass()) return false;
-
-      FacetEntry that = (FacetEntry) o;
-
-      if (count != that.count) return false;
-      if (!value.equals(that.value)) return false;
-
-      return true;
-    }
-
-    @Override
-    public int hashCode() {
-      int result = value.hashCode();
-      result = 31 * result + count;
-      return result;
-    }
-
-    @Override
-    public String toString() {
-      return "FacetEntry{" +
-          "value=" + value.utf8ToString() +
-          ", count=" + count +
-          '}';
-    }
-
-    /**
-     * @return The value of this facet entry
-     */
-    public BytesRef getValue() {
-      return value;
-    }
-
-    /**
-     * @return The count (number of groups) of this facet entry.
-     */
-    public int getCount() {
-      return count;
-    }
-  }
-
-  /**
-   * Contains the local grouped segment counts for a particular segment.
-   * Each <code>SegmentResult</code> must be added together.
-   */
-  protected abstract static class SegmentResult {
-
-    protected final int[] counts;
-    protected final int total;
-    protected final int missing;
-    protected final int maxTermPos;
-
-    protected BytesRef mergeTerm;
-    protected int mergePos;
-
-    protected SegmentResult(int[] counts, int total, int missing, int maxTermPos) {
-      this.counts = counts;
-      this.total = total;
-      this.missing = missing;
-      this.maxTermPos = maxTermPos;
-    }
-
-    /**
-     * Go to next term in this <code>SegmentResult</code> in order to retrieve the grouped facet counts.
-     *
-     * @throws IOException If I/O related errors occur
-     */
-    protected abstract void nextTerm() throws IOException;
-
-  }
-
-  private static class SegmentResultPriorityQueue extends PriorityQueue<SegmentResult> {
-
-    SegmentResultPriorityQueue(int maxSize) {
-      super(maxSize);
-    }
-
-    @Override
-    protected boolean lessThan(SegmentResult a, SegmentResult b) {
-      return a.mergeTerm.compareTo(b.mergeTerm) < 0;
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java
deleted file mode 100644
index 13b6189..0000000
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.grouping;
-
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.*;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-
-/**
- * SecondPassGroupingCollector is the second of two passes
- * necessary to collect grouped docs.  This pass gathers the
- * top N documents per top group computed from the
- * first pass. Concrete subclasses define what a group is and how it
- * is internally collected.
- *
- * <p>See {@link org.apache.lucene.search.grouping} for more
- * details including a full code example.</p>
- *
- * @lucene.experimental
- */
-public abstract class AbstractSecondPassGroupingCollector<GROUP_VALUE_TYPE> extends SimpleCollector {
-
-  private final Collection<SearchGroup<GROUP_VALUE_TYPE>> groups;
-  private final Sort groupSort;
-  private final Sort withinGroupSort;
-  private final int maxDocsPerGroup;
-  private final boolean needsScores;
-  protected final Map<GROUP_VALUE_TYPE, SearchGroupDocs<GROUP_VALUE_TYPE>> groupMap;
-
-  protected SearchGroupDocs<GROUP_VALUE_TYPE>[] groupDocs;
-
-  private int totalHitCount;
-  private int totalGroupedHitCount;
-
-  public AbstractSecondPassGroupingCollector(Collection<SearchGroup<GROUP_VALUE_TYPE>> groups, Sort groupSort, Sort withinGroupSort,
-                                             int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields)
-    throws IOException {
-
-    //System.out.println("SP init");
-    if (groups.isEmpty()) {
-      throw new IllegalArgumentException("no groups to collect (groups is empty)");
-    }
-
-    this.groups = Objects.requireNonNull(groups);
-    this.groupSort = Objects.requireNonNull(groupSort);
-    this.withinGroupSort = Objects.requireNonNull(withinGroupSort);
-    this.maxDocsPerGroup = maxDocsPerGroup;
-    this.needsScores = getScores || getMaxScores || withinGroupSort.needsScores();
-
-    this.groupMap = new HashMap<>(groups.size());
-    for (SearchGroup<GROUP_VALUE_TYPE> group : groups) {
-      //System.out.println("  prep group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString()));
-      final TopDocsCollector<?> collector;
-      if (withinGroupSort.equals(Sort.RELEVANCE)) { // optimize to use TopScoreDocCollector
-        // Sort by score
-        collector = TopScoreDocCollector.create(maxDocsPerGroup);
-      } else {
-        // Sort by fields
-        collector = TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, fillSortFields, getScores, getMaxScores);
-      }
-      groupMap.put(group.groupValue, new SearchGroupDocs<>(group.groupValue, collector));
-    }
-  }
-
-  @Override
-  public boolean needsScores() {
-    return needsScores;
-  }
-
-  @Override
-  public void setScorer(Scorer scorer) throws IOException {
-    for (SearchGroupDocs<GROUP_VALUE_TYPE> group : groupMap.values()) {
-      group.leafCollector.setScorer(scorer);
-    }
-  }
-
-  @Override
-  public void collect(int doc) throws IOException {
-    totalHitCount++;
-    SearchGroupDocs<GROUP_VALUE_TYPE> group = retrieveGroup(doc);
-    if (group != null) {
-      totalGroupedHitCount++;
-      group.leafCollector.collect(doc);
-    }
-  }
-
-  /**
-   * Returns the group the specified doc belongs to or <code>null</code> if no group could be retrieved.
-   *
-   * @param doc The specified doc
-   * @return the group the specified doc belongs to or <code>null</code> if no group could be retrieved
-   * @throws IOException If an I/O related error occurred
-   */
-  protected abstract SearchGroupDocs<GROUP_VALUE_TYPE> retrieveGroup(int doc) throws IOException;
-
-  @Override
-  protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
-    //System.out.println("SP.setNextReader");
-    for (SearchGroupDocs<GROUP_VALUE_TYPE> group : groupMap.values()) {
-      group.leafCollector = group.collector.getLeafCollector(readerContext);
-    }
-  }
-
-  public TopGroups<GROUP_VALUE_TYPE> getTopGroups(int withinGroupOffset) {
-    @SuppressWarnings({"unchecked","rawtypes"})
-    final GroupDocs<GROUP_VALUE_TYPE>[] groupDocsResult = (GroupDocs<GROUP_VALUE_TYPE>[]) new GroupDocs[groups.size()];
-
-    int groupIDX = 0;
-    float maxScore = Float.MIN_VALUE;
-    for(SearchGroup<?> group : groups) {
-      final SearchGroupDocs<GROUP_VALUE_TYPE> groupDocs = groupMap.get(group.groupValue);
-      final TopDocs topDocs = groupDocs.collector.topDocs(withinGroupOffset, maxDocsPerGroup);
-      groupDocsResult[groupIDX++] = new GroupDocs<>(Float.NaN,
-                                                                    topDocs.getMaxScore(),
-                                                                    topDocs.totalHits,
-                                                                    topDocs.scoreDocs,
-                                                                    groupDocs.groupValue,
-                                                                    group.sortValues);
-      maxScore = Math.max(maxScore, topDocs.getMaxScore());
-    }
-
-    return new TopGroups<>(groupSort.getSort(),
-                                           withinGroupSort.getSort(),
-                                           totalHitCount, totalGroupedHitCount, groupDocsResult,
-                                           maxScore);
-  }
-
-
-  // TODO: merge with SearchGroup or not?
-  // ad: don't need to build a new hashmap
-  // disad: blows up the size of SearchGroup if we need many of them, and couples implementations
-  public class SearchGroupDocs<GROUP_VALUE_TYPE> {
-
-    public final GROUP_VALUE_TYPE groupValue;
-    public final TopDocsCollector<?> collector;
-    public LeafCollector leafCollector;
-
-    public SearchGroupDocs(GROUP_VALUE_TYPE groupValue, TopDocsCollector<?> collector) {
-      this.groupValue = groupValue;
-      this.collector = collector;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java
new file mode 100644
index 0000000..b5fbdc3
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupHeadsCollector.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search.grouping;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.util.FixedBitSet;
+
+/**
+ * This collector specializes in collecting the most relevant document (group head) for each group that matches the query.
+ *
+ * @lucene.experimental
+ */
+@SuppressWarnings({"unchecked","rawtypes"})
+public abstract class AllGroupHeadsCollector<T> extends SimpleCollector {
+
+  protected final int[] reversed;
+  protected final int compIDXEnd;
+  protected final TemporalResult temporalResult;
+
+  protected AllGroupHeadsCollector(int numberOfSorts) {
+    this.reversed = new int[numberOfSorts];
+    this.compIDXEnd = numberOfSorts - 1;
+    temporalResult = new TemporalResult();
+  }
+
+  /**
+   * @param maxDoc The maxDoc of the top level {@link IndexReader}.
+   * @return a {@link FixedBitSet} containing all group heads.
+   */
+  public FixedBitSet retrieveGroupHeads(int maxDoc) {
+    FixedBitSet bitSet = new FixedBitSet(maxDoc);
+
+    Collection<? extends GroupHead<T>> groupHeads = getCollectedGroupHeads();
+    for (GroupHead groupHead : groupHeads) {
+      bitSet.set(groupHead.doc);
+    }
+
+    return bitSet;
+  }
+
+  /**
+   * @return an int array containing all group heads. The size of the array is equal to the number of collected unique groups.
+   */
+  public int[] retrieveGroupHeads() {
+    Collection<? extends GroupHead<T>> groupHeads = getCollectedGroupHeads();
+    int[] docHeads = new int[groupHeads.size()];
+
+    int i = 0;
+    for (GroupHead groupHead : groupHeads) {
+      docHeads[i++] = groupHead.doc;
+    }
+
+    return docHeads;
+  }
+
+  /**
+   * @return the number of group heads found for a query.
+   */
+  public int groupHeadsSize() {
+    return getCollectedGroupHeads().size();
+  }
+
+  /**
+   * Retrieves the group head for the specified doc and puts it into {@link #temporalResult}.
+   * If the group head wasn't encountered before, it is added to the collected group heads.
+   * <p>
+   * The {@link TemporalResult#stop} property will be <code>true</code> if the group head wasn't encountered before,
+   * and <code>false</code> otherwise.
+   *
+   * @param doc The document to retrieve the group head for.
+   * @throws IOException If I/O related errors occur
+   */
+  protected abstract void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException;
+
+  /**
+   * Returns the collected group heads.
+   * Subsequent calls should return the same group heads.
+   *
+   * @return the collected group heads
+   */
+  protected abstract Collection<? extends GroupHead<T>> getCollectedGroupHeads();
+
+  @Override
+  public void collect(int doc) throws IOException {
+    retrieveGroupHeadAndAddIfNotExist(doc);
+    if (temporalResult.stop) {
+      return;
+    }
+    GroupHead<T> groupHead = temporalResult.groupHead;
+
+    // OK, now check whether the current doc is more relevant than the current group head for this group
+    for (int compIDX = 0; ; compIDX++) {
+      final int c = reversed[compIDX] * groupHead.compare(compIDX, doc);
+      if (c < 0) {
+        // Definitely not competitive. So don't even bother to continue
+        return;
+      } else if (c > 0) {
+        // Definitely competitive.
+        break;
+      } else if (compIDX == compIDXEnd) {
+        // Here c=0. If we're at the last comparator, this doc is not
+        // competitive, since docs are visited in doc Id order, which means
+        // this doc cannot compete with any other document in the queue.
+        return;
+      }
+    }
+    groupHead.updateDocHead(doc);
+  }
+
+  /**
+   * Contains the result of group head retrieval. A single instance is reused
+   * for every collected document to avoid allocating a new object per collect.
+   */
+  protected class TemporalResult {
+
+    public GroupHead<T> groupHead;
+    public boolean stop;
+
+  }
+
+  /**
+   * Represents a group head. A group head is the most relevant document for a particular group.
+   * The relevance is usually determined by the sort.
+   *
+   * The group head contains a group value with its associated most relevant document id.
+   */
+  public static abstract class GroupHead<T> {
+
+    public final T groupValue;
+    public int doc;
+
+    protected GroupHead(T groupValue, int doc) {
+      this.groupValue = groupValue;
+      this.doc = doc;
+    }
+
+    /**
+     * Compares the specified document for a specified comparator against the current most relevant document.
+     *
+     * @param compIDX The comparator index of the specified comparator.
+     * @param doc The specified document.
+     * @return -1 if the specified document is not competitive with the current most relevant
+     *         document, 1 if it is competitive, and 0 otherwise.
+     * @throws IOException If I/O related errors occur
+     */
+    protected abstract int compare(int compIDX, int doc) throws IOException;
+
+    /**
+     * Updates the current most relevant document with the specified document.
+     *
+     * @param doc The specified document
+     * @throws IOException If I/O related errors occur
+     */
+    protected abstract void updateDocHead(int doc) throws IOException;
+
+  }
+
+}
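
A minimal sketch of how a concrete subclass could plug into the AllGroupHeadsCollector
API above. The class name, the (doc % 4) group key, and the "first doc wins" head rule
are illustrative assumptions, not part of this patch; real subclasses derive both from
doc values and sort comparators. It uses only members shown in the diff above:

import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.grouping.AllGroupHeadsCollector;

// Toy subclass: groups docs by (globalDoc % 4) and keeps the first doc seen
// in each group as that group's head.
public class FirstDocGroupHeadsCollector extends AllGroupHeadsCollector<Integer> {

  private final Map<Integer, GroupHead<Integer>> heads = new HashMap<>();
  private int docBase;

  public FirstDocGroupHeadsCollector() {
    super(1);        // one pseudo sort
    reversed[0] = 1; // the base class leaves reversed[] for subclasses to fill
  }

  @Override
  protected void doSetNextReader(LeafReaderContext context) throws IOException {
    docBase = context.docBase;
  }

  @Override
  public boolean needsScores() {
    return false; // head selection here ignores scores
  }

  @Override
  protected void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException {
    Integer groupKey = (docBase + doc) % 4; // illustrative group key
    GroupHead<Integer> head = heads.get(groupKey);
    temporalResult.stop = (head == null);   // true means: head was just added
    if (head == null) {
      head = new FirstDocHead(groupKey, docBase + doc);
      heads.put(groupKey, head);
    }
    temporalResult.groupHead = head;
  }

  @Override
  protected Collection<? extends GroupHead<Integer>> getCollectedGroupHeads() {
    return heads.values();
  }

  private static final class FirstDocHead extends GroupHead<Integer> {
    FirstDocHead(Integer groupValue, int doc) {
      super(groupValue, doc);
    }

    @Override
    protected int compare(int compIDX, int doc) {
      return -1; // later docs are never competitive, so the first doc stays head
    }

    @Override
    protected void updateDocHead(int doc) {
      this.doc = doc; // unreachable with compare() above; shown for completeness
    }
  }
}

After the search runs, retrieveGroupHeads(reader.maxDoc()) turns these heads into a
FixedBitSet, exactly as the javadoc above describes.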

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java
new file mode 100644
index 0000000..af697af
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search.grouping;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * A collector that collects all groups that match the
+ * query. Only the group value is collected, and the order
+ * is undefined.  This collector does not determine
+ * the most relevant document of a group.
+ * <p>
+ * This is an abstract version. Concrete implementations define
+ * what a group actually is and how it is internally collected.
+ *
+ * @lucene.experimental
+ */
+public abstract class AllGroupsCollector<T> extends SimpleCollector {
+
+  /**
+   * Returns the total number of groups for the executed search.
+   * This is a convenience method. The following code snippet has the same effect: <pre>getGroups().size()</pre>
+   *
+   * @return The total number of groups for the executed search
+   */
+  public int getGroupCount() {
+    return getGroups().size();
+  }
+
+  /**
+   * Returns the group values
+   * <p>
+   * This is an unordered collection of group values. For each group that matched the query there is a {@link BytesRef}
+   * representing a group value.
+   *
+   * @return the group values
+   */
+  public abstract Collection<T> getGroups();
+
+  // Intentionally empty: this collector does not use scores
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {}
+
+  @Override
+  public boolean needsScores() {
+    return false; // the result is unaffected by relevancy
+  }
+}
\ No newline at end of file
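
Since AllGroupsCollector leaves only getGroups() (plus collect and, optionally,
doSetNextReader from SimpleCollector) to subclasses, a concrete implementation can be
tiny. A hedged sketch: the parity group key and the class name are invented here for
illustration, while real implementations read the group value from doc values:

import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.grouping.AllGroupsCollector;

// Toy subclass: each doc's group value is (globalDoc % 2), so at most two
// groups are collected and getGroupCount() returns their number.
public class ParityGroupsCollector extends AllGroupsCollector<Integer> {

  private final Set<Integer> groups = new HashSet<>();
  private int docBase;

  @Override
  protected void doSetNextReader(LeafReaderContext context) throws IOException {
    docBase = context.docBase;
  }

  @Override
  public void collect(int doc) throws IOException {
    groups.add((docBase + doc) % 2); // illustrative group key
  }

  @Override
  public Collection<Integer> getGroups() {
    return groups;
  }
}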

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/CollectedSearchGroup.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/CollectedSearchGroup.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/CollectedSearchGroup.java
index af6fd04..5e4bf14 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/CollectedSearchGroup.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/CollectedSearchGroup.java
@@ -19,7 +19,7 @@ package org.apache.lucene.search.grouping;
 import org.apache.lucene.search.FieldComparator; // javadocs
 
 /** 
- * Expert: representation of a group in {@link AbstractFirstPassGroupingCollector},
+ * Expert: representation of a group in {@link FirstPassGroupingCollector},
  * tracking the top doc and {@link FieldComparator} slot.
  * @lucene.internal */
 public class CollectedSearchGroup<T> extends SearchGroup<T> {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java
new file mode 100644
index 0000000..54d752c
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/DistinctValuesCollector.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search.grouping;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.search.SimpleCollector;
+
+/**
+ * A second pass grouping collector that keeps track of distinct values for a specified field for the top N groups.
+ *
+ * @lucene.experimental
+ */
+public abstract class DistinctValuesCollector<T> extends SimpleCollector {
+
+  /**
+   * Returns all unique values for each top N group.
+   *
+   * @return all unique values for each top N group
+   */
+  public abstract List<GroupCount<T>> getGroups();
+
+  /**
+   * Returned by {@link DistinctValuesCollector#getGroups()},
+   * representing the value and set of distinct values for the group.
+   */
+  public static class GroupCount<T> {
+
+    public final T groupValue;
+    public final Set<T> uniqueValues;
+
+    public GroupCount(T groupValue) {
+      this.groupValue = groupValue;
+      this.uniqueValues = new HashSet<>();
+    }
+  }
+
+  @Override
+  public boolean needsScores() {
+    return false; // scores are not needed to fetch all values
+  }
+
+}
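
DistinctValuesCollector's contract is just getGroups() returning a GroupCount per group,
so a sketch of a concrete subclass stays short. The names and the toy notion of group
(doc parity) and of distinct value (doc modulo 10) are assumptions for illustration only;
a real second pass would restrict itself to the top N groups from the first pass:

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.grouping.DistinctValuesCollector;

// Toy subclass: per parity group, track the distinct values of (globalDoc % 10)
// seen across all collected documents.
public class ParityDistinctValuesCollector extends DistinctValuesCollector<Integer> {

  private final Map<Integer, GroupCount<Integer>> counts = new HashMap<>();
  private int docBase;

  @Override
  protected void doSetNextReader(LeafReaderContext context) throws IOException {
    docBase = context.docBase;
  }

  @Override
  public void collect(int doc) throws IOException {
    int globalDoc = docBase + doc;
    GroupCount<Integer> gc =
        counts.computeIfAbsent(globalDoc % 2, k -> new GroupCount<>(k)); // illustrative group key
    gc.uniqueValues.add(globalDoc % 10);                                 // illustrative field value
  }

  @Override
  public List<GroupCount<Integer>> getGroups() {
    return new ArrayList<>(counts.values());
  }
}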

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java
new file mode 100644
index 0000000..ef47f96
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java
@@ -0,0 +1,363 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search.grouping;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.TreeSet;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.LeafFieldComparator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+
+/** FirstPassGroupingCollector is the first of two passes necessary
+ *  to collect grouped hits.  This pass gathers the top N sorted
+ *  groups. Concrete subclasses define what a group is and how it
+ *  is internally collected.
+ *
+ *  <p>See {@link org.apache.lucene.search.grouping} for more
+ *  details including a full code example.</p>
+ *
+ * @lucene.experimental
+ */
+abstract public class FirstPassGroupingCollector<T> extends SimpleCollector {
+
+  private final FieldComparator<?>[] comparators;
+  private final LeafFieldComparator[] leafComparators;
+  private final int[] reversed;
+  private final int topNGroups;
+  private final boolean needsScores;
+  private final HashMap<T, CollectedSearchGroup<T>> groupMap;
+  private final int compIDXEnd;
+
+  // Set once we reach topNGroups unique groups:
+  /** @lucene.internal */
+  protected TreeSet<CollectedSearchGroup<T>> orderedGroups;
+  private int docBase;
+  private int spareSlot;
+
+  /**
+   * Create the first pass collector.
+   *
+   *  @param groupSort The {@link Sort} used to sort the
+   *    groups.  The top sorted document within each group,
+   *    according to groupSort, determines how that group
+   *    sorts against other groups.  This must be non-null;
+   *    i.e., if you want to sort the groups by relevance,
+   *    use Sort.RELEVANCE.
+   *  @param topNGroups How many top groups to keep.
+   *  @throws IOException If I/O related errors occur
+   */
+  @SuppressWarnings({"unchecked", "rawtypes"})
+  public FirstPassGroupingCollector(Sort groupSort, int topNGroups) throws IOException {
+    if (topNGroups < 1) {
+      throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")");
+    }
+
+    // TODO: allow null groupSort to mean "by relevance",
+    // and specialize it?
+
+    this.topNGroups = topNGroups;
+    this.needsScores = groupSort.needsScores();
+    final SortField[] sortFields = groupSort.getSort();
+    comparators = new FieldComparator[sortFields.length];
+    leafComparators = new LeafFieldComparator[sortFields.length];
+    compIDXEnd = comparators.length - 1;
+    reversed = new int[sortFields.length];
+    for (int i = 0; i < sortFields.length; i++) {
+      final SortField sortField = sortFields[i];
+
+      // use topNGroups + 1 so we have a spare slot to use for comparing (tracked by this.spareSlot):
+      comparators[i] = sortField.getComparator(topNGroups + 1, i);
+      reversed[i] = sortField.getReverse() ? -1 : 1;
+    }
+
+    spareSlot = topNGroups;
+    groupMap = new HashMap<>(topNGroups);
+  }
+
+  @Override
+  public boolean needsScores() {
+    return needsScores;
+  }
+
+  /**
+   * Returns top groups, starting from offset.  This may
+   * return null, if no groups were collected, or if the
+   * number of unique groups collected is &lt;= offset.
+   *
+   * @param groupOffset The offset in the collected groups
+   * @param fillFields Whether to fill in {@link SearchGroup#sortValues}
+   * @return top groups, starting from offset
+   */
+  public Collection<SearchGroup<T>> getTopGroups(int groupOffset, boolean fillFields) throws IOException {
+
+    //System.out.println("FP.getTopGroups groupOffset=" + groupOffset + " fillFields=" + fillFields + " groupMap.size()=" + groupMap.size());
+
+    if (groupOffset < 0) {
+      throw new IllegalArgumentException("groupOffset must be >= 0 (got " + groupOffset + ")");
+    }
+
+    if (groupMap.size() <= groupOffset) {
+      return null;
+    }
+
+    if (orderedGroups == null) {
+      buildSortedSet();
+    }
+
+    final Collection<SearchGroup<T>> result = new ArrayList<>();
+    int upto = 0;
+    final int sortFieldCount = comparators.length;
+    for(CollectedSearchGroup<T> group : orderedGroups) {
+      if (upto++ < groupOffset) {
+        continue;
+      }
+      //System.out.println("  group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString()));
+      SearchGroup<T> searchGroup = new SearchGroup<>();
+      searchGroup.groupValue = group.groupValue;
+      if (fillFields) {
+        searchGroup.sortValues = new Object[sortFieldCount];
+        for(int sortFieldIDX=0;sortFieldIDX<sortFieldCount;sortFieldIDX++) {
+          searchGroup.sortValues[sortFieldIDX] = comparators[sortFieldIDX].value(group.comparatorSlot);
+        }
+      }
+      result.add(searchGroup);
+    }
+    //System.out.println("  return " + result.size() + " groups");
+    return result;
+  }
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+    for (LeafFieldComparator comparator : leafComparators) {
+      comparator.setScorer(scorer);
+    }
+  }
+
+  @Override
+  public void collect(int doc) throws IOException {
+    //System.out.println("FP.collect doc=" + doc);
+
+    // If orderedGroups != null we already have collected N groups and
+    // can short circuit by comparing this document to the bottom group,
+    // without having to find what group this document belongs to.
+    
+    // Even if this document belongs to a group in the top N, we'll know that
+    // we don't have to update that group.
+
+    // Downside: if the number of unique groups is very low, this is
+    // wasted effort as we will most likely be updating an existing group.
+    if (orderedGroups != null) {
+      for (int compIDX = 0;; compIDX++) {
+        final int c = reversed[compIDX] * leafComparators[compIDX].compareBottom(doc);
+        if (c < 0) {
+          // Definitely not competitive. So don't even bother to continue
+          return;
+        } else if (c > 0) {
+          // Definitely competitive.
+          break;
+        } else if (compIDX == compIDXEnd) {
+          // Here c=0. If we're at the last comparator, this doc is not
+          // competitive, since docs are visited in doc Id order, which means
+          // this doc cannot compete with any other document in the queue.
+          return;
+        }
+      }
+    }
+
+    // TODO: should we add option to mean "ignore docs that
+    // don't have the group field" (instead of stuffing them
+    // under null group)?
+    final T groupValue = getDocGroupValue(doc);
+
+    final CollectedSearchGroup<T> group = groupMap.get(groupValue);
+
+    if (group == null) {
+
+      // First time we are seeing this group, or, we've seen
+      // it before but it fell out of the top N and is now
+      // coming back
+
+      if (groupMap.size() < topNGroups) {
+
+        // Still in startup transient: we have not
+        // seen enough unique groups to start pruning them;
+        // just keep collecting them
+
+        // Add a new CollectedSearchGroup:
+        CollectedSearchGroup<T> sg = new CollectedSearchGroup<>();
+        sg.groupValue = copyDocGroupValue(groupValue, null);
+        sg.comparatorSlot = groupMap.size();
+        sg.topDoc = docBase + doc;
+        for (LeafFieldComparator fc : leafComparators) {
+          fc.copy(sg.comparatorSlot, doc);
+        }
+        groupMap.put(sg.groupValue, sg);
+
+        if (groupMap.size() == topNGroups) {
+          // End of startup transient: we now have max
+          // number of groups; from here on we will drop
+          // bottom group when we insert new one:
+          buildSortedSet();
+        }
+
+        return;
+      }
+
+      // We already tested that the document is competitive, so replace
+      // the bottom group with this new group.
+      final CollectedSearchGroup<T> bottomGroup = orderedGroups.pollLast();
+      assert orderedGroups.size() == topNGroups -1;
+
+      groupMap.remove(bottomGroup.groupValue);
+
+      // reuse the removed CollectedSearchGroup
+      bottomGroup.groupValue = copyDocGroupValue(groupValue, bottomGroup.groupValue);
+      bottomGroup.topDoc = docBase + doc;
+
+      for (LeafFieldComparator fc : leafComparators) {
+        fc.copy(bottomGroup.comparatorSlot, doc);
+      }
+
+      groupMap.put(bottomGroup.groupValue, bottomGroup);
+      orderedGroups.add(bottomGroup);
+      assert orderedGroups.size() == topNGroups;
+
+      final int lastComparatorSlot = orderedGroups.last().comparatorSlot;
+      for (LeafFieldComparator fc : leafComparators) {
+        fc.setBottom(lastComparatorSlot);
+      }
+
+      return;
+    }
+
+    // Update existing group:
+    for (int compIDX = 0;; compIDX++) {
+      leafComparators[compIDX].copy(spareSlot, doc);
+
+      final int c = reversed[compIDX] * comparators[compIDX].compare(group.comparatorSlot, spareSlot);
+      if (c < 0) {
+        // Definitely not competitive.
+        return;
+      } else if (c > 0) {
+        // Definitely competitive; set remaining comparators:
+        for (int compIDX2=compIDX+1; compIDX2<comparators.length; compIDX2++) {
+          leafComparators[compIDX2].copy(spareSlot, doc);
+        }
+        break;
+      } else if (compIDX == compIDXEnd) {
+        // Here c=0. If we're at the last comparator, this doc is not
+        // competitive, since docs are visited in doc Id order, which means
+        // this doc cannot compete with any other document in the queue.
+        return;
+      }
+    }
+
+    // Remove before updating the group since lookup is done via comparators
+    // TODO: optimize this
+
+    final CollectedSearchGroup<T> prevLast;
+    if (orderedGroups != null) {
+      prevLast = orderedGroups.last();
+      orderedGroups.remove(group);
+      assert orderedGroups.size() == topNGroups-1;
+    } else {
+      prevLast = null;
+    }
+
+    group.topDoc = docBase + doc;
+
+    // Swap slots
+    final int tmp = spareSlot;
+    spareSlot = group.comparatorSlot;
+    group.comparatorSlot = tmp;
+
+    // Re-add the changed group
+    if (orderedGroups != null) {
+      orderedGroups.add(group);
+      assert orderedGroups.size() == topNGroups;
+      final CollectedSearchGroup<?> newLast = orderedGroups.last();
+      // If we changed the value of the last group, or changed which group was last, then update bottom:
+      if (group == newLast || prevLast != newLast) {
+        for (LeafFieldComparator fc : leafComparators) {
+          fc.setBottom(newLast.comparatorSlot);
+        }
+      }
+    }
+  }
+
+  private void buildSortedSet() throws IOException {
+    final Comparator<CollectedSearchGroup<?>> comparator = new Comparator<CollectedSearchGroup<?>>() {
+      @Override
+      public int compare(CollectedSearchGroup<?> o1, CollectedSearchGroup<?> o2) {
+        for (int compIDX = 0;; compIDX++) {
+          FieldComparator<?> fc = comparators[compIDX];
+          final int c = reversed[compIDX] * fc.compare(o1.comparatorSlot, o2.comparatorSlot);
+          if (c != 0) {
+            return c;
+          } else if (compIDX == compIDXEnd) {
+            return o1.topDoc - o2.topDoc;
+          }
+        }
+      }
+    };
+
+    orderedGroups = new TreeSet<>(comparator);
+    orderedGroups.addAll(groupMap.values());
+    assert orderedGroups.size() > 0;
+
+    for (LeafFieldComparator fc : leafComparators) {
+      fc.setBottom(orderedGroups.last().comparatorSlot);
+    }
+  }
+
+  @Override
+  protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
+    docBase = readerContext.docBase;
+    for (int i=0; i<comparators.length; i++) {
+      leafComparators[i] = comparators[i].getLeafComparator(readerContext);
+    }
+  }
+
+  /**
+   * Returns the group value for the specified doc.
+   *
+   * @param doc The specified doc
+   * @return the group value for the specified doc
+   */
+  protected abstract T getDocGroupValue(int doc) throws IOException;
+
+  /**
+   * Returns a copy of the specified group value, either by creating a new instance and copying
+   * the specified groupValue into it, or, if reuse is non-null, by copying the value into the reuse instance.
+   *
+   * @param groupValue The group value to copy
+   * @param reuse Optionally a reuse instance to prevent a new instance creation
+   * @return a copy of the specified group value
+   */
+  protected abstract T copyDocGroupValue(T groupValue, T reuse);
+
+}
+
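
To round out the rename, a sketch of wiring the new FirstPassGroupingCollector into a
search. Only getDocGroupValue and copyDocGroupValue are left to subclasses; the group
key and collector name below are invented for illustration, and the second pass
(presumably renamed the same way in this commit) would consume getTopGroups() just as
the Abstract version did:

import java.io.IOException;
import java.util.Collection;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.grouping.FirstPassGroupingCollector;
import org.apache.lucene.search.grouping.SearchGroup;

// Toy subclass: groups by (leaf-local doc % 8). Integer is immutable, so
// copyDocGroupValue can return the value as-is and ignore reuse.
public class ModFirstPassGroupingCollector extends FirstPassGroupingCollector<Integer> {

  public ModFirstPassGroupingCollector(Sort groupSort, int topNGroups) throws IOException {
    super(groupSort, topNGroups);
  }

  @Override
  protected Integer getDocGroupValue(int doc) throws IOException {
    return doc % 8; // illustrative group key; leaf-local ids are enough for a toy
  }

  @Override
  protected Integer copyDocGroupValue(Integer groupValue, Integer reuse) {
    return groupValue;
  }

  // Usage sketch: collect the top 10 groups by relevance.
  public static Collection<SearchGroup<Integer>> topGroups(
      IndexSearcher searcher, Query query) throws IOException {
    ModFirstPassGroupingCollector collector =
        new ModFirstPassGroupingCollector(Sort.RELEVANCE, 10);
    searcher.search(query, collector);
    return collector.getTopGroups(0, true); // may be null: no groups, or <= offset unique groups
  }
}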

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java
index a310703..48f12aa 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java
@@ -21,10 +21,10 @@ import org.apache.lucene.search.ScoreDoc;
 /** Represents one group in the results.
  * 
  * @lucene.experimental */
-public class GroupDocs<GROUP_VALUE_TYPE> {
+public class GroupDocs<T> {
   /** The groupField value for all docs in this group; this
    *  may be null if hits did not have the groupField. */
-  public final GROUP_VALUE_TYPE groupValue;
+  public final T groupValue;
 
   /** Max score in this group */
   public final float maxScore;
@@ -42,14 +42,14 @@ public class GroupDocs<GROUP_VALUE_TYPE> {
   public final int totalHits;
 
   /** Matches the groupSort passed to {@link
-   *  AbstractFirstPassGroupingCollector}. */
+   *  FirstPassGroupingCollector}. */
   public final Object[] groupSortValues;
 
   public GroupDocs(float score,
                    float maxScore,
                    int totalHits,
                    ScoreDoc[] scoreDocs,
-                   GROUP_VALUE_TYPE groupValue,
+                   T groupValue,
                    Object[] groupSortValues) {
     this.score = score;
     this.maxScore = maxScore;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupFacetCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupFacetCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupFacetCollector.java
new file mode 100644
index 0000000..fc6ef96
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupFacetCollector.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search.grouping;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.PriorityQueue;
+
+/**
+ * Base class for computing grouped facets.
+ *
+ * @lucene.experimental
+ */
+public abstract class GroupFacetCollector extends SimpleCollector {
+
+  protected final String groupField;
+  protected final String facetField;
+  protected final BytesRef facetPrefix;
+  protected final List<SegmentResult> segmentResults;
+
+  protected int[] segmentFacetCounts;
+  protected int segmentTotalCount;
+  protected int startFacetOrd;
+  protected int endFacetOrd;
+
+  protected GroupFacetCollector(String groupField, String facetField, BytesRef facetPrefix) {
+    this.groupField = groupField;
+    this.facetField = facetField;
+    this.facetPrefix = facetPrefix;
+    segmentResults = new ArrayList<>();
+  }
+
+  /**
+   * Returns the grouped facet results, merging the facet counts computed
+   * for each segment into a single result.
+   *
+   * @param size The total number of facets to include. This is typically offset + limit.
+   * @param minCount The minimum count a facet entry should have to be included in the grouped facet result
+   * @param orderByCount Whether to sort the facet entries by facet entry count. If <code>false</code> then the facets
+   *                     are sorted lexicographically in ascending order.
+   * @return grouped facet results
+   * @throws IOException If I/O related errors occur during merging segment grouped facet counts.
+   */
+  public GroupedFacetResult mergeSegmentResults(int size, int minCount, boolean orderByCount) throws IOException {
+    if (segmentFacetCounts != null) {
+      segmentResults.add(createSegmentResult());
+      segmentFacetCounts = null; // reset
+    }
+
+    int totalCount = 0;
+    int missingCount = 0;
+    SegmentResultPriorityQueue segments = new SegmentResultPriorityQueue(segmentResults.size());
+    for (SegmentResult segmentResult : segmentResults) {
+      missingCount += segmentResult.missing;
+      if (segmentResult.mergePos >= segmentResult.maxTermPos) {
+        continue;
+      }
+      totalCount += segmentResult.total;
+      segments.add(segmentResult);
+    }
+
+    GroupedFacetResult facetResult = new GroupedFacetResult(size, minCount, orderByCount, totalCount, missingCount);
+    while (segments.size() > 0) {
+      SegmentResult segmentResult = segments.top();
+      BytesRef currentFacetValue = BytesRef.deepCopyOf(segmentResult.mergeTerm);
+      int count = 0;
+
+      do {
+        count += segmentResult.counts[segmentResult.mergePos++];
+        if (segmentResult.mergePos < segmentResult.maxTermPos) {
+          segmentResult.nextTerm();
+          segmentResult = segments.updateTop();
+        } else {
+          segments.pop();
+          segmentResult = segments.top();
+          if (segmentResult == null) {
+            break;
+          }
+        }
+      } while (currentFacetValue.equals(segmentResult.mergeTerm));
+      facetResult.addFacetCount(currentFacetValue, count);
+    }
+    return facetResult;
+  }
+
+  protected abstract SegmentResult createSegmentResult() throws IOException;
+
+  @Override
+  public void setScorer(Scorer scorer) throws IOException {
+  }
+
+  @Override
+  public boolean needsScores() {
+    return false;
+  }
+
+  /**
+   * The grouped facet result, containing the grouped facet entries, the total count and the total missing count.
+   */
+  public static class GroupedFacetResult {
+
+    private final static Comparator<FacetEntry> orderByCountAndValue = new Comparator<FacetEntry>() {
+
+      @Override
+      public int compare(FacetEntry a, FacetEntry b) {
+        int cmp = b.count - a.count; // Highest count first!
+        if (cmp != 0) {
+          return cmp;
+        }
+        return a.value.compareTo(b.value);
+      }
+
+    };
+
+    private final static Comparator<FacetEntry> orderByValue = new Comparator<FacetEntry>() {
+
+      @Override
+      public int compare(FacetEntry a, FacetEntry b) {
+        return a.value.compareTo(b.value);
+      }
+
+    };
+
+    private final int maxSize;
+    private final NavigableSet<FacetEntry> facetEntries;
+    private final int totalMissingCount;
+    private final int totalCount;
+
+    private int currentMin;
+
+    public GroupedFacetResult(int size, int minCount, boolean orderByCount, int totalCount, int totalMissingCount) {
+      this.facetEntries = new TreeSet<>(orderByCount ? orderByCountAndValue : orderByValue);
+      this.totalMissingCount = totalMissingCount;
+      this.totalCount = totalCount;
+      maxSize = size;
+      currentMin = minCount;
+    }
+
+    public void addFacetCount(BytesRef facetValue, int count) {
+      if (count < currentMin) {
+        return;
+      }
+
+      FacetEntry facetEntry = new FacetEntry(facetValue, count);
+      if (facetEntries.size() == maxSize) {
+        if (facetEntries.higher(facetEntry) == null) {
+          return;
+        }
+        facetEntries.pollLast();
+      }
+      facetEntries.add(facetEntry);
+
+      if (facetEntries.size() == maxSize) {
+        currentMin = facetEntries.last().count;
+      }
+    }
+
+    /**
+     * Returns a list of facet entries to be rendered based on the specified offset and limit.
+     * The facet entries are retrieved from the facet entries collected during merging.
+     *
+     * @param offset The offset in the collected facet entries during merging
+     * @param limit The number of facets to return starting from the offset.
+     * @return a list of facet entries to be rendered based on the specified offset and limit
+     */
+    public List<FacetEntry> getFacetEntries(int offset, int limit) {
+      List<FacetEntry> entries = new LinkedList<>();
+
+      int skipped = 0;
+      int included = 0;
+      for (FacetEntry facetEntry : facetEntries) {
+        if (skipped < offset) {
+          skipped++;
+          continue;
+        }
+        if (included++ >= limit) {
+          break;
+        }
+        entries.add(facetEntry);
+      }
+      return entries;
+    }
+
+    /**
+     * Returns the sum of all facet entries counts.
+     *
+     * @return the sum of all facet entries counts
+     */
+    public int getTotalCount() {
+      return totalCount;
+    }
+
+    /**
+     * Returns the number of groups that didn't have a facet value.
+     *
+     * @return the number of groups that didn't have a facet value
+     */
+    public int getTotalMissingCount() {
+      return totalMissingCount;
+    }
+  }
+
+  /**
+   * Represents a facet entry with a value and a count.
+   */
+  public static class FacetEntry {
+
+    private final BytesRef value;
+    private final int count;
+
+    public FacetEntry(BytesRef value, int count) {
+      this.value = value;
+      this.count = count;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      FacetEntry that = (FacetEntry) o;
+
+      if (count != that.count) return false;
+      if (!value.equals(that.value)) return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = value.hashCode();
+      result = 31 * result + count;
+      return result;
+    }
+
+    @Override
+    public String toString() {
+      return "FacetEntry{" +
+          "value=" + value.utf8ToString() +
+          ", count=" + count +
+          '}';
+    }
+
+    /**
+     * @return The value of this facet entry
+     */
+    public BytesRef getValue() {
+      return value;
+    }
+
+    /**
+     * @return The count (number of groups) of this facet entry.
+     */
+    public int getCount() {
+      return count;
+    }
+  }
+
+  /**
+   * Contains the local grouped segment counts for a particular segment.
+   * The <code>SegmentResult</code> instances of all segments are merged together to produce the final counts.
+   */
+  protected abstract static class SegmentResult {
+
+    protected final int[] counts;
+    protected final int total;
+    protected final int missing;
+    protected final int maxTermPos;
+
+    protected BytesRef mergeTerm;
+    protected int mergePos;
+
+    protected SegmentResult(int[] counts, int total, int missing, int maxTermPos) {
+      this.counts = counts;
+      this.total = total;
+      this.missing = missing;
+      this.maxTermPos = maxTermPos;
+    }
+
+    /**
+     * Advances to the next term in this <code>SegmentResult</code> in order to retrieve its grouped facet counts.
+     *
+     * @throws IOException If I/O related errors occur
+     */
+    protected abstract void nextTerm() throws IOException;
+
+  }
+
+  private static class SegmentResultPriorityQueue extends PriorityQueue<SegmentResult> {
+
+    SegmentResultPriorityQueue(int maxSize) {
+      super(maxSize);
+    }
+
+    @Override
+    protected boolean lessThan(SegmentResult a, SegmentResult b) {
+      return a.mergeTerm.compareTo(b.mergeTerm) < 0;
+    }
+  }
+
+}

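As a hedged usage sketch (the TermGroupFacetCollector factory below exists in this module, but the field names and sizes are illustrative), the collector runs like any other collector and is then asked to merge its per-segment results:

    GroupFacetCollector gfc = TermGroupFacetCollector.createTermGroupFacetCollector(
        "author", "category", false, null, 128); // groupField, facetField, multivalued?, prefix, initialSize
    searcher.search(new MatchAllDocsQuery(), gfc);

    // size is typically offset + limit; minCount drops low-frequency entries.
    GroupFacetCollector.GroupedFacetResult result = gfc.mergeSegmentResults(10, 1, true);
    for (GroupFacetCollector.FacetEntry e : result.getFacetEntries(0, 10)) {
      System.out.println(e.getValue().utf8ToString() + " -> " + e.getCount());
    }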

[06/43] lucene-solr:jira/solr-8593: LUCENE-7610: Deprecate ValueSource methods in facets module

Posted by kr...@apache.org.
LUCENE-7610: Deprecate ValueSource methods in facets module


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/713b65d1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/713b65d1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/713b65d1

Branch: refs/heads/jira/solr-8593
Commit: 713b65d1dcc80c1fe147a5bf999e1a88b63b9dce
Parents: 8b05538
Author: Alan Woodward <ro...@apache.org>
Authored: Fri Jan 6 10:41:12 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Jan 7 11:52:24 2017 +0000

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   3 +
 .../demo/facet/DistanceFacetsExample.java       |  18 +--
 .../org/apache/lucene/facet/package-info.java   |   2 +-
 .../apache/lucene/facet/range/DoubleRange.java  |  33 ++++--
 .../facet/range/DoubleRangeFacetCounts.java     |  80 ++++++++-----
 .../apache/lucene/facet/range/LongRange.java    |  34 ++++--
 .../facet/range/LongRangeFacetCounts.java       |  47 +++++---
 .../org/apache/lucene/facet/range/Range.java    |  31 ------
 .../lucene/facet/taxonomy/FakeScorer.java       |  53 ---------
 .../taxonomy/TaxonomyFacetSumValueSource.java   |  99 ++++++++++++-----
 .../facet/range/TestRangeFacetCounts.java       |  53 +++++----
 .../TestTaxonomyFacetSumValueSource.java        |   5 +-
 .../lucene/queries/function/ValueSource.java    | 111 +++++++++++++++++++
 13 files changed, 361 insertions(+), 208 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index c667040..d0bedb7 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -79,6 +79,9 @@ API Changes
   replaced with Expression#getDoubleValuesSource(). (Alan Woodward, Adrien
   Grand)
 
+* LUCENE-7610: The facets module now uses the DoubleValuesSource API, and
+  methods that take ValueSource parameters are deprecated (Alan Woodward)
+
 New features
 
 * LUCENE-5867: Added BooleanSimilarity. (Robert Muir, Adrien Grand)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
----------------------------------------------------------------------
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
index 7d029ee..fa7ce83 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/DistanceFacetsExample.java
@@ -16,9 +16,13 @@
  */
 package org.apache.lucene.demo.facet;
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.text.ParseException;
+
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
-import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.Expression;
 import org.apache.lucene.expressions.SimpleBindings;
@@ -36,9 +40,9 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
@@ -48,10 +52,6 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.SloppyMath;
 
-import java.io.Closeable;
-import java.io.IOException;
-import java.text.ParseException;
-
 /** Shows simple usage of dynamic range faceting, using the
  *  expressions module to calculate distance. */
 public class DistanceFacetsExample implements Closeable {
@@ -117,7 +117,7 @@ public class DistanceFacetsExample implements Closeable {
     writer.close();
   }
 
-  private ValueSource getDistanceValueSource() {
+  private DoubleValuesSource getDistanceValueSource() {
     Expression distance;
     try {
       distance = JavascriptCompiler.compile(
@@ -130,7 +130,7 @@ public class DistanceFacetsExample implements Closeable {
     bindings.add(new SortField("latitude", SortField.Type.DOUBLE));
     bindings.add(new SortField("longitude", SortField.Type.DOUBLE));
 
-    return distance.getValueSource(bindings);
+    return distance.getDoubleValuesSource(bindings);
   }
 
   /** Given a latitude and longitude (in degrees) and the
@@ -224,7 +224,7 @@ public class DistanceFacetsExample implements Closeable {
     // Passing no baseQuery means we drill down on all
     // documents ("browse only"):
     DrillDownQuery q = new DrillDownQuery(null);
-    final ValueSource vs = getDistanceValueSource();
+    final DoubleValuesSource vs = getDistanceValueSource();
     q.add("field", range.getQuery(getBoundingBoxQuery(ORIGIN_LATITUDE, ORIGIN_LONGITUDE, range.max), vs));
     DrillSideways ds = new DrillSideways(searcher, config, (TaxonomyReader) null) {
         @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/java/org/apache/lucene/facet/package-info.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/package-info.java b/lucene/facet/src/java/org/apache/lucene/facet/package-info.java
index 0501d6a..acbdd44 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/package-info.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/package-info.java
@@ -40,7 +40,7 @@
  * 
  *  <li> Range faceting {@link org.apache.lucene.facet.range.LongRangeFacetCounts}, {@link
  *       org.apache.lucene.facet.range.DoubleRangeFacetCounts} compute counts for a dynamic numeric
- *       range from a provided {@link org.apache.lucene.queries.function.ValueSource} (previously indexed
+ *       range from a provided {@link org.apache.lucene.search.LongValuesSource} (previously indexed
  *       numeric field, or a dynamic expression such as distance).
  * </ul>
  * <p>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
index 2203be3..ce377f5 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRange.java
@@ -17,16 +17,16 @@
 package org.apache.lucene.facet.range;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.Objects;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.ConstantScoreScorer;
 import org.apache.lucene.search.ConstantScoreWeight;
 import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
@@ -95,9 +95,9 @@ public final class DoubleRange extends Range {
   private static class ValueSourceQuery extends Query {
     private final DoubleRange range;
     private final Query fastMatchQuery;
-    private final ValueSource valueSource;
+    private final DoubleValuesSource valueSource;
 
-    ValueSourceQuery(DoubleRange range, Query fastMatchQuery, ValueSource valueSource) {
+    ValueSourceQuery(DoubleRange range, Query fastMatchQuery, DoubleValuesSource valueSource) {
       this.range = range;
       this.fastMatchQuery = fastMatchQuery;
       this.valueSource = valueSource;
@@ -158,11 +158,11 @@ public final class DoubleRange extends Range {
             approximation = s.iterator();
           }
 
-          final FunctionValues values = valueSource.getValues(Collections.emptyMap(), context);
+          final DoubleValues values = valueSource.getValues(context, null);
           final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
             @Override
             public boolean matches() throws IOException {
-              return range.accept(values.doubleVal(approximation.docID()));
+              return values.advanceExact(approximation.docID()) && range.accept(values.doubleValue());
             }
 
             @Override
@@ -177,8 +177,27 @@ public final class DoubleRange extends Range {
 
   }
 
-  @Override
+  /**
+   * @deprecated Use {@link #getQuery(Query, DoubleValuesSource)}
+   */
+  @Deprecated
   public Query getQuery(final Query fastMatchQuery, final ValueSource valueSource) {
+    return new ValueSourceQuery(this, fastMatchQuery, valueSource.asDoubleValuesSource());
+  }
+
+  /**
+   * Create a Query that matches documents in this range.
+   *
+   * The query will check all documents that match the provided match query,
+   * or every document in the index if the match query is null.
+   *
+   * If the value source is static, e.g. an indexed numeric field, it may be
+   * faster to use {@link org.apache.lucene.search.PointRangeQuery}.
+   *
+   * @param fastMatchQuery a query to use as a filter
+   * @param valueSource    the source of values for the range check
+   */
+  public Query getQuery(Query fastMatchQuery, DoubleValuesSource valueSource) {
     return new ValueSourceQuery(this, fastMatchQuery, valueSource);
   }
 }

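A short sketch of the new overload, assuming an indexed double doc values field named "distance"; the deprecated ValueSource overload above now simply bridges through asDoubleValuesSource():

    DoubleRange range = new DoubleRange("< 5 km", 0.0, true, 5.0, false);
    DoubleValuesSource distance = DoubleValuesSource.fromDoubleField("distance");
    Query q = range.getQuery(null, distance); // null fastMatchQuery: every doc is checked
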
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
index 2d0ba5c..63fc935 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/DoubleRangeFacetCounts.java
@@ -17,22 +17,18 @@
 package org.apache.lucene.facet.range;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.List;
 
-import org.apache.lucene.document.DoubleDocValuesField; // javadocs
-import org.apache.lucene.document.FloatDocValuesField; // javadocs
+import org.apache.lucene.document.FloatDocValuesField;
 import org.apache.lucene.facet.Facets;
-import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.facet.FacetsCollector;
+import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
-import org.apache.lucene.queries.function.valuesource.FloatFieldSource; // javadocs
-import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
@@ -41,46 +37,70 @@ import org.apache.lucene.util.NumericUtils;
 
 /** {@link Facets} implementation that computes counts for
  *  dynamic double ranges from a provided {@link
- *  ValueSource}, using {@link FunctionValues#doubleVal}.  Use
- *  this for dimensions that change in real-time (e.g. a
+ *  DoubleValuesSource}.  Use this for dimensions that change in real-time (e.g. a
  *  relative time based dimension like "Past day", "Past 2
  *  days", etc.) or that change for each request (e.g.
  *  distance from the user's location, "&lt; 1 km", "&lt; 2 km",
  *  etc.).
  *
- *  <p> If you had indexed your field using {@link
- *  FloatDocValuesField} then pass {@link FloatFieldSource}
- *  as the {@link ValueSource}; if you used {@link
- *  DoubleDocValuesField} then pass {@link
- *  DoubleFieldSource} (this is the default used when you
- *  pass just a the field name).
+ *  If you have indexed your field using {@link
+ *  FloatDocValuesField}, then you should use a DoubleValuesSource
+ *  generated from {@link DoubleValuesSource#fromFloatField(String)}.
  *
  *  @lucene.experimental */
 public class DoubleRangeFacetCounts extends RangeFacetCounts {
 
-  /** Create {@code RangeFacetCounts}, using {@link
-   *  DoubleFieldSource} from the specified field. */
+  /**
+   * Create {@code RangeFacetCounts}, using {@link DoubleValues} from the specified field.
+   *
+   * N.B. This assumes that the field was indexed with {@link org.apache.lucene.document.DoubleDocValuesField}.
+   * For float-valued fields, use {@link #DoubleRangeFacetCounts(String, DoubleValuesSource, FacetsCollector, DoubleRange...)}.
+   */
   public DoubleRangeFacetCounts(String field, FacetsCollector hits, DoubleRange... ranges) throws IOException {
-    this(field, new DoubleFieldSource(field), hits, ranges);
+    this(field, DoubleValuesSource.fromDoubleField(field), hits, ranges);
   }
 
-  /** Create {@code RangeFacetCounts}, using the provided
-   *  {@link ValueSource}. */
+  /**
+   * Create {@code RangeFacetCounts}, using the provided {@link ValueSource}.
+   *
+   * @deprecated Use {@link #DoubleRangeFacetCounts(String, DoubleValuesSource, FacetsCollector, DoubleRange...)}
+   */
   public DoubleRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, DoubleRange... ranges) throws IOException {
     this(field, valueSource, hits, null, ranges);
   }
 
-  /** Create {@code RangeFacetCounts}, using the provided
-   *  {@link ValueSource}, and using the provided Query as
-   *  a fastmatch: only documents passing the filter are
-   *  checked for the matching ranges.  The filter must be
-   *  random access (implement {@link DocIdSet#bits}). */
+  /**
+   * Create {@code RangeFacetCounts} using the provided {@link DoubleValuesSource}
+   */
+  public DoubleRangeFacetCounts(String field, DoubleValuesSource valueSource, FacetsCollector hits, DoubleRange... ranges) throws IOException {
+    this(field, valueSource, hits, null, ranges);
+  }
+
+  /**
+   * Create {@code RangeFacetCounts}, using the provided
+   * {@link ValueSource}, and using the provided Query as
+   * a fastmatch: only documents matching the query are
+   * checked for the matching ranges.
+   *
+   * @deprecated Use {@link #DoubleRangeFacetCounts(String, DoubleValuesSource, FacetsCollector, Query, DoubleRange...)}
+   */
+  @Deprecated
   public DoubleRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, Query fastMatchQuery, DoubleRange... ranges) throws IOException {
+    this(field, valueSource.asDoubleValuesSource(), hits, fastMatchQuery, ranges);
+  }
+
+  /**
+   * Create {@code RangeFacetCounts}, using the provided
+   * {@link DoubleValuesSource}, and using the provided Query as
+   * a fastmatch: only documents matching the query are
+   * checked for the matching ranges.
+   */
+  public DoubleRangeFacetCounts(String field, DoubleValuesSource valueSource, FacetsCollector hits, Query fastMatchQuery, DoubleRange... ranges) throws IOException {
     super(field, ranges, fastMatchQuery);
     count(valueSource, hits.getMatchingDocs());
   }
 
-  private void count(ValueSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {
+  private void count(DoubleValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {
 
     DoubleRange[] ranges = (DoubleRange[]) this.ranges;
 
@@ -96,7 +116,7 @@ public class DoubleRangeFacetCounts extends RangeFacetCounts {
 
     int missingCount = 0;
     for (MatchingDocs hits : matchingDocs) {
-      FunctionValues fv = valueSource.getValues(Collections.emptyMap(), hits.context);
+      DoubleValues fv = valueSource.getValues(hits.context, null);
       
       totCount += hits.totalHits;
       final DocIdSetIterator fastMatchDocs;
@@ -129,8 +149,8 @@ public class DoubleRangeFacetCounts extends RangeFacetCounts {
           }
         }
         // Skip missing docs:
-        if (fv.exists(doc)) {
-          counter.add(NumericUtils.doubleToSortableLong(fv.doubleVal(doc)));
+        if (fv.advanceExact(doc)) {
+          counter.add(NumericUtils.doubleToSortableLong(fv.doubleValue()));
         } else {
           missingCount++;
         }

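Migration for callers is mechanical; a before/after sketch with an illustrative field name ("price"):

    // Before (now deprecated):
    //   Facets facets = new DoubleRangeFacetCounts("price", new DoubleFieldSource("price"), fc, ranges);
    // After:
    Facets facets = new DoubleRangeFacetCounts(
        "price", DoubleValuesSource.fromDoubleField("price"), fc, ranges);
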
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java
index 9c12ecd..20c408d 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRange.java
@@ -17,17 +17,17 @@
 package org.apache.lucene.facet.range;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.Objects;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.ConstantScoreScorer;
 import org.apache.lucene.search.ConstantScoreWeight;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LongValues;
+import org.apache.lucene.search.LongValuesSource;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.TwoPhaseIterator;
@@ -87,9 +87,9 @@ public final class LongRange extends Range {
   private static class ValueSourceQuery extends Query {
     private final LongRange range;
     private final Query fastMatchQuery;
-    private final ValueSource valueSource;
+    private final LongValuesSource valueSource;
 
-    ValueSourceQuery(LongRange range, Query fastMatchQuery, ValueSource valueSource) {
+    ValueSourceQuery(LongRange range, Query fastMatchQuery, LongValuesSource valueSource) {
       this.range = range;
       this.fastMatchQuery = fastMatchQuery;
       this.valueSource = valueSource;
@@ -150,11 +150,11 @@ public final class LongRange extends Range {
             approximation = s.iterator();
           }
 
-          final FunctionValues values = valueSource.getValues(Collections.emptyMap(), context);
+          final LongValues values = valueSource.getValues(context, null);
           final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
             @Override
             public boolean matches() throws IOException {
-              return range.accept(values.longVal(approximation.docID()));
+              return values.advanceExact(approximation.docID()) && range.accept(values.longValue());
             }
 
             @Override
@@ -169,8 +169,28 @@ public final class LongRange extends Range {
 
   }
 
-  @Override
+
+  /**
+   * @deprecated Use {@link #getQuery(Query, LongValuesSource)}
+   */
+  @Deprecated
   public Query getQuery(final Query fastMatchQuery, final ValueSource valueSource) {
+    return new ValueSourceQuery(this, fastMatchQuery, valueSource.asLongValuesSource());
+  }
+
+  /**
+   * Create a Query that matches documents in this range.
+   *
+   * The query will check all documents that match the provided match query,
+   * or every document in the index if the match query is null.
+   *
+   * If the value source is static, e.g. an indexed numeric field, it may be
+   * faster to use {@link org.apache.lucene.search.PointRangeQuery}.
+   *
+   * @param fastMatchQuery a query to use as a filter
+   * @param valueSource    the source of values for the range check
+   */
+  public Query getQuery(Query fastMatchQuery, LongValuesSource valueSource) {
     return new ValueSourceQuery(this, fastMatchQuery, valueSource);
   }
 }

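The long-valued variant mirrors DoubleRange: old ValueSource callers are routed through asLongValuesSource(), and new code passes a LongValuesSource directly. A sketch, where the "timestamp" field and nowSec are illustrative:

    long nowSec = System.currentTimeMillis() / 1000; // illustrative "current time" in seconds
    LongRange pastDay = new LongRange("past day", nowSec - 86400L, true, nowSec, true);
    Query q = pastDay.getQuery(null, LongValuesSource.fromLongField("timestamp"));
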
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
index 0512ab3..a3cfc71 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
@@ -17,27 +17,25 @@
 package org.apache.lucene.facet.range;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.lucene.facet.Facets;
-import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.facet.FacetsCollector;
+import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LongValues;
+import org.apache.lucene.search.LongValuesSource;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 
 /** {@link Facets} implementation that computes counts for
- *  dynamic long ranges from a provided {@link ValueSource},
- *  using {@link FunctionValues#longVal}.  Use
+ *  dynamic long ranges from a provided {@link LongValuesSource}.  Use
  *  this for dimensions that change in real-time (e.g. a
  *  relative time based dimension like "Past day", "Past 2
  *  days", etc.) or that change for each request (e.g. 
@@ -48,28 +46,49 @@ import org.apache.lucene.search.Weight;
 public class LongRangeFacetCounts extends RangeFacetCounts {
 
   /** Create {@code LongRangeFacetCounts}, using {@link
-   *  LongFieldSource} from the specified field. */
+   *  LongValuesSource} from the specified field. */
   public LongRangeFacetCounts(String field, FacetsCollector hits, LongRange... ranges) throws IOException {
-    this(field, new LongFieldSource(field), hits, ranges);
+    this(field, LongValuesSource.fromLongField(field), hits, ranges);
+  }
+
+  /**
+   * Create {@code RangeFacetCounts}, using the provided {@link ValueSource}.
+   *
+   * @deprecated Use {@link #LongRangeFacetCounts(String, LongValuesSource, FacetsCollector, LongRange...)}
+   */
+  @Deprecated
+  public LongRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, LongRange... ranges) throws IOException {
+    this(field, valueSource.asLongValuesSource(), hits, null, ranges);
   }
 
   /** Create {@code RangeFacetCounts}, using the provided
    *  {@link ValueSource}. */
-  public LongRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, LongRange... ranges) throws IOException {
+  public LongRangeFacetCounts(String field, LongValuesSource valueSource, FacetsCollector hits, LongRange... ranges) throws IOException {
     this(field, valueSource, hits, null, ranges);
   }
 
+  /**
+   * Create {@code RangeFacetCounts}, using the provided {@link ValueSource}.
+   *
+   * @deprecated Use {@link #LongRangeFacetCounts(String, LongValuesSource, FacetsCollector, Query, LongRange...)}
+   */
+  @Deprecated
+  public LongRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, Query fastMatchQuery, LongRange... ranges) throws IOException {
+    this(field, valueSource.asLongValuesSource(), hits, fastMatchQuery, ranges);
+  }
+
+
   /** Create {@code RangeFacetCounts}, using the provided
    *  {@link ValueSource}, and using the provided Filter as
    *  a fastmatch: only documents passing the filter are
    *  checked for the matching ranges.  The filter must be
    *  random access (implement {@link DocIdSet#bits}). */
-  public LongRangeFacetCounts(String field, ValueSource valueSource, FacetsCollector hits, Query fastMatchQuery, LongRange... ranges) throws IOException {
+  public LongRangeFacetCounts(String field, LongValuesSource valueSource, FacetsCollector hits, Query fastMatchQuery, LongRange... ranges) throws IOException {
     super(field, ranges, fastMatchQuery);
     count(valueSource, hits.getMatchingDocs());
   }
 
-  private void count(ValueSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {
+  private void count(LongValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {
 
     LongRange[] ranges = (LongRange[]) this.ranges;
 
@@ -77,7 +96,7 @@ public class LongRangeFacetCounts extends RangeFacetCounts {
 
     int missingCount = 0;
     for (MatchingDocs hits : matchingDocs) {
-      FunctionValues fv = valueSource.getValues(Collections.emptyMap(), hits.context);
+      LongValues fv = valueSource.getValues(hits.context, null);
       
       totCount += hits.totalHits;
       final DocIdSetIterator fastMatchDocs;
@@ -109,8 +128,8 @@ public class LongRangeFacetCounts extends RangeFacetCounts {
           }
         }
         // Skip missing docs:
-        if (fv.exists(doc)) {
-          counter.add(fv.longVal(doc));
+        if (fv.advanceExact(doc)) {
+          counter.add(fv.longValue());
         } else {
           missingCount++;
         }

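End to end, counting against the new source type might look like the following sketch (searcher and ranges assumed from context):

    FacetsCollector fc = new FacetsCollector();
    FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);
    Facets facets = new LongRangeFacetCounts(
        "timestamp", LongValuesSource.fromLongField("timestamp"), fc, ranges);
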
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java b/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java
index 5f6de98..82b8088 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/Range.java
@@ -16,10 +16,6 @@
  */
 package org.apache.lucene.facet.range;
 
-import org.apache.lucene.facet.DrillDownQuery; // javadocs
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.Query;
-
 /** Base class for a single labeled range.
  *
  *  @lucene.experimental */
@@ -36,33 +32,6 @@ public abstract class Range {
     this.label = label;
   }
 
-  /** Returns a new {@link Query} accepting only documents
-   *  in this range.  This query might not be very efficient
-   *  when run on its own since it is optimized towards
-   *  random-access, so it is best used either with
-   *  {@link DrillDownQuery#add(String, Query) DrillDownQuery}
-   *  or when intersected with another query that can lead the
-   *  iteration.  If the {@link ValueSource} is static, e.g. an
-   *  indexed numeric field, then it may be more efficient to use
-   *  {@link org.apache.lucene.search.PointRangeQuery}. The provided fastMatchQuery,
-   *  if non-null, will first be consulted, and only if
-   *  that is set for each document will the range then be
-   *  checked. */
-  public abstract Query getQuery(Query fastMatchQuery, ValueSource valueSource);
-
-  /** Returns a new {@link Query} accepting only documents
-   *  in this range.  This query might not be very efficient
-   *  when run on its own since it is optimized towards
-   *  random-access, so it is best used either with
-   *  {@link DrillDownQuery#add(String, Query) DrillDownQuery}
-   *  or when intersected with another query that can lead the
-   *  iteration.  If the {@link ValueSource} is static, e.g. an
-   *  indexed numeric field, then it may be more efficient to
-   *  use {@link org.apache.lucene.search.PointRangeQuery}. */
-  public Query getQuery(ValueSource valueSource) {
-    return getQuery(null, valueSource);
-  }
-
   /** Invoke this for a useless range. */
   protected void failNoMatch() {
     throw new IllegalArgumentException("range \"" + label + "\" matches nothing");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/FakeScorer.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/FakeScorer.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/FakeScorer.java
deleted file mode 100644
index 238b74c..0000000
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/FakeScorer.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.facet.taxonomy;
-
-import java.io.IOException;
-
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Scorer;
-
-class FakeScorer extends Scorer {
-
-  float score;
-  int doc = -1;
-  int freq = 1;
-
-  FakeScorer() {
-    super(null);
-  }
-
-  @Override
-  public int docID() {
-    return doc;
-  }
-
-  @Override
-  public DocIdSetIterator iterator() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public int freq() throws IOException {
-    return freq;
-  }
-
-  @Override
-  public float score() throws IOException {
-    return score;
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
index 4010c81..0a73ae5 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
@@ -17,7 +17,6 @@
 package org.apache.lucene.facet.taxonomy;
 
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -29,6 +28,8 @@ import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
 import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.IntsRef;
 
@@ -39,52 +40,94 @@ import org.apache.lucene.util.IntsRef;
 public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
   private final OrdinalsReader ordinalsReader;
 
-  /** Aggreggates float facet values from the provided
+  /**
+   * Aggregates double facet values from the provided
    *  {@link ValueSource}, pulling ordinals using {@link
    *  DocValuesOrdinalsReader} against the default indexed
    *  facet field {@link
-   *  FacetsConfig#DEFAULT_INDEX_FIELD_NAME}. */
+   *  FacetsConfig#DEFAULT_INDEX_FIELD_NAME}.
+   *
+   *  @deprecated Use {@link #TaxonomyFacetSumValueSource(TaxonomyReader, FacetsConfig, FacetsCollector, DoubleValuesSource)}
+   */
+  @Deprecated
   public TaxonomyFacetSumValueSource(TaxonomyReader taxoReader, FacetsConfig config,
                                      FacetsCollector fc, ValueSource valueSource) throws IOException {
     this(new DocValuesOrdinalsReader(FacetsConfig.DEFAULT_INDEX_FIELD_NAME), taxoReader, config, fc, valueSource);
   }
 
-  /** Aggreggates float facet values from the provided
+  /**
+   * Aggregates double facet values from the provided
+   * {@link DoubleValuesSource}, pulling ordinals using {@link
+   * DocValuesOrdinalsReader} against the default indexed
+   * facet field {@link FacetsConfig#DEFAULT_INDEX_FIELD_NAME}.
+   */
+  public TaxonomyFacetSumValueSource(TaxonomyReader taxoReader, FacetsConfig config,
+                                     FacetsCollector fc, DoubleValuesSource valueSource) throws IOException {
+    this(new DocValuesOrdinalsReader(FacetsConfig.DEFAULT_INDEX_FIELD_NAME), taxoReader, config, fc, valueSource);
+  }
+
+  /**
+   * Aggregates float facet values from the provided
    *  {@link ValueSource}, and pulls ordinals from the
-   *  provided {@link OrdinalsReader}. */
+   *  provided {@link OrdinalsReader}.
+   *
+   *  @deprecated use {@link #TaxonomyFacetSumValueSource(OrdinalsReader, TaxonomyReader, FacetsConfig, FacetsCollector, DoubleValuesSource)}
+   */
+  @Deprecated
   public TaxonomyFacetSumValueSource(OrdinalsReader ordinalsReader, TaxonomyReader taxoReader,
                                      FacetsConfig config, FacetsCollector fc, ValueSource valueSource) throws IOException {
     super(ordinalsReader.getIndexFieldName(), taxoReader, config);
     this.ordinalsReader = ordinalsReader;
-    sumValues(fc.getMatchingDocs(), fc.getKeepScores(), valueSource);
+    sumValues(fc.getMatchingDocs(), fc.getKeepScores(), valueSource.asDoubleValuesSource());
   }
 
-  private final void sumValues(List<MatchingDocs> matchingDocs, boolean keepScores, ValueSource valueSource) throws IOException {
-    final FakeScorer scorer = new FakeScorer();
-    Map<String, Scorer> context = new HashMap<>();
-    if (keepScores) {
-      context.put("scorer", scorer);
-    }
+  /**
+   * Aggregates float facet values from the provided
+   *  {@link DoubleValuesSource}, and pulls ordinals from the
+   *  provided {@link OrdinalsReader}.
+   */
+  public TaxonomyFacetSumValueSource(OrdinalsReader ordinalsReader, TaxonomyReader taxoReader,
+                                     FacetsConfig config, FacetsCollector fc, DoubleValuesSource vs) throws IOException {
+    super(ordinalsReader.getIndexFieldName(), taxoReader, config);
+    this.ordinalsReader = ordinalsReader;
+    sumValues(fc.getMatchingDocs(), fc.getKeepScores(), vs);
+  }
+
+  private static DoubleValues scores(MatchingDocs hits) {
+    return new DoubleValues() {
+
+      int index = -1;
+
+      @Override
+      public double doubleValue() throws IOException {
+        return hits.scores[index];
+      }
+
+      @Override
+      public boolean advanceExact(int doc) throws IOException {
+        index++;
+        return true;
+      }
+    };
+  }
+
+  private void sumValues(List<MatchingDocs> matchingDocs, boolean keepScores, DoubleValuesSource valueSource) throws IOException {
+
     IntsRef scratch = new IntsRef();
     for(MatchingDocs hits : matchingDocs) {
       OrdinalsReader.OrdinalsSegmentReader ords = ordinalsReader.getReader(hits.context);
-      
-      int scoresIdx = 0;
-      float[] scores = hits.scores;
-
-      FunctionValues functionValues = valueSource.getValues(context, hits.context);
+      DoubleValues scores = keepScores ? scores(hits) : null;
+      DoubleValues functionValues = valueSource.getValues(hits.context, scores);
       DocIdSetIterator docs = hits.bits.iterator();
       
       int doc;
       while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
         ords.get(doc, scratch);
-        if (keepScores) {
-          scorer.doc = doc;
-          scorer.score = scores[scoresIdx++];
-        }
-        float value = (float) functionValues.doubleVal(doc);
-        for(int i=0;i<scratch.length;i++) {
-          values[scratch.ints[i]] += value;
+        if (functionValues.advanceExact(doc)) {
+          float value = (float) functionValues.doubleValue();
+          for (int i = 0; i < scratch.length; i++) {
+            values[scratch.ints[i]] += value;
+          }
         }
       }
     }
@@ -92,9 +135,13 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
     rollup();
   }
 
-  /** {@link ValueSource} that returns the score for each
+  /**
+   * {@link ValueSource} that returns the score for each
    *  hit; use this to aggregate the sum of all hit scores
-   *  for each facet label.  */
+   *  for each facet label.
+   *
+   * @deprecated Use {@link DoubleValuesSource#SCORES}
+   */
   public static class ScoreValueSource extends ValueSource {
 
     /** Sole constructor. */

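The score-summing case, which previously required ScoreValueSource plus the FakeScorer plumbing removed above, now reduces to the shared SCORES constant, as the test changes further down also show. A minimal sketch (searcher, query, taxoReader and config assumed from context):

    FacetsCollector fc = new FacetsCollector(true); // keepScores = true
    FacetsCollector.search(searcher, query, 10, fc);
    Facets facets = new TaxonomyFacetSumValueSource(
        taxoReader, config, fc, DoubleValuesSource.SCORES);
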
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
index 7250ef4..ff207d3 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
@@ -28,8 +28,8 @@ import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.facet.DrillDownQuery;
-import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
 import org.apache.lucene.facet.DrillSideways;
+import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetResult;
 import org.apache.lucene.facet.FacetTestCase;
@@ -46,11 +46,11 @@ import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
 import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
@@ -708,7 +708,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
 
   }
 
-  public void testCustomDoublesValueSource() throws Exception {
+  public void testCustomDoubleValuesSource() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     
@@ -720,33 +720,30 @@ public class TestRangeFacetCounts extends FacetTestCase {
     // Test wants 3 docs in one segment:
     writer.forceMerge(1);
 
-    final ValueSource vs = new ValueSource() {
-        @SuppressWarnings("rawtypes")
-        @Override
-        public FunctionValues getValues(Map ignored, LeafReaderContext ignored2) {
-          return new DoubleDocValues(null) {
-            @Override
-            public double doubleVal(int doc) {
-              return doc+1;
-            }
-          };
-        }
+    final DoubleValuesSource vs = new DoubleValuesSource() {
 
-        @Override
-        public boolean equals(Object o) {
-          return o != null && getClass() == o.getClass();
-        }
+      @Override
+      public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
+        return new DoubleValues() {
+          int doc = -1;
+          @Override
+          public double doubleValue() throws IOException {
+            return doc + 1;
+          }
 
-        @Override
-        public int hashCode() {
-          return getClass().hashCode();
-        }
+          @Override
+          public boolean advanceExact(int doc) throws IOException {
+            this.doc = doc;
+            return true;
+          }
+        };
+      }
 
-        @Override
-        public String description() {
-          throw new UnsupportedOperationException();
-        }
-      };
+      @Override
+      public boolean needsScores() {
+        return false;
+      }
+    };
 
     FacetsConfig config = new FacetsConfig();
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
index 0ad90ba..31bf6e1 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
@@ -52,6 +52,7 @@ import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
@@ -266,7 +267,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
     
     TopDocs td = FacetsCollector.search(newSearcher(r), csq, 10, fc);
 
-    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, new TaxonomyFacetSumValueSource.ScoreValueSource());
+    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, DoubleValuesSource.SCORES);
     
     int expected = (int) (td.getMaxScore() * td.totalHits);
     assertEquals(expected, facets.getSpecificValue("dim", "a").intValue());
@@ -408,7 +409,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
     FacetsCollector.search(newSearcher(r), new MatchAllDocsQuery(), 10, fc);
     
     Facets facets1 = getTaxonomyFacetCounts(taxoReader, config, fc);
-    Facets facets2 = new TaxonomyFacetSumValueSource(new DocValuesOrdinalsReader("$b"), taxoReader, config, fc, new TaxonomyFacetSumValueSource.ScoreValueSource());
+    Facets facets2 = new TaxonomyFacetSumValueSource(new DocValuesOrdinalsReader("$b"), taxoReader, config, fc, DoubleValuesSource.SCORES);
 
     assertEquals(r.maxDoc(), facets1.getTopChildren(10, "a").value.intValue());
     assertEquals(r.maxDoc(), facets2.getTopChildren(10, "b").value.doubleValue(), 1E-10);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/713b65d1/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
index 4064fc9..5bf6324 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
@@ -17,13 +17,20 @@
 package org.apache.lucene.queries.function;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.Map;
 
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.FieldComparatorSource;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LongValues;
+import org.apache.lucene.search.LongValuesSource;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.SimpleFieldComparator;
 import org.apache.lucene.search.SortField;
 
@@ -78,6 +85,110 @@ public abstract class ValueSource {
     return context;
   }
 
+  private static class FakeScorer extends Scorer {
+
+    int current = -1;
+    float score = 0;
+
+    FakeScorer() {
+      super(null);
+    }
+
+    @Override
+    public int docID() {
+      return current;
+    }
+
+    @Override
+    public float score() throws IOException {
+      return score;
+    }
+
+    @Override
+    public int freq() throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public DocIdSetIterator iterator() {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  /**
+   * Expose this ValueSource as a LongValuesSource
+   */
+  public LongValuesSource asLongValuesSource() {
+    return new LongValuesSource() {
+      @Override
+      public LongValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
+        Map context = new IdentityHashMap<>();
+        FakeScorer scorer = new FakeScorer();
+        context.put("scorer", scorer);
+        final FunctionValues fv = ValueSource.this.getValues(context, ctx);
+        return new LongValues() {
+
+          @Override
+          public long longValue() throws IOException {
+            return fv.longVal(scorer.current);
+          }
+
+          @Override
+          public boolean advanceExact(int doc) throws IOException {
+            scorer.current = doc;
+            if (scores != null && scores.advanceExact(doc))
+              scorer.score = (float) scores.doubleValue();
+            else
+              scorer.score = 0;
+            return fv.exists(doc);
+          }
+        };
+      }
+
+      @Override
+      public boolean needsScores() {
+        return false;
+      }
+    };
+  }
+
+  /**
+   * Expose this ValueSource as a DoubleValuesSource
+   */
+  public DoubleValuesSource asDoubleValuesSource() {
+    return new DoubleValuesSource() {
+      @Override
+      public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
+        Map context = new HashMap<>();
+        FakeScorer scorer = new FakeScorer();
+        context.put("scorer", scorer);
+        FunctionValues fv = ValueSource.this.getValues(context, ctx);
+        return new DoubleValues() {
+
+          @Override
+          public double doubleValue() throws IOException {
+            return fv.doubleVal(scorer.current);
+          }
+
+          @Override
+          public boolean advanceExact(int doc) throws IOException {
+            scorer.current = doc;
+            if (scores != null && scores.advanceExact(doc)) {
+              scorer.score = (float) scores.doubleValue();
+            }
+            else
+              scorer.score = 0;
+            return fv.exists(doc);
+          }
+        };
+      }
+
+      @Override
+      public boolean needsScores() {
+        return true;  // be on the safe side
+      }
+    };
+  }
 
   //
   // Sorting by function
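
For context, a minimal sketch (not part of the patch) of how the two new bridge methods might be exercised; the field name "popularity" is hypothetical:

  import org.apache.lucene.queries.function.ValueSource;
  import org.apache.lucene.queries.function.valuesource.LongFieldSource;
  import org.apache.lucene.search.DoubleValuesSource;
  import org.apache.lucene.search.LongValuesSource;

  public class ValueSourceBridgeSketch {
    public static void main(String[] args) {
      // Legacy function-query source over a numeric docvalues field.
      ValueSource vs = new LongFieldSource("popularity");

      // The two bridges added above:
      LongValuesSource longs = vs.asLongValuesSource();
      DoubleValuesSource doubles = vs.asDoubleValuesSource();

      System.out.println(longs.needsScores());   // false
      System.out.println(doubles.needsScores()); // true, "on the safe side"
    }
  }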


[32/43] lucene-solr:jira/solr-8593: * SOLR-9886: Add an 'enable' flag to caches to enable/disable them

Posted by kr...@apache.org.
* SOLR-9886: Add an 'enable' flag to caches to enable/disable them


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/2048b824
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/2048b824
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/2048b824

Branch: refs/heads/jira/solr-8593
Commit: 2048b82443db548f76d584f9a95b5628c407edde
Parents: 2b4e3dd
Author: Noble Paul <no...@apache.org>
Authored: Tue Jan 10 21:05:38 2017 +1030
Committer: Noble Paul <no...@apache.org>
Committed: Tue Jan 10 21:05:38 2017 +1030

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../org/apache/solr/search/CacheConfig.java     |  2 +-
 .../org/apache/solr/search/FastLRUCache.java    |  2 +-
 .../src/java/org/apache/solr/util/DOMUtil.java  |  5 ++
 .../resources/EditableSolrConfigAttributes.json | 16 +++-
 .../conf/solrconfig-cache-enable-disable.xml    | 80 ++++++++++++++++++++
 .../test/org/apache/solr/core/TestConfig.java   | 32 ++++++++
 7 files changed, 136 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2048b824/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2b79f04..2a5d5bb 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -230,6 +230,8 @@ New Features
 
 * SOLR-9856: Collect metrics for shard replication and tlog replay on replicas (ab).
 
+* SOLR-9886: Add an 'enable' flag to caches to enable/disable them (Pushkar Raste, noble)
+
 Optimizations
 ----------------------
 * SOLR-9704: Facet Module / JSON Facet API: Optimize blockChildren facets that have

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2048b824/solr/core/src/java/org/apache/solr/search/CacheConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/CacheConfig.java b/solr/core/src/java/org/apache/solr/search/CacheConfig.java
index d3565a6..648fe38 100644
--- a/solr/core/src/java/org/apache/solr/search/CacheConfig.java
+++ b/solr/core/src/java/org/apache/solr/search/CacheConfig.java
@@ -89,7 +89,7 @@ public class CacheConfig implements MapSerializable{
 
   public static CacheConfig getConfig(SolrConfig solrConfig, String xpath) {
     Node node = solrConfig.getNode(xpath, false);
-    if(node == null) {
+    if(node == null || !"true".equals(DOMUtil.getAttrOrDefault(node, "enabled", "true"))) {
       Map<String, String> m = solrConfig.getOverlay().getEditableSubProperties(xpath);
       if(m==null) return null;
       List<String> parts = StrUtils.splitSmart(xpath, '/');

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2048b824/solr/core/src/java/org/apache/solr/search/FastLRUCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/FastLRUCache.java b/solr/core/src/java/org/apache/solr/search/FastLRUCache.java
index 6c2e4d5..9c4b892 100644
--- a/solr/core/src/java/org/apache/solr/search/FastLRUCache.java
+++ b/solr/core/src/java/org/apache/solr/search/FastLRUCache.java
@@ -69,7 +69,7 @@ public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V>
     } else {
       minLimit = Integer.parseInt(str);
     }
-    if (minLimit==0) minLimit=1;
+    if (minLimit <= 0) minLimit = 1;
     if (limit <= minLimit) limit=minLimit+1;
 
     int acceptableLimit;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2048b824/solr/core/src/java/org/apache/solr/util/DOMUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/DOMUtil.java b/solr/core/src/java/org/apache/solr/util/DOMUtil.java
index 773d08c..29dab93 100644
--- a/solr/core/src/java/org/apache/solr/util/DOMUtil.java
+++ b/solr/core/src/java/org/apache/solr/util/DOMUtil.java
@@ -79,6 +79,11 @@ public class DOMUtil {
     return getAttr(nd.getAttributes(), name);
   }
 
+  public static String getAttrOrDefault(Node nd, String name, String def) {
+    String attr = getAttr(nd.getAttributes(), name);
+    return attr == null ? def : attr;
+  }
+
   public static String getAttr(NamedNodeMap attrs, String name, String missing_err) {
     Node attr = attrs==null? null : attrs.getNamedItem(name);
     if (attr==null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2048b824/solr/core/src/resources/EditableSolrConfigAttributes.json
----------------------------------------------------------------------
diff --git a/solr/core/src/resources/EditableSolrConfigAttributes.json b/solr/core/src/resources/EditableSolrConfigAttributes.json
index b0d6c2f..ce9d1ad 100644
--- a/solr/core/src/resources/EditableSolrConfigAttributes.json
+++ b/solr/core/src/resources/EditableSolrConfigAttributes.json
@@ -1,4 +1,14 @@
 {
+//-------legend----------
+// 0  = string attribute
+// 1  = string node
+// 10 = boolean attribute
+// 11 = boolean node
+// 20 = int attribute
+// 21 = int node
+// 30 = float attribute
+// 31 = float node
+//------------------------
   "updateHandler":{
     "autoCommit":{
       "maxDocs":20,
@@ -12,6 +22,7 @@
   "query":{
     "filterCache":{
       "class":0,
+      "enabled":10,
       "size":0,
       "initialSize":20,
       "autowarmCount":20,
@@ -19,6 +30,7 @@
       "regenerator":0},
     "queryResultCache":{
       "class":0,
+      "enabled":10,
       "size":20,
       "initialSize":20,
       "autowarmCount":20,
@@ -26,12 +38,14 @@
       "regenerator":0},
     "documentCache":{
       "class":0,
+      "enabled":10,
       "size":20,
       "initialSize":20,
       "autowarmCount":20,
       "regenerator":0},
     "fieldValueCache":{
       "class":0,
+      "enabled":10,
       "size":20,
       "initialSize":20,
       "autowarmCount":20,
@@ -56,4 +70,4 @@
   "peerSync":{
     "useRangeVersions":11
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2048b824/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
new file mode 100644
index 0000000..4053ebe
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-cache-enable-disable.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+  <dataDir>${solr.data.dir:}</dataDir>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
+  <schemaFactory class="ClassicIndexSchemaFactory"/>
+  <requestHandler name="standard" class="solr.StandardRequestHandler" />
+  
+  <query>
+    <!-- Maximum number of clauses in a boolean query... can affect
+        range or wildcard queries that expand to big boolean
+        queries.  An exception is thrown if exceeded.
+    -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+    <!-- Cache specification for Filters or DocSets - unordered set of *all* documents
+         that match a particular query.
+      -->
+    <filterCache
+      enabled="${filterCache.enabled}"
+      class="solr.search.FastLRUCache"
+      size="512"
+      initialSize="512"
+      autowarmCount="2"/>
+
+    <queryResultCache
+      enabled="${queryResultCache.enabled}"
+      class="solr.search.LRUCache"
+      size="512"
+      initialSize="512"
+      autowarmCount="2"/>
+
+    <documentCache
+      enabled="${documentCache.enabled}"
+      class="solr.search.LRUCache"
+      size="512"
+      initialSize="512"
+      autowarmCount="0"/>
+
+
+    <!-- If true, stored fields that are not requested will be loaded lazily.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+    <queryResultWindowSize>10</queryResultWindowSize>
+
+    <!-- set maxSize artificially low to exercise both types of sets -->
+    <HashDocSet maxSize="3" loadFactor="0.75"/>
+
+    <!-- boolToFilterOptimizer converts boolean clauses with zero boost
+         into cached filters if the number of docs selected by the clause exceeds
+         the threshold (represented as a fraction of the total index)
+    -->
+    <boolTofilterOptimizer enabled="false" cacheSize="32" threshold=".05"/>
+
+  </query>
+  
+</config>
+
+
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/2048b824/solr/core/src/test/org/apache/solr/core/TestConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/core/TestConfig.java b/solr/core/src/test/org/apache/solr/core/TestConfig.java
index 8244b32..094f013 100644
--- a/solr/core/src/test/org/apache/solr/core/TestConfig.java
+++ b/solr/core/src/test/org/apache/solr/core/TestConfig.java
@@ -105,6 +105,38 @@ public class TestConfig extends SolrTestCaseJ4 {
     assertTrue("file handler should have been automatically registered", handler != null);
 
   }
+  
+ @Test
+ public void testCacheEnablingDisabling() throws Exception {
+   // ensure that if a cache is not defined in the config, it is disabled
+   SolrConfig sc = new SolrConfig(new SolrResourceLoader(TEST_PATH().resolve("collection1")), "solrconfig-defaults.xml", null);
+   assertNull(sc.filterCacheConfig);
+   assertNull(sc.queryResultCacheConfig);
+   assertNull(sc.documentCacheConfig);
+   
+   // enable all the caches via system properties and verify 
+   System.setProperty("filterCache.enabled", "true");
+   System.setProperty("queryResultCache.enabled", "true");
+   System.setProperty("documentCache.enabled", "true");
+   sc = new SolrConfig(new SolrResourceLoader(TEST_PATH().resolve("collection1")), "solrconfig-cache-enable-disable.xml", null);
+   assertNotNull(sc.filterCacheConfig);
+   assertNotNull(sc.queryResultCacheConfig);
+   assertNotNull(sc.documentCacheConfig);
+   
+   // disable all the caches via system properties and verify
+   System.setProperty("filterCache.enabled", "false");
+   System.setProperty("queryResultCache.enabled", "false");
+   System.setProperty("documentCache.enabled", "false");
+   sc = new SolrConfig(new SolrResourceLoader(TEST_PATH().resolve("collection1")), "solrconfig-cache-enable-disable.xml", null);
+   assertNull(sc.filterCacheConfig);
+   assertNull(sc.queryResultCacheConfig);
+   assertNull(sc.documentCacheConfig);
+   
+   System.clearProperty("filterCache.enabled");
+   System.clearProperty("queryResultCache.enabled");
+   System.clearProperty("documentCache.enabled");
+ }
+  
 
   // If defaults change, add test methods to cover each version
   @Test
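
For reference, a sketch of how the new attribute reads in a solrconfig.xml; the system-property name inside the ${...} substitution is illustrative. Because DOMUtil.getAttrOrDefault() falls back to "true" when the attribute is absent, existing configs that never mention 'enabled' keep their caches:

  <filterCache
    enabled="${solr.filterCache.enabled:true}"
    class="solr.search.FastLRUCache"
    size="512"
    initialSize="512"
    autowarmCount="0"/>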


[18/43] lucene-solr:jira/solr-8593: TokenStreamToAutomaton failed to handle certain holes correctly

Posted by kr...@apache.org.
TokenStreamToAutomaton failed to handle certain holes correctly


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e64111c6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e64111c6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e64111c6

Branch: refs/heads/jira/solr-8593
Commit: e64111c6545d8e05241bc87eb4623c5ed44db312
Parents: 1aa9c42
Author: Mike McCandless <mi...@apache.org>
Authored: Sun Jan 8 06:26:08 2017 -0500
Committer: Mike McCandless <mi...@apache.org>
Committed: Sun Jan 8 06:26:08 2017 -0500

----------------------------------------------------------------------
 .../apache/lucene/analysis/TokenStreamToAutomaton.java  | 11 ++++++++++-
 .../org/apache/lucene/analysis/TestGraphTokenizers.java | 12 ++++++++++++
 2 files changed, 22 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e64111c6/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java b/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
index 071fa4a..64bac66 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
@@ -113,6 +113,7 @@ public class TokenStreamToAutomaton {
     final RollingBuffer<Position> positions = new Positions();
 
     int pos = -1;
+    int freedPos = 0;
     Position posData = null;
     int maxOffset = 0;
     while (in.incrementToken()) {
@@ -150,7 +151,15 @@ public class TokenStreamToAutomaton {
             addHoles(builder, positions, pos);
           }
         }
-        positions.freeBefore(pos);
+        while (freedPos <= pos) {
+          Position freePosData = positions.get(freedPos);
+          // don't free this position yet if we may still need to fill holes over it:
+          if (freePosData.arriving == -1 || freePosData.leaving == -1) {
+            break;
+          }
+          positions.freeBefore(freedPos);
+          freedPos++;
+        }
       }
 
       final int endPos = pos + posLengthAtt.getPositionLength();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e64111c6/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
index 78fb127..8899dd1 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
@@ -585,4 +585,16 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
       Operations.determinize(Operations.removeDeadStates(expected), DEFAULT_MAX_DETERMINIZED_STATES),
       Operations.determinize(Operations.removeDeadStates(actual), DEFAULT_MAX_DETERMINIZED_STATES)));
   }
+
+  public void testTokenStreamGraphWithHoles() throws Exception {
+    final TokenStream ts = new CannedTokenStream(
+      new Token[] {
+        token("abc", 1, 1),
+        token("xyz", 1, 8),
+        token("def", 1, 1),
+        token("ghi", 1, 1),
+      });
+    assertSameLanguage(Operations.union(join(s2a("abc"), SEP_A, s2a("xyz")),
+                                        join(s2a("abc"), SEP_A, HOLE_A, SEP_A, s2a("def"), SEP_A, s2a("ghi"))), ts);
+  }
 }
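
To see the shape of the bug outside the test framework, here is a sketch that builds the same token graph by hand (the tok() helper stands in for the test's token() helper; the offsets are dummies):

  import java.io.IOException;
  import org.apache.lucene.analysis.CannedTokenStream;
  import org.apache.lucene.analysis.Token;
  import org.apache.lucene.analysis.TokenStreamToAutomaton;
  import org.apache.lucene.util.automaton.Automaton;

  public class HoleGraphSketch {
    static Token tok(String text, int posInc, int posLength) {
      Token t = new Token(text, 0, text.length());
      t.setPositionIncrement(posInc);
      t.setPositionLength(posLength);
      return t;
    }

    public static void main(String[] args) throws IOException {
      // "xyz" spans 8 positions; before the fix, positions under that span
      // could be freed while a hole still needed to be filled across them.
      Automaton a = new TokenStreamToAutomaton().toAutomaton(new CannedTokenStream(
          tok("abc", 1, 1), tok("xyz", 1, 8), tok("def", 1, 1), tok("ghi", 1, 1)));
      System.out.println(a.getNumStates());
    }
  }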


[02/43] lucene-solr:jira/solr-8593: LUCENE-7617: Grouping collector API cleanup

Posted by kr...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index 72c858f..f079b85 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -144,10 +144,10 @@ public class TestGrouping extends LuceneTestCase {
 
     final Sort groupSort = Sort.RELEVANCE;
 
-    final AbstractFirstPassGroupingCollector<?> c1 = createRandomFirstPassCollector(groupField, groupSort, 10);
+    final FirstPassGroupingCollector<?> c1 = createRandomFirstPassCollector(groupField, groupSort, 10);
     indexSearcher.search(new TermQuery(new Term("content", "random")), c1);
 
-    final AbstractSecondPassGroupingCollector<?> c2 = createSecondPassCollector(c1, groupField, groupSort, Sort.RELEVANCE, 0, 5, true, true, true);
+    final SecondPassGroupingCollector<?> c2 = createSecondPassCollector(c1, groupField, groupSort, Sort.RELEVANCE, 0, 5, true, true, true);
     indexSearcher.search(new TermQuery(new Term("content", "random")), c2);
 
     final TopGroups<?> groups = c2.getTopGroups(0);
@@ -195,8 +195,8 @@ public class TestGrouping extends LuceneTestCase {
     doc.add(new SortedDocValuesField(groupField, new BytesRef(value)));
   }
 
-  private AbstractFirstPassGroupingCollector<?> createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs) throws IOException {
-    AbstractFirstPassGroupingCollector<?> selected;
+  private FirstPassGroupingCollector<?> createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs) throws IOException {
+    FirstPassGroupingCollector<?> selected;
     if (random().nextBoolean()) {
       ValueSource vs = new BytesRefFieldSource(groupField);
       selected = new FunctionFirstPassGroupingCollector(vs, new HashMap<>(), groupSort, topDocs);
@@ -209,7 +209,7 @@ public class TestGrouping extends LuceneTestCase {
     return selected;
   }
 
-  private AbstractFirstPassGroupingCollector<?> createFirstPassCollector(String groupField, Sort groupSort, int topDocs, AbstractFirstPassGroupingCollector<?> firstPassGroupingCollector) throws IOException {
+  private FirstPassGroupingCollector<?> createFirstPassCollector(String groupField, Sort groupSort, int topDocs, FirstPassGroupingCollector<?> firstPassGroupingCollector) throws IOException {
     if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       ValueSource vs = new BytesRefFieldSource(groupField);
       return new FunctionFirstPassGroupingCollector(vs, new HashMap<>(), groupSort, topDocs);
@@ -219,37 +219,37 @@ public class TestGrouping extends LuceneTestCase {
   }
 
   @SuppressWarnings({"unchecked","rawtypes"})
-  private <T> AbstractSecondPassGroupingCollector<T> createSecondPassCollector(AbstractFirstPassGroupingCollector firstPassGroupingCollector,
-                                                                        String groupField,
-                                                                        Sort groupSort,
-                                                                        Sort sortWithinGroup,
-                                                                        int groupOffset,
-                                                                        int maxDocsPerGroup,
-                                                                        boolean getScores,
-                                                                        boolean getMaxScores,
-                                                                        boolean fillSortFields) throws IOException {
+  private <T> SecondPassGroupingCollector<T> createSecondPassCollector(FirstPassGroupingCollector firstPassGroupingCollector,
+                                                                       String groupField,
+                                                                       Sort groupSort,
+                                                                       Sort sortWithinGroup,
+                                                                       int groupOffset,
+                                                                       int maxDocsPerGroup,
+                                                                       boolean getScores,
+                                                                       boolean getMaxScores,
+                                                                       boolean fillSortFields) throws IOException {
 
     if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       Collection<SearchGroup<BytesRef>> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields);
-      return (AbstractSecondPassGroupingCollector) new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields);
+      return (SecondPassGroupingCollector) new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields);
     } else {
       ValueSource vs = new BytesRefFieldSource(groupField);
       Collection<SearchGroup<MutableValue>> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields);
-      return (AbstractSecondPassGroupingCollector) new FunctionSecondPassGroupingCollector(searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields, vs, new HashMap());
+      return (SecondPassGroupingCollector) new FunctionSecondPassGroupingCollector(searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields, vs, new HashMap());
     }
   }
 
   // Basically converts searchGroups from MutableValue to BytesRef if grouping by ValueSource
   @SuppressWarnings("unchecked")
-  private AbstractSecondPassGroupingCollector<?> createSecondPassCollector(AbstractFirstPassGroupingCollector<?> firstPassGroupingCollector,
-                                                                        String groupField,
-                                                                        Collection<SearchGroup<BytesRef>> searchGroups,
-                                                                        Sort groupSort,
-                                                                        Sort sortWithinGroup,
-                                                                        int maxDocsPerGroup,
-                                                                        boolean getScores,
-                                                                        boolean getMaxScores,
-                                                                        boolean fillSortFields) throws IOException {
+  private SecondPassGroupingCollector<?> createSecondPassCollector(FirstPassGroupingCollector<?> firstPassGroupingCollector,
+                                                                   String groupField,
+                                                                   Collection<SearchGroup<BytesRef>> searchGroups,
+                                                                   Sort groupSort,
+                                                                   Sort sortWithinGroup,
+                                                                   int maxDocsPerGroup,
+                                                                   boolean getScores,
+                                                                   boolean getMaxScores,
+                                                                   boolean fillSortFields) throws IOException {
     if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) {
       return new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields);
     } else {
@@ -272,8 +272,8 @@ public class TestGrouping extends LuceneTestCase {
     }
   }
 
-  private AbstractAllGroupsCollector<?> createAllGroupsCollector(AbstractFirstPassGroupingCollector<?> firstPassGroupingCollector,
-                                                              String groupField) {
+  private AllGroupsCollector<?> createAllGroupsCollector(FirstPassGroupingCollector<?> firstPassGroupingCollector,
+                                                         String groupField) {
     if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) {
       return new TermAllGroupsCollector(groupField);
     } else {
@@ -305,7 +305,7 @@ public class TestGrouping extends LuceneTestCase {
     }
   }
 
-  private Collection<SearchGroup<BytesRef>> getSearchGroups(AbstractFirstPassGroupingCollector<?> c, int groupOffset, boolean fillFields) throws IOException {
+  private Collection<SearchGroup<BytesRef>> getSearchGroups(FirstPassGroupingCollector<?> c, int groupOffset, boolean fillFields) throws IOException {
     if (TermFirstPassGroupingCollector.class.isAssignableFrom(c.getClass())) {
       return ((TermFirstPassGroupingCollector) c).getTopGroups(groupOffset, fillFields);
     } else if (FunctionFirstPassGroupingCollector.class.isAssignableFrom(c.getClass())) {
@@ -328,7 +328,7 @@ public class TestGrouping extends LuceneTestCase {
   }
 
   @SuppressWarnings({"unchecked", "rawtypes"})
-  private TopGroups<BytesRef> getTopGroups(AbstractSecondPassGroupingCollector c, int withinGroupOffset) {
+  private TopGroups<BytesRef> getTopGroups(SecondPassGroupingCollector c, int withinGroupOffset) {
     if (c.getClass().isAssignableFrom(TermSecondPassGroupingCollector.class)) {
       return ((TermSecondPassGroupingCollector) c).getTopGroups(withinGroupOffset);
     } else if (c.getClass().isAssignableFrom(FunctionSecondPassGroupingCollector.class)) {
@@ -874,11 +874,11 @@ public class TestGrouping extends LuceneTestCase {
         if (VERBOSE) {
           System.out.println("  groupField=" + groupField);
         }
-        final AbstractFirstPassGroupingCollector<?> c1 = createRandomFirstPassCollector(groupField, groupSort, groupOffset+topNGroups);
+        final FirstPassGroupingCollector<?> c1 = createRandomFirstPassCollector(groupField, groupSort, groupOffset+topNGroups);
         final CachingCollector cCache;
         final Collector c;
         
-        final AbstractAllGroupsCollector<?> allGroupsCollector;
+        final AllGroupsCollector<?> allGroupsCollector;
         if (doAllGroups) {
           allGroupsCollector = createAllGroupsCollector(c1, groupField);
         } else {
@@ -953,7 +953,7 @@ public class TestGrouping extends LuceneTestCase {
         
         final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort,
             groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, true, false);
-        final AbstractSecondPassGroupingCollector<?> c2;
+        final SecondPassGroupingCollector<?> c2;
         if (topGroups != null) {
           
           if (VERBOSE) {
@@ -1172,8 +1172,8 @@ public class TestGrouping extends LuceneTestCase {
     // Run 1st pass collector to get top groups per shard
     final Weight w = topSearcher.createNormalizedWeight(query, getScores);
     final List<Collection<SearchGroup<BytesRef>>> shardGroups = new ArrayList<>();
-    List<AbstractFirstPassGroupingCollector<?>> firstPassGroupingCollectors = new ArrayList<>();
-    AbstractFirstPassGroupingCollector<?> firstPassCollector = null;
+    List<FirstPassGroupingCollector<?>> firstPassGroupingCollectors = new ArrayList<>();
+    FirstPassGroupingCollector<?> firstPassCollector = null;
     boolean shardsCanUseIDV = canUseIDV;
 
     String groupField = "group";
@@ -1223,7 +1223,7 @@ public class TestGrouping extends LuceneTestCase {
       @SuppressWarnings({"unchecked","rawtypes"})
       final TopGroups<BytesRef>[] shardTopGroups = new TopGroups[subSearchers.length];
       for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
-        final AbstractSecondPassGroupingCollector<?> secondPassCollector = createSecondPassCollector(firstPassGroupingCollectors.get(shardIDX),
+        final SecondPassGroupingCollector<?> secondPassCollector = createSecondPassCollector(firstPassGroupingCollectors.get(shardIDX),
             groupField, mergedTopGroups, groupSort, docSort, docOffset + topNDocs, getScores, getMaxScores, true);
         subSearchers[shardIDX].search(w, secondPassCollector);
         shardTopGroups[shardIDX] = getTopGroups(secondPassCollector, 0);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
index f29a767..641b1f3 100644
--- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
@@ -51,7 +51,7 @@ import org.apache.lucene.search.FilterCollector;
 import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.AllGroupHeadsCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupsCollector;
 import org.apache.lucene.search.grouping.term.TermGroupFacetCollector;
 import org.apache.lucene.util.BytesRef;
@@ -282,7 +282,7 @@ public class SimpleFacets {
       } else {
         return base;
       }
-      AbstractAllGroupHeadsCollector allGroupHeadsCollector = grouping.getCommands().get(0).createAllGroupCollector();
+      AllGroupHeadsCollector allGroupHeadsCollector = grouping.getCommands().get(0).createAllGroupCollector();
       searcher.search(base.getTopFilter(), allGroupHeadsCollector);
       return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
     } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/solr/core/src/java/org/apache/solr/search/Grouping.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/Grouping.java b/solr/core/src/java/org/apache/solr/search/Grouping.java
index 8d6f3ca..75011e7 100644
--- a/solr/core/src/java/org/apache/solr/search/Grouping.java
+++ b/solr/core/src/java/org/apache/solr/search/Grouping.java
@@ -46,7 +46,7 @@ import org.apache.lucene.search.TopDocsCollector;
 import org.apache.lucene.search.TopFieldCollector;
 import org.apache.lucene.search.TopScoreDocCollector;
 import org.apache.lucene.search.TotalHitCountCollector;
-import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.AllGroupHeadsCollector;
 import org.apache.lucene.search.grouping.GroupDocs;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.search.grouping.TopGroups;
@@ -323,7 +323,7 @@ public class Grouping {
       cmd.prepare();
     }
 
-    AbstractAllGroupHeadsCollector<?> allGroupHeadsCollector = null;
+    AllGroupHeadsCollector<?> allGroupHeadsCollector = null;
     List<Collector> collectors = new ArrayList<>(commands.size());
     for (Command cmd : commands) {
       Collector collector = cmd.createFirstPassCollector();
@@ -513,7 +513,7 @@ public class Grouping {
   * Note: Maybe creating the response structure should be done in something like a ResponseBuilder???
   * Warning: NOT thread safe!
    */
-  public abstract class Command<GROUP_VALUE_TYPE> {
+  public abstract class Command<T> {
 
     public String key;       // the name to use for this group in the response
     public Sort withinGroupSort;   // the sort of the documents *within* a single group.
@@ -527,7 +527,7 @@ public class Grouping {
     public boolean main;     // use as the main result in simple format (grouped.main=true param)
     public TotalCount totalCount = TotalCount.ungrouped;
 
-    TopGroups<GROUP_VALUE_TYPE> result;
+    TopGroups<T> result;
 
 
     /**
@@ -565,7 +565,7 @@ public class Grouping {
      * @return a collector that is able to return the most relevant document of all groups.
      * @throws IOException If I/O related errors occur
      */
-    public AbstractAllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
+    public AllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
       return null;
     }
 
@@ -774,7 +774,7 @@ public class Grouping {
      * {@inheritDoc}
      */
     @Override
-    public AbstractAllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
+    public AllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
       Sort sortWithinGroup = withinGroupSort != null ? withinGroupSort : Sort.RELEVANCE;
       return TermAllGroupHeadsCollector.create(groupBy, sortWithinGroup);
     }
@@ -992,7 +992,7 @@ public class Grouping {
     }
 
     @Override
-    public AbstractAllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
+    public AllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
       Sort sortWithinGroup = withinGroupSort != null ? withinGroupSort : Sort.RELEVANCE;
       return new FunctionAllGroupHeadsCollector(groupBy, context, sortWithinGroup);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
index 716f5df..74c2b70 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java
@@ -31,7 +31,7 @@ import org.apache.lucene.search.MultiCollector;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TimeLimitingCollector;
 import org.apache.lucene.search.TotalHitCountCollector;
-import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
+import org.apache.lucene.search.grouping.AllGroupHeadsCollector;
 import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector;
 import org.apache.solr.common.util.NamedList;
@@ -170,7 +170,7 @@ public class CommandHandler {
     SchemaField sf = searcher.getSchema().getField(field);
     FieldType fieldType = sf.getType();
     
-    final AbstractAllGroupHeadsCollector allGroupHeadsCollector;
+    final AllGroupHeadsCollector allGroupHeadsCollector;
     if (fieldType.getNumericType() != null) {
       ValueSource vs = fieldType.getValueSource(sf, null);
       allGroupHeadsCollector = new FunctionAllGroupHeadsCollector(vs, new HashMap(), firstCommand.getSortWithinGroup());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java
index fc04599..46f8009 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java
@@ -19,8 +19,8 @@ package org.apache.solr.search.grouping.distributed.command;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
-import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
+import org.apache.lucene.search.grouping.AllGroupsCollector;
+import org.apache.lucene.search.grouping.FirstPassGroupingCollector;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
 import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
@@ -81,8 +81,8 @@ public class SearchGroupsFieldCommand implements Command<SearchGroupsFieldComman
   private final int topNGroups;
   private final boolean includeGroupCount;
 
-  private AbstractFirstPassGroupingCollector firstPassGroupingCollector;
-  private AbstractAllGroupsCollector allGroupsCollector;
+  private FirstPassGroupingCollector firstPassGroupingCollector;
+  private AllGroupsCollector allGroupsCollector;
 
   private SearchGroupsFieldCommand(SchemaField field, Sort groupSort, int topNGroups, boolean includeGroupCount) {
     this.field = field;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/da30f21f/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java
index 2660b21..0bdb0ed 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java
@@ -19,7 +19,7 @@ package org.apache.solr.search.grouping.distributed.command;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
+import org.apache.lucene.search.grouping.SecondPassGroupingCollector;
 import org.apache.lucene.search.grouping.GroupDocs;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.search.grouping.TopGroups;
@@ -106,7 +106,7 @@ public class TopGroupsFieldCommand implements Command<TopGroups<BytesRef>> {
   private final int maxDocPerGroup;
   private final boolean needScores;
   private final boolean needMaxScore;
-  private AbstractSecondPassGroupingCollector secondPassCollector;
+  private SecondPassGroupingCollector secondPassCollector;
 
   private TopGroupsFieldCommand(SchemaField field,
                                 Sort groupSort,


[14/43] lucene-solr:jira/solr-8593: SOLR-9944: Update CHANGES.txt

Posted by kr...@apache.org.
SOLR-9944: Update CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ac14fc32
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ac14fc32
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ac14fc32

Branch: refs/heads/jira/solr-8593
Commit: ac14fc32e045d45b5129dc237f7e5472fc86e4a0
Parents: aae4217
Author: Joel Bernstein <jb...@apache.org>
Authored: Sat Jan 7 22:19:46 2017 -0500
Committer: Joel Bernstein <jb...@apache.org>
Committed: Sat Jan 7 22:19:46 2017 -0500

----------------------------------------------------------------------
 solr/CHANGES.txt | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ac14fc32/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 0d61730..899dcd3 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -401,6 +401,8 @@ Other Changes
 
 * SOLR-3990: Moves getIndexSize() from ReplicationHandler to SolrCore (Shawn Heisey)
 
+* SOLR-9944: Map the nodes function name to the GatherNodesStream (Joel Bernstein)
+
 ==================  6.3.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.


[21/43] lucene-solr:jira/solr-8593: SOLR-9902: Fix move impl.

Posted by kr...@apache.org.
SOLR-9902: Fix move impl.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8bc151d1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8bc151d1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8bc151d1

Branch: refs/heads/jira/solr-8593
Commit: 8bc151d1c61932dda26c682cf2281535f0c36058
Parents: 25290ab
Author: markrmiller <ma...@apache.org>
Authored: Sun Jan 8 10:22:42 2017 -0500
Committer: markrmiller <ma...@apache.org>
Committed: Sun Jan 8 10:22:55 2017 -0500

----------------------------------------------------------------------
 solr/CHANGES.txt                                                  | 2 +-
 .../src/java/org/apache/solr/core/StandardDirectoryFactory.java   | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8bc151d1/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c18381e..11151d7 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -248,7 +248,7 @@ Optimizations
   resulting in less produced garbage and 5-7% better performance.
   (yonik)
 
-* SOLR-9902: StandardDirectoryFactory should use Files API for its move implementation. (Mark Miller)
+* SOLR-9902: StandardDirectoryFactory should use Files API for its move implementation. (Mark Miller, Mike Drob)
 
 Bug Fixes
 ----------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8bc151d1/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java
index 37c15ed..1bc4914 100644
--- a/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/StandardDirectoryFactory.java
@@ -131,13 +131,14 @@ public class StandardDirectoryFactory extends CachingDirectoryFactory {
     if (baseFromDir instanceof FSDirectory && baseToDir instanceof FSDirectory) {
   
       Path path1 = ((FSDirectory) baseFromDir).getDirectory().toAbsolutePath();
-      Path path2 = ((FSDirectory) baseFromDir).getDirectory().toAbsolutePath();
+      Path path2 = ((FSDirectory) baseToDir).getDirectory().toAbsolutePath();
       
       try {
         Files.move(path1.resolve(fileName), path2.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
       } catch (AtomicMoveNotSupportedException e) {
         Files.move(path1.resolve(fileName), path2.resolve(fileName));
       }
+      return;
     }
 
     super.move(fromDir, toDir, fileName, ioContext);
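
The corrected pattern in isolation (a sketch; from and to stand for the resolved source and target paths). The original bug resolved both paths from baseFromDir, so the "move" stayed inside one directory, and without the added return the slower super.move() fallback ran as well:

  import java.io.IOException;
  import java.nio.file.AtomicMoveNotSupportedException;
  import java.nio.file.Files;
  import java.nio.file.Path;
  import java.nio.file.StandardCopyOption;

  static void moveFile(Path from, Path to) throws IOException {
    try {
      // Prefer an atomic rename when the filesystem supports it.
      Files.move(from, to, StandardCopyOption.ATOMIC_MOVE);
    } catch (AtomicMoveNotSupportedException e) {
      Files.move(from, to); // non-atomic fallback, as in the patch
    }
  }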


[15/43] lucene-solr:jira/solr-8593: LUCENE-7620: UnifiedHighlighter: new LengthGoalBreakIterator wrapper

Posted by kr...@apache.org.
LUCENE-7620: UnifiedHighlighter: new LengthGoalBreakIterator wrapper


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ea499895
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ea499895
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ea499895

Branch: refs/heads/jira/solr-8593
Commit: ea49989524e96563f2b9bdd4256012239907882f
Parents: ac14fc3
Author: David Smiley <ds...@apache.org>
Authored: Sat Jan 7 23:10:48 2017 -0500
Committer: David Smiley <ds...@apache.org>
Committed: Sat Jan 7 23:10:48 2017 -0500

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   6 +-
 .../uhighlight/LengthGoalBreakIterator.java     | 185 +++++++++++++++++++
 .../lucene/search/uhighlight/Passage.java       |   1 +
 .../uhighlight/LengthGoalBreakIteratorTest.java | 104 +++++++++++
 4 files changed, 295 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea499895/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 30c9ab0..4bbf9ee 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -216,7 +216,11 @@ Improvements
   ensure all dimensions are indexed. (Adrien Grand)
 
 * LUCENE-7614: Complex Phrase Query parser ignores double quotes around single token 
-  prefix, wildcard, range queries (Mikhail Khludnev) 
+  prefix, wildcard, range queries (Mikhail Khludnev)
+
+* LUCENE-7620: Added LengthGoalBreakIterator, a wrapper around another B.I. to skip breaks
+  that would create Passages that are too short.  Only for use with the UnifiedHighlighter
+  (and probably PostingsHighlighter).  (David Smiley)
 
 Optimizations
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea499895/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/LengthGoalBreakIterator.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/LengthGoalBreakIterator.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/LengthGoalBreakIterator.java
new file mode 100644
index 0000000..3134013
--- /dev/null
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/LengthGoalBreakIterator.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.search.uhighlight;
+
+import java.text.BreakIterator;
+import java.text.CharacterIterator;
+
+/**
+ * Wraps another {@link BreakIterator} to skip past breaks that would result in passages that are too
+ * short.  It's still possible to get a short passage but only at the very end of the input text.
+ * <p>
+ * Important: This is not a general purpose {@link BreakIterator}; it's only designed to work in a way
+ * compatible with the {@link UnifiedHighlighter}.  Some assumptions are checked with Java assertions.
+ *
+ * @lucene.experimental
+ */
+public class LengthGoalBreakIterator extends BreakIterator {
+
+  private final BreakIterator baseIter;
+  private final int lengthGoal;
+  private final boolean isMinimumLength; // if false then is "closest to" length
+
+  /** Breaks will be at least {@code minLength} apart (to the extent possible). */
+  public static LengthGoalBreakIterator createMinLength(BreakIterator baseIter, int minLength) {
+    return new LengthGoalBreakIterator(baseIter, minLength, true);
+  }
+
+  /** Breaks will be on average {@code targetLength} apart; the closest break to this target (before or after)
+   * is chosen. */
+  public static LengthGoalBreakIterator createClosestToLength(BreakIterator baseIter, int targetLength) {
+    return new LengthGoalBreakIterator(baseIter, targetLength, false);
+  }
+
+  private LengthGoalBreakIterator(BreakIterator baseIter, int lengthGoal, boolean isMinimumLength) {
+    this.baseIter = baseIter;
+    this.lengthGoal = lengthGoal;
+    this.isMinimumLength = isMinimumLength;
+  }
+
+  // note: the only methods that will get called are setText(txt), getText(),
+  // getSummaryPassagesNoHighlight: current(), first(), next()
+  // highlightOffsetsEnums: preceding(int), and following(int)
+  //   Nonetheless we make some attempt to implement the rest; mostly delegating.
+
+  @Override
+  public String toString() {
+    String goalDesc = isMinimumLength ? "minLen" : "targetLen";
+    return getClass().getSimpleName() + "{" + goalDesc + "=" + lengthGoal + ", baseIter=" + baseIter + "}";
+  }
+
+  @Override
+  public Object clone() {
+    return new LengthGoalBreakIterator((BreakIterator) baseIter.clone(), lengthGoal, isMinimumLength);
+  }
+
+  @Override
+  public CharacterIterator getText() {
+    return baseIter.getText();
+  }
+
+  @Override
+  public void setText(String newText) {
+    baseIter.setText(newText);
+  }
+
+  @Override
+  public void setText(CharacterIterator newText) {
+    baseIter.setText(newText);
+  }
+
+  @Override
+  public int current() {
+    return baseIter.current();
+  }
+
+  @Override
+  public int first() {
+    return baseIter.first();
+  }
+
+  @Override
+  public int last() {
+    return baseIter.last();
+  }
+
+  @Override
+  public int next(int n) {
+    assert false : "Not supported";
+    return baseIter.next(n); // probably wrong
+  }
+
+  // called by getSummaryPassagesNoHighlight to generate default summary.
+  @Override
+  public int next() {
+    return following(current());
+  }
+
+  @Override
+  public int previous() {
+    assert false : "Not supported";
+    return baseIter.previous();
+  }
+
+  // called while the current position is the start of a new passage; find end of passage
+  @Override
+  public int following(int followingIdx) {
+    final int startIdx = current();
+    if (followingIdx < startIdx) {
+      assert false : "Not supported";
+      return baseIter.following(followingIdx);
+    }
+    final int targetIdx = startIdx + lengthGoal;
+    // When followingIdx >= targetIdx, we can simply delegate since it will be >= the target
+    if (followingIdx >= targetIdx - 1) {
+      return baseIter.following(followingIdx);
+    }
+    // If target exceeds the text length, return the last index.
+    if (targetIdx >= getText().getEndIndex()) {
+      return baseIter.last();
+    }
+
+    // Find closest break >= the target
+    final int afterIdx = baseIter.following(targetIdx - 1);
+    if (afterIdx == DONE) { // we're at the end; can this happen?
+      return current();
+    }
+    if (afterIdx == targetIdx) { // right on the money
+      return afterIdx;
+    }
+    if (isMinimumLength) { // thus never undershoot
+      return afterIdx;
+    }
+
+    // note: it is a shame that we invoke preceding() *in addition to* following(); BI's are sometimes expensive.
+
+    // Find closest break < target
+    final int beforeIdx = baseIter.preceding(targetIdx); // or could do baseIter.previous() but we hope the BI implements preceding()
+    if (beforeIdx <= followingIdx) { // too far back
+      return moveToBreak(afterIdx);
+    }
+
+    if (targetIdx - beforeIdx <= afterIdx - targetIdx) {
+      return beforeIdx;
+    }
+    return moveToBreak(afterIdx);
+  }
+
+  private int moveToBreak(int idx) { // precondition: idx is a known break
+    // bi.isBoundary(idx) has side-effect of moving the position.  Not obvious!
+    //boolean moved = baseIter.isBoundary(idx); // probably not particularly expensive
+    //assert moved && current() == idx;
+
+    // TODO fix: Would prefer to do "- 1" instead of "- 2" but CustomSeparatorBreakIterator has a bug.
+    int current = baseIter.following(idx - 2);
+    assert current == idx : "following() didn't move us to the expected index.";
+    return idx;
+  }
+
+  // called at start of new Passage given first word start offset
+  @Override
+  public int preceding(int offset) {
+    return baseIter.preceding(offset); // no change needed
+  }
+
+  @Override
+  public boolean isBoundary(int offset) {
+    assert false : "Not supported";
+    return baseIter.isBoundary(offset);
+  }
+}
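
A usage sketch: the factory calls come straight from the class above; hooking the result into the UnifiedHighlighter (typically by overriding its getBreakIterator(String field) method) is assumed rather than shown:

  import java.text.BreakIterator;
  import java.util.Locale;
  import org.apache.lucene.search.uhighlight.LengthGoalBreakIterator;

  // Snap passage ends to the sentence boundary closest to ~100 characters:
  BreakIterator closest = LengthGoalBreakIterator.createClosestToLength(
      BreakIterator.getSentenceInstance(Locale.ROOT), 100);

  // Or require passages of at least 60 characters (except at end of text):
  BreakIterator atLeast = LengthGoalBreakIterator.createMinLength(
      BreakIterator.getSentenceInstance(Locale.ROOT), 60);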

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea499895/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java
index d64b96e..3efb694 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/Passage.java
@@ -171,6 +171,7 @@ public class Passage {
 
   /** @lucene.internal */
   public void setEndOffset(int endOffset) {
+    assert startOffset <= endOffset;
     this.endOffset = endOffset;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ea499895/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java
new file mode 100644
index 0000000..42d2bf6
--- /dev/null
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.search.uhighlight;
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.QueryBuilder;
+
+public class LengthGoalBreakIteratorTest extends LuceneTestCase {
+  private static final String FIELD = "body";
+
+  // We test LengthGoalBreakIterator as it is used by the UnifiedHighlighter instead of directly, because it is
+  //  not a general purpose BreakIterator.  A unit test of it directly wouldn't give as much confidence.
+
+  private final Analyzer analyzer =
+      new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);//whitespace, punctuation, lowercase
+
+  // We do a '.' BreakIterator and test varying the length goal.
+  //                      0         1
+  //                      01234567890123456789
+  final String content = "Aa bb. Cc dd. Ee ff";
+
+  public void testTargetLen() throws IOException {
+    // "goal" means target length goal to find closest break
+
+    // at first word:
+    Query query = query("aa");
+    assertEquals("almost two sent",
+        "<b>Aa</b> bb.", highlightClosestToLen(content, query, 9));
+    assertEquals( "barely two sent",
+        "<b>Aa</b> bb. Cc dd.", highlightClosestToLen(content, query, 10));
+    assertEquals("long goal",
+        "<b>Aa</b> bb. Cc dd. Ee ff", highlightClosestToLen(content, query, 17 + random().nextInt(20)));
+
+    // at some word not at start of passage
+    query = query("dd");
+    assertEquals("short goal",
+        " Cc <b>dd</b>.", highlightClosestToLen(content, query, random().nextInt(5)));
+    assertEquals("almost two sent",
+        " Cc <b>dd</b>.", highlightClosestToLen(content, query, 10));
+    assertEquals("barely two sent",
+        " Cc <b>dd</b>. Ee ff", highlightClosestToLen(content, query, 11));
+    assertEquals("long goal",
+        " Cc <b>dd</b>. Ee ff", highlightClosestToLen(content, query, 12 + random().nextInt(20)));
+  }
+
+  public void testMinLen() throws IOException {
+    // minLen mode is simpler than targetLen... just test a few cases
+
+    Query query = query("dd");
+    assertEquals("almost two sent",
+        " Cc <b>dd</b>.", highlightMinLen(content, query, 6));
+    assertEquals("barely two sent",
+        " Cc <b>dd</b>. Ee ff", highlightMinLen(content, query, 7));
+  }
+
+  public void testDefaultSummaryTargetLen() throws IOException {
+    Query query = query("zz");
+    assertEquals("Aa bb.",
+        highlightClosestToLen(content, query, random().nextInt(10))); // < 10
+    assertEquals("Aa bb. Cc dd.",
+        highlightClosestToLen(content, query, 10 + 6)); // cusp of adding 3rd sentence
+    assertEquals("Aa bb. Cc dd. Ee ff",
+        highlightClosestToLen(content, query, 17 + random().nextInt(20))); // >= 14
+  }
+
+  private Query query(String qStr) {
+    return new QueryBuilder(analyzer).createBooleanQuery(FIELD, qStr);
+  }
+
+  private String highlightClosestToLen(String content, Query query, int lengthGoal) throws IOException {
+    UnifiedHighlighter highlighter = new UnifiedHighlighter(null, analyzer);
+    highlighter.setBreakIterator(() -> LengthGoalBreakIterator.createClosestToLength(new CustomSeparatorBreakIterator('.'), lengthGoal));
+    return highlighter.highlightWithoutSearcher(FIELD, query, content, 1).toString();
+  }
+
+  private String highlightMinLen(String content, Query query, int lengthGoal) throws IOException {
+    // differs from above only by "createMinLength"
+    UnifiedHighlighter highlighter = new UnifiedHighlighter(null, analyzer);
+    highlighter.setBreakIterator(() -> LengthGoalBreakIterator.createMinLength(new CustomSeparatorBreakIterator('.'), lengthGoal));
+    return highlighter.highlightWithoutSearcher(FIELD, query, content, 1).toString();
+  }
+}
\ No newline at end of file


[08/43] lucene-solr:jira/solr-8593: SOLR-9928 Unwrap Directory consistently whenever it's passed as an argument.

Posted by kr...@apache.org.
SOLR-9928 Unwrap Directory consistently whenever it's passed as an argument.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e5f39f62
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e5f39f62
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e5f39f62

Branch: refs/heads/jira/solr-8593
Commit: e5f39f62f76677a5f500af4f323c0c31afb26228
Parents: 1a95c5a
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Sat Jan 7 13:22:37 2017 +0100
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Sat Jan 7 13:24:22 2017 +0100

----------------------------------------------------------------------
 .../solr/core/MetricsDirectoryFactory.java      | 60 ++++++++------------
 1 file changed, 25 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
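The diff below repeatedly replaces an inline instanceof/getDelegate() check with a
single unwrap() helper. Unwrapping matters because the wrapped factory ("in")
typically tracks Directory instances by reference, so it must always be handed the
raw delegate rather than the metrics wrapper. A minimal sketch of the pattern,
assuming only a wrapper type that exposes getDelegate():

    // Sketch of the unwrap-before-delegating pattern:
    private static Directory unwrap(Directory dir) {
      return (dir instanceof MetricsDirectory) ? ((MetricsDirectory) dir).getDelegate() : dir;
    }

    @Override
    public void release(Directory dir) throws IOException {
      in.release(unwrap(dir));  // always hand the raw delegate to the inner factory
    }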


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e5f39f62/solr/core/src/java/org/apache/solr/core/MetricsDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/MetricsDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/MetricsDirectoryFactory.java
index 8706c61..f441579 100644
--- a/solr/core/src/java/org/apache/solr/core/MetricsDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/MetricsDirectoryFactory.java
@@ -74,21 +74,28 @@ public class MetricsDirectoryFactory extends DirectoryFactory implements SolrCor
     }
   }
 
-  @Override
-  public void doneWithDirectory(Directory dir) throws IOException {
-    // unwrap
+  /**
+   * Unwrap just one level if the argument is a {@link MetricsDirectory}
+   * @param dir directory
+   * @return delegate if the instance was a {@link MetricsDirectory}, otherwise unchanged.
+   */
+  private static Directory unwrap(Directory dir) {
     if (dir instanceof MetricsDirectory) {
-      dir = ((MetricsDirectory)dir).getDelegate();
+      return ((MetricsDirectory)dir).getDelegate();
+    } else {
+      return dir;
     }
+  }
+
+  @Override
+  public void doneWithDirectory(Directory dir) throws IOException {
+    dir = unwrap(dir);
     in.doneWithDirectory(dir);
   }
 
   @Override
   public void addCloseListener(Directory dir, CachingDirectoryFactory.CloseListener closeListener) {
-    // unwrap
-    if (dir instanceof MetricsDirectory) {
-      dir = ((MetricsDirectory)dir).getDelegate();
-    }
+    dir = unwrap(dir);
     in.addCloseListener(dir, closeListener);
   }
 
@@ -115,19 +122,13 @@ public class MetricsDirectoryFactory extends DirectoryFactory implements SolrCor
 
   @Override
   public void remove(Directory dir) throws IOException {
-    // unwrap
-    if (dir instanceof MetricsDirectory) {
-      dir = ((MetricsDirectory)dir).getDelegate();
-    }
+    dir = unwrap(dir);
     in.remove(dir);
   }
 
   @Override
   public void remove(Directory dir, boolean afterCoreClose) throws IOException {
-    // unwrap
-    if (dir instanceof MetricsDirectory) {
-      dir = ((MetricsDirectory)dir).getDelegate();
-    }
+    dir = unwrap(dir);
     in.remove(dir, afterCoreClose);
   }
 
@@ -152,8 +153,9 @@ public class MetricsDirectoryFactory extends DirectoryFactory implements SolrCor
   }
 
   @Override
-  public long size(Directory directory) throws IOException {
-    return in.size(directory);
+  public long size(Directory dir) throws IOException {
+    dir = unwrap(dir);
+    return in.size(dir);
   }
 
   @Override
@@ -183,6 +185,8 @@ public class MetricsDirectoryFactory extends DirectoryFactory implements SolrCor
 
   @Override
   public void move(Directory fromDir, Directory toDir, String fileName, IOContext ioContext) throws IOException {
+    fromDir = unwrap(fromDir);
+    toDir = unwrap(toDir);
     in.move(fromDir, toDir, fileName, ioContext);
   }
 
@@ -198,10 +202,7 @@ public class MetricsDirectoryFactory extends DirectoryFactory implements SolrCor
 
   @Override
   public void renameWithOverwrite(Directory dir, String fileName, String toName) throws IOException {
-    if (dir instanceof MetricsDirectory) {
-      dir = ((MetricsDirectory) dir).getDelegate();
-    }
-
+    dir = unwrap(dir);
     in.renameWithOverwrite(dir, fileName, toName);
   }
 
@@ -221,16 +222,8 @@ public class MetricsDirectoryFactory extends DirectoryFactory implements SolrCor
   }
 
   @Override
-  protected Directory getBaseDir(Directory dir) {
-    return in.getBaseDir(dir);
-  }
-
-  @Override
   public void incRef(Directory dir) {
-    // unwrap
-    if (dir instanceof MetricsDirectory) {
-      dir = ((MetricsDirectory)dir).getDelegate();
-    }
+    dir = unwrap(dir);
     in.incRef(dir);
   }
 
@@ -248,10 +241,7 @@ public class MetricsDirectoryFactory extends DirectoryFactory implements SolrCor
 
   @Override
   public void release(Directory dir) throws IOException {
-    // unwrap
-    if (dir instanceof MetricsDirectory) {
-      dir = ((MetricsDirectory)dir).getDelegate();
-    }
+    dir = unwrap(dir);
     in.release(dir);
   }
 


[43/43] lucene-solr:jira/solr-8593: Merge branch 'apache-https-master' into jira/solr-8593

Posted by kr...@apache.org.
Merge branch 'apache-https-master' into jira/solr-8593


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/73719471
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/73719471
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/73719471

Branch: refs/heads/jira/solr-8593
Commit: 737194712f1f5b4fa54efc7e84cc17c3b3b212b3
Parents: 4b17b82 6c6c077
Author: Kevin Risden <kr...@apache.org>
Authored: Thu Jan 12 11:51:04 2017 -0500
Committer: Kevin Risden <kr...@apache.org>
Committed: Thu Jan 12 11:51:04 2017 -0500

----------------------------------------------------------------------
 dev-tools/idea/lucene/suggest/suggest.iml       |   1 -
 lucene/CHANGES.txt                              |  31 +-
 .../lucene/analysis/TokenStreamToAutomaton.java |  11 +-
 .../lucene/search/DoubleValuesSource.java       |  27 +
 .../apache/lucene/search/LongValuesSource.java  |  27 +
 .../apache/lucene/search/TermInSetQuery.java    | 369 ++++++++++++
 .../search/UsageTrackingQueryCachingPolicy.java |   5 +-
 .../org/apache/lucene/store/MMapDirectory.java  |   2 +-
 .../lucene/analysis/TestGraphTokenizers.java    | 600 +++++++++++++++++++
 .../lucene/search/TermInSetQueryTest.java       | 328 ++++++++++
 .../demo/facet/DistanceFacetsExample.java       |  18 +-
 .../ExpressionAggregationFacetsExample.java     |   2 +-
 lucene/expressions/build.xml                    |  11 -
 .../org/apache/lucene/expressions/Bindings.java |  14 +-
 .../apache/lucene/expressions/Expression.java   |  21 +-
 .../expressions/ExpressionComparator.java       | 100 ----
 .../expressions/ExpressionFunctionValues.java   |  35 +-
 .../lucene/expressions/ExpressionRescorer.java  |  33 +-
 .../lucene/expressions/ExpressionSortField.java |  77 ---
 .../expressions/ExpressionValueSource.java      |  70 +--
 .../lucene/expressions/ScoreFunctionValues.java |  46 --
 .../lucene/expressions/ScoreValueSource.java    |  61 --
 .../lucene/expressions/SimpleBindings.java      |  30 +-
 .../expressions/js/JavascriptCompiler.java      |  15 +-
 .../apache/lucene/expressions/package-info.java |   5 +-
 .../lucene/expressions/TestDemoExpressions.java |  44 +-
 .../expressions/TestExpressionSortField.java    |   2 +-
 .../expressions/TestExpressionValueSource.java  | 111 +---
 .../expressions/js/TestCustomFunctions.java     |  20 +-
 .../expressions/js/TestJavascriptFunction.java  |   2 +-
 .../js/TestJavascriptOperations.java            |   2 +-
 .../org/apache/lucene/facet/DrillSideways.java  |  37 +-
 .../apache/lucene/facet/MultiFacetQuery.java    |  12 +-
 .../org/apache/lucene/facet/package-info.java   |   2 +-
 .../apache/lucene/facet/range/DoubleRange.java  |  28 +-
 .../facet/range/DoubleRangeFacetCounts.java     |  63 +-
 .../apache/lucene/facet/range/LongRange.java    |  28 +-
 .../facet/range/LongRangeFacetCounts.java       |  26 +-
 .../org/apache/lucene/facet/range/Range.java    |  31 -
 .../lucene/facet/taxonomy/FakeScorer.java       |  53 --
 .../taxonomy/TaxonomyFacetSumValueSource.java   | 114 ++--
 .../facet/range/TestRangeFacetCounts.java       |  61 +-
 .../TestTaxonomyFacetSumValueSource.java        |  54 +-
 .../AbstractAllGroupHeadsCollector.java         | 176 ------
 .../grouping/AbstractAllGroupsCollector.java    |  67 ---
 .../AbstractDistinctValuesCollector.java        |  59 --
 .../AbstractFirstPassGroupingCollector.java     | 354 -----------
 .../grouping/AbstractGroupFacetCollector.java   | 319 ----------
 .../AbstractSecondPassGroupingCollector.java    | 162 -----
 .../search/grouping/AllGroupHeadsCollector.java | 176 ++++++
 .../search/grouping/AllGroupsCollector.java     |  67 +++
 .../search/grouping/CollectedSearchGroup.java   |   2 +-
 .../grouping/DistinctValuesCollector.java       |  59 ++
 .../grouping/FirstPassGroupingCollector.java    | 363 +++++++++++
 .../lucene/search/grouping/GroupDocs.java       |   8 +-
 .../search/grouping/GroupFacetCollector.java    | 324 ++++++++++
 .../apache/lucene/search/grouping/Grouper.java  |  56 ++
 .../lucene/search/grouping/GroupingSearch.java  | 130 +---
 .../lucene/search/grouping/SearchGroup.java     |  23 +-
 .../grouping/SecondPassGroupingCollector.java   | 169 ++++++
 .../lucene/search/grouping/TopGroups.java       |  12 +-
 .../FunctionAllGroupHeadsCollector.java         |  32 +-
 .../function/FunctionAllGroupsCollector.java    |   4 +-
 .../FunctionDistinctValuesCollector.java        |  35 +-
 .../FunctionFirstPassGroupingCollector.java     |   6 +-
 .../grouping/function/FunctionGrouper.java      |  69 +++
 .../FunctionSecondPassGroupingCollector.java    |   6 +-
 .../term/TermAllGroupHeadsCollector.java        |  26 +-
 .../grouping/term/TermAllGroupsCollector.java   |   8 +-
 .../term/TermDistinctValuesCollector.java       |  26 +-
 .../term/TermFirstPassGroupingCollector.java    |   6 +-
 .../grouping/term/TermGroupFacetCollector.java  |  10 +-
 .../search/grouping/term/TermGrouper.java       |  81 +++
 .../term/TermSecondPassGroupingCollector.java   |   6 +-
 .../grouping/AllGroupHeadsCollectorTest.java    |  14 +-
 .../search/grouping/AllGroupsCollectorTest.java |   6 +-
 .../grouping/DistinctValuesCollectorTest.java   |  46 +-
 .../grouping/GroupFacetCollectorTest.java       |  10 +-
 .../lucene/search/grouping/TestGrouping.java    |  70 +--
 .../search/uhighlight/FieldHighlighter.java     |   4 +-
 .../uhighlight/LengthGoalBreakIterator.java     | 185 ++++++
 .../lucene/search/uhighlight/OffsetsEnum.java   |  45 +-
 .../lucene/search/uhighlight/Passage.java       |   1 +
 .../uhighlight/LengthGoalBreakIteratorTest.java | 104 ++++
 .../TestUnifiedHighlighterExtensibility.java    |  57 +-
 .../org/apache/lucene/queries/TermsQuery.java   | 381 ------------
 .../lucene/queries/function/ValueSource.java    | 111 ++++
 .../apache/lucene/queries/TermsQueryTest.java   | 339 -----------
 .../complexPhrase/ComplexPhraseQueryParser.java |   4 +-
 .../complexPhrase/TestComplexPhraseQuery.java   |   6 +
 .../prefix/TermQueryPrefixTreeStrategy.java     |  10 +-
 .../spatial/prefix/NumberRangeFacetsTest.java   |   6 +-
 lucene/suggest/build.xml                        |   4 +-
 .../suggest/DocumentValueSourceDictionary.java  |  58 +-
 .../DocumentValueSourceDictionaryTest.java      | 287 ++++++++-
 .../lucene/analysis/TestGraphTokenizers.java    | 588 ------------------
 solr/CHANGES.txt                                |  43 +-
 .../org/apache/solr/core/CoreContainer.java     |  10 +-
 .../org/apache/solr/core/DirectoryFactory.java  |   3 +-
 .../org/apache/solr/core/JmxMonitoredMap.java   |  63 +-
 .../solr/core/MetricsDirectoryFactory.java      |  60 +-
 .../src/java/org/apache/solr/core/SolrCore.java |   4 +-
 .../org/apache/solr/core/SolrInfoMBean.java     |   5 +-
 .../org/apache/solr/core/SolrXmlConfig.java     |   9 +-
 .../solr/core/StandardDirectoryFactory.java     |   3 +-
 .../apache/solr/handler/CdcrRequestHandler.java |   5 +
 .../org/apache/solr/handler/GraphHandler.java   |   1 +
 .../apache/solr/handler/PingRequestHandler.java |   5 +
 .../apache/solr/handler/ReplicationHandler.java |   5 +
 .../apache/solr/handler/RequestHandlerBase.java |   2 +-
 .../org/apache/solr/handler/SchemaHandler.java  |   5 +
 .../org/apache/solr/handler/SnapShooter.java    |   6 +-
 .../apache/solr/handler/SolrConfigHandler.java  |   2 +-
 .../org/apache/solr/handler/StreamHandler.java  |   1 +
 .../solr/handler/UpdateRequestHandler.java      |   5 +
 .../solr/handler/admin/CollectionsHandler.java  |   5 +
 .../solr/handler/admin/ConfigSetsHandler.java   |   5 +
 .../solr/handler/admin/CoreAdminHandler.java    |   5 +
 .../apache/solr/handler/admin/InfoHandler.java  |   5 +
 .../solr/handler/admin/LoggingHandler.java      |   6 +
 .../solr/handler/admin/LukeRequestHandler.java  |   5 +
 .../solr/handler/admin/MetricsHandler.java      |   5 +
 .../solr/handler/admin/PluginInfoHandler.java   |   5 +
 .../handler/admin/PropertiesRequestHandler.java |   5 +
 .../solr/handler/admin/SecurityConfHandler.java |   5 +
 .../admin/SegmentsInfoRequestHandler.java       |   5 +
 .../handler/admin/ShowFileRequestHandler.java   |   4 +
 .../handler/admin/SolrInfoMBeanHandler.java     |   5 +
 .../solr/handler/admin/SystemInfoHandler.java   |   7 +-
 .../solr/handler/admin/ThreadDumpHandler.java   |   5 +
 .../handler/admin/ZookeeperInfoHandler.java     |   5 +
 .../solr/handler/component/DebugComponent.java  |   5 +
 .../solr/handler/component/ExpandComponent.java |  30 +-
 .../solr/handler/component/FacetComponent.java  |   5 +
 .../handler/component/HighlightComponent.java   |   5 +
 .../component/HttpShardHandlerFactory.java      |  17 +-
 .../component/MoreLikeThisComponent.java        |   7 +-
 .../solr/handler/component/QueryComponent.java  |   5 +
 .../handler/component/RealTimeGetComponent.java |   5 +
 .../handler/component/SpellCheckComponent.java  |   5 +
 .../handler/component/TermVectorComponent.java  |   5 +
 .../solr/handler/component/TermsComponent.java  |   5 +
 .../solr/highlight/HighlightingPluginBase.java  |   2 +-
 .../solr/highlight/UnifiedSolrHighlighter.java  |  12 +-
 .../solr/metrics/SolrCoreMetricManager.java     |   3 +-
 .../metrics/reporters/JmxObjectNameFactory.java | 155 +++++
 .../solr/metrics/reporters/SolrJmxReporter.java | 105 ----
 .../org/apache/solr/request/SimpleFacets.java   |   4 +-
 .../java/org/apache/solr/schema/FieldType.java  |   4 +-
 .../org/apache/solr/search/CacheConfig.java     |   2 +-
 .../org/apache/solr/search/FastLRUCache.java    |   2 +-
 .../java/org/apache/solr/search/Grouping.java   |  14 +-
 .../apache/solr/search/TermsQParserPlugin.java  |   4 +-
 .../apache/solr/search/facet/FacetModule.java   |   5 +
 .../solr/search/grouping/CommandHandler.java    |   4 +-
 .../command/SearchGroupsFieldCommand.java       |   8 +-
 .../command/TopGroupsFieldCommand.java          |   4 +-
 .../org/apache/solr/search/join/GraphQuery.java |   4 +-
 .../apache/solr/search/mlt/CloudMLTQParser.java |  49 +-
 .../solr/search/mlt/SimpleMLTQParser.java       |  30 +-
 .../solr/security/PKIAuthenticationPlugin.java  |   6 +
 .../DocumentExpressionDictionaryFactory.java    |   6 +-
 .../solr/update/DirectUpdateHandler2.java       |   5 -
 .../java/org/apache/solr/update/PeerSync.java   |  42 +-
 .../org/apache/solr/update/UpdateHandler.java   |   5 +
 .../java/org/apache/solr/update/UpdateLog.java  |  74 ++-
 .../apache/solr/update/UpdateShardHandler.java  |  16 +-
 .../solr/update/UpdateShardHandlerConfig.java   |  14 +-
 .../AddSchemaFieldsUpdateProcessorFactory.java  |   3 +-
 .../SkipExistingDocumentsProcessorFactory.java  | 255 ++++++++
 .../src/java/org/apache/solr/util/DOMUtil.java  |   5 +
 .../stats/HttpClientMetricNameStrategy.java     |  28 +
 .../stats/InstrumentedHttpRequestExecutor.java  |  81 ++-
 .../resources/EditableSolrConfigAttributes.json |  16 +-
 ...dd-schema-fields-update-processor-chains.xml |   8 +-
 .../conf/solrconfig-cache-enable-disable.xml    |  80 +++
 .../collection1/conf/solrconfig-schemaless.xml  |  45 +-
 .../test-files/solr/solr-gangliareporter.xml    |   2 +-
 .../test-files/solr/solr-graphitereporter.xml   |   2 +-
 .../src/test-files/solr/solr-slf4jreporter.xml  |   4 +-
 .../solr/cloud/BasicDistributedZkTest.java      |   2 +-
 .../solr/cloud/PeerSyncReplicationTest.java     |  15 +
 .../apache/solr/cloud/TestCloudRecovery.java    |  26 +
 .../test/org/apache/solr/core/TestConfig.java   |  32 +
 .../solr/handler/admin/MBeansHandlerTest.java   |   4 +-
 .../solr/handler/admin/MetricsHandlerTest.java  |  18 +-
 .../highlight/TestUnifiedSolrHighlighter.java   |  24 +-
 .../metrics/SolrMetricsIntegrationTest.java     |   2 +-
 .../reporters/SolrGangliaReporterTest.java      |   2 +-
 .../reporters/SolrGraphiteReporterTest.java     |   2 +-
 .../schema/TestSchemalessBufferedUpdates.java   | 160 +++++
 .../ApacheLuceneSolrNearQueryBuilder.java       |  51 ++
 .../org/apache/solr/search/TestRTGBase.java     |  12 -
 .../org/apache/solr/search/TestRecovery.java    | 179 +++++-
 .../apache/solr/search/TestRecoveryHdfs.java    |  11 -
 .../apache/solr/search/TestSolrCoreParser.java  | 110 ++++
 .../apache/solr/search/TestSolrQueryParser.java |  12 +-
 .../solr/search/mlt/CloudMLTQParserTest.java    |  23 +-
 .../solr/search/mlt/SimpleMLTQParserTest.java   |  33 +-
 .../apache/solr/update/CdcrUpdateLogTest.java   |  17 -
 .../solr/update/SolrIndexMetricsTest.java       |   4 +-
 ...ipExistingDocumentsProcessorFactoryTest.java | 335 +++++++++++
 solr/example/files/conf/solrconfig.xml          |   5 +-
 .../basic_configs/conf/solrconfig.xml           |   6 +-
 .../conf/solrconfig.xml                         |   5 +-
 .../solr/common/params/HighlightParams.java     |   2 +-
 .../client/solrj/impl/CloudSolrClientTest.java  |   6 +-
 .../solrj/io/graph/GraphExpressionTest.java     |   3 +-
 .../java/org/apache/solr/SolrTestCaseJ4.java    |  22 +-
 .../java/org/apache/solr/util/TestHarness.java  |   3 +-
 solr/webapp/web/js/angular/services.js          |  46 +-
 211 files changed, 6402 insertions(+), 4097 deletions(-)
----------------------------------------------------------------------



[24/43] lucene-solr:jira/solr-8593: SOLR-9854 Relax test assertions.

Posted by kr...@apache.org.
SOLR-9854 Relax test assertions.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b9827bcb
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b9827bcb
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b9827bcb

Branch: refs/heads/jira/solr-8593
Commit: b9827bcba9ee4e1b0dea8a18c6efe4322a400b09
Parents: b017731
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Jan 9 11:16:40 2017 +0100
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Jan 9 11:16:40 2017 +0100

----------------------------------------------------------------------
 .../src/test/org/apache/solr/update/SolrIndexMetricsTest.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b9827bcb/solr/core/src/test/org/apache/solr/update/SolrIndexMetricsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/SolrIndexMetricsTest.java b/solr/core/src/test/org/apache/solr/update/SolrIndexMetricsTest.java
index e17b1bd..4f5ea69 100644
--- a/solr/core/src/test/org/apache/solr/update/SolrIndexMetricsTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SolrIndexMetricsTest.java
@@ -67,13 +67,13 @@ public class SolrIndexMetricsTest extends SolrTestCaseJ4 {
 
     // check basic index meters
     Timer timer = (Timer)metrics.get("INDEX.merge.minor");
-    assertEquals("minorMerge: " + timer.getCount(), 4, timer.getCount());
+    assertTrue("minorMerge: " + timer.getCount(), timer.getCount() >= 3);
     timer = (Timer)metrics.get("INDEX.merge.major");
     assertEquals("majorMerge: " + timer.getCount(), 0, timer.getCount());
     Meter meter = (Meter)metrics.get("INDEX.merge.major.docs");
     assertEquals("majorMergeDocs: " + meter.getCount(), 0, meter.getCount());
     meter = (Meter)metrics.get("INDEX.flush");
-    assertEquals("flush: " + meter.getCount(), 19, meter.getCount());
+    assertTrue("flush: " + meter.getCount(), meter.getCount() > 10);
 
     // check basic directory meters
     meter = (Meter)metrics.get("DIRECTORY.total.reads");
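
The relaxation above reflects that merge and flush counts depend on IndexWriter
scheduling and therefore vary from run to run; a bounded assertion keeps the
regression signal without pinning an exact, timing-dependent count:

    // The bounded-assertion idiom used in the patch above:
    assertTrue("flush: " + meter.getCount(), meter.getCount() > 10);  // lower bound, not exact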


[36/43] lucene-solr:jira/solr-8593: SOLR-9954: Prevent a failure during cleanup of a failed snapshot from swallowing the actual cause of the snapshot failure.

Posted by kr...@apache.org.
SOLR-9954: Prevent a failure during cleanup of a failed snapshot from swallowing the actual cause of the snapshot failure.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/118fc422
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/118fc422
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/118fc422

Branch: refs/heads/jira/solr-8593
Commit: 118fc422d0cff8492db99edccb3d73068cf04b52
Parents: f99c967
Author: Timothy Potter <th...@gmail.com>
Authored: Tue Jan 10 18:35:19 2017 -0700
Committer: Timothy Potter <th...@gmail.com>
Committed: Tue Jan 10 18:35:19 2017 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                            | 3 +++
 solr/core/src/java/org/apache/solr/handler/SnapShooter.java | 6 +++++-
 2 files changed, 8 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/118fc422/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 0131b7b..0ee18ba 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -353,6 +353,9 @@ Bug Fixes
   and CloudMLTQParser included extra strings from the field definitions in the query.
   (Ere Maijala via Anshum Gupta)
 
+* SOLR-9954: Prevent against failure during failed snapshot cleanup from swallowing the actual cause
+  for the snapshot to fail. (thelabdude)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/118fc422/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
index bf02e4c..a6e8110 100644
--- a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
+++ b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
@@ -231,7 +231,11 @@ public class SnapShooter {
       return details;
     } finally {
       if (!success) {
-        backupRepo.deleteDirectory(snapshotDirPath);
+        try {
+          backupRepo.deleteDirectory(snapshotDirPath);
+        } catch (Exception excDuringDelete) {
+          LOG.warn("Failed to delete "+snapshotDirPath+" after snapshot creation failed due to: "+excDuringDelete);
+        }
       }
     }
   }
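
The fix above is an instance of a general idiom: when cleanup in a finally block
can itself throw, guard it so the original failure still propagates. A generic
sketch (doWork, cleanup, and log are hypothetical names):

    boolean success = false;
    try {
      doWork();                  // the operation whose failure we must report
      success = true;
    } finally {
      if (!success) {
        try {
          cleanup();             // best-effort cleanup of partial state
        } catch (Exception cleanupFailure) {
          log.warn("cleanup failed", cleanupFailure);  // log, don't rethrow,
        }                                              // so the original exception wins
      }
    }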


[42/43] lucene-solr:jira/solr-8593: SOLR-9947 Clean up metrics and SolrInfoMBean categories. Add a hierarchical view of SolrInfoMBeans in JMX.

Posted by kr...@apache.org.
SOLR-9947 Clean up metrics and SolrInfoMBean categories. Add a hierarchical view of
SolrInfoMBeans in JMX.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6c6c0774
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6c6c0774
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6c6c0774

Branch: refs/heads/jira/solr-8593
Commit: 6c6c077435bcc5bd3f4520a70a4c678d4b3f7661
Parents: d2664b1
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Thu Jan 12 13:14:12 2017 +0100
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Thu Jan 12 13:14:12 2017 +0100

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../org/apache/solr/core/CoreContainer.java     |  10 +-
 .../org/apache/solr/core/JmxMonitoredMap.java   |  63 ++++++--
 .../org/apache/solr/core/SolrInfoMBean.java     |   5 +-
 .../apache/solr/handler/CdcrRequestHandler.java |   5 +
 .../apache/solr/handler/PingRequestHandler.java |   5 +
 .../apache/solr/handler/ReplicationHandler.java |   5 +
 .../apache/solr/handler/RequestHandlerBase.java |   2 +-
 .../org/apache/solr/handler/SchemaHandler.java  |   5 +
 .../apache/solr/handler/SolrConfigHandler.java  |   2 +-
 .../solr/handler/UpdateRequestHandler.java      |   5 +
 .../solr/handler/admin/CollectionsHandler.java  |   5 +
 .../solr/handler/admin/ConfigSetsHandler.java   |   5 +
 .../solr/handler/admin/CoreAdminHandler.java    |   5 +
 .../apache/solr/handler/admin/InfoHandler.java  |   5 +
 .../solr/handler/admin/LoggingHandler.java      |   6 +
 .../solr/handler/admin/LukeRequestHandler.java  |   5 +
 .../solr/handler/admin/MetricsHandler.java      |   5 +
 .../solr/handler/admin/PluginInfoHandler.java   |   5 +
 .../handler/admin/PropertiesRequestHandler.java |   5 +
 .../solr/handler/admin/SecurityConfHandler.java |   5 +
 .../admin/SegmentsInfoRequestHandler.java       |   5 +
 .../handler/admin/ShowFileRequestHandler.java   |   4 +
 .../handler/admin/SolrInfoMBeanHandler.java     |   5 +
 .../solr/handler/admin/SystemInfoHandler.java   |   7 +-
 .../solr/handler/admin/ThreadDumpHandler.java   |   5 +
 .../handler/admin/ZookeeperInfoHandler.java     |   5 +
 .../solr/handler/component/DebugComponent.java  |   5 +
 .../solr/handler/component/ExpandComponent.java |   5 +
 .../solr/handler/component/FacetComponent.java  |   5 +
 .../handler/component/HighlightComponent.java   |   5 +
 .../component/HttpShardHandlerFactory.java      |   2 +-
 .../component/MoreLikeThisComponent.java        |   7 +-
 .../solr/handler/component/QueryComponent.java  |   5 +
 .../handler/component/RealTimeGetComponent.java |   5 +
 .../handler/component/SpellCheckComponent.java  |   5 +
 .../handler/component/TermVectorComponent.java  |   5 +
 .../solr/handler/component/TermsComponent.java  |   5 +
 .../solr/highlight/HighlightingPluginBase.java  |   2 +-
 .../solr/metrics/SolrCoreMetricManager.java     |   3 +-
 .../metrics/reporters/JmxObjectNameFactory.java | 155 +++++++++++++++++++
 .../solr/metrics/reporters/SolrJmxReporter.java | 105 -------------
 .../apache/solr/search/facet/FacetModule.java   |   5 +
 .../solr/security/PKIAuthenticationPlugin.java  |   6 +
 .../solr/update/DirectUpdateHandler2.java       |   5 -
 .../org/apache/solr/update/UpdateHandler.java   |   5 +
 .../java/org/apache/solr/update/UpdateLog.java  |  28 ++--
 .../apache/solr/update/UpdateShardHandler.java  |   2 +-
 .../test-files/solr/solr-gangliareporter.xml    |   2 +-
 .../test-files/solr/solr-graphitereporter.xml   |   2 +-
 .../src/test-files/solr/solr-slf4jreporter.xml  |   4 +-
 .../solr/cloud/BasicDistributedZkTest.java      |   2 +-
 .../solr/handler/admin/MBeansHandlerTest.java   |   4 +-
 .../solr/handler/admin/MetricsHandlerTest.java  |  18 +--
 .../metrics/SolrMetricsIntegrationTest.java     |   2 +-
 .../reporters/SolrGangliaReporterTest.java      |   2 +-
 .../reporters/SolrGraphiteReporterTest.java     |   2 +-
 .../org/apache/solr/search/TestRecovery.java    |   2 +-
 .../client/solrj/impl/CloudSolrClientTest.java  |   6 +-
 59 files changed, 438 insertions(+), 170 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 0cf50d4..b6055fe 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -242,6 +242,9 @@ New Features
 
 * SOLR-9886: Add a 'enable' flag to caches to enable/disable them (Pushkar Raste, noble)
 
+* SOLR-9947: Clean up some SolrInfoMBean categories. Add an alternative hierarchical view in JMX
+  for SolrInfoMBeans, which uses similar conventions to SolrJmxReporter. (ab)
+
 Optimizations
 ----------------------
 * SOLR-9704: Facet Module / JSON Facet API: Optimize blockChildren facets that have

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 8a72617..3c4ed56 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -466,7 +466,7 @@ public class CoreContainer {
     coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService(
         coreContainerWorkExecutor,
         metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node)),
-        SolrMetricManager.mkName("coreContainerWorkExecutor", "threadPool"));
+        SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoMBean.Category.CONTAINER.toString(), "threadPool"));
 
     shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
     if (shardHandlerFactory instanceof SolrMetricProducer) {
@@ -518,11 +518,11 @@ public class CoreContainer {
     Gauge<Integer> unloadedCores = () -> solrCores.getAllCoreNames().size() - solrCores.getCoreNames().size();
 
     metricManager.register(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node),
-        loadedCores, true, "loaded", "cores");
+        loadedCores, true, "loaded", SolrInfoMBean.Category.CONTAINER.toString(), "cores");
     metricManager.register(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node),
-        lazyCores, true, "lazy", "cores");
+        lazyCores, true, "lazy",SolrInfoMBean.Category.CONTAINER.toString(), "cores");
     metricManager.register(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node),
-        unloadedCores, true, "unloaded", "cores");
+        unloadedCores, true, "unloaded",SolrInfoMBean.Category.CONTAINER.toString(), "cores");
 
     // setup executor to load cores in parallel
     ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService(
@@ -530,7 +530,7 @@ public class CoreContainer {
             cfg.getCoreLoadThreadCount(isZooKeeperAware()),
             new DefaultSolrThreadFactory("coreLoadExecutor")),
         metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoMBean.Group.node)),
-        SolrMetricManager.mkName("coreLoadExecutor", "threadPool"));
+        SolrMetricManager.mkName("coreLoadExecutor",SolrInfoMBean.Category.CONTAINER.toString(), "threadPool"));
     final List<Future<SolrCore>> futures = new ArrayList<>();
     try {
       List<CoreDescriptor> cds = coresLocator.discover(this);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java b/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java
index a5a27dc..b2a5c79 100644
--- a/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java
+++ b/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java
@@ -53,6 +53,8 @@ import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.SolrConfig.JmxConfiguration;
+import org.apache.solr.metrics.SolrCoreMetricManager;
+import org.apache.solr.metrics.reporters.JmxObjectNameFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -75,15 +77,21 @@ public class JmxMonitoredMap<K, V> extends
         ConcurrentHashMap<String, SolrInfoMBean> {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  private static final String REPORTER_NAME = "_jmx_";
+
   // set to true to use cached statistics NamedLists between getMBeanInfo calls to work
   // around over calling getStatistics on MBeanInfos when iterating over all attributes (SOLR-6586)
-  private boolean useCachedStatsBetweenGetMBeanInfoCalls = Boolean.getBoolean("useCachedStatsBetweenGetMBeanInfoCalls");
+  private final boolean useCachedStatsBetweenGetMBeanInfoCalls = Boolean.getBoolean("useCachedStatsBetweenGetMBeanInfoCalls");
   
-  private MBeanServer server = null;
+  private final MBeanServer server;
+
+  private final String jmxRootName;
 
-  private String jmxRootName;
+  private final String coreHashCode;
 
-  private String coreHashCode;
+  private final JmxObjectNameFactory nameFactory;
+
+  private final String registryName;
 
   public JmxMonitoredMap(String coreName, String coreHashCode,
                          final JmxConfiguration jmxConfig) {
@@ -108,6 +116,9 @@ public class JmxMonitoredMap<K, V> extends
       }
 
       if (servers == null || servers.isEmpty()) {
+        server = null;
+        registryName = null;
+        nameFactory = null;
         log.debug("No JMX servers found, not exposing Solr information with JMX.");
         return;
       }
@@ -115,20 +126,23 @@ public class JmxMonitoredMap<K, V> extends
       log.info("JMX monitoring is enabled. Adding Solr mbeans to JMX Server: "
                + server);
     } else {
+      MBeanServer newServer = null;
       try {
         // Create a new MBeanServer with the given serviceUrl
-        server = MBeanServerFactory.newMBeanServer();
+        newServer = MBeanServerFactory.newMBeanServer();
         JMXConnectorServer connector = JMXConnectorServerFactory
                 .newJMXConnectorServer(new JMXServiceURL(jmxConfig.serviceUrl),
-                        null, server);
+                        null, newServer);
         connector.start();
         log.info("JMX monitoring is enabled at " + jmxConfig.serviceUrl);
       } catch (Exception e) {
         // Release the reference
-        server = null;
         throw new RuntimeException("Could not start JMX monitoring ", e);
       }
+      server = newServer;
     }
+    registryName = SolrCoreMetricManager.createRegistryName(null, coreName);
+    nameFactory = new JmxObjectNameFactory(REPORTER_NAME + coreHashCode, registryName);
   }
 
   /**
@@ -138,12 +152,12 @@ public class JmxMonitoredMap<K, V> extends
   @Override
   public void clear() {
     if (server != null) {
-      QueryExp exp = Query.eq(Query.attr("coreHashCode"), Query.value(coreHashCode));
+      QueryExp exp = Query.or(Query.eq(Query.attr("coreHashCode"), Query.value(coreHashCode)),
+                            Query.eq(Query.attr("reporter"), Query.value(REPORTER_NAME + coreHashCode)));
       
       Set<ObjectName> objectNames = null;
       try {
-        ObjectName instance = new ObjectName(jmxRootName + ":*");
-        objectNames = server.queryNames(instance, exp);
+        objectNames = server.queryNames(null, exp);
       } catch (Exception e) {
         log.warn("Exception querying for mbeans", e);
       }
@@ -175,19 +189,39 @@ public class JmxMonitoredMap<K, V> extends
   public SolrInfoMBean put(String key, SolrInfoMBean infoBean) {
     if (server != null && infoBean != null) {
       try {
+        // back-compat name
         ObjectName name = getObjectName(key, infoBean);
         if (server.isRegistered(name))
           server.unregisterMBean(name);
         SolrDynamicMBean mbean = new SolrDynamicMBean(coreHashCode, infoBean, useCachedStatsBetweenGetMBeanInfoCalls);
         server.registerMBean(mbean, name);
+        // now register it also under new name
+        String beanName = createBeanName(infoBean, key);
+        name = nameFactory.createName(null, registryName, beanName);
+        if (server.isRegistered(name))
+          server.unregisterMBean(name);
+        server.registerMBean(mbean, name);
       } catch (Exception e) {
-        log.warn( "Failed to register info bean: " + key, e);
+        log.warn( "Failed to register info bean: key=" + key + ", infoBean=" + infoBean, e);
       }
     }
 
     return super.put(key, infoBean);
   }
 
+  private String createBeanName(SolrInfoMBean infoBean, String key) {
+    if (infoBean.getCategory() == null) {
+      throw new IllegalArgumentException("SolrInfoMBean.category must never be null: " + infoBean);
+    }
+    StringBuilder sb = new StringBuilder();
+    sb.append(infoBean.getCategory().toString());
+    sb.append('.');
+    sb.append(key);
+    sb.append('.');
+    sb.append(infoBean.getName());
+    return sb.toString();
+  }
+
   /**
    * Removes the SolrInfoMBean object at the given key and unregisters it from
    * MBeanServer
@@ -212,10 +246,17 @@ public class JmxMonitoredMap<K, V> extends
       return;
 
     try {
+      // remove legacy name
       ObjectName name = getObjectName(key, infoBean);
       if (server.isRegistered(name) && coreHashCode.equals(server.getAttribute(name, "coreHashCode"))) {
         server.unregisterMBean(name);
       }
+      // remove new name
+      String beanName = createBeanName(infoBean, key);
+      name = nameFactory.createName(null, registryName, beanName);
+      if (server.isRegistered(name)) {
+        server.unregisterMBean(name);
+      }
     } catch (Exception e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
               "Failed to unregister info bean: " + key, e);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java b/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java
index 04c8395..bf77db4 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrInfoMBean.java
@@ -30,9 +30,10 @@ import org.apache.solr.common.util.NamedList;
 public interface SolrInfoMBean {
 
   /**
-   * Category of {@link SolrCore} component.
+   * Category of Solr component.
    */
-  enum Category { CORE, QUERYHANDLER, UPDATEHANDLER, CACHE, HIGHLIGHTING, QUERYPARSER, SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, OTHER }
+  enum Category { CONTAINER, ADMIN, CORE, QUERY, UPDATE, CACHE, HIGHLIGHTER, QUERYPARSER, SPELLCHECKER,
+    SEARCHER, REPLICATION, TLOG, INDEX, DIRECTORY, HTTP, OTHER }
 
   /**
    * Top-level group of beans for a subsystem.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
index f706637..fcc4bbe 100644
--- a/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
@@ -806,6 +806,11 @@ public class CdcrRequestHandler extends RequestHandlerBase implements SolrCoreAw
     return "Manage Cross Data Center Replication";
   }
 
+  @Override
+  public Category getCategory() {
+    return Category.REPLICATION;
+  }
+
   /**
    * A thread subclass for executing a single
    * {@link org.apache.solr.handler.CdcrParams.CdcrAction#SHARDCHECKPOINT} action.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
index 4b72e0f..04b930a 100644
--- a/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
@@ -328,4 +328,9 @@ public class PingRequestHandler extends RequestHandlerBase implements SolrCoreAw
   public String getDescription() {
     return "Reports application health to a load-balancer";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
index edf5e94..b875144 100644
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@ -814,6 +814,11 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
   }
 
   @Override
+  public Category getCategory() {
+    return Category.REPLICATION;
+  }
+
+  @Override
   public String getDescription() {
     return "ReplicationHandler provides replication of index and configuration files from Master to Slaves";
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
index 1fd6c30..b70c096 100644
--- a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
+++ b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
@@ -227,7 +227,7 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
   
   @Override
   public Category getCategory() {
-    return Category.QUERYHANDLER;
+    return Category.QUERY;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java b/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
index 35e463b..9c2d45c 100644
--- a/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
@@ -252,6 +252,11 @@ public class SchemaHandler extends RequestHandlerBase implements SolrCoreAware,
   }
 
   @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
+  @Override
   public void inform(SolrCore core) {
     isImmutableConfigSet = SolrConfigHandler.getImmutable(core);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
index b6cb596..1c584b1 100644
--- a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
@@ -708,7 +708,7 @@ public class SolrConfigHandler extends RequestHandlerBase implements SolrCoreAwa
 
   @Override
   public Category getCategory() {
-    return Category.OTHER;
+    return Category.ADMIN;
   }
 
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
index 7c97331..6628368 100644
--- a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
@@ -170,6 +170,11 @@ public class UpdateRequestHandler extends ContentStreamHandlerBase implements Pe
     return "Add documents using XML (with XSLT), CSV, JSON, or javabin";
   }
 
+  @Override
+  public Category getCategory() {
+    return Category.UPDATE;
+  }
+
   public static final String DOC_PATH = "/update/json/docs";
   public static final String JSON_PATH = "/update/json";
   public static final String CSV_PATH = "/update/csv";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index e683e96..148d73c 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -335,6 +335,11 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
     return "Manage SolrCloud Collections";
   }
 
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
   public static final String SYSTEM_COLL = ".system";
 
   private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
index ab2b172..f3a8dd2 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
@@ -161,6 +161,11 @@ public class ConfigSetsHandler extends RequestHandlerBase {
     return "Manage SolrCloud ConfigSets";
   }
 
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
   enum ConfigSetOperation {
     CREATE_OP(CREATE) {
       @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
index 05a1671..a415d8a 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
@@ -275,6 +275,11 @@ public class CoreAdminHandler extends RequestHandlerBase implements PermissionNa
   }
 
   @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
+  @Override
   public Name getPermissionName(AuthorizationContext ctx) {
     String action = ctx.getParams().get(CoreAdminParams.ACTION);
     if (action == null) return CORE_READ_PERM;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
index 9c437ab..8fdac21 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
@@ -103,6 +103,11 @@ public class InfoHandler extends RequestHandlerBase {
     return "System Information";
   }
 
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
   protected PropertiesRequestHandler getPropertiesHandler() {
     return propertiesHandler;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java
index a561594..b10aed1 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java
@@ -156,4 +156,10 @@ public class LoggingHandler extends RequestHandlerBase implements SolrCoreAware
   public String getDescription() {
     return "Logging Handler";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
index d7dedf1..50f46ef 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
@@ -703,6 +703,11 @@ public class LukeRequestHandler extends RequestHandlerBase
   }
 
   @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
+  @Override
   public URL[] getDocs() {
     try {
       return new URL[] { new URL("http://wiki.apache.org/solr/LukeRequestHandler") };

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
index 428a72b..0c87875 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/MetricsHandler.java
@@ -155,6 +155,11 @@ public class MetricsHandler extends RequestHandlerBase implements PermissionName
     return "A handler to return all the metrics gathered by Solr";
   }
 
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
   enum MetricType {
     histogram(Histogram.class),
     meter(Meter.class),

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java
index 6bc34ed..a096e79 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java
@@ -90,4 +90,9 @@ public class PluginInfoHandler extends RequestHandlerBase
   public String getDescription() {
     return "Registry";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java
index bae61a7..c16cded 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java
@@ -55,4 +55,9 @@ public class PropertiesRequestHandler extends RequestHandlerBase
   public String getDescription() {
     return "Get System Properties";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
index 88e4b01..1b81722 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
@@ -163,6 +163,11 @@ public abstract class SecurityConfHandler extends RequestHandlerBase implements
     return "Edit or read security configuration";
   }
 
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
   /**
    * Gets security.json from source
    */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
index df1bbd8..1baf25a 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/SegmentsInfoRequestHandler.java
@@ -117,4 +117,9 @@ public class SegmentsInfoRequestHandler extends RequestHandlerBase {
   public String getDescription() {
     return "Lucene segments info.";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
index ea08438..805a690 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
@@ -364,4 +364,8 @@ public class ShowFileRequestHandler extends RequestHandlerBase
   public String getDescription() {
     return "Admin Config File -- view or update config files directly";
   }
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
index 4d3c2b5..f5f28c5 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
@@ -298,4 +298,9 @@ public class SolrInfoMBeanHandler extends RequestHandlerBase {
   public String getDescription() {
     return "Get Info (and statistics) for registered SolrInfoMBeans";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
index a873c09..d031d69 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
@@ -408,7 +408,12 @@ public class SystemInfoHandler extends RequestHandlerBase
   public String getDescription() {
     return "Get System Info";
   }
-  
+
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
   private static final long ONE_KB = 1024;
   private static final long ONE_MB = ONE_KB * ONE_KB;
   private static final long ONE_GB = ONE_KB * ONE_MB;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
index 3ddc9db..f0e3970 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
@@ -130,4 +130,9 @@ public class ThreadDumpHandler extends RequestHandlerBase
   public String getDescription() {
     return "Thread Dump";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java
index e2ce63d..0616ac8 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ZookeeperInfoHandler.java
@@ -97,6 +97,11 @@ public final class ZookeeperInfoHandler extends RequestHandlerBase {
     return "Fetch Zookeeper contents";
   }
 
+  @Override
+  public Category getCategory() {
+    return Category.ADMIN;
+  }
+
   /**
    * Enumeration of ways to filter collections on the graph panel.
    */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java b/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java
index 42d21e0..be21733 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java
@@ -389,6 +389,11 @@ public class DebugComponent extends SearchComponent
   }
 
   @Override
+  public Category getCategory() {
+    return Category.OTHER;
+  }
+
+  @Override
   public URL[] getDocs() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index a5cbee2..c06aab0 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -746,6 +746,11 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
   }
 
   @Override
+  public Category getCategory() {
+    return Category.QUERY;
+  }
+
+  @Override
   public URL[] getDocs() {
     try {
       return new URL[]{

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
index 90608c0..1cc05ab 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
@@ -1213,6 +1213,11 @@ public class FacetComponent extends SearchComponent {
   }
 
   @Override
+  public Category getCategory() {
+    return Category.QUERY;
+  }
+
+  @Override
   public URL[] getDocs() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
index 00ed4ef..d147be2 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
@@ -273,6 +273,11 @@ public class HighlightComponent extends SearchComponent implements PluginInfoIni
   public String getDescription() {
     return "Highlighting";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.HIGHLIGHTER;
+  }
   
   @Override
   public URL[] getDocs() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
index be6e763..87d5c3d 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
@@ -377,7 +377,7 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
 
   @Override
   public void initializeMetrics(SolrMetricManager manager, String registry, String scope) {
-    String expandedScope = SolrMetricManager.mkName(scope, SolrInfoMBean.Category.HTTP.name());
+    String expandedScope = SolrMetricManager.mkName(scope, SolrInfoMBean.Category.QUERY.name());
     clientConnectionManager.initializeMetrics(manager, registry, expandedScope);
     httpRequestExecutor.initializeMetrics(manager, registry, expandedScope);
     commExecutor = MetricUtils.instrumentedExecutorService(commExecutor,
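
A note on the rename above: SolrMetricManager.mkName(name, path...) composes
dotted hierarchical metric names, so this change moves the shard-handler HTTP
client metrics from the old HTTP category under QUERY. A hedged sketch (the
scope value is hypothetical; the joining behavior is assumed from surrounding
usage):

    String scope = "httpShardHandler"; // hypothetical
    String expandedScope = SolrMetricManager.mkName(scope, SolrInfoMBean.Category.QUERY.name());
    // assumed result: "QUERY.httpShardHandler", so dependent metrics register as
    // "QUERY.httpShardHandler.<metric>" rather than "HTTP.<scope>.<metric>"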

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java b/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
index 7cf6d39..893cdf3 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
@@ -409,7 +409,12 @@ public class MoreLikeThisComponent extends SearchComponent {
   public String getDescription() {
     return "More Like This";
   }
-  
+
+  @Override
+  public Category getCategory() {
+    return Category.QUERY;
+  }
+
   @Override
   public URL[] getDocs() {
     return null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
index 9bd5efb..88ff731 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -1370,6 +1370,11 @@ public class QueryComponent extends SearchComponent
   }
 
   @Override
+  public Category getCategory() {
+    return Category.QUERY;
+  }
+
+  @Override
   public URL[] getDocs() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
index 39e5b8a..8ce7301 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
@@ -599,6 +599,11 @@ public class RealTimeGetComponent extends SearchComponent
   }
 
   @Override
+  public Category getCategory() {
+    return Category.QUERY;
+  }
+
+  @Override
   public URL[] getDocs() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
index 4966237..a229a85 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
@@ -862,4 +862,9 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
   public String getDescription() {
     return "A Spell Checker component";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.SPELLCHECKER;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
index 60ceca0..c887277 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@@ -481,6 +481,11 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar
   public String getDescription() {
     return "A Component for working with Term Vectors";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.QUERY;
+  }
 }
 
 class FieldOptions {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
index 918efa0..b76537a 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
@@ -584,4 +584,9 @@ public class TermsComponent extends SearchComponent {
   public String getDescription() {
     return "A Component for working with Term Enumerators";
   }
+
+  @Override
+  public Category getCategory() {
+    return Category.QUERY;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java b/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
index c03a961..f60ada8 100644
--- a/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
+++ b/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
@@ -61,7 +61,7 @@ public abstract class HighlightingPluginBase implements SolrInfoMBean
   @Override
   public Category getCategory()
   {
-    return Category.HIGHLIGHTING;
+    return Category.HIGHLIGHTER;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java b/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java
index e7ca9c4..3bebcd3 100644
--- a/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java
+++ b/solr/core/src/java/org/apache/solr/metrics/SolrCoreMetricManager.java
@@ -127,8 +127,7 @@ public class SolrCoreMetricManager implements Closeable {
     return registryName;
   }
 
-  /* package visibility for tests. */
-  String createRegistryName(String collectionName, String coreName) {
+  public static String createRegistryName(String collectionName, String coreName) {
     if (collectionName == null || (collectionName != null && !coreName.startsWith(collectionName + "_"))) {
       // single core, or unknown naming scheme
       return SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, coreName);
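
Making createRegistryName public and static lets tests and other components
derive a core's registry name without holding a SolrCoreMetricManager instance.
A hedged usage sketch (names are hypothetical; the exact string is whatever
SolrMetricManager.getRegistryName produces for the core group):

    // core name follows the <collection>_<shard>_<replica> convention
    String registry = SolrCoreMetricManager.createRegistryName(
        "collection1", "collection1_shard1_replica1");
    // expected shape: a per-core registry name such as "solr.core.collection1.shard1.replica1"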

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/metrics/reporters/JmxObjectNameFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/JmxObjectNameFactory.java b/solr/core/src/java/org/apache/solr/metrics/reporters/JmxObjectNameFactory.java
new file mode 100644
index 0000000..4df5257
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/metrics/reporters/JmxObjectNameFactory.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.metrics.reporters;
+
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+
+import java.util.Arrays;
+
+import com.codahale.metrics.ObjectNameFactory;
+import org.apache.solr.metrics.SolrMetricInfo;
+
+/**
+ * Factory to create MBean names for a given metric.
+ */
+public class JmxObjectNameFactory implements ObjectNameFactory {
+
+  private final String domain;
+  private final String[] subdomains;
+  private final String reporterName;
+  private final String[] props;
+
+  /**
+   * Create ObjectName factory.
+   * @param reporterName name of the reporter
+   * @param domain JMX domain name
+   * @param additionalProperties additional properties as key, value pairs.
+   */
+  public JmxObjectNameFactory(String reporterName, String domain, String... additionalProperties) {
+    this.reporterName = reporterName;
+    this.domain = domain;
+    this.subdomains = domain.split("\\.");
+    if (additionalProperties != null && (additionalProperties.length % 2) != 0) {
+      throw new IllegalArgumentException("additionalProperties length must be even: " + Arrays.toString(additionalProperties));
+    }
+    this.props = additionalProperties;
+  }
+
+  /**
+   * Create a hierarchical name.
+   *
+   * @param type    metric class, eg. "counters", may be null for non-metric MBeans
+   * @param currentDomain  JMX domain
+   * @param name    object name
+   */
+  @Override
+  public ObjectName createName(String type, String currentDomain, String name) {
+    SolrMetricInfo metricInfo = SolrMetricInfo.of(name);
+
+    // It turns out that ObjectName(String) mostly preserves key ordering
+    // as specified in the constructor (except for the 'type' key that ends
+    // up at top level) - unlike ObjectName(String, Map) constructor
+    // that seems to have a mind of its own...
+    StringBuilder sb = new StringBuilder();
+    if (domain.equals(currentDomain)) {
+      if (subdomains != null && subdomains.length > 1) {
+        // use only first segment as domain
+        sb.append(subdomains[0]);
+        sb.append(':');
+        // use remaining segments as properties
+        for (int i = 1; i < subdomains.length; i++) {
+          if (i > 1) {
+            sb.append(',');
+          }
+          sb.append("dom");
+          sb.append(String.valueOf(i));
+          sb.append('=');
+          sb.append(subdomains[i]);
+        }
+        sb.append(','); // separate from other properties
+      } else {
+        sb.append(currentDomain);
+        sb.append(':');
+      }
+    } else {
+      sb.append(currentDomain);
+      sb.append(':');
+    }
+    sb.append("reporter=");
+    sb.append(reporterName);
+    sb.append(',');
+    if (metricInfo != null) {
+      sb.append("category=");
+      sb.append(metricInfo.category.toString());
+      sb.append(",scope=");
+      sb.append(metricInfo.scope);
+      // we could also split by type, but don't call it 'type' :)
+      // if (type != null) {
+      //   sb.append(",class=");
+      //   sb.append(type);
+      // }
+      sb.append(",name=");
+      sb.append(metricInfo.name);
+    } else {
+      // make dotted names into hierarchies
+      String[] path = name.split("\\.");
+      for (int i = 0; i < path.length - 1; i++) {
+        if (i > 0) {
+          sb.append(',');
+        }
+        sb.append("name"); sb.append(String.valueOf(i));
+        sb.append('=');
+        sb.append(path[i]);
+      }
+      if (path.length > 1) {
+        sb.append(',');
+      }
+      // split by type
+      // if (type != null) {
+      //   sb.append("class=");
+      //   sb.append(type);
+      // }
+      sb.append("name=");
+      sb.append(path[path.length - 1]);
+    }
+    if (props != null && props.length > 0) {
+      for (int i = 0; i < props.length; i += 2) {
+        if (props[i] == null || props[i].isEmpty()) {
+          continue;
+        }
+        if (props[i + 1] == null || props[i + 1].isEmpty()) {
+          continue;
+        }
+        sb.append(',');
+        sb.append(props[i]);
+        sb.append('=');
+        sb.append(props[i + 1]);
+      }
+    }
+
+    ObjectName objectName;
+
+    try {
+      objectName = new ObjectName(sb.toString());
+    } catch (MalformedObjectNameException e) {
+      throw new RuntimeException(sb.toString(), e);
+    }
+
+    return objectName;
+  }
+}
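
A short usage sketch of the now-public factory (values are hypothetical; the
expected ObjectName follows from the createName() logic above, with the domain
split into dom1/dom2 properties and the metric name parsed by SolrMetricInfo.of):

    JmxObjectNameFactory factory = new JmxObjectNameFactory("test", "solr.core.collection1");
    ObjectName n = factory.createName("gauges", "solr.core.collection1", "QUERY./select.requests");
    // expected:
    //   solr:dom1=core,dom2=collection1,reporter=test,category=QUERY,scope=/select,name=requests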

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
index 47fbf11..0e78eee 100644
--- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
+++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
@@ -17,8 +17,6 @@
 package org.apache.solr.metrics.reporters;
 
 import javax.management.MBeanServer;
-import javax.management.MalformedObjectNameException;
-import javax.management.ObjectName;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
@@ -26,9 +24,7 @@ import java.lang.management.ManagementFactory;
 import java.util.Locale;
 
 import com.codahale.metrics.JmxReporter;
-import com.codahale.metrics.ObjectNameFactory;
 import org.apache.solr.core.PluginInfo;
-import org.apache.solr.metrics.SolrMetricInfo;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricReporter;
 import org.apache.solr.util.JmxUtil;
@@ -180,105 +176,4 @@ public class SolrJmxReporter extends SolrMetricReporter {
         getClass().getName(), Integer.toHexString(hashCode()), domain, serviceUrl, agentId);
   }
 
-  /**
-   * Factory to create MBean names for a given metric.
-   */
-  private static class JmxObjectNameFactory implements ObjectNameFactory {
-
-    private final String domain;
-    private final String[] subdomains;
-    private final String reporterName;
-
-    JmxObjectNameFactory(String reporterName, String domain) {
-      this.reporterName = reporterName;
-      this.domain = domain;
-      this.subdomains = domain.split("\\.");
-    }
-
-    /**
-     * Create a hierarchical name of a metric.
-     *
-     * @param type    metric class, eg. "counters"
-     * @param currentDomain  JMX domain
-     * @param name    metric name
-     */
-    @Override
-    public ObjectName createName(String type, String currentDomain, String name) {
-      SolrMetricInfo metricInfo = SolrMetricInfo.of(name);
-
-      // It turns out that ObjectName(String) mostly preserves key ordering
-      // as specified in the constructor (except for the 'type' key that ends
-      // up at top level) - unlike ObjectName(String, Map) constructor
-      // that seems to have a mind of its own...
-      StringBuilder sb = new StringBuilder();
-      if (domain.equals(currentDomain)) {
-        if (subdomains != null && subdomains.length > 1) {
-          // use only first segment as domain
-          sb.append(subdomains[0]);
-          sb.append(':');
-          // use remaining segments as properties
-          for (int i = 1; i < subdomains.length; i++) {
-            if (i > 1) {
-              sb.append(',');
-            }
-            sb.append("dom");
-            sb.append(String.valueOf(i));
-            sb.append('=');
-            sb.append(subdomains[i]);
-          }
-          sb.append(','); // separate from other properties
-        } else {
-          sb.append(currentDomain);
-          sb.append(':');
-        }
-      } else {
-        sb.append(currentDomain);
-        sb.append(':');
-      }
-      sb.append("reporter=");
-      sb.append(reporterName);
-      sb.append(',');
-      if (metricInfo != null) {
-        sb.append("category=");
-        sb.append(metricInfo.category.toString());
-        sb.append(",scope=");
-        sb.append(metricInfo.scope);
-        // we could also split by type, but don't call it 'type' :)
-        // sb.append(",class=");
-        //sb.append(type);
-        sb.append(",name=");
-        sb.append(metricInfo.name);
-      } else {
-        // make dotted names into hierarchies
-        String[] path = name.split("\\.");
-        for (int i = 0; i < path.length - 1; i++) {
-          if (i > 0) {
-            sb.append(',');
-          }
-          sb.append("name"); sb.append(String.valueOf(i));
-          sb.append('=');
-          sb.append(path[i]);
-        }
-        if (path.length > 1) {
-          sb.append(',');
-        }
-        // split by type
-        // sb.append("class=");
-        // sb.append(type);
-        sb.append("name=");
-        sb.append(path[path.length - 1]);
-      }
-
-      ObjectName objectName;
-
-      try {
-        objectName = new ObjectName(sb.toString());
-      } catch (MalformedObjectNameException e) {
-        throw new RuntimeException(sb.toString(), e);
-      }
-
-      return objectName;
-    }
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java b/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java
index f8d677a..87aaa8f 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetModule.java
@@ -292,6 +292,11 @@ public class FacetModule extends SearchComponent {
   }
 
   @Override
+  public Category getCategory() {
+    return Category.QUERY;
+  }
+
+  @Override
   public String getSource() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/security/PKIAuthenticationPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/security/PKIAuthenticationPlugin.java b/solr/core/src/java/org/apache/solr/security/PKIAuthenticationPlugin.java
index 9de9e42..fdd4408 100644
--- a/solr/core/src/java/org/apache/solr/security/PKIAuthenticationPlugin.java
+++ b/solr/core/src/java/org/apache/solr/security/PKIAuthenticationPlugin.java
@@ -235,6 +235,12 @@ public class PKIAuthenticationPlugin extends AuthenticationPlugin implements Htt
       public String getDescription() {
         return "Return the public key of this server";
       }
+
+      @Override
+      public Category getCategory() {
+        return Category.ADMIN;
+      }
+
     };
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
index 73731ad..035ae8d 100644
--- a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
+++ b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@@ -850,11 +850,6 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
   }
 
   @Override
-  public Category getCategory() {
-    return Category.UPDATEHANDLER;
-  }
-
-  @Override
   public String getSource() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
index f52e353..1cf8a3f 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
@@ -200,4 +200,9 @@ public abstract class UpdateHandler implements SolrInfoMBean {
   }
 
   public abstract void split(SplitIndexCommand cmd) throws IOException;
+
+  @Override
+  public Category getCategory() {
+    return Category.UPDATE;
+  }
 }
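
With this default in place, DirectUpdateHandler2's own override (removed in the
previous hunk) becomes redundant: every UpdateHandler subclass now reports under
UPDATE unless it deliberately overrides getCategory(). A small sketch (the
SolrCore reference is hypothetical):

    UpdateHandler uh = core.getUpdateHandler();
    assert uh.getCategory() == SolrInfoMBean.Category.UPDATE;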

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 626dcd6..7c2dae6 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -103,7 +103,20 @@ public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer {
       }
     }
   }
-  public enum State { REPLAYING, BUFFERING, APPLYING_BUFFERED, ACTIVE }
+
+  // NOTE: when adding new states make sure to keep existing numbers, because external metrics
+  // monitoring may depend on these values being stable.
+  public enum State { REPLAYING(0), BUFFERING(1), APPLYING_BUFFERED(2), ACTIVE(3);
+    private final int value;
+
+    State(final int value) {
+      this.value = value;
+    }
+
+    public int getValue() {
+      return value;
+    }
+  }
 
   public static final int ADD = 0x01;
   public static final int DELETE = 0x02;
@@ -365,19 +378,14 @@ public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer {
       }
     };
     replayLogsCountGauge = () -> logs.size();
-    replayBytesGauge = () -> {
-      if (state == State.REPLAYING) {
-        return getTotalLogsSize();
-      } else {
-        return 0L;
-      }
-    };
+    replayBytesGauge = () -> getTotalLogsSize();
+
     manager.register(registry, bufferedOpsGauge, true, "ops", scope, "buffered");
     manager.register(registry, replayLogsCountGauge, true, "logs", scope, "replay", "remaining");
     manager.register(registry, replayBytesGauge, true, "bytes", scope, "replay", "remaining");
-    applyingBufferedOpsMeter = manager.meter(registry, "ops", scope, "applying_buffered");
+    applyingBufferedOpsMeter = manager.meter(registry, "ops", scope, "applyingBuffered");
     replayOpsMeter = manager.meter(registry, "ops", scope, "replay");
-    stateGauge = () -> state.ordinal();
+    stateGauge = () -> state.getValue();
     manager.register(registry, stateGauge, true, "state", scope);
   }
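
The pinned values matter because ordinal() is positional: inserting a new state
mid-enum would silently renumber everything after it, breaking external
dashboards. getValue() stays stable by construction:

    int stable  = UpdateLog.State.BUFFERING.getValue(); // always 1, even if the enum grows
    int fragile = UpdateLog.State.BUFFERING.ordinal();  // shifts if a constant is inserted earlier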
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
index 67447a3..5affae6 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
@@ -128,7 +128,7 @@ public class UpdateShardHandler implements SolrMetricProducer, SolrInfoMBean {
 
   @Override
   public Category getCategory() {
-    return Category.HTTP;
+    return Category.UPDATE;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test-files/solr/solr-gangliareporter.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/solr-gangliareporter.xml b/solr/core/src/test-files/solr/solr-gangliareporter.xml
index 9e7233c..4327388 100644
--- a/solr/core/src/test-files/solr/solr-gangliareporter.xml
+++ b/solr/core/src/test-files/solr/solr-gangliareporter.xml
@@ -24,7 +24,7 @@
     <!-- for unit tests this is set to 1 second - DO NOT USE THIS VALUE IN PRODUCTION! -->
     <int name="period">1</int>
     <str name="prefix">test</str>
-    <str name="filter">cores</str>
+    <str name="filter">CONTAINER.cores</str>
     <!-- this is only used when unit testing - DO NOT USE otherwise! -->
     <bool name="testing">true</bool>
   </reporter>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test-files/solr/solr-graphitereporter.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/solr-graphitereporter.xml b/solr/core/src/test-files/solr/solr-graphitereporter.xml
index a0557df..7c7f5ca 100644
--- a/solr/core/src/test-files/solr/solr-graphitereporter.xml
+++ b/solr/core/src/test-files/solr/solr-graphitereporter.xml
@@ -24,7 +24,7 @@
     <!-- for unit tests this is set to 1 second - DO NOT USE THIS VALUE IN PRODUCTION! -->
     <int name="period">1</int>
     <str name="prefix">test</str>
-    <str name="filter">cores</str>
+    <str name="filter">CONTAINER.cores</str>
     <bool name="pickled">false</bool>
   </reporter>
  </metrics>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test-files/solr/solr-slf4jreporter.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/solr-slf4jreporter.xml b/solr/core/src/test-files/solr/solr-slf4jreporter.xml
index 1a08416..f3144ca 100644
--- a/solr/core/src/test-files/solr/solr-slf4jreporter.xml
+++ b/solr/core/src/test-files/solr/solr-slf4jreporter.xml
@@ -22,13 +22,13 @@
     <!-- for unit tests this is set to 1 second - DO NOT USE THIS VALUE IN PRODUCTION! -->
     <int name="period">1</int>
     <str name="prefix">test</str>
-    <str name="filter">cores</str>
+    <str name="filter">CONTAINER.cores</str>
   </reporter>
   <reporter name="test2" group="node" class="org.apache.solr.metrics.reporters.SolrSlf4jReporter">
     <!-- for unit tests this is set to 1 second - DO NOT USE THIS VALUE IN PRODUCTION! -->
     <int name="period">1</int>
     <str name="prefix">test</str>
-    <str name="filter">cores</str>
+    <str name="filter">CONTAINER.cores</str>
     <str name="logger">foobar</str>
   </reporter>
  </metrics>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 906116e..25c483b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -754,7 +754,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
       QueryRequest req = new QueryRequest(params);
       NamedList<Object> resp = client.request(req);
       NamedList mbeans = (NamedList) resp.get("solr-mbeans");
-      NamedList uhandlerCat = (NamedList) mbeans.get("UPDATEHANDLER");
+      NamedList uhandlerCat = (NamedList) mbeans.get("UPDATE");
       NamedList uhandler = (NamedList) uhandlerCat.get("updateHandler");
       NamedList stats = (NamedList) uhandler.get("stats");
       return (Long) stats.get("commits");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java
index 7169888..21634b7 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java
@@ -56,7 +56,7 @@ public class MBeansHandlerTest extends SolrTestCaseJ4 {
     NamedList<NamedList<NamedList<Object>>> diff = SolrInfoMBeanHandler.fromXML(xml);
 
     // The stats bean for SolrInfoMBeanHandler
-    NamedList stats = (NamedList)diff.get("QUERYHANDLER").get("/admin/mbeans").get("stats");
+    NamedList stats = (NamedList)diff.get("ADMIN").get("/admin/mbeans").get("stats");
     
     //System.out.println("stats:"+stats);
     assertEquals("Was: 1, Now: 2, Delta: 1", stats.get("requests"));
@@ -67,7 +67,7 @@ public class MBeansHandlerTest extends SolrTestCaseJ4 {
         "key","org.apache.solr.handler.admin.CollectionsHandler"
     ));
     NamedList<NamedList<NamedList<Object>>> nl = SolrInfoMBeanHandler.fromXML(xml);
-    assertNotNull( nl.get("QUERYHANDLER").get("org.apache.solr.handler.admin.CollectionsHandler"));
+    assertNotNull( nl.get("ADMIN").get("org.apache.solr.handler.admin.CollectionsHandler"));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java
index a3e724a..edab3ce 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/MetricsHandlerTest.java
@@ -51,10 +51,10 @@ public class MetricsHandlerTest extends SolrTestCaseJ4 {
     assertNotNull(((NamedList) nl.get("SEARCHER.new.errors")).get("count"));
     assertEquals(0L, ((NamedList) nl.get("SEARCHER.new.errors")).get("count"));
     nl = (NamedList) values.get("solr.node");
-    assertNotNull(nl.get("cores.loaded")); // int gauge
-    assertEquals(1, ((NamedList) nl.get("cores.loaded")).get("value"));
-    assertNotNull(nl.get("QUERYHANDLER./admin/authorization.clientErrors")); // timer type
-    assertEquals(5, ((NamedList) nl.get("QUERYHANDLER./admin/authorization.clientErrors")).size());
+    assertNotNull(nl.get("CONTAINER.cores.loaded")); // int gauge
+    assertEquals(1, ((NamedList) nl.get("CONTAINER.cores.loaded")).get("value"));
+    assertNotNull(nl.get("ADMIN./admin/authorization.clientErrors")); // timer type
+    assertEquals(5, ((NamedList) nl.get("ADMIN./admin/authorization.clientErrors")).size());
 
     resp = new SolrQueryResponse();
     handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "jvm,jetty"), resp);
@@ -91,10 +91,10 @@ public class MetricsHandlerTest extends SolrTestCaseJ4 {
     assertEquals(1, values.size());
     values = (NamedList) values.get("solr.node");
     assertNotNull(values);
-    assertNull(values.get("QUERYHANDLER./admin/authorization.errors")); // this is a timer node
+    assertNull(values.get("ADMIN./admin/authorization.errors")); // this is a timer node
 
     resp = new SolrQueryResponse();
-    handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "prefix", "cores"), resp);
+    handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "prefix", "CONTAINER.cores"), resp);
     values = resp.getValues();
     assertNotNull(values.get("metrics"));
     values = (NamedList) values.get("metrics");
@@ -105,10 +105,10 @@ public class MetricsHandlerTest extends SolrTestCaseJ4 {
     assertEquals(3, ((NamedList)values.get("solr.node")).size());
     assertNotNull(values.get("solr.node"));
     values = (NamedList) values.get("solr.node");
-    assertNotNull(values.get("cores.lazy")); // this is a gauge node
+    assertNotNull(values.get("CONTAINER.cores.lazy")); // this is a gauge node
 
     resp = new SolrQueryResponse();
-    handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "jvm", "prefix", "cores"), resp);
+    handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "jvm", "prefix", "CONTAINER.cores"), resp);
     values = resp.getValues();
     assertNotNull(values.get("metrics"));
     values = (NamedList) values.get("metrics");
@@ -117,7 +117,7 @@ public class MetricsHandlerTest extends SolrTestCaseJ4 {
     assertNull(values.get("solr.node"));
 
     resp = new SolrQueryResponse();
-    handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "node", "type", "timer", "prefix", "cores"), resp);
+    handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json", "group", "node", "type", "timer", "prefix", "CONTAINER.cores"), resp);
     values = resp.getValues();
     assertNotNull(values.get("metrics"));
     values = (NamedList) values.get("metrics");
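
Mirroring the assertions above, a minimal sketch of filtering the metrics
endpoint by the new prefixed names (assuming the req(...) helper from
SolrTestCaseJ4, as used throughout this test):

    SolrQueryResponse rsp = new SolrQueryResponse();
    handler.handleRequestBody(req(CommonParams.QT, "/admin/metrics", CommonParams.WT, "json",
        "group", "node", "prefix", "CONTAINER.cores"), rsp);
    NamedList metrics = (NamedList) rsp.getValues().get("metrics");
    NamedList node = (NamedList) metrics.get("solr.node");
    // only the CONTAINER.cores.* gauges (loaded, lazy, unloaded) should remain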

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java b/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java
index c6449ac..27c038b 100644
--- a/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/metrics/SolrMetricsIntegrationTest.java
@@ -51,7 +51,7 @@ public class SolrMetricsIntegrationTest extends SolrTestCaseJ4 {
   private static final String MULTIREGISTRY = "multiregistry";
   private static final String[] INITIAL_REPORTERS = {REPORTER_NAMES[0], REPORTER_NAMES[1], UNIVERSAL, SPECIFIC, MULTIGROUP, MULTIREGISTRY};
   private static final String[] RENAMED_REPORTERS = {REPORTER_NAMES[0], REPORTER_NAMES[1], UNIVERSAL, MULTIGROUP};
-  private static final SolrInfoMBean.Category HANDLER_CATEGORY = SolrInfoMBean.Category.QUERYHANDLER;
+  private static final SolrInfoMBean.Category HANDLER_CATEGORY = SolrInfoMBean.Category.QUERY;
 
   private CoreContainer cc;
   private SolrMetricManager metricManager;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java
index d9675ef..c50ff3c 100644
--- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java
+++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGangliaReporterTest.java
@@ -75,7 +75,7 @@ public class SolrGangliaReporterTest extends SolrTestCaseJ4 {
     assertTrue(names.size() >= 3);
     String[] frozenNames = (String[])names.toArray(new String[names.size()]);
     for (String name : frozenNames) {
-      assertTrue(name, name.startsWith("test.solr.node.cores."));
+      assertTrue(name, name.startsWith("test.solr.node.CONTAINER.cores."));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java
index 3d1c482..e58c9dd 100644
--- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java
+++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java
@@ -71,7 +71,7 @@ public class SolrGraphiteReporterTest extends SolrTestCaseJ4 {
       assertTrue(mock.lines.size() >= 3);
       String[] frozenLines = (String[])mock.lines.toArray(new String[mock.lines.size()]);
       for (String line : frozenLines) {
-        assertTrue(line, line.startsWith("test.solr.node.cores."));
+        assertTrue(line, line.startsWith("test.solr.node.CONTAINER.cores."));
       }
     } finally {
       mock.close();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/core/src/test/org/apache/solr/search/TestRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecovery.java b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
index cc3ca47..15aed5d 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
@@ -366,7 +366,7 @@ public class TestRecovery extends SolrTestCaseJ4 {
       assertEquals(UpdateLog.State.BUFFERING.ordinal(), state.getValue().intValue());
       Gauge<Integer> bufferedOps = (Gauge<Integer>)metrics.get("TLOG.buffered.ops");
       int initialOps = bufferedOps.getValue();
-      Meter applyingBuffered = (Meter)metrics.get("TLOG.applying_buffered.ops");
+      Meter applyingBuffered = (Meter)metrics.get("TLOG.applyingBuffered.ops");
       long initialApplyingOps = applyingBuffered.getCount();
 
       // simulate updates from a leader

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c6c0774/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index 541ffed..cbaaa1b 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -391,7 +391,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
 
   private Long getNumRequests(String baseUrl, String collectionName) throws
       SolrServerException, IOException {
-    return getNumRequests(baseUrl, collectionName, "QUERYHANDLER", "standard", false);
+    return getNumRequests(baseUrl, collectionName, "QUERY", "standard", false);
   }
 
   private Long getNumRequests(String baseUrl, String collectionName, String category, String key, boolean returnNumErrors) throws
@@ -431,7 +431,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
         for (String adminPath : adminPathToMbean.keySet()) {
           long errorsBefore = 0;
           for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
-            Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "QUERYHANDLER", adminPathToMbean.get(adminPath), true);
+            Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "QUERY", adminPathToMbean.get(adminPath), true);
             errorsBefore += numRequests;
             log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
           }
@@ -448,7 +448,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
           }
           long errorsAfter = 0;
           for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
-            Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "QUERYHANDLER", adminPathToMbean.get(adminPath), true);
+            Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "QUERY", adminPathToMbean.get(adminPath), true);
             errorsAfter += numRequests;
             log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
           }


[38/43] lucene-solr:jira/solr-8593: SOLR-9918: Remove unused import to make precommit happy

Posted by kr...@apache.org.
SOLR-9918: Remove unused import to make precommit happy


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/24372047
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/24372047
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/24372047

Branch: refs/heads/jira/solr-8593
Commit: 2437204730130dc8c03efb111ec7d4db456189ed
Parents: d66bfba
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Wed Jan 11 12:20:42 2017 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Wed Jan 11 12:20:42 2017 +0530

----------------------------------------------------------------------
 .../update/processor/SkipExistingDocumentsProcessorFactoryTest.java | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/24372047/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java
index 2afe35c..7e17f71 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/SkipExistingDocumentsProcessorFactoryTest.java
@@ -20,7 +20,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
 import java.io.IOException;


[22/43] lucene-solr:jira/solr-8593: LUCENE-7611: Remove queries javadoc link from suggester help page

Posted by kr...@apache.org.
LUCENE-7611: Remove queries javadoc link from suggester help page


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/322ad889
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/322ad889
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/322ad889

Branch: refs/heads/jira/solr-8593
Commit: 322ad889604688db9d22ba7dfa1e389a01e34857
Parents: 8bc151d
Author: Alan Woodward <ro...@apache.org>
Authored: Sun Jan 8 20:07:30 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sun Jan 8 20:07:30 2017 +0000

----------------------------------------------------------------------
 lucene/suggest/build.xml | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/322ad889/lucene/suggest/build.xml
----------------------------------------------------------------------
diff --git a/lucene/suggest/build.xml b/lucene/suggest/build.xml
index bc4bed5..47d4a63 100644
--- a/lucene/suggest/build.xml
+++ b/lucene/suggest/build.xml
@@ -38,7 +38,6 @@
     <invoke-module-javadoc>
       <links>
         <link href="../analyzers-common"/>
-        <link href="../queries"/>
       </links>
     </invoke-module-javadoc>
   </target>


[20/43] lucene-solr:jira/solr-8593: SOLR-9859: Don't log error on NoSuchFileException (Cao Manh Dat)

Posted by kr...@apache.org.
SOLR-9859: Don't log error on NoSuchFileException (Cao Manh Dat)


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/25290ab5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/25290ab5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/25290ab5

Branch: refs/heads/jira/solr-8593
Commit: 25290ab5d6af25c05cbbb4738f49329273a7d693
Parents: f985fca
Author: markrmiller <ma...@apache.org>
Authored: Sun Jan 8 09:21:43 2017 -0500
Committer: markrmiller <ma...@apache.org>
Committed: Sun Jan 8 09:21:43 2017 -0500

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/core/DirectoryFactory.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/25290ab5/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
index 136a0a6..ac18d7e 100644
--- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
@@ -22,6 +22,7 @@ import java.io.FileFilter;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
+import java.nio.file.NoSuchFileException;
 import java.util.Collection;
 import java.util.Collections;
 
@@ -191,7 +192,7 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
   public void renameWithOverwrite(Directory dir, String fileName, String toName) throws IOException {
     try {
       dir.deleteFile(toName);
-    } catch (FileNotFoundException e) {
+    } catch (FileNotFoundException | NoSuchFileException e) {
 
     } catch (Exception e) {
       log.error("Exception deleting file", e);

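For context, the two exception types come from the two file APIs in play: legacy java.io code reports a missing file as FileNotFoundException, while the java.nio.file API throws NoSuchFileException, and a Directory implementation may surface either. A minimal sketch of the same "delete if present, stay quiet if absent" pattern, using a plain NIO Path rather than a Lucene Directory:

  import java.io.FileNotFoundException;
  import java.io.IOException;
  import java.nio.file.Files;
  import java.nio.file.NoSuchFileException;
  import java.nio.file.Path;

  public class BestEffortDelete {
    /** Deletes {@code path} if present; a missing file is expected, not an error. */
    public static void delete(Path path) throws IOException {
      try {
        Files.delete(path); // NIO reports a missing file as NoSuchFileException
      } catch (FileNotFoundException | NoSuchFileException e) {
        // already gone: nothing to do, and nothing worth an ERROR log
      }
    }
  }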

[23/43] lucene-solr:jira/solr-8593: SOLR-9777: IndexFingerprinting should use getCombinedCoreAndDeletesKey() instead of getCoreCacheKey() for per-segment caching

Posted by kr...@apache.org.
SOLR-9777: IndexFingerprinting should use getCombinedCoreAndDeletesKey() instead of getCoreCacheKey() for per-segment caching


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b0177312
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b0177312
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b0177312

Branch: refs/heads/jira/solr-8593
Commit: b0177312032e039673bfbbd42cd1dca09fb93833
Parents: 322ad88
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Mon Jan 9 08:33:06 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Mon Jan 9 08:33:06 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                      | 2 ++
 solr/core/src/java/org/apache/solr/core/SolrCore.java | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b0177312/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 11151d7..49d24df 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -406,6 +406,8 @@ Other Changes
 
 * SOLR-9944: Map the nodes function name to the GatherNodesStream (Joel Bernstein)
 
+* SOLR-9777: IndexFingerprinting should use getCombinedCoreAndDeletesKey() instead of getCoreCacheKey() for per-segment caching (Ishan Chattopadhyaya)
+
 ==================  6.3.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b0177312/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index 08072e1..697e008 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -1687,7 +1687,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
   public IndexFingerprint getIndexFingerprint(SolrIndexSearcher searcher, LeafReaderContext ctx, long maxVersion)
       throws IOException {
     IndexFingerprint f = null;
-    f = perSegmentFingerprintCache.get(ctx.reader().getCoreCacheKey());
+    f = perSegmentFingerprintCache.get(ctx.reader().getCombinedCoreAndDeletesKey());
     // fingerprint is either not cached or
     // if we want fingerprint only up to a version less than maxVersionEncountered in the segment, or
     // documents were deleted from segment for which fingerprint was cached
@@ -1698,7 +1698,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
       // cache fingerprint for the segment only if all the versions in the segment are included in the fingerprint
       if (f.getMaxVersionEncountered() == f.getMaxInHash()) {
         log.info("Caching fingerprint for searcher:{} leafReaderContext:{} mavVersion:{}", searcher, ctx, maxVersion);
-        perSegmentFingerprintCache.put(ctx.reader().getCoreCacheKey(), f);
+        perSegmentFingerprintCache.put(ctx.reader().getCombinedCoreAndDeletesKey(), f);
       }
 
     } else {

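The key choice matters because getCoreCacheKey() is shared by every reader over the same segment core, so a fingerprint cached under it would survive later deletions. getCombinedCoreAndDeletesKey() changes whenever the segment's live docs change, which invalidates the entry automatically. A hedged sketch of the idea; the class and field names here are illustrative, not Solr's actual cache:

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;
  import org.apache.lucene.index.LeafReader;

  class PerSegmentFingerprintCache<V> {
    private final Map<Object, V> cache = new ConcurrentHashMap<>();

    V get(LeafReader reader) {
      // keyed on core + deletes: a reopen that only changes deletions
      // produces a new key, so a stale fingerprint is never returned
      return cache.get(reader.getCombinedCoreAndDeletesKey());
    }

    void put(LeafReader reader, V fingerprint) {
      cache.put(reader.getCombinedCoreAndDeletesKey(), fingerprint);
    }
  }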

[33/43] lucene-solr:jira/solr-8593: SOLR-9950: Check the difference in counts - meter may not be zero at this point.

Posted by kr...@apache.org.
SOLR-9950: Check the difference in counts - meter may not be zero at this point.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/98422e0d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/98422e0d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/98422e0d

Branch: refs/heads/jira/solr-8593
Commit: 98422e0dc0c7de4635e1bc80bcd5ca70a8d2761a
Parents: 2048b82
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Tue Jan 10 11:41:40 2017 +0100
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Tue Jan 10 11:44:16 2017 +0100

----------------------------------------------------------------------
 .../test/org/apache/solr/search/TestRecovery.java    | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/98422e0d/solr/core/src/test/org/apache/solr/search/TestRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecovery.java b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
index 7bd0951..f3e98dd 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
@@ -139,6 +139,8 @@ public class TestRecovery extends SolrTestCaseJ4 {
       assertTrue(replayingLogs.getValue().intValue() > 0);
       Gauge<Long> replayingDocs = (Gauge<Long>)metrics.get("TLOG.replay.remaining.bytes");
       assertTrue(replayingDocs.getValue().longValue() > 0);
+      Meter replayDocs = (Meter)metrics.get("TLOG.replay.ops");
+      long initialOps = replayDocs.getCount();
 
       // unblock recovery
       logReplay.release(1000);
@@ -151,8 +153,7 @@ public class TestRecovery extends SolrTestCaseJ4 {
 
       assertJQ(req("q","*:*") ,"/response/numFound==3");
 
-      Meter replayDocs = (Meter)metrics.get("TLOG.replay.ops");
-      assertEquals(5L, replayDocs.getCount());
+      assertEquals(5L, replayDocs.getCount() - initialOps);
       assertEquals(UpdateLog.State.ACTIVE.ordinal(), state.getValue().intValue());
 
       // make sure we can still access versions after recovery
@@ -236,6 +237,10 @@ public class TestRecovery extends SolrTestCaseJ4 {
       assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
       Gauge<Integer> state = (Gauge<Integer>)metrics.get("TLOG.state");
       assertEquals(UpdateLog.State.BUFFERING.ordinal(), state.getValue().intValue());
+      Gauge<Integer> bufferedOps = (Gauge<Integer>)metrics.get("TLOG.buffered.ops");
+      int initialOps = bufferedOps.getValue();
+      Meter applyingBuffered = (Meter)metrics.get("TLOG.applying_buffered.ops");
+      long initialApplyingOps = applyingBuffered.getCount();
 
       // simulate updates from a leader
       updateJ(jsonAdd(sdoc("id","B1", "_version_","1010")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
@@ -267,8 +272,7 @@ public class TestRecovery extends SolrTestCaseJ4 {
           ,"=={'doc':null}"
       );
 
-      Gauge<Integer> bufferedOps = (Gauge<Integer>)metrics.get("TLOG.buffered.ops");
-      assertEquals(6, bufferedOps.getValue().intValue());
+      assertEquals(6, bufferedOps.getValue().intValue() - initialOps);
 
       rinfoFuture = ulog.applyBufferedUpdates();
       assertTrue(rinfoFuture != null);
@@ -280,8 +284,7 @@ public class TestRecovery extends SolrTestCaseJ4 {
       UpdateLog.RecoveryInfo rinfo = rinfoFuture.get();
       assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
 
-      Meter applyingBuffered = (Meter)metrics.get("TLOG.applying_buffered.ops");
-      assertEquals(6L, applyingBuffered.getCount());
+      assertEquals(6L, applyingBuffered.getCount() - initialApplyingOps);
 
       assertJQ(req("qt","/get", "getVersions","6")
           ,"=={'versions':[-2010,1030,1020,-1017,1015,1010]}"

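This is the standard fix for asserting on cumulative metrics: a Meter keeps whatever count earlier activity in the same core left behind, so the test snapshots the value before triggering the work and asserts on the difference. Condensed, the pattern the test now follows looks like this, assuming the same metrics map as in the test:

  import com.codahale.metrics.Meter;

  Meter replayDocs = (Meter) metrics.get("TLOG.replay.ops");
  long before = replayDocs.getCount();   // may already be non-zero

  // ... trigger log replay ...

  // assert on the delta, never on the absolute count
  assertEquals(5L, replayDocs.getCount() - before);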

[05/43] lucene-solr:jira/solr-8593: LUCENE-7609: Refactor expressions module to use DoubleValuesSource

Posted by kr...@apache.org.
LUCENE-7609: Refactor expressions module to use DoubleValuesSource


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8b055382
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8b055382
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8b055382

Branch: refs/heads/jira/solr-8593
Commit: 8b055382d6c88acaed9fe472a038c7ee6b35c016
Parents: da30f21
Author: Alan Woodward <ro...@apache.org>
Authored: Thu Jan 5 13:15:19 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Jan 7 11:05:06 2017 +0000

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   5 +
 .../ExpressionAggregationFacetsExample.java     |   2 +-
 lucene/expressions/build.xml                    |  11 --
 .../org/apache/lucene/expressions/Bindings.java |  14 +--
 .../apache/lucene/expressions/Expression.java   |  21 ++--
 .../expressions/ExpressionComparator.java       | 100 -----------------
 .../expressions/ExpressionFunctionValues.java   |  35 +++---
 .../lucene/expressions/ExpressionRescorer.java  |  33 ++++--
 .../lucene/expressions/ExpressionSortField.java |  77 -------------
 .../expressions/ExpressionValueSource.java      |  70 ++++++------
 .../lucene/expressions/ScoreFunctionValues.java |  46 --------
 .../lucene/expressions/ScoreValueSource.java    |  61 ----------
 .../lucene/expressions/SimpleBindings.java      |  30 +++--
 .../expressions/js/JavascriptCompiler.java      |  15 ++-
 .../apache/lucene/expressions/package-info.java |   5 +-
 .../lucene/expressions/TestDemoExpressions.java |  44 ++++++--
 .../expressions/TestExpressionSortField.java    |   2 +-
 .../expressions/TestExpressionValueSource.java  | 111 ++++++-------------
 .../expressions/js/TestCustomFunctions.java     |  20 ++--
 .../expressions/js/TestJavascriptFunction.java  |   2 +-
 .../js/TestJavascriptOperations.java            |   2 +-
 21 files changed, 197 insertions(+), 509 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index fa5cc1c..c667040 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -74,6 +74,11 @@ API Changes
   grouping Collectors are renamed to remove the Abstract* prefix.
   (Alan Woodward, Martijn van Groningen)
 
+* LUCENE-7609: The expressions module now uses the DoubleValuesSource API, and 
+  no longer depends on the queries module.  Expression#getValueSource() is
+  replaced with Expression#getDoubleValuesSource(). (Alan Woodward, Adrien
+  Grand)
+
 New features
 
 * LUCENE-5867: Added BooleanSimilarity. (Robert Muir, Adrien Grand)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java
----------------------------------------------------------------------
diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java
index fca7b6c..7f29b38 100644
--- a/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java
+++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/ExpressionAggregationFacetsExample.java
@@ -103,7 +103,7 @@ public class ExpressionAggregationFacetsExample {
     FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);
 
     // Retrieve results
-    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, expr.getValueSource(bindings));
+    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, expr.getDoubleValuesSource(bindings));
     FacetResult result = facets.getTopChildren(10, "A");
     
     indexReader.close();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/build.xml
----------------------------------------------------------------------
diff --git a/lucene/expressions/build.xml b/lucene/expressions/build.xml
index 1dddc6d..61ae64f 100644
--- a/lucene/expressions/build.xml
+++ b/lucene/expressions/build.xml
@@ -26,7 +26,6 @@
   <path id="classpath">
     <path refid="base.classpath"/>
     <fileset dir="lib"/>
-    <pathelement path="${queries.jar}"/>
   </path>
 
   <path id="test.classpath">
@@ -35,16 +34,6 @@
     <pathelement path="src/test-files"/>
   </path>
 
-  <target name="compile-core" depends="jar-queries,common.compile-core" />
-
-  <target name="javadocs" depends="javadocs-queries,compile-core,check-javadocs-uptodate" unless="javadocs-uptodate-${name}">
-    <invoke-module-javadoc>
-      <links>
-        <link href="../queries"/>
-      </links>
-    </invoke-module-javadoc>
-  </target>
-
   <target name="regenerate" depends="run-antlr"/>
 
   <target name="resolve-antlr" xmlns:ivy="antlib:org.apache.ivy.ant">

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/Bindings.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/Bindings.java b/lucene/expressions/src/java/org/apache/lucene/expressions/Bindings.java
index 5ec2edb..91a22f5 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/Bindings.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/Bindings.java
@@ -16,7 +16,7 @@
  */
 package org.apache.lucene.expressions;
 
-import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.DoubleValuesSource;
 
 /**
  * Binds variable names in expressions to actual data.
@@ -31,14 +31,10 @@ public abstract class Bindings {
   /** Sole constructor. (For invocation by subclass 
    *  constructors, typically implicit.) */
   protected Bindings() {}
-  
+
   /**
-   * Returns a ValueSource bound to the variable name.
+   * Returns a DoubleValuesSource bound to the variable name
    */
-  public abstract ValueSource getValueSource(String name);
-  
-  /** Returns a {@code ValueSource} over relevance scores */
-  protected final ValueSource getScoreValueSource() {
-    return new ScoreValueSource();
-  }
+  public abstract DoubleValuesSource getDoubleValuesSource(String name);
+
 }

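Under the new API a Bindings implementation resolves each variable name directly to a DoubleValuesSource; the relevance score is no longer obtained through a protected helper but through the DoubleValuesSource.SCORES constant. A minimal sketch of a custom implementation, with the "_score" naming convention and the doc-values lookup as illustrative choices:

  import org.apache.lucene.expressions.Bindings;
  import org.apache.lucene.search.DoubleValuesSource;

  class FieldBindings extends Bindings {
    @Override
    public DoubleValuesSource getDoubleValuesSource(String name) {
      if ("_score".equals(name)) {
        return DoubleValuesSource.SCORES;               // relevance score
      }
      return DoubleValuesSource.fromDoubleField(name);  // numeric doc values
    }
  }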
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java b/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java
index 02be23b..c92e21b 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java
@@ -16,9 +16,9 @@
  */
 package org.apache.lucene.expressions;
 
-import org.apache.lucene.expressions.js.JavascriptCompiler; // javadocs
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.expressions.js.JavascriptCompiler;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.Rescorer;
 import org.apache.lucene.search.SortField;
 
@@ -63,26 +63,25 @@ public abstract class Expression {
   }
 
   /**
-   * Evaluates the expression for the given document.
+   * Evaluates the expression for the current document.
    *
-   * @param document <code>docId</code> of the document to compute a value for
-   * @param functionValues {@link FunctionValues} for each element of {@link #variables}.
+   * @param functionValues {@link DoubleValues} for each element of {@link #variables}.
    * @return The computed value of the expression for the given document.
    */
-  public abstract double evaluate(int document, FunctionValues[] functionValues);
+  public abstract double evaluate(DoubleValues[] functionValues);
 
   /**
-   * Get a value source which can compute the value of this expression in the context of the given bindings.
+   * Get a DoubleValuesSource which can compute the value of this expression in the context of the given bindings.
    * @param bindings Bindings to use for external values in this expression
-   * @return A value source which will evaluate this expression when used
+   * @return A DoubleValuesSource which will evaluate this expression when used
    */
-  public ValueSource getValueSource(Bindings bindings) {
+  public DoubleValuesSource getDoubleValuesSource(Bindings bindings) {
     return new ExpressionValueSource(bindings, this);
   }
   
   /** Get a sort field which can be used to rank documents by this expression. */
   public SortField getSortField(Bindings bindings, boolean reverse) {
-    return getValueSource(bindings).getSortField(reverse);
+    return getDoubleValuesSource(bindings).getSortField(reverse);
   }
 
   /** Get a {@link Rescorer}, to rescore first-pass hits

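The signature change means an Expression never sees a docId: the DoubleValues handed to evaluate() are already positioned on the current document by ExpressionFunctionValues. A hand-written sketch of the contract, shown only to illustrate what the compiler generates; note that DoubleValues.doubleValue() declares IOException, so a source-level implementation has to wrap it, while generated bytecode does not:

  import java.io.IOException;
  import java.io.UncheckedIOException;
  import org.apache.lucene.expressions.Expression;
  import org.apache.lucene.search.DoubleValues;

  Expression doubled = new Expression("2*popularity", new String[] { "popularity" }) {
    @Override
    public double evaluate(DoubleValues[] values) {
      try {
        return 2 * values[0].doubleValue(); // values[0] is already positioned
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }
  };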
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionComparator.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionComparator.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionComparator.java
deleted file mode 100644
index eabf6dd..0000000
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionComparator.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.expressions;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.FieldComparator;
-import org.apache.lucene.search.LeafFieldComparator;
-import org.apache.lucene.search.Scorer;
-
-/** A custom comparator for sorting documents by an expression */
-class ExpressionComparator extends FieldComparator<Double> implements LeafFieldComparator {
-  private final double[] values;
-  private double bottom;
-  private double topValue;
-  
-  private ValueSource source;
-  private FunctionValues scores;
-  private LeafReaderContext readerContext;
-  
-  public ExpressionComparator(ValueSource source, int numHits) {
-    values = new double[numHits];
-    this.source = source;
-  }
-  
-  // TODO: change FieldComparator.setScorer to throw IOException and remove this try-catch
-  @Override
-  public void setScorer(Scorer scorer) {
-    // TODO: might be cleaner to lazy-init 'source' and set scorer after?
-    assert readerContext != null;
-    try {
-      Map<String,Object> context = new HashMap<>();
-      assert scorer != null;
-      context.put("scorer", scorer);
-      scores = source.getValues(context, readerContext);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-  
-  @Override
-  public int compare(int slot1, int slot2) {
-    return Double.compare(values[slot1], values[slot2]);
-  }
-  
-  @Override
-  public void setBottom(int slot) {
-    bottom = values[slot];
-  }
-  
-  @Override
-  public void setTopValue(Double value) {
-    topValue = value.doubleValue();
-  }
-  
-  @Override
-  public int compareBottom(int doc) throws IOException {
-    return Double.compare(bottom, scores.doubleVal(doc));
-  }
-  
-  @Override
-  public void copy(int slot, int doc) throws IOException {
-    values[slot] = scores.doubleVal(doc);
-  }
-  
-  @Override
-  public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
-    this.readerContext = context;
-    return this;
-  }
-  
-  @Override
-  public Double value(int slot) {
-    return Double.valueOf(values[slot]);
-  }
-  
-  @Override
-  public int compareTop(int doc) throws IOException {
-    return Double.compare(topValue, scores.doubleVal(doc));
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionFunctionValues.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionFunctionValues.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionFunctionValues.java
index ad195cd..2e6f7c4 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionFunctionValues.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionFunctionValues.java
@@ -16,20 +16,16 @@
  */
 package org.apache.lucene.expressions;
 
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
+import java.io.IOException;
 
-/** A {@link FunctionValues} which evaluates an expression */
-class ExpressionFunctionValues extends DoubleDocValues {
+import org.apache.lucene.search.DoubleValues;
+
+/** A {@link DoubleValues} which evaluates an expression */
+class ExpressionFunctionValues extends DoubleValues {
   final Expression expression;
-  final FunctionValues[] functionValues;
-  
-  int currentDocument = -1;
-  double currentValue;
+  final DoubleValues[] functionValues;
   
-  ExpressionFunctionValues(ValueSource parent, Expression expression, FunctionValues[] functionValues) {
-    super(parent);
+  ExpressionFunctionValues(Expression expression, DoubleValues[] functionValues) {
     if (expression == null) {
       throw new NullPointerException();
     }
@@ -39,14 +35,17 @@ class ExpressionFunctionValues extends DoubleDocValues {
     this.expression = expression;
     this.functionValues = functionValues;
   }
-  
+
   @Override
-  public double doubleVal(int document) {
-    if (currentDocument != document) {
-      currentDocument = document;
-      currentValue = expression.evaluate(document, functionValues);
+  public boolean advanceExact(int doc) throws IOException {
+    for (DoubleValues v : functionValues) {
+      v.advanceExact(doc);
     }
-    
-    return currentValue;
+    return true;
+  }
+  
+  @Override
+  public double doubleValue() {
+    return expression.evaluate(functionValues);
   }
 }

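advanceExact() replaces the per-call docIds of the old FunctionValues: a consumer positions the values on a document once, then reads doubleValue() as often as it likes. ExpressionFunctionValues can always return true because missing inputs are mapped to 0 by the zeroWhenUnpositioned wrapper in ExpressionValueSource below. The consuming side, sketched under the assumption that a source and a leaf context are in scope:

  import java.io.IOException;
  import org.apache.lucene.index.LeafReaderContext;
  import org.apache.lucene.search.DoubleValues;
  import org.apache.lucene.search.DoubleValuesSource;

  static double sum(DoubleValuesSource source, LeafReaderContext leaf) throws IOException {
    DoubleValues values = source.getValues(leaf, null); // null scores: expression uses no _score
    double total = 0;
    for (int doc = 0; doc < leaf.reader().maxDoc(); doc++) {
      if (values.advanceExact(doc)) {   // position on this document first
        total += values.doubleValue();  // then read the value
      }
    }
    return total;
  }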
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
index 33e8428..e3e7a4e 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
@@ -20,13 +20,11 @@ package org.apache.lucene.expressions;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.DoubleValues;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Rescorer;
@@ -49,7 +47,7 @@ class ExpressionRescorer extends SortRescorer {
   private final Expression expression;
   private final Bindings bindings;
 
-  /** Uses the provided {@link ValueSource} to assign second
+  /** Uses the provided {@link Expression} to assign second
    *  pass scores. */
   public ExpressionRescorer(Expression expression, Bindings bindings) {
     super(new Sort(expression.getSortField(bindings, true)));
@@ -57,6 +55,21 @@ class ExpressionRescorer extends SortRescorer {
     this.bindings = bindings;
   }
 
+  private static DoubleValues scores(int doc, float score) {
+    return new DoubleValues() {
+      @Override
+      public double doubleValue() throws IOException {
+        return score;
+      }
+
+      @Override
+      public boolean advanceExact(int target) throws IOException {
+        assert doc == target;
+        return true;
+      }
+    };
+  }
+
   @Override
   public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
     Explanation superExpl = super.explain(searcher, firstPassExplanation, docID);
@@ -65,18 +78,14 @@ class ExpressionRescorer extends SortRescorer {
     int subReader = ReaderUtil.subIndex(docID, leaves);
     LeafReaderContext readerContext = leaves.get(subReader);
     int docIDInSegment = docID - readerContext.docBase;
-    Map<String,Object> context = new HashMap<>();
-
-    FakeScorer fakeScorer = new FakeScorer();
-    fakeScorer.score = firstPassExplanation.getValue();
-    fakeScorer.doc = docIDInSegment;
 
-    context.put("scorer", fakeScorer);
+    DoubleValues scores = scores(docIDInSegment, firstPassExplanation.getValue());
 
     List<Explanation> subs = new ArrayList<>(Arrays.asList(superExpl.getDetails()));
     for(String variable : expression.variables) {
-      subs.add(Explanation.match((float) bindings.getValueSource(variable).getValues(context, readerContext).doubleVal(docIDInSegment),
-                                       "variable \"" + variable + "\""));
+      DoubleValues dv = bindings.getDoubleValuesSource(variable).getValues(readerContext, scores);
+      if (dv.advanceExact(docIDInSegment))
+        subs.add(Explanation.match((float) dv.doubleValue(), "variable \"" + variable + "\""));
     }
 
     return Explanation.match(superExpl.getValue(), superExpl.getDescription(), subs);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionSortField.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionSortField.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionSortField.java
deleted file mode 100644
index 2b39834..0000000
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionSortField.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.expressions;
-
-import java.io.IOException;
-
-import org.apache.lucene.search.FieldComparator;
-import org.apache.lucene.search.SortField;
-
-/** A {@link SortField} which sorts documents by the evaluated value of an expression for each document */
-class ExpressionSortField extends SortField {
-  private final ExpressionValueSource source;
-
-  ExpressionSortField(String name, ExpressionValueSource source, boolean reverse) {
-    super(name, Type.CUSTOM, reverse);
-    this.source = source;
-  }
-  
-  @Override
-  public FieldComparator<?> getComparator(final int numHits, final int sortPos) throws IOException {
-    return new ExpressionComparator(source, numHits);
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = super.hashCode();
-    result = prime * result + ((source == null) ? 0 : source.hashCode());
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (!super.equals(obj)) return false;
-    if (getClass() != obj.getClass()) return false;
-    ExpressionSortField other = (ExpressionSortField) obj;
-    if (source == null) {
-      if (other.source != null) return false;
-    } else if (!source.equals(other.source)) return false;
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder buffer = new StringBuilder();
-    
-    buffer.append("<expr \"");
-    buffer.append(getField());
-    buffer.append("\">");
-    
-    if (getReverse()) {
-      buffer.append('!');
-    }
-
-    return buffer.toString();
-  }
-
-  @Override
-  public boolean needsScores() {
-    return source.needsScores();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java
index fcba455..7842de9 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java
@@ -20,76 +20,77 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 
 /**
- * A {@link ValueSource} which evaluates a {@link Expression} given the context of an {@link Bindings}.
+ * A {@link DoubleValuesSource} which evaluates a {@link Expression} given the context of an {@link Bindings}.
  */
 @SuppressWarnings({"rawtypes", "unchecked"})
-final class ExpressionValueSource extends ValueSource {
-  final ValueSource variables[];
+final class ExpressionValueSource extends DoubleValuesSource {
+  final DoubleValuesSource variables[];
   final Expression expression;
   final boolean needsScores;
 
   ExpressionValueSource(Bindings bindings, Expression expression) {
     if (bindings == null) throw new NullPointerException();
-    if (expression == null) throw new NullPointerException();
-    this.expression = expression;
-    variables = new ValueSource[expression.variables.length];
+    this.expression = Objects.requireNonNull(expression);
+    variables = new DoubleValuesSource[expression.variables.length];
     boolean needsScores = false;
     for (int i = 0; i < variables.length; i++) {
-      ValueSource source = bindings.getValueSource(expression.variables[i]);
-      if (source instanceof ScoreValueSource) {
-        needsScores = true;
-      } else if (source instanceof ExpressionValueSource) {
-        if (((ExpressionValueSource)source).needsScores()) {
-          needsScores = true;
-        }
-      } else if (source == null) {
+      DoubleValuesSource source = bindings.getDoubleValuesSource(expression.variables[i]);
+      if (source == null) {
         throw new RuntimeException("Internal error. Variable (" + expression.variables[i] + ") does not exist.");
       }
+      needsScores |= source.needsScores();
       variables[i] = source;
     }
     this.needsScores = needsScores;
   }
 
   @Override
-  public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
-    Map<String, FunctionValues> valuesCache = (Map<String, FunctionValues>)context.get("valuesCache");
-    if (valuesCache == null) {
-      valuesCache = new HashMap<>();
-      context = new HashMap(context);
-      context.put("valuesCache", valuesCache);
-    }
-    FunctionValues[] externalValues = new FunctionValues[expression.variables.length];
+  public DoubleValues getValues(LeafReaderContext readerContext, DoubleValues scores) throws IOException {
+    Map<String, DoubleValues> valuesCache = new HashMap<>();
+    DoubleValues[] externalValues = new DoubleValues[expression.variables.length];
 
     for (int i = 0; i < variables.length; ++i) {
       String externalName = expression.variables[i];
-      FunctionValues values = valuesCache.get(externalName);
+      DoubleValues values = valuesCache.get(externalName);
       if (values == null) {
-        values = variables[i].getValues(context, readerContext);
+        values = variables[i].getValues(readerContext, scores);
         if (values == null) {
           throw new RuntimeException("Internal error. External (" + externalName + ") does not exist.");
         }
         valuesCache.put(externalName, values);
       }
-      externalValues[i] = values;
+      externalValues[i] = zeroWhenUnpositioned(values);
     }
 
-    return new ExpressionFunctionValues(this, expression, externalValues);
+    return new ExpressionFunctionValues(expression, externalValues);
   }
 
-  @Override
-  public SortField getSortField(boolean reverse) {
-    return new ExpressionSortField(expression.sourceText, this, reverse);
+  private static DoubleValues zeroWhenUnpositioned(DoubleValues in) {
+    return new DoubleValues() {
+
+      boolean positioned = false;
+
+      @Override
+      public double doubleValue() throws IOException {
+        return positioned ? in.doubleValue() : 0;
+      }
+
+      @Override
+      public boolean advanceExact(int doc) throws IOException {
+        return positioned = in.advanceExact(doc);
+      }
+    };
   }
 
   @Override
-  public String description() {
+  public String toString() {
     return "expr(" + expression.sourceText + ")";
   }
   
@@ -132,7 +133,8 @@ final class ExpressionValueSource extends ValueSource {
     return true;
   }
 
-  boolean needsScores() {
+  @Override
+  public boolean needsScores() {
     return needsScores;
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/ScoreFunctionValues.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ScoreFunctionValues.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ScoreFunctionValues.java
deleted file mode 100644
index e310c06..0000000
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ScoreFunctionValues.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.expressions;
-
-import java.io.IOException;
-
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
-import org.apache.lucene.search.Scorer;
-
-/**
- * A utility class to allow expressions to access the score as a {@link FunctionValues}.
- */
-class ScoreFunctionValues extends DoubleDocValues {
-  final Scorer scorer;
-
-  ScoreFunctionValues(ValueSource parent, Scorer scorer) {
-    super(parent);
-    this.scorer = scorer;
-  }
-  
-  @Override
-  public double doubleVal(int document) {
-    try {
-      assert document == scorer.docID();
-      return scorer.score();
-    } catch (IOException exception) {
-      throw new RuntimeException(exception);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/ScoreValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ScoreValueSource.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ScoreValueSource.java
deleted file mode 100644
index ea1669c..0000000
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ScoreValueSource.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.expressions;
-
-
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.search.Scorer;
-
-import java.io.IOException;
-import java.util.Map;
-
-/**
- * A {@link ValueSource} which uses the {@link Scorer} passed through
- * the context map by {@link ExpressionComparator}.
- */
-@SuppressWarnings({"rawtypes"})
-class ScoreValueSource extends ValueSource {
-
-  /**
-   * <code>context</code> must contain a key "scorer" which is a {@link Scorer}.
-   */
-  @Override
-  public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
-    Scorer v = (Scorer) context.get("scorer");
-    if (v == null) {
-      throw new IllegalStateException("Expressions referencing the score can only be used for sorting");
-    }
-    return new ScoreFunctionValues(this, v);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    return o == this;
-  }
-
-  @Override
-  public int hashCode() {
-    return System.identityHashCode(this);
-  }
-
-  @Override
-  public String description() {
-    return "score()";
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java b/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java
index 1c11cb2..6276055 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java
@@ -20,11 +20,7 @@ package org.apache.lucene.expressions;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
-import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
-import org.apache.lucene.queries.function.valuesource.IntFieldSource;
-import org.apache.lucene.queries.function.valuesource.LongFieldSource;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.SortField;
 
 /**
@@ -64,9 +60,9 @@ public final class SimpleBindings extends Bindings {
   }
 
   /**
-   * Bind a {@link ValueSource} directly to the given name.
+   * Bind a {@link DoubleValuesSource} directly to the given name.
    */
-  public void add(String name, ValueSource source) { map.put(name, source); }
+  public void add(String name, DoubleValuesSource source) { map.put(name, source); }
   
   /** 
    * Adds an Expression to the bindings.
@@ -78,27 +74,27 @@ public final class SimpleBindings extends Bindings {
   }
   
   @Override
-  public ValueSource getValueSource(String name) {
+  public DoubleValuesSource getDoubleValuesSource(String name) {
     Object o = map.get(name);
     if (o == null) {
       throw new IllegalArgumentException("Invalid reference '" + name + "'");
     } else if (o instanceof Expression) {
-      return ((Expression)o).getValueSource(this);
-    } else if (o instanceof ValueSource) {
-      return ((ValueSource)o);
+      return ((Expression)o).getDoubleValuesSource(this);
+    } else if (o instanceof DoubleValuesSource) {
+      return ((DoubleValuesSource) o);
     }
     SortField field = (SortField) o;
     switch(field.getType()) {
       case INT:
-        return new IntFieldSource(field.getField());
+        return DoubleValuesSource.fromIntField(field.getField());
       case LONG:
-        return new LongFieldSource(field.getField());
+        return DoubleValuesSource.fromLongField(field.getField());
       case FLOAT:
-        return new FloatFieldSource(field.getField());
+        return DoubleValuesSource.fromFloatField(field.getField());
       case DOUBLE:
-        return new DoubleFieldSource(field.getField());
+        return DoubleValuesSource.fromDoubleField(field.getField());
       case SCORE:
-        return getScoreValueSource();
+        return DoubleValuesSource.SCORES;
       default:
         throw new UnsupportedOperationException(); 
     }
@@ -113,7 +109,7 @@ public final class SimpleBindings extends Bindings {
       if (o instanceof Expression) {
         Expression expr = (Expression) o;
         try {
-          expr.getValueSource(this);
+          expr.getDoubleValuesSource(this);
         } catch (StackOverflowError e) {
           throw new IllegalArgumentException("Recursion Error: Cycle detected originating in (" + expr.sourceText + ")");
         }

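With the factory methods above, wiring an expression into a sort no longer touches the queries module at all. A short usage sketch; the field names and the ln() expression are assumptions for illustration, mirroring the tests below:

  import java.text.ParseException;
  import org.apache.lucene.expressions.Expression;
  import org.apache.lucene.expressions.SimpleBindings;
  import org.apache.lucene.expressions.js.JavascriptCompiler;
  import org.apache.lucene.search.DoubleValuesSource;
  import org.apache.lucene.search.Sort;
  import org.apache.lucene.search.SortField;

  static Sort boostedSort() throws ParseException {
    Expression expr = JavascriptCompiler.compile("_score + ln(popularity)");
    SimpleBindings bindings = new SimpleBindings();
    bindings.add(new SortField("_score", SortField.Type.SCORE));
    bindings.add("popularity", DoubleValuesSource.fromLongField("popularity"));
    return new Sort(expr.getSortField(bindings, true));
  }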
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java b/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java
index 13174bc..87e41c0 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java
@@ -39,7 +39,7 @@ import org.antlr.v4.runtime.CommonTokenStream;
 import org.antlr.v4.runtime.tree.ParseTree;
 import org.apache.lucene.expressions.Expression;
 import org.apache.lucene.expressions.js.JavascriptParser.ExpressionContext;
-import org.apache.lucene.queries.function.FunctionValues;
+import org.apache.lucene.search.DoubleValues;
 import org.apache.lucene.util.IOUtils;
 import org.objectweb.asm.ClassWriter;
 import org.objectweb.asm.Label;
@@ -93,13 +93,13 @@ public final class JavascriptCompiler {
   private static final String COMPILED_EXPRESSION_INTERNAL = COMPILED_EXPRESSION_CLASS.replace('.', '/');
   
   static final Type EXPRESSION_TYPE = Type.getType(Expression.class);
-  static final Type FUNCTION_VALUES_TYPE = Type.getType(FunctionValues.class);
+  static final Type FUNCTION_VALUES_TYPE = Type.getType(DoubleValues.class);
 
   private static final org.objectweb.asm.commons.Method
     EXPRESSION_CTOR = getAsmMethod(void.class, "<init>", String.class, String[].class),
-    EVALUATE_METHOD = getAsmMethod(double.class, "evaluate", int.class, FunctionValues[].class);
+    EVALUATE_METHOD = getAsmMethod(double.class, "evaluate", DoubleValues[].class);
 
-  static final org.objectweb.asm.commons.Method DOUBLE_VAL_METHOD = getAsmMethod(double.class, "doubleVal", int.class);
+  static final org.objectweb.asm.commons.Method DOUBLE_VAL_METHOD = getAsmMethod(double.class, "doubleValue");
   
   /** create an ASM Method object from return type, method name, and parameters. */
   private static org.objectweb.asm.commons.Method getAsmMethod(Class<?> rtype, String name, Class<?>... ptypes) {
@@ -155,8 +155,8 @@ public final class JavascriptCompiler {
    */
   @SuppressWarnings({"unused", "null"})
   private static void unusedTestCompile() throws IOException {
-    FunctionValues f = null;
-    double ret = f.doubleVal(2);
+    DoubleValues f = null;
+    double ret = f.doubleValue();
   }
   
   /**
@@ -325,10 +325,9 @@ public final class JavascriptCompiler {
             externalsMap.put(text, index);
           }
 
-          gen.loadArg(1);
+          gen.loadArg(0);
           gen.push(index);
           gen.arrayLoad(FUNCTION_VALUES_TYPE);
-          gen.loadArg(0);
           gen.invokeVirtual(FUNCTION_VALUES_TYPE, DOUBLE_VAL_METHOD);
           gen.cast(Type.DOUBLE_TYPE, typeStack.peek());
         } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/java/org/apache/lucene/expressions/package-info.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/package-info.java b/lucene/expressions/src/java/org/apache/lucene/expressions/package-info.java
index 62a519b..4a1eadf 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/package-info.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/package-info.java
@@ -25,11 +25,12 @@
  * 
  * <p>
  * {@link org.apache.lucene.expressions.Bindings} - abstraction for binding external variables
- * to a way to get a value for those variables for a particular document (ValueSource).
+ * to a way to get a value for those variables for a particular document (DoubleValuesSource).
  * </p>
  * 
  * <p>
- * {@link org.apache.lucene.expressions.SimpleBindings} - default implementation of bindings which provide easy ways to bind sort fields and other expressions to external variables
+ * {@link org.apache.lucene.expressions.SimpleBindings} - default implementation of bindings which
+ * provide easy ways to bind sort fields and other expressions to external variables
  * </p>
  */
 package org.apache.lucene.expressions;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
index 01b3394..d76ef1c 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestDemoExpressions.java
@@ -16,18 +16,20 @@
  */
 package org.apache.lucene.expressions;
 
+import java.io.IOException;
+
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
 import org.apache.lucene.expressions.js.VariableContext;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource;
-import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.search.CheckHits;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
@@ -39,9 +41,9 @@ import org.apache.lucene.search.TopFieldDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
+import static org.apache.lucene.expressions.js.VariableContext.Type.INT_INDEX;
 import static org.apache.lucene.expressions.js.VariableContext.Type.MEMBER;
 import static org.apache.lucene.expressions.js.VariableContext.Type.STR_INDEX;
-import static org.apache.lucene.expressions.js.VariableContext.Type.INT_INDEX;
 
 
 /** simple demo of using expressions */
@@ -236,7 +238,7 @@ public class  TestDemoExpressions extends LuceneTestCase {
   public void testStaticExtendedVariableExample() throws Exception {
     Expression popularity = JavascriptCompiler.compile("doc[\"popularity\"].value");
     SimpleBindings bindings = new SimpleBindings();
-    bindings.add("doc['popularity'].value", new IntFieldSource("popularity"));
+    bindings.add("doc['popularity'].value", DoubleValuesSource.fromIntField("popularity"));
     Sort sort = new Sort(popularity.getSortField(bindings, true));
     TopFieldDocs td = searcher.search(new MatchAllDocsQuery(), 3, sort);
 
@@ -250,6 +252,30 @@ public class  TestDemoExpressions extends LuceneTestCase {
     assertEquals(2D, (Double)d.fields[0], 1E-4);
   }
 
+  private static DoubleValuesSource constant(double value) {
+    return new DoubleValuesSource() {
+      @Override
+      public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
+        return new DoubleValues() {
+          @Override
+          public double doubleValue() throws IOException {
+            return value;
+          }
+
+          @Override
+          public boolean advanceExact(int doc) throws IOException {
+            return true;
+          }
+        };
+      }
+
+      @Override
+      public boolean needsScores() {
+        return false;
+      }
+    };
+  }
+
   public void testDynamicExtendedVariableExample() throws Exception {
     Expression popularity = JavascriptCompiler.compile("doc['popularity'].value + magicarray[0] + fourtytwo");
 
@@ -258,7 +284,7 @@ public class  TestDemoExpressions extends LuceneTestCase {
     // filled in with proper error messages for a real use case.
     Bindings bindings = new Bindings() {
       @Override
-      public ValueSource getValueSource(String name) {
+      public DoubleValuesSource getDoubleValuesSource(String name) {
         VariableContext[] var = VariableContext.parse(name);
         assert var[0].type == MEMBER;
         String base = var[0].text;
@@ -266,7 +292,7 @@ public class  TestDemoExpressions extends LuceneTestCase {
           if (var.length > 1 && var[1].type == STR_INDEX) {
             String field = var[1].text;
             if (var.length > 2 && var[2].type == MEMBER && var[2].text.equals("value")) {
-              return new IntFieldSource(field);
+              return DoubleValuesSource.fromIntField(field);
             } else {
               fail("member: " + var[2].text);// error case, non/missing "value" member access
             }
@@ -275,12 +301,12 @@ public class  TestDemoExpressions extends LuceneTestCase {
           }
         } else if (base.equals("magicarray")) {
           if (var.length > 1 && var[1].type == INT_INDEX) {
-            return new DoubleConstValueSource(2048);
+            return constant(2048);
           } else {
             fail();// error case, magic array isn't an array
           }
         } else if (base.equals("fourtytwo")) {
-          return new DoubleConstValueSource(42);
+          return constant(42);
         } else {
           fail();// error case (variable doesn't exist)
         }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSortField.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSortField.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSortField.java
index ec6ea11..73e7b8b 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSortField.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionSortField.java
@@ -31,7 +31,7 @@ public class TestExpressionSortField extends LuceneTestCase {
     bindings.add(new SortField("popularity", SortField.Type.INT));
     
     SortField sf = expr.getSortField(bindings, true);
-    assertEquals("<expr \"sqrt(_score) + ln(popularity)\">!", sf.toString());
+    assertEquals("<expr(sqrt(_score) + ln(popularity))>!", sf.toString());
   }
   
   public void testEquals() throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
index eeb3c9c..39217d8 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
@@ -17,21 +17,17 @@
 package org.apache.lucene.expressions;
 
 
-import java.util.HashMap;
-
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.expressions.js.JavascriptCompiler;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.ValueSourceScorer;
-import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.DoubleValuesSource;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -47,7 +43,7 @@ public class TestExpressionValueSource extends LuceneTestCase {
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    
+
     Document doc = new Document();
     doc.add(newStringField("id", "1", Field.Store.YES));
     doc.add(newTextField("body", "some contents and more contents", Field.Store.NO));
@@ -58,6 +54,7 @@ public class TestExpressionValueSource extends LuceneTestCase {
     doc.add(newStringField("id", "2", Field.Store.YES));
     doc.add(newTextField("body", "another document with different contents", Field.Store.NO));
     doc.add(new NumericDocValuesField("popularity", 20));
+    doc.add(new NumericDocValuesField("count", 1));
     iw.addDocument(doc);
     
     doc = new Document();
@@ -77,81 +74,34 @@ public class TestExpressionValueSource extends LuceneTestCase {
     dir.close();
     super.tearDown();
   }
-  
-  public void testTypes() throws Exception {
-    Expression expr = JavascriptCompiler.compile("2*popularity");
-    SimpleBindings bindings = new SimpleBindings();
-    bindings.add(new SortField("popularity", SortField.Type.LONG));
-    ValueSource vs = expr.getValueSource(bindings);
-    
-    assertEquals(1, reader.leaves().size());
-    LeafReaderContext leaf = reader.leaves().get(0);
-    FunctionValues values = vs.getValues(new HashMap<String,Object>(), leaf);
-    
-    assertEquals(10, values.doubleVal(0), 0);
-    assertEquals(10, values.floatVal(0), 0);
-    assertEquals(10, values.longVal(0));
-    assertEquals(10, values.intVal(0));
-    assertEquals(10, values.shortVal(0));
-    assertEquals(10, values.byteVal(0));
-    assertEquals("10.0", values.strVal(0));
-    assertEquals(new Double(10), values.objectVal(0));
-    
-    assertEquals(40, values.doubleVal(1), 0);
-    assertEquals(40, values.floatVal(1), 0);
-    assertEquals(40, values.longVal(1));
-    assertEquals(40, values.intVal(1));
-    assertEquals(40, values.shortVal(1));
-    assertEquals(40, values.byteVal(1));
-    assertEquals("40.0", values.strVal(1));
-    assertEquals(new Double(40), values.objectVal(1));
-    
-    assertEquals(4, values.doubleVal(2), 0);
-    assertEquals(4, values.floatVal(2), 0);
-    assertEquals(4, values.longVal(2));
-    assertEquals(4, values.intVal(2));
-    assertEquals(4, values.shortVal(2));
-    assertEquals(4, values.byteVal(2));
-    assertEquals("4.0", values.strVal(2));
-    assertEquals(new Double(4), values.objectVal(2));    
-  }
-  
-  public void testRangeScorer() throws Exception {
-    Expression expr = JavascriptCompiler.compile("2*popularity");
+
+  public void testDoubleValuesSourceTypes() throws Exception {
+    Expression expr = JavascriptCompiler.compile("2*popularity + count");
     SimpleBindings bindings = new SimpleBindings();
     bindings.add(new SortField("popularity", SortField.Type.LONG));
-    ValueSource vs = expr.getValueSource(bindings);
-    
+    bindings.add(new SortField("count", SortField.Type.LONG));
+    DoubleValuesSource vs = expr.getDoubleValuesSource(bindings);
+
     assertEquals(1, reader.leaves().size());
     LeafReaderContext leaf = reader.leaves().get(0);
-    FunctionValues values = vs.getValues(new HashMap<String,Object>(), leaf);
-    
-    // everything
-    ValueSourceScorer scorer = values.getRangeScorer(leaf, "4", "40", true, true);
-    DocIdSetIterator iter = scorer.iterator();
-    assertEquals(-1, iter.docID());
-    assertEquals(0, iter.nextDoc());
-    assertEquals(1, iter.nextDoc());
-    assertEquals(2, iter.nextDoc());
-    assertEquals(DocIdSetIterator.NO_MORE_DOCS, iter.nextDoc());
+    DoubleValues values = vs.getValues(leaf, null);
 
-    // just the first doc
-    values = vs.getValues(new HashMap<String,Object>(), leaf);
-    scorer = values.getRangeScorer(leaf, "4", "40", false, false);
-    iter = scorer.iterator();
-    assertEquals(-1, scorer.docID());
-    assertEquals(0, iter.nextDoc());
-    assertEquals(DocIdSetIterator.NO_MORE_DOCS, iter.nextDoc());
+    assertTrue(values.advanceExact(0));
+    assertEquals(10, values.doubleValue(), 0);
+    assertTrue(values.advanceExact(1));
+    assertEquals(41, values.doubleValue(), 0);
+    assertTrue(values.advanceExact(2));
+    assertEquals(4, values.doubleValue(), 0);
   }
-  
-  public void testEquals() throws Exception {
+
+  public void testDoubleValuesSourceEquals() throws Exception {
     Expression expr = JavascriptCompiler.compile("sqrt(a) + ln(b)");
-    
-    SimpleBindings bindings = new SimpleBindings();    
+
+    SimpleBindings bindings = new SimpleBindings();
     bindings.add(new SortField("a", SortField.Type.INT));
     bindings.add(new SortField("b", SortField.Type.INT));
-    
-    ValueSource vs1 = expr.getValueSource(bindings);
+
+    DoubleValuesSource vs1 = expr.getDoubleValuesSource(bindings);
     // same instance
     assertEquals(vs1, vs1);
     // null
@@ -159,20 +109,21 @@ public class TestExpressionValueSource extends LuceneTestCase {
     // other object
     assertFalse(vs1.equals("foobar"));
     // same bindings and expression instances
-    ValueSource vs2 = expr.getValueSource(bindings);
+    DoubleValuesSource vs2 = expr.getDoubleValuesSource(bindings);
     assertEquals(vs1.hashCode(), vs2.hashCode());
     assertEquals(vs1, vs2);
     // equiv bindings (different instance)
-    SimpleBindings bindings2 = new SimpleBindings();    
+    SimpleBindings bindings2 = new SimpleBindings();
     bindings2.add(new SortField("a", SortField.Type.INT));
     bindings2.add(new SortField("b", SortField.Type.INT));
-    ValueSource vs3 = expr.getValueSource(bindings2);
+    DoubleValuesSource vs3 = expr.getDoubleValuesSource(bindings2);
     assertEquals(vs1, vs3);
     // different bindings (same names, different types)
-    SimpleBindings bindings3 = new SimpleBindings();    
+    SimpleBindings bindings3 = new SimpleBindings();
     bindings3.add(new SortField("a", SortField.Type.LONG));
-    bindings3.add(new SortField("b", SortField.Type.INT));
-    ValueSource vs4 = expr.getValueSource(bindings3);
+    bindings3.add(new SortField("b", SortField.Type.FLOAT));
+    DoubleValuesSource vs4 = expr.getDoubleValuesSource(bindings3);
     assertFalse(vs1.equals(vs4));
   }
+
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java b/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java
index 7c46b05..9df5d74 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java
@@ -50,7 +50,7 @@ public class TestCustomFunctions extends LuceneTestCase {
   public void testDefaultList() throws Exception {
     Map<String,Method> functions = JavascriptCompiler.DEFAULT_FUNCTIONS;
     Expression expr = JavascriptCompiler.compile("sqrt(20)", functions, getClass().getClassLoader());
-    assertEquals(Math.sqrt(20), expr.evaluate(0, null), DELTA);
+    assertEquals(Math.sqrt(20), expr.evaluate(null), DELTA);
   }
   
   public static double zeroArgMethod() { return 5; }
@@ -60,7 +60,7 @@ public class TestCustomFunctions extends LuceneTestCase {
     Map<String,Method> functions = new HashMap<>();
     functions.put("foo", getClass().getMethod("zeroArgMethod"));
     Expression expr = JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader());
-    assertEquals(5, expr.evaluate(0, null), DELTA);
+    assertEquals(5, expr.evaluate(null), DELTA);
   }
 
   public static double oneArgMethod(double arg1) { return 3 + arg1; }
@@ -70,7 +70,7 @@ public class TestCustomFunctions extends LuceneTestCase {
     Map<String,Method> functions = new HashMap<>();
     functions.put("foo", getClass().getMethod("oneArgMethod", double.class));
     Expression expr = JavascriptCompiler.compile("foo(3)", functions, getClass().getClassLoader());
-    assertEquals(6, expr.evaluate(0, null), DELTA);
+    assertEquals(6, expr.evaluate(null), DELTA);
   }
   
   public static double threeArgMethod(double arg1, double arg2, double arg3) { return arg1 + arg2 + arg3; }
@@ -80,7 +80,7 @@ public class TestCustomFunctions extends LuceneTestCase {
     Map<String,Method> functions = new HashMap<>();
     functions.put("foo", getClass().getMethod("threeArgMethod", double.class, double.class, double.class));
     Expression expr = JavascriptCompiler.compile("foo(3, 4, 5)", functions, getClass().getClassLoader());
-    assertEquals(12, expr.evaluate(0, null), DELTA);
+    assertEquals(12, expr.evaluate(null), DELTA);
   }
   
   /** tests a map with 2 functions */
@@ -89,7 +89,7 @@ public class TestCustomFunctions extends LuceneTestCase {
     functions.put("foo", getClass().getMethod("zeroArgMethod"));
     functions.put("bar", getClass().getMethod("oneArgMethod", double.class));
     Expression expr = JavascriptCompiler.compile("foo() + bar(3)", functions, getClass().getClassLoader());
-    assertEquals(11, expr.evaluate(0, null), DELTA);
+    assertEquals(11, expr.evaluate(null), DELTA);
   }
 
   /** tests invalid methods that are not allowed to become variables to be mapped */
@@ -220,7 +220,7 @@ public class TestCustomFunctions extends LuceneTestCase {
     
     // this should pass:
     Expression expr = JavascriptCompiler.compile("bar()", functions, childLoader);
-    assertEquals(2.0, expr.evaluate(0, null), DELTA);
+    assertEquals(2.0, expr.evaluate(null), DELTA);
     
     // use our classloader, not the foreign one, which should fail!
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
@@ -232,9 +232,9 @@ public class TestCustomFunctions extends LuceneTestCase {
     Map<String,Method> mixedFunctions = new HashMap<>(JavascriptCompiler.DEFAULT_FUNCTIONS);
     mixedFunctions.putAll(functions);
     expr = JavascriptCompiler.compile("bar()", mixedFunctions, childLoader);
-    assertEquals(2.0, expr.evaluate(0, null), DELTA);
+    assertEquals(2.0, expr.evaluate(null), DELTA);
     expr = JavascriptCompiler.compile("sqrt(20)", mixedFunctions, childLoader);
-    assertEquals(Math.sqrt(20), expr.evaluate(0, null), DELTA);
+    assertEquals(Math.sqrt(20), expr.evaluate(null), DELTA);
     
     // use our classloader, not the foreign one, which should fail!
     expected = expectThrows(IllegalArgumentException.class, () -> {
@@ -256,7 +256,7 @@ public class TestCustomFunctions extends LuceneTestCase {
     String source = "3 * foo() / 5";
     Expression expr = JavascriptCompiler.compile(source, functions, getClass().getClassLoader());
     ArithmeticException expected = expectThrows(ArithmeticException.class, () -> {
-      expr.evaluate(0, null);
+      expr.evaluate(null);
     });
     assertEquals(MESSAGE, expected.getMessage());
     StringWriter sw = new StringWriter();
@@ -272,6 +272,6 @@ public class TestCustomFunctions extends LuceneTestCase {
     functions.put("foo.bar", getClass().getMethod("zeroArgMethod"));
     String source = "foo.bar()";
     Expression expr = JavascriptCompiler.compile(source, functions, getClass().getClassLoader());
-    assertEquals(5, expr.evaluate(0, null), DELTA);
+    assertEquals(5, expr.evaluate(null), DELTA);
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptFunction.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptFunction.java b/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptFunction.java
index 81362a6..ed68a5f 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptFunction.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptFunction.java
@@ -24,7 +24,7 @@ public class TestJavascriptFunction extends LuceneTestCase {
   
   private void assertEvaluatesTo(String expression, double expected) throws Exception {
     Expression evaluator = JavascriptCompiler.compile(expression);
-    double actual = evaluator.evaluate(0, null);
+    double actual = evaluator.evaluate(null);
     assertEquals(expected, actual, DELTA);
   }
   

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8b055382/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptOperations.java
----------------------------------------------------------------------
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptOperations.java b/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptOperations.java
index 82d5056..fd098c5 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptOperations.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestJavascriptOperations.java
@@ -22,7 +22,7 @@ import org.apache.lucene.util.LuceneTestCase;
 public class TestJavascriptOperations extends LuceneTestCase {
   private void assertEvaluatesTo(String expression, long expected) throws Exception {
     Expression evaluator = JavascriptCompiler.compile(expression);
-    long actual = (long)evaluator.evaluate(0, null);
+    long actual = (long)evaluator.evaluate(null);
     assertEquals(expected, actual);
   }
   

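For context, a minimal sketch of the DoubleValuesSource-based expressions API these updated tests exercise (all names are taken from the diffs above; "reader" is assumed to be an open IndexReader over an index with a numeric "popularity" doc-values field, and the fragment lives in a method declaring throws Exception):

  import org.apache.lucene.expressions.Expression;
  import org.apache.lucene.expressions.SimpleBindings;
  import org.apache.lucene.expressions.js.JavascriptCompiler;
  import org.apache.lucene.index.LeafReaderContext;
  import org.apache.lucene.search.DoubleValues;
  import org.apache.lucene.search.DoubleValuesSource;
  import org.apache.lucene.search.SortField;

  // constant expressions take no per-document inputs, hence evaluate(null)
  Expression constant = JavascriptCompiler.compile("sqrt(20)");
  double c = constant.evaluate(null);                // == Math.sqrt(20)

  // per-document evaluation goes through DoubleValuesSource/DoubleValues
  Expression expr = JavascriptCompiler.compile("2*popularity");
  SimpleBindings bindings = new SimpleBindings();
  bindings.add(new SortField("popularity", SortField.Type.LONG));
  DoubleValuesSource vs = expr.getDoubleValuesSource(bindings);
  for (LeafReaderContext leaf : reader.leaves()) {
    DoubleValues values = vs.getValues(leaf, null);  // null scores: expression ignores _score
    if (values.advanceExact(0)) {                    // position on the leaf's first doc
      double v = values.doubleValue();
    }
  }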

[12/43] lucene-solr:jira/solr-8593: SOLR-9883: In example schemaless configs' default update chain, move the DUP to after the AddSchemaFields URP (which is now tagged as RunAlways), to avoid invalid buffered tlog entry replays.

Posted by kr...@apache.org.
SOLR-9883: In example schemaless configs' default update chain, move the DUP to after the AddSchemaFields URP (which is now tagged as RunAlways), to avoid invalid buffered tlog entry replays.
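
The resulting ordering, in miniature (an illustrative sketch only; the complete chains, including the typeMapping configuration, are in the solrconfig diffs below):

  <updateRequestProcessorChain name="add-unknown-fields-to-the-schema">
    <!-- field-mutating URPs run first, so their output is what gets buffered in the tlog -->
    <processor class="solr.ParseDateFieldUpdateProcessorFactory"/>
    <processor class="solr.AddSchemaFieldsUpdateProcessorFactory"/> <!-- now tagged RunAlways -->
    <!-- the DUP moves to just before RunUpdateProcessorFactory -->
    <processor class="solr.LogUpdateProcessorFactory"/>
    <processor class="solr.DistributedUpdateProcessorFactory"/>
    <processor class="solr.RunUpdateProcessorFactory"/>
  </updateRequestProcessorChain>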


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d817fd43
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d817fd43
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d817fd43

Branch: refs/heads/jira/solr-8593
Commit: d817fd43eccd67a5d73c3bbc49561de65d3fc9cb
Parents: 67261d2
Author: Steve Rowe <sa...@apache.org>
Authored: Sat Jan 7 16:39:20 2017 -0500
Committer: Steve Rowe <sa...@apache.org>
Committed: Sat Jan 7 16:39:20 2017 -0500

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../java/org/apache/solr/update/UpdateLog.java  |   4 +-
 .../AddSchemaFieldsUpdateProcessorFactory.java  |   3 +-
 ...dd-schema-fields-update-processor-chains.xml |   8 +-
 .../collection1/conf/solrconfig-schemaless.xml  |  45 ++----
 .../schema/TestSchemalessBufferedUpdates.java   | 160 +++++++++++++++++++
 solr/example/files/conf/solrconfig.xml          |   5 +-
 .../basic_configs/conf/solrconfig.xml           |   6 +-
 .../conf/solrconfig.xml                         |   5 +-
 9 files changed, 190 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 1f7f09a..0d61730 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -337,6 +337,9 @@ Bug Fixes
 
 * SOLR-9503: NPE in Replica Placement Rules when using Overseer Role with other rules (Tim Owen via noble)
 
+* SOLR-9883: Example schemaless solr config files can lead to invalid tlog replays: when updates are buffered,
+  update processors ordered before DistributedUpdateProcessor, e.g. field normalization, are never run. (Steve Rowe)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 9c0f1cf..b79290d 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -1429,7 +1429,7 @@ public class UpdateLog implements PluginInfoInitialized {
             }
           } catch (IOException ex) {
             recoveryInfo.errors++;
-            loglog.warn("REYPLAY_ERR: IOException reading log", ex);
+            loglog.warn("REPLAY_ERR: IOException reading log", ex);
             // could be caused by an incomplete flush if recovering from log
           } catch (ClassCastException cl) {
             recoveryInfo.errors++;
@@ -1440,7 +1440,7 @@ public class UpdateLog implements PluginInfoInitialized {
               throw ex;
             }
             recoveryInfo.errors++;
-            loglog.warn("REYPLAY_ERR: IOException reading log", ex);
+            loglog.warn("REPLAY_ERR: IOException reading log", ex);
             // could be caused by an incomplete flush if recovering from log
           } catch (Exception ex) {
             recoveryInfo.errors++;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
index 4f68bcc..4758972 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
@@ -120,7 +120,8 @@ import static org.apache.solr.core.ConfigSetProperties.IMMUTABLE_CONFIGSET_ARG;
  *   &lt;/lst&gt;
  * &lt;/processor&gt;</pre>
  */
-public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcessorFactory implements SolrCoreAware {
+public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcessorFactory
+    implements SolrCoreAware, UpdateRequestProcessorFactory.RunAlways {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private static final String TYPE_MAPPING_PARAM = "typeMapping";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/core/src/test-files/solr/collection1/conf/solrconfig-add-schema-fields-update-processor-chains.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-add-schema-fields-update-processor-chains.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-add-schema-fields-update-processor-chains.xml
index 8d91d28..e574575 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-add-schema-fields-update-processor-chains.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-add-schema-fields-update-processor-chains.xml
@@ -66,8 +66,6 @@
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="add-fields">
-    <processor class="solr.LogUpdateProcessorFactory" />
-    <processor class="solr.DistributedUpdateProcessorFactory" />
     <processor class="solr.AddSchemaFieldsUpdateProcessorFactory">
       <str name="defaultFieldType">text</str>
       <lst name="typeMapping">
@@ -96,12 +94,12 @@
         <str name="fieldType">tdouble</str>
       </lst>
     </processor>
+    <processor class="solr.LogUpdateProcessorFactory" />
+    <processor class="solr.DistributedUpdateProcessorFactory" />
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="parse-and-add-fields">
-    <processor class="solr.LogUpdateProcessorFactory" />
-    <processor class="solr.DistributedUpdateProcessorFactory" />
     <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
     <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
     <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
@@ -154,6 +152,8 @@
         <str name="fieldType">tdouble</str>
       </lst>
     </processor>
+    <processor class="solr.LogUpdateProcessorFactory" />
+    <processor class="solr.DistributedUpdateProcessorFactory" />
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
 </config>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml
index 46aa2a4..8247d69 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-schemaless.xml
@@ -1,5 +1,4 @@
-<?xml version="1.0" ?>
-
+<?xml version="1.0" encoding="UTF-8" ?>
 <!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
@@ -16,7 +15,7 @@
  See the License for the specific language governing permissions and
  limitations under the License.
 -->
-                                                           
+
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
 
@@ -33,47 +32,26 @@
   <updateHandler>
     <updateLog enable="true">
       <str name="dir">${solr.ulog.dir:}</str>
-    </updateLog> 
+    </updateLog>
   </updateHandler>
 
   <requestHandler name="standard" class="solr.StandardRequestHandler">
     <bool name="httpCaching">true</bool>
   </requestHandler>
 
-
-  <requestHandler name="/update" class="solr.UpdateRequestHandler">
+  <initParams path="/update/**">
     <lst name="defaults">
       <str name="update.chain">add-unknown-fields-to-the-schema</str>
     </lst>
-  </requestHandler>
-
-  <query>
-    <!-- custom cache currently used by block join -->
-    <cache name="perSegFilter"
-           class="solr.search.LRUCache"
-           size="10"
-           initialSize="0"
-           autowarmCount="10"
-           regenerator="solr.NoOpRegenerator" />
-  </query>
+  </initParams>
 
-  <!-- Add unknown fields to the schema
-  
-       An example field type guessing update processor that will
-       attempt to parse string-typed field values as Booleans, Longs,
-       Doubles, or Dates, and then add schema fields with the guessed
-       field types.  
-       
-       This requires that the schema is both managed and mutable, by
-       declaring schemaFactory as ManagedIndexSchemaFactory, with
-       mutable specified as true. 
-       
-       See http://wiki.apache.org/solr/GuessingFieldTypes
-    -->
   <updateRequestProcessorChain name="add-unknown-fields-to-the-schema">
-    <processor class="solr.LogUpdateProcessorFactory"/>
-    <processor class="solr.DistributedUpdateProcessorFactory" />
+    <processor class="solr.UUIDUpdateProcessorFactory" />
     <processor class="solr.RemoveBlankFieldUpdateProcessorFactory"/>
+    <processor class="solr.FieldNameMutatingUpdateProcessorFactory">
+      <str name="pattern">[^\w-\.]</str>
+      <str name="replacement">_</str>
+    </processor>
     <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
     <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
     <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
@@ -119,7 +97,8 @@
         <str name="fieldType">tdouble</str>
       </lst>
     </processor>
+    <processor class="solr.LogUpdateProcessorFactory"/>
+    <processor class="solr.DistributedUpdateProcessorFactory" />
     <processor class="solr.RunUpdateProcessorFactory"/>
   </updateRequestProcessorChain>
-
 </config>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/core/src/test/org/apache/solr/schema/TestSchemalessBufferedUpdates.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TestSchemalessBufferedUpdates.java b/solr/core/src/test/org/apache/solr/schema/TestSchemalessBufferedUpdates.java
new file mode 100644
index 0000000..c2e8b2e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/schema/TestSchemalessBufferedUpdates.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.request.SolrRequestInfo;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.update.AddUpdateCommand;
+import org.apache.solr.update.DirectUpdateHandler2;
+import org.apache.solr.update.UpdateLog;
+import org.apache.solr.update.UpdateHandler;
+import org.apache.solr.update.processor.DistributedUpdateProcessorFactory;
+import org.apache.solr.update.processor.UpdateRequestProcessor;
+import org.apache.solr.update.processor.UpdateRequestProcessorChain;
+import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
+
+public class TestSchemalessBufferedUpdates extends SolrTestCaseJ4 {
+
+  // means that we've seen the leader and have version info (i.e. we are a non-leader replica)
+  private static final String FROM_LEADER = DistribPhase.FROMLEADER.toString();
+  private static final String UPDATE_CHAIN = "add-unknown-fields-to-the-schema";
+  private static final int TIMEOUT = 10;
+
+  private static final String collection = "collection1";
+  private static final String confDir = collection + "/conf";
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    File tmpSolrHome = createTempDir().toFile();
+    File tmpConfDir = new File(tmpSolrHome, confDir);
+    File testHomeConfDir = new File(TEST_HOME(), confDir);
+    FileUtils.copyFileToDirectory(new File(testHomeConfDir, "solrconfig-schemaless.xml"), tmpConfDir);
+    FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema-add-schema-fields-update-processor.xml"), tmpConfDir);
+    FileUtils.copyFileToDirectory(new File(testHomeConfDir, "solrconfig.snippet.randomindexconfig.xml"), tmpConfDir);
+    initCore("solrconfig-schemaless.xml", "schema-add-schema-fields-update-processor.xml", tmpSolrHome.getPath());
+  }
+
+  @Test
+  public void test() throws Exception {
+    DirectUpdateHandler2.commitOnClose = false;
+    final Semaphore logReplay = new Semaphore(0);
+    final Semaphore logReplayFinish = new Semaphore(0);
+    UpdateLog.testing_logReplayHook = () -> {
+      try {
+        assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    };
+    UpdateLog.testing_logReplayFinishHook = logReplayFinish::release;
+
+    SolrQueryRequest req = req();
+    UpdateHandler uhandler = req.getCore().getUpdateHandler();
+    UpdateLog ulog = uhandler.getUpdateLog();
+
+    try {
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
+
+      // Invalid date will be normalized by ParseDateField URP
+      updateJ(jsonAdd(processAdd(sdoc("id","1", "f_dt","2017-01-04"))), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      assertU(commit());
+      assertJQ(req("q", "*:*"), "/response/numFound==1");
+
+      ulog.bufferUpdates();
+      assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+
+      // If the ParseDateField URP isn't ahead of the DUP, then the date won't be normalized in the buffered tlog entry,
+      // and the doc won't be indexed on the replaying replica - a warning is logged as follows:
+      // WARN [...] o.a.s.u.UpdateLog REPLAY_ERR: IOException reading log
+      //            org.apache.solr.common.SolrException: Invalid Date String:'2017-01-05'
+      //              at org.apache.solr.util.DateMathParser.parseMath(DateMathParser.java:234)
+      //              at org.apache.solr.schema.TrieField.createField(TrieField.java:725) [...]
+      updateJ(jsonAdd(processAdd(sdoc("id","2", "f_dt","2017-01-05"))), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      Future<UpdateLog.RecoveryInfo> rinfoFuture = ulog.applyBufferedUpdates();
+
+      assertTrue(rinfoFuture != null);
+
+      assertEquals(UpdateLog.State.APPLYING_BUFFERED, ulog.getState());
+
+      logReplay.release(1000);
+
+      UpdateLog.RecoveryInfo rinfo = rinfoFuture.get();
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
+
+      assertU(commit());
+      assertJQ(req("q", "*:*"), "/response/numFound==2");
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+      req().close();
+    }
+  }
+
+  private SolrInputDocument processAdd(final SolrInputDocument docIn) throws IOException {
+    UpdateRequestProcessorChain processorChain = h.getCore().getUpdateProcessingChain(UPDATE_CHAIN);
+    assertNotNull("Undefined URP chain '" + UPDATE_CHAIN + "'", processorChain);
+    List <UpdateRequestProcessorFactory> factoriesUpToDUP = new ArrayList<>();
+    for (UpdateRequestProcessorFactory urpFactory : processorChain.getProcessors()) {
+      factoriesUpToDUP.add(urpFactory);
+      if (urpFactory.getClass().equals(DistributedUpdateProcessorFactory.class)) 
+        break;
+    }
+    UpdateRequestProcessorChain chainUpToDUP = new UpdateRequestProcessorChain(factoriesUpToDUP, h.getCore());
+    assertNotNull("URP chain '" + UPDATE_CHAIN + "'", chainUpToDUP);
+    SolrQueryResponse rsp = new SolrQueryResponse();
+    SolrQueryRequest req = req();
+    try {
+      SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
+      AddUpdateCommand cmd = new AddUpdateCommand(req);
+      cmd.solrDoc = docIn;
+      UpdateRequestProcessor processor = chainUpToDUP.createProcessor(req, rsp);
+      processor.processAdd(cmd);
+      if (cmd.solrDoc.get("f_dt").getValue() instanceof Date) {
+        // Non-JSON types (Date in this case) aren't handled properly in noggit-0.6.  Although this is fixed in 
+        // https://github.com/yonik/noggit/commit/ec3e732af7c9425e8f40297463cbe294154682b1 to call obj.toString(), 
+        // Date::toString produces a Date representation that Solr doesn't like, so we convert using Instant::toString
+        cmd.solrDoc.get("f_dt").setValue(((Date) cmd.solrDoc.get("f_dt").getValue()).toInstant().toString(), 1.0f);
+      }
+      return cmd.solrDoc;
+    } finally {
+      SolrRequestInfo.clearRequestInfo();
+      req.close();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/example/files/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/example/files/conf/solrconfig.xml b/solr/example/files/conf/solrconfig.xml
index 3fd825e..f83c235 100644
--- a/solr/example/files/conf/solrconfig.xml
+++ b/solr/example/files/conf/solrconfig.xml
@@ -1193,9 +1193,6 @@
   <updateRequestProcessorChain name="files-update-processor">
     <!-- UUIDUpdateProcessorFactory will generate an id if none is present in the incoming document -->
     <processor class="solr.UUIDUpdateProcessorFactory" />
-
-    <processor class="solr.LogUpdateProcessorFactory"/>
-    <processor class="solr.DistributedUpdateProcessorFactory"/>
     <processor class="solr.RemoveBlankFieldUpdateProcessorFactory"/>
     <processor class="solr.FieldNameMutatingUpdateProcessorFactory">
       <str name="pattern">[^\w-\.]</str>
@@ -1261,6 +1258,8 @@
       <!--</lst>-->
     </processor>
 
+    <processor class="solr.LogUpdateProcessorFactory"/>
+    <processor class="solr.DistributedUpdateProcessorFactory"/>
     <processor class="solr.RunUpdateProcessorFactory"/>
   </updateRequestProcessorChain>
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml b/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
index 4ef902f..e009aab 100644
--- a/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
@@ -1187,9 +1187,6 @@
   <updateRequestProcessorChain name="add-unknown-fields-to-the-schema">
     <!-- UUIDUpdateProcessorFactory will generate an id if none is present in the incoming document -->
     <processor class="solr.UUIDUpdateProcessorFactory" />
-
-    <processor class="solr.LogUpdateProcessorFactory"/>
-    <processor class="solr.DistributedUpdateProcessorFactory"/>
     <processor class="solr.RemoveBlankFieldUpdateProcessorFactory"/>
     <processor class="solr.FieldNameMutatingUpdateProcessorFactory">
       <str name="pattern">[^\w-\.]</str>
@@ -1239,6 +1236,9 @@
         <str name="fieldType">tdoubles</str>
       </lst>
     </processor>
+
+    <processor class="solr.LogUpdateProcessorFactory"/>
+    <processor class="solr.DistributedUpdateProcessorFactory"/>
     <processor class="solr.RunUpdateProcessorFactory"/>
   </updateRequestProcessorChain>
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d817fd43/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml b/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
index 4b0899b..2ca1b7f 100644
--- a/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
@@ -1186,9 +1186,6 @@
   <updateRequestProcessorChain name="add-unknown-fields-to-the-schema">
     <!-- UUIDUpdateProcessorFactory will generate an id if none is present in the incoming document -->
     <processor class="solr.UUIDUpdateProcessorFactory" />
-
-    <processor class="solr.LogUpdateProcessorFactory"/>
-    <processor class="solr.DistributedUpdateProcessorFactory"/>
     <processor class="solr.RemoveBlankFieldUpdateProcessorFactory"/>
     <processor class="solr.FieldNameMutatingUpdateProcessorFactory">
       <str name="pattern">[^\w-\.]</str>
@@ -1238,6 +1235,8 @@
         <str name="fieldType">tdoubles</str>
       </lst>
     </processor>
+    <processor class="solr.LogUpdateProcessorFactory"/>
+    <processor class="solr.DistributedUpdateProcessorFactory"/>
     <processor class="solr.RunUpdateProcessorFactory"/>
   </updateRequestProcessorChain>
 


[29/43] lucene-solr:jira/solr-8593: SOLR-9934: SolrTestCaseJ4.clearIndex has been improved to take advantage of low-level, test-specific logic that clears the index metadata more completely than a normal *:* DBQ can, due to update versioning

Posted by kr...@apache.org.
SOLR-9934: SolrTestCaseJ4.clearIndex has been improved to take advantage of low-level, test-specific logic that clears the index metadata more completely than a normal *:* DBQ can, due to update versioning
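
The mechanism, sketched (the exact code is in the SolrTestCaseJ4 diff below; previously each versioning-sensitive test carried its own copy of this override):

  // A plain delQ("*:*") is assigned a real update version, so a test that later
  // makes up synthetic versions can have its updates silently dropped as
  // "reordered". Sending the delete as if from the leader, with version
  // -Long.MAX_VALUE, bypasses the optimistic-concurrency check and wipes the
  // index unconditionally.
  deleteByQueryAndGetVersion("*:*",
      params("_version_", Long.toString(-Long.MAX_VALUE),
             DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString()));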


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1d7379b6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1d7379b6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1d7379b6

Branch: refs/heads/jira/solr-8593
Commit: 1d7379b680062eca766f0410e3db7ff9e9b34cb0
Parents: 17cd0f0
Author: Chris Hostetter <ho...@apache.org>
Authored: Mon Jan 9 09:53:55 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Mon Jan 9 09:54:56 2017 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  3 +++
 .../org/apache/solr/search/TestRTGBase.java     | 12 -----------
 .../org/apache/solr/search/TestRecovery.java    | 13 ------------
 .../apache/solr/search/TestRecoveryHdfs.java    | 11 ----------
 .../apache/solr/update/CdcrUpdateLogTest.java   | 17 ---------------
 .../java/org/apache/solr/SolrTestCaseJ4.java    | 22 ++++++++++++++++++--
 6 files changed, 23 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d7379b6/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 49d24df..b28df9c 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -408,6 +408,9 @@ Other Changes
 
 * SOLR-9777: IndexFingerprinting should use getCombinedCoreAndDeletesKey() instead of getCoreCacheKey() for per-segment caching (Ishan Chattopadhyaya)
 
+* SOLR-9934: SolrTestCaseJ4.clearIndex has been improved to take advantage of low-level, test-specific logic
+  that clears the index metadata more completely than a normal *:* DBQ can, due to update versioning.  (hossman)
+
 ==================  6.3.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d7379b6/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRTGBase.java b/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
index 13ecb32..bb1b08a 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
@@ -36,24 +36,12 @@ import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.update.UpdateLog;
 
 import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
-import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
 
 public class TestRTGBase extends SolrTestCaseJ4 {
 
   // means we've seen the leader and have version info (i.e. we are a non-leader replica)
   public static String FROM_LEADER = DistribPhase.FROMLEADER.toString();
 
-  // since we make up fake versions in these tests, we can get messed up by a DBQ with a real version
-  // since Solr can think following updates were reordered.
-  @Override
-  public void clearIndex() {
-    try {
-      deleteByQueryAndGetVersion("*:*", params("_version_", Long.toString(-Long.MAX_VALUE), DISTRIB_UPDATE_PARAM,FROM_LEADER));
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   protected final ConcurrentHashMap<Integer,DocInfo> model = new ConcurrentHashMap<>();
   protected Map<Integer,DocInfo> committedModel = new HashMap<>();
   protected long snapshotCount;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d7379b6/solr/core/src/test/org/apache/solr/search/TestRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecovery.java b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
index 12d3ec3..9b88ec4 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
@@ -72,19 +72,6 @@ public class TestRecovery extends SolrTestCaseJ4 {
     }
   }
 
-
-  // since we make up fake versions in these tests, we can get messed up by a DBQ with a real version
-  // since Solr can think following updates were reordered.
-  @Override
-  public void clearIndex() {
-    try {
-      deleteByQueryAndGetVersion("*:*", params("_version_", Long.toString(-Long.MAX_VALUE), DISTRIB_UPDATE_PARAM,FROM_LEADER));
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-
   @Test
   public void testLogReplay() throws Exception {
     try {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d7379b6/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
index e7e60ec..e6bb9a6 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
@@ -109,17 +109,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     dfsCluster = null;
   }
 
-  // since we make up fake versions in these tests, we can get messed up by a DBQ with a real version
-  // since Solr can think following updates were reordered.
-  @Override
-  public void clearIndex() {
-    try {
-      deleteByQueryAndGetVersion("*:*", params("_version_", Long.toString(-Long.MAX_VALUE), DISTRIB_UPDATE_PARAM,FROM_LEADER));
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   @Test
   public void testReplicationFactor() throws Exception {
     clearIndex(); 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d7379b6/solr/core/src/test/org/apache/solr/update/CdcrUpdateLogTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/CdcrUpdateLogTest.java b/solr/core/src/test/org/apache/solr/update/CdcrUpdateLogTest.java
index 60dc9c3..b7409d1 100644
--- a/solr/core/src/test/org/apache/solr/update/CdcrUpdateLogTest.java
+++ b/solr/core/src/test/org/apache/solr/update/CdcrUpdateLogTest.java
@@ -31,20 +31,14 @@ import org.apache.lucene.util.LuceneTestCase.Nightly;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.update.processor.DistributedUpdateProcessor;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.noggit.ObjectBuilder;
 
-import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
-
 @Nightly
 public class CdcrUpdateLogTest extends SolrTestCaseJ4 {
 
-  // means that we've seen the leader and have version info (i.e. we are a non-leader replica)
-  private static String FROM_LEADER = DistributedUpdateProcessor.DistribPhase.FROMLEADER.toString();
-
   private static int timeout = 60;  // acquire timeout in seconds.  change this to a huge number when debugging to prevent threads from advancing.
 
   // TODO: fix this test to not require FSDirectory
@@ -66,17 +60,6 @@ public class CdcrUpdateLogTest extends SolrTestCaseJ4 {
     }
   }
 
-  // since we make up fake versions in these tests, we can get messed up by a DBQ with a real version
-  // since Solr can think following updates were reordered.
-  @Override
-  public void clearIndex() {
-    try {
-      deleteByQueryAndGetVersion("*:*", params("_version_", Long.toString(-Long.MAX_VALUE), DISTRIB_UPDATE_PARAM, FROM_LEADER));
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   private void clearCore() throws IOException {
     clearIndex();
     assertU(commit());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d7379b6/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index 437679d..61de56d 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -132,6 +132,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.xml.sax.SAXException;
 
+import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
 import static java.util.Objects.requireNonNull;
 
 /**
@@ -1148,9 +1151,24 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
     @Override
     public String toString() { return xml; }
   }
-  
+
+  /**
+   * Does a low-level delete of all docs in the index.
+   *
+   * The behavior of this method is slightly different than doing a normal <code>*:*</code> DBQ because it
+   * takes advantage of internal methods to ensure all index data is wiped, regardless of optimistic
+   * concurrency version constraints -- making it suitable for tests that create synthetic versions,
+   * and/or require a completely pristine index w/o any field metadata.
+   *
+   * @see #deleteByQueryAndGetVersion
+   */
   public void clearIndex() {
-    assertU(delQ("*:*"));
+    try {
+      deleteByQueryAndGetVersion("*:*", params("_version_", Long.toString(-Long.MAX_VALUE),
+                                               DISTRIB_UPDATE_PARAM,DistribPhase.FROMLEADER.toString()));
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
   }
 
   /** Send JSON update commands */


[11/43] lucene-solr:jira/solr-8593: LUCENE-7611: Remove unnecessary Exception wrapping from DocumentValueSourceDictionary

Posted by kr...@apache.org.
LUCENE-7611: Remove unnecessary Exception wrapping from DocumentValueSourceDictionary
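
The pattern being removed, in condensed (hypothetical) form -- "weights" stands in for the dictionary's currentWeightValues; the actual change, including the leaf-switching logic, is in the diff below:

  // before: the checked IOException was caught and re-wrapped
  protected long getWeight(Document doc, int docId) {
    try {
      return weights.advanceExact(docId) ? weights.longValue() : 0;
    } catch (IOException e) {
      throw new RuntimeException(e);  // wrapping obscures the real failure
    }
  }

  // after: the method simply declares the exception it cannot handle
  protected long getWeight(Document doc, int docId) throws IOException {
    return weights.advanceExact(docId) ? weights.longValue() : 0;
  }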


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/67261d2f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/67261d2f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/67261d2f

Branch: refs/heads/jira/solr-8593
Commit: 67261d2fb515f255e05c138281ab6c6b71d66716
Parents: 8f4fee3
Author: Alan Woodward <ro...@apache.org>
Authored: Sat Jan 7 16:06:29 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Jan 7 16:06:29 2017 +0000

----------------------------------------------------------------------
 .../suggest/DocumentValueSourceDictionary.java  | 21 +++++++-------------
 1 file changed, 7 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/67261d2f/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
index 2291ac9..9356975 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/DocumentValueSourceDictionary.java
@@ -132,27 +132,20 @@ public class DocumentValueSourceDictionary extends DocumentDictionary {
      * by the <code>weightsValueSource</code>
      * */
     @Override
-    protected long getWeight(Document doc, int docId) {
+    protected long getWeight(Document doc, int docId) throws IOException {
       if (currentWeightValues == null) {
         return 0;
       }
       int subIndex = ReaderUtil.subIndex(docId, starts);
       if (subIndex != currentLeafIndex) {
         currentLeafIndex = subIndex;
-        try {
-          currentWeightValues = weightsValueSource.getValues(leaves.get(currentLeafIndex), null);
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-      try {
-        if (currentWeightValues.advanceExact(docId - starts[subIndex]))
-          return currentWeightValues.longValue();
-        else
-          return 0;
-      } catch (IOException e) {
-        throw new RuntimeException(e);
+        currentWeightValues = weightsValueSource.getValues(leaves.get(currentLeafIndex), null);
       }
+      if (currentWeightValues.advanceExact(docId - starts[subIndex]))
+        return currentWeightValues.longValue();
+      else
+        return 0;
+
     }
 
   }