Posted to commits@lucene.apache.org by no...@apache.org on 2017/01/24 03:32:57 UTC

[01/50] [abbrv] lucene-solr:apiv2: SOLR-9906: Fix dodgy test check

Repository: lucene-solr
Updated Branches:
  refs/heads/apiv2 4841ce1af -> a6e777294


SOLR-9906: Fix dodgy test check


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/efc7ee0f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/efc7ee0f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/efc7ee0f

Branch: refs/heads/apiv2
Commit: efc7ee0f0c9154fe58671601fdc053540c97ff62
Parents: 478de2a
Author: Alan Woodward <ro...@apache.org>
Authored: Mon Jan 16 15:24:02 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Mon Jan 16 15:24:02 2017 +0000

----------------------------------------------------------------------
 .../java/org/apache/solr/cloud/AbstractDistribZkTestBase.java   | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/efc7ee0f/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
index 0669cbe..7141eed 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
@@ -29,7 +29,6 @@ import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.Slice.State;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.Diagnostics;
@@ -240,8 +239,8 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
       ClusterState clusterState = zkStateReader.getClusterState();
       DocCollection coll = clusterState.getCollection("collection1");
       Slice slice = coll.getSlice(shardName);
-      if (slice.getLeader() != null && !slice.getLeader().equals(oldLeader) && slice.getState() == State.ACTIVE) {
-        log.info("Old leader {}, new leader. New leader got elected in {} ms", oldLeader, slice.getLeader(),timeOut.timeElapsed(MILLISECONDS) );
+      if (slice.getLeader() != null && !slice.getLeader().equals(oldLeader) && slice.getLeader().getState() == Replica.State.ACTIVE) {
+        log.info("Old leader {}, new leader {}. New leader got elected in {} ms", oldLeader, slice.getLeader(),timeOut.timeElapsed(MILLISECONDS) );
         break;
       }
 

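For context, the corrected wait loop now requires the newly elected leader replica itself to be ACTIVE, rather than consulting the slice's state. A minimal sketch of the pattern, assuming the AbstractDistribZkTestBase members visible in the hunk above (zkStateReader, shardName, oldLeader, timeOut); not a drop-in method:

    // Poll cluster state until a different leader is elected and is ACTIVE.
    while (!timeOut.hasTimedOut()) {
      ClusterState clusterState = zkStateReader.getClusterState();
      DocCollection coll = clusterState.getCollection("collection1");
      Slice slice = coll.getSlice(shardName);
      Replica leader = slice.getLeader();
      // Slice.State tracks the shard as a whole (e.g. during splits); the
      // dodgy check used it where the leader replica's own Replica.State
      // is what actually matters for "a new leader is up".
      if (leader != null && !leader.equals(oldLeader)
          && leader.getState() == Replica.State.ACTIVE) {
        break;
      }
      Thread.sleep(100);
    }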

[35/50] [abbrv] lucene-solr:apiv2: LUCENE-7055: Make sure to use the same reader to create the weight and pull the scorers.

Posted by no...@apache.org.
LUCENE-7055: Make sure to use the same reader to create the weight and pull the scorers.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e8fa5990
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e8fa5990
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e8fa5990

Branch: refs/heads/apiv2
Commit: e8fa59904c99b7c09a89a4b2f79699ff5a384115
Parents: 075aec9
Author: Adrien Grand <jp...@gmail.com>
Authored: Thu Jan 19 09:29:51 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Thu Jan 19 09:30:34 2017 +0100

----------------------------------------------------------------------
 .../test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e8fa5990/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java b/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
index 2a16e5d..de289e7 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
@@ -68,7 +68,7 @@ public class TestIndexOrDocValuesQuery extends LuceneTestCase {
         .build();
 
     final Weight w1 = searcher.createNormalizedWeight(q1, random().nextBoolean());
-    final Scorer s1 = w1.scorer(reader.leaves().get(0));
+    final Scorer s1 = w1.scorer(searcher.getIndexReader().leaves().get(0));
     assertNotNull(s1.twoPhaseIterator()); // means we use doc values
 
     // The term query is less selective, so the IndexOrDocValuesQuery should use points
@@ -78,7 +78,7 @@ public class TestIndexOrDocValuesQuery extends LuceneTestCase {
         .build();
 
     final Weight w2 = searcher.createNormalizedWeight(q2, random().nextBoolean());
-    final Scorer s2 = w2.scorer(reader.leaves().get(0));
+    final Scorer s2 = w2.scorer(searcher.getIndexReader().leaves().get(0));
     assertNull(s2.twoPhaseIterator()); // means we use points
 
     reader.close();

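The pitfall here is that LuceneTestCase.newSearcher may wrap the reader it is given, so reader.leaves() and searcher.getIndexReader().leaves() are not guaranteed to be the same leaves; a Weight must only be asked for scorers against leaves of the reader it was created from. A standalone sketch of the safe pattern against the Lucene API of this era (class and field names are illustrative):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field.Store;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.Weight;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class WeightScorerSameReader {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        try (IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
          Document doc = new Document();
          doc.add(new StringField("f", "foo", Store.NO));
          iw.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
          IndexSearcher searcher = new IndexSearcher(reader); // tests may hand back a wrapped reader
          Weight w = searcher.createNormalizedWeight(new TermQuery(new Term("f", "foo")), true);
          // Always pull leaves from the searcher's own reader, never from
          // some other reader reference that happens to be in scope.
          LeafReaderContext ctx = searcher.getIndexReader().leaves().get(0);
          Scorer s = w.scorer(ctx);
          System.out.println("first match: " + s.iterator().nextDoc());
        }
        dir.close();
      }
    }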

[44/50] [abbrv] lucene-solr:apiv2: SOLR-10013: Fix DV range query bug introduced by LUCENE-7643 by disabling an optimization (LUCENE-7649 tracks re-enabling or removing it completely)

Posted by no...@apache.org.
SOLR-10013: Fix DV range query bug introduced by LUCENE-7643 by disabling an optimization (LUCENE-7649 tracks re-enabling or removing it completely)


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b0db06ba
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b0db06ba
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b0db06ba

Branch: refs/heads/apiv2
Commit: b0db06bad568b7eedf528379a2fe5ac935992d56
Parents: 69055aa
Author: Chris Hostetter <ho...@apache.org>
Authored: Fri Jan 20 13:27:09 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Fri Jan 20 13:27:09 2017 -0700

----------------------------------------------------------------------
 .../SortedNumericDocValuesRangeQuery.java       |  5 +--
 .../document/SortedSetDocValuesRangeQuery.java  |  5 +--
 .../lucene/search/TestDocValuesQueries.java     | 33 ++++++++++++++++++++
 3 files changed, 39 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b0db06ba/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java
index 18805b2..d5f75a7 100644
--- a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java
@@ -19,7 +19,6 @@ package org.apache.lucene.document;
 import java.io.IOException;
 import java.util.Objects;
 
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -100,9 +99,11 @@ abstract class SortedNumericDocValuesRangeQuery extends Query {
         if (values == null) {
           return null;
         }
-        final NumericDocValues singleton = DocValues.unwrapSingleton(values);
+        final NumericDocValues singleton = null; // TODO: LUCENE-7649, re-consider optimization that broke SOLR-10013
+        // final NumericDocValues singleton = DocValues.unwrapSingleton(values);
         final TwoPhaseIterator iterator;
         if (singleton != null) {
+          assert false : "impossible code -- or: someone re-enabled singleton optimization w/o reading the whole method";
           iterator = new TwoPhaseIterator(singleton) {
             @Override
             public boolean matches() throws IOException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b0db06ba/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java
index 30af45f..3bc1b9c 100644
--- a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java
@@ -19,7 +19,6 @@ package org.apache.lucene.document;
 import java.io.IOException;
 import java.util.Objects;
 
-import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -144,9 +143,11 @@ abstract class SortedSetDocValuesRangeQuery extends Query {
           return null;
         }
 
-        final SortedDocValues singleton = DocValues.unwrapSingleton(values);
+        final SortedDocValues singleton = null; // TODO: LUCENE-7649, re-consider optimization that broke SOLR-10013
+        // final SortedDocValues singleton = DocValues.unwrapSingleton(values);
         final TwoPhaseIterator iterator;
         if (singleton != null) {
+          assert false : "impossible code -- or: someone re-enabled singleton optimization w/o reading the whole method";
           iterator = new TwoPhaseIterator(singleton) {
             @Override
             public boolean matches() throws IOException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b0db06ba/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
index 501538f..6cb0460 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
@@ -30,6 +30,7 @@ import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.TestUtil;
 
 public class TestDocValuesQueries extends LuceneTestCase {
@@ -235,4 +236,36 @@ public class TestDocValuesQueries extends LuceneTestCase {
     reader.close();
     dir.close();
   }
+
+  public void testSortedNumericNPE() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    double[] nums = {-1.7147449030215377E-208, -1.6887024655302576E-11, 1.534911516604164E113, 0.0,
+        2.6947996404505155E-166, -2.649722021970773E306, 6.138239235731689E-198, 2.3967090122610808E111};
+    for (int i = 0; i < nums.length; ++i) {
+      Document doc = new Document();
+      doc.add(new SortedNumericDocValuesField("dv", NumericUtils.doubleToSortableLong(nums[i])));
+      iw.addDocument(doc);
+    }
+    iw.commit();
+    final IndexReader reader = iw.getReader();
+    final IndexSearcher searcher = newSearcher(reader);
+    iw.close();
+
+    final long lo = NumericUtils.doubleToSortableLong(8.701032080293731E-226);
+    final long hi = NumericUtils.doubleToSortableLong(2.0801416404385346E-41);
+    
+    Query query = SortedNumericDocValuesField.newRangeQuery("dv", lo, hi);
+    // TODO: assert expected matches
+    searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
+
+    // swap order, should still work
+    query = SortedNumericDocValuesField.newRangeQuery("dv", hi, lo);
+    // TODO: assert expected matches
+    searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
+    
+    reader.close();
+    dir.close();
+  }
+   
 }

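For reference, the disabled fast path hinges on DocValues.unwrapSingleton, which returns the single-valued view of a SortedNumericDocValues when every document holds at most one value, or null otherwise; hard-wiring the singleton to null forces the generic multi-valued loop until LUCENE-7649 resolves the underlying bug. A sketch of the two paths, reconstructed from the hunks above rather than copied from the Lucene source:

    import java.io.IOException;

    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.NumericDocValues;
    import org.apache.lucene.index.SortedNumericDocValues;
    import org.apache.lucene.search.TwoPhaseIterator;

    class SingletonRangeSketch {
      static TwoPhaseIterator rangeMatcher(SortedNumericDocValues values, long lower, long upper) {
        final NumericDocValues singleton = DocValues.unwrapSingleton(values);
        if (singleton != null) {
          // Single-valued field: one longValue() read and one comparison.
          return new TwoPhaseIterator(singleton) {
            @Override
            public boolean matches() throws IOException {
              final long value = singleton.longValue();
              return value >= lower && value <= upper;
            }
            @Override
            public float matchCost() {
              return 2;
            }
          };
        }
        // Generic path: walk all values of the current document in order.
        return new TwoPhaseIterator(values) {
          @Override
          public boolean matches() throws IOException {
            for (int i = 0, count = values.docValueCount(); i < count; ++i) {
              final long value = values.nextValue();
              if (value >= lower && value <= upper) {
                return true;
              }
            }
            return false;
          }
          @Override
          public float matchCost() {
            return 2;
          }
        };
      }
    }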

[08/50] [abbrv] lucene-solr:apiv2: SOLR-9935: UnifiedHighlighter, when hl.fragsize=0 don't do fragmenting

Posted by no...@apache.org.
SOLR-9935: UnifiedHighlighter, when hl.fragsize=0 don't do fragmenting


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ed513fde
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ed513fde
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ed513fde

Branch: refs/heads/apiv2
Commit: ed513fdee77b95379bed8f8d5f369fb0393fd364
Parents: 43874fc
Author: David Smiley <ds...@apache.org>
Authored: Tue Jan 17 08:06:21 2017 -0500
Committer: David Smiley <ds...@apache.org>
Committed: Tue Jan 17 08:07:51 2017 -0500

----------------------------------------------------------------------
 .../org/apache/solr/highlight/UnifiedSolrHighlighter.java | 10 +++++++---
 .../apache/solr/highlight/TestUnifiedSolrHighlighter.java |  7 +++++--
 2 files changed, 12 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed513fde/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java
index 5b59b85..2633522 100644
--- a/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/UnifiedSolrHighlighter.java
@@ -295,6 +295,13 @@ public class UnifiedSolrHighlighter extends SolrHighlighter implements PluginInf
 
     @Override
     protected BreakIterator getBreakIterator(String field) {
+      // Use a default fragsize the same as the regex Fragmenter (original Highlighter) since we're
+      //  both likely shooting for sentence-like patterns.
+      int fragsize = params.getFieldInt(field, HighlightParams.FRAGSIZE, LuceneRegexFragmenter.DEFAULT_FRAGMENT_SIZE);
+      if (fragsize == 0) { // special value; no fragmenting
+        return new WholeBreakIterator();
+      }
+
       String language = params.getFieldParam(field, HighlightParams.BS_LANGUAGE);
       String country = params.getFieldParam(field, HighlightParams.BS_COUNTRY);
       String variant = params.getFieldParam(field, HighlightParams.BS_VARIANT);
@@ -302,9 +309,6 @@ public class UnifiedSolrHighlighter extends SolrHighlighter implements PluginInf
       String type = params.getFieldParam(field, HighlightParams.BS_TYPE);
       BreakIterator baseBI = parseBreakIterator(type, locale);
 
-      // Use a default fragsize the same as the regex Fragmenter (original Highlighter) since we're
-      //  both likely shooting for sentence-like patterns.
-      int fragsize = params.getFieldInt(field, HighlightParams.FRAGSIZE, LuceneRegexFragmenter.DEFAULT_FRAGMENT_SIZE);
       if (fragsize <= 1 || baseBI instanceof WholeBreakIterator) { // no real minimum size
         return baseBI;
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ed513fde/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java b/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java
index 2eb4ba3..d452829 100644
--- a/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java
+++ b/solr/core/src/test/org/apache/solr/highlight/TestUnifiedSolrHighlighter.java
@@ -79,7 +79,7 @@ public class TestUnifiedSolrHighlighter extends SolrTestCaseJ4 {
     assertU(commit());
     assertQ("multiple snippets test",
         req("q", "text:document", "sort", "id asc", "hl", "true", "hl.snippets", "2", "hl.bs.type", "SENTENCE",
-            "hl.fragsize", "0"),
+            "hl.fragsize", "-1"),
         "count(//lst[@name='highlighting']/lst[@name='101']/arr[@name='text']/*)=2",
         "//lst[@name='highlighting']/lst[@name='101']/arr/str[1]='<em>Document</em> snippet one. '",
         "//lst[@name='highlighting']/lst[@name='101']/arr/str[2]='<em>Document</em> snippet two.'");
@@ -214,9 +214,12 @@ public class TestUnifiedSolrHighlighter extends SolrTestCaseJ4 {
   public void testBreakIteratorWhole() {
     assertU(adoc("text", "Document one has a first sentence. Document two has a second sentence.", "id", "103"));
     assertU(commit());
-    assertQ("different breakiterator", 
+    assertQ("WHOLE breakiterator",
         req("q", "text:document", "sort", "id asc", "hl", "true", "hl.bs.type", "WHOLE", "hl.fragsize", "-1"),
         "//lst[@name='highlighting']/lst[@name='103']/arr[@name='text']/str='<em>Document</em> one has a first sentence. <em>Document</em> two has a second sentence.'");
+    assertQ("hl.fragsize 0 is equivalent to WHOLE",
+        req("q", "text:document", "sort", "id asc", "hl", "true", "hl.fragsize", "0"),
+        "//lst[@name='highlighting']/lst[@name='103']/arr[@name='text']/str='<em>Document</em> one has a first sentence. <em>Document</em> two has a second sentence.'");
   }
 
   public void testFragsize() {

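In other words, hl.fragsize=0 is now a special value meaning "no fragmenting": the UnifiedHighlighter answers with a WholeBreakIterator, so the whole field value comes back as a single snippet, equivalent to hl.bs.type=WHOLE, while hl.fragsize=-1 keeps the earlier "no minimum size" behaviour of using the configured break iterator as-is. An illustrative request (collection and field names hypothetical):

    /solr/techproducts/select?q=text:document&hl=true&hl.method=unified&hl.fl=text&hl.fragsize=0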

[28/50] [abbrv] lucene-solr:apiv2: SOLR-8396: Add support for PointFields in Solr

Posted by no...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
new file mode 100644
index 0000000..12f1504
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
@@ -0,0 +1,1472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.schema;
+
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.schema.DoublePointField;
+import org.apache.solr.schema.IntPointField;
+import org.apache.solr.schema.PointField;
+import org.apache.solr.schema.SchemaField;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableMap;
+
+/**
+ * Tests for PointField functionality
+ *
+ *
+ */
+public class TestPointFields extends SolrTestCaseJ4 {
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig.xml","schema-point.xml");
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    clearIndex();
+    assertU(commit());
+    super.tearDown();
+  }
+  
+  @Test
+  public void testIntPointFieldExactQuery() throws Exception {
+    doTestIntPointFieldExactQuery("number_p_i", false);
+    doTestIntPointFieldExactQuery("number_p_i_mv", false);
+    doTestIntPointFieldExactQuery("number_p_i_ni_dv", false);
+    // uncomment once MultiValued docValues are supported in PointFields
+    //    doTestIntPointFieldExactQuery("number_p_i_ni_mv_dv", false);
+  }
+  
+  @Test
+  public void testIntPointFieldReturn() throws Exception {
+    testPointFieldReturn("number_p_i", "int", new String[]{"0", "-1", "2", "3", "43", "52", "-60", "74", "80", "99"});
+    clearIndex();
+    assertU(commit());
+    testPointFieldReturn("number_p_i_dv_ns", "int", new String[]{"0", "-1", "2", "3", "43", "52", "-60", "74", "80", "99"});
+  }
+  
+  @Test
+  public void testIntPointFieldRangeQuery() throws Exception {
+    doTestIntPointFieldRangeQuery("number_p_i", "int", false);
+  }
+  
+  @Test
+  public void testIntPointFieldSort() throws Exception {
+    doTestPointFieldSort("number_p_i", "number_p_i_dv", new String[]{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"});
+  }
+  
+  @Test
+  public void testIntPointFieldFacetField() throws Exception {
+    testPointFieldFacetField("number_p_i", "number_p_i_dv", getSequentialStringArrayWithInts(10));
+  }
+
+  @Test
+  public void testIntPointFieldRangeFacet() throws Exception {
+    doTestIntPointFieldRangeFacet("number_p_i_dv", "number_p_i");
+  }
+  
+  
+  @Test
+  public void testIntPointFunctionQuery() throws Exception {
+    doTestIntPointFunctionQuery("number_p_i_dv", "number_p_i", "int");
+  }
+
+
+  @Test
+  public void testIntPointStats() throws Exception {
+    testPointStats("number_p_i", "number_p_i_dv", new String[]{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"},
+        0D, 9D, "10", "1", 0D);
+  }
+
+  @Test
+  public void testIntPointFieldMultiValuedExactQuery() throws Exception {
+    testPointFieldMultiValuedExactQuery("number_p_i_mv", getSequentialStringArrayWithInts(20));
+  }
+  
+  @Test
+  public void testIntPointFieldMultiValuedReturn() throws Exception {
+    testPointFieldMultiValuedReturn("number_p_i_mv", "int", getSequentialStringArrayWithInts(20));
+  }
+  
+  @Test
+  public void testIntPointFieldMultiValuedRangeQuery() throws Exception {
+    testPointFieldMultiValuedRangeQuery("number_p_i_mv", "int", getSequentialStringArrayWithInts(20));
+  }
+  
+  //TODO MV SORT?
+  @Test
+  @Ignore("Enable once MultiValued docValues are supported in PointFields")
+  public void testIntPointFieldMultiValuedFacetField() throws Exception {
+    testPointFieldMultiValuedFacetField("number_p_i_mv", "number_p_i_mv_dv", getSequentialStringArrayWithInts(20));
+  }
+
+  @Test
+  @Ignore("Enable once MultiValued docValues are supported in PointFields")
+  public void testIntPointFieldMultiValuedRangeFacet() throws Exception {
+    String docValuesField = "number_p_i_mv_dv";
+    String nonDocValuesField = "number_p_i_mv";
+    
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), docValuesField, String.valueOf(i), docValuesField, String.valueOf(i + 10), 
+          nonDocValuesField, String.valueOf(i), nonDocValuesField, String.valueOf(i + 10)));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).getType() instanceof IntPointField);
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", docValuesField, "facet.range.start", "-10", "facet.range.end", "20", "facet.range.gap", "2"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='2'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='4'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='6'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='10'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='12'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='14'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='16'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='18'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']");
+    
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", docValuesField, "facet.range.start", "-10", "facet.range.end", "20", "facet.range.gap", "2", "facet.range.method", "dv"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='2'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='4'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='6'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='10'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='12'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='14'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='16'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='18'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']");
+    
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", docValuesField, "facet.range.start", "0", "facet.range.end", "20", "facet.range.gap", "100"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0'][.='10']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDocValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).getType() instanceof IntPointField);
+    // Range Faceting with method = filter should work
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", nonDocValuesField, "facet.range.start", "-10", "facet.range.end", "20", "facet.range.gap", "2", "facet.range.method", "filter"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='2'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='4'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='6'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='10'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='12'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='14'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='16'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='18'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']");
+    
+    // this should actually use filter method instead of dv
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", nonDocValuesField, "facet.range.start", "-10", "facet.range.end", "20", "facet.range.gap", "2", "facet.range.method", "dv"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='2'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='4'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='6'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='10'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='12'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='14'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='16'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='18'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']");
+  }
+
+  @Test
+  @Ignore("Enable once MultiValued docValues are supported in PointFields")
+  public void testIntPointMultiValuedFunctionQuery() throws Exception {
+    testPointMultiValuedFunctionQuery("number_p_i_mv", "number_p_i_mv_dv", "int", getSequentialStringArrayWithInts(20));
+  }
+  
+  @Test
+  public void testIntPointFieldsAtomicUpdates() throws Exception {
+    if (!Boolean.getBoolean("enable.update.log")) {
+      return;
+    }
+    testIntPointFieldsAtomicUpdates("number_p_i", "int");
+    testIntPointFieldsAtomicUpdates("number_p_i_dv", "int");
+    testIntPointFieldsAtomicUpdates("number_p_i_dv_ns", "int");
+  }
+  
+  @Test
+  public void testIntPointSetQuery() throws Exception {
+    doTestSetQueries("number_p_i", getRandomStringArrayWithInts(10, false), false);
+    doTestSetQueries("number_p_i_mv", getRandomStringArrayWithInts(10, false), true);
+    doTestSetQueries("number_p_i_ni_dv", getRandomStringArrayWithInts(10, false), false);
+  }
+  
+  // DoublePointField
+
+  @Test
+  public void testDoublePointFieldExactQuery() throws Exception {
+    doTestFloatPointFieldExactQuery("number_d");
+    doTestFloatPointFieldExactQuery("number_p_d");
+    doTestFloatPointFieldExactQuery("number_p_d_mv");
+    doTestFloatPointFieldExactQuery("number_p_d_ni_dv");
+    // TODO enable once MultiValued docValues are supported with PointFields
+//    doTestFloatPointFieldExactQuery("number_p_d_ni_mv_dv");
+  }
+  
+  @Test
+  public void testDoublePointFieldReturn() throws Exception {
+    testPointFieldReturn("number_p_d", "double", new String[]{"0.0", "1.2", "2.5", "3.02", "0.43", "5.2", "6.01", "74.0", "80.0", "9.9"});
+    clearIndex();
+    assertU(commit());
+    testPointFieldReturn("number_p_d_dv_ns", "double", new String[]{"0.0", "1.2", "2.5", "3.02", "0.43", "5.2", "6.01", "74.0", "80.0", "9.9"});
+    clearIndex();
+    assertU(commit());
+    String[] arr = new String[atLeast(10)];
+    for (int i = 0; i < arr.length; i++) {
+      double rand = random().nextDouble() * 10;
+      arr[i] = String.valueOf(rand);
+    }
+    testPointFieldReturn("number_p_d", "double", arr);
+  }
+  
+  @Test
+  public void testDoublePointFieldRangeQuery() throws Exception {
+    doTestFloatPointFieldRangeQuery("number_p_d", "double", true);
+  }
+  
+  @Test
+  public void testDoublePointFieldSort() throws Exception {
+    String[] arr = getRandomStringArrayWithDoubles(10, true);
+    doTestPointFieldSort("number_p_d", "number_p_d_dv", arr);
+  }
+  
+  @Test
+  public void testDoublePointFieldFacetField() throws Exception {
+    testPointFieldFacetField("number_p_d", "number_p_d_dv", getSequentialStringArrayWithDoubles(10));
+    clearIndex();
+    assertU(commit());
+    testPointFieldFacetField("number_p_d", "number_p_d_dv", getRandomStringArrayWithDoubles(10, false));
+  }
+
+  @Test
+  public void testDoublePointFieldRangeFacet() throws Exception {
+    doTestFloatPointFieldRangeFacet("number_p_d_dv", "number_p_d");
+  }
+
+  @Test
+  public void testDoublePointFunctionQuery() throws Exception {
+    doTestFloatPointFunctionQuery("number_p_d_dv", "number_p_d", "double");
+  }
+  
+  @Test
+  public void testDoublePointStats() throws Exception {
+    testPointStats("number_p_d", "number_p_d_dv", new String[]{"-10.0", "1.1", "2.2", "3.3", "4.4", "5.5", "6.6", "7.7", "8.8", "9.9"},
+        -10.0D, 9.9D, "10", "1", 1E-10D);
+  }
+  
+  @Test
+  public void testDoublePointFieldMultiValuedExactQuery() throws Exception {
+    testPointFieldMultiValuedExactQuery("number_p_d_mv", getRandomStringArrayWithDoubles(20, false));
+  }
+  
+  @Test
+  public void testDoublePointFieldMultiValuedReturn() throws Exception {
+    testPointFieldMultiValuedReturn("number_p_d_mv", "double", getSequentialStringArrayWithDoubles(20));
+  }
+  
+  @Test
+  public void testDoublePointFieldMultiValuedRangeQuery() throws Exception {
+    testPointFieldMultiValuedRangeQuery("number_p_d_mv", "double", getSequentialStringArrayWithDoubles(20));
+  }
+  
+  @Test
+  @Ignore("Enable once MultiValued docValues are supported in PointFields")
+  public void testDoublePointFieldMultiValuedFacetField() throws Exception {
+    testPointFieldMultiValuedFacetField("number_p_d_mv", "number_p_d_mv_dv", getSequentialStringArrayWithDoubles(20));
+    testPointFieldMultiValuedFacetField("number_p_d_mv", "number_p_d_mv_dv", getRandomStringArrayWithDoubles(20, false));
+  }
+
+  @Test
+  @Ignore("Enable once MultiValued docValues are supported in PointFields")
+  public void testDoublePointFieldMultiValuedRangeFacet() throws Exception {
+    String docValuesField = "number_p_d_mv_dv";
+    String nonDocValuesField = "number_p_d_mv";
+    
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), docValuesField, String.valueOf(i), docValuesField, String.valueOf(i + 10), 
+          nonDocValuesField, String.valueOf(i), nonDocValuesField, String.valueOf(i + 10)));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).getType() instanceof DoublePointField);
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", docValuesField, "facet.range.start", "-10", "facet.range.end", "20", "facet.range.gap", "2"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='2.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='4.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='6.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='10.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='12.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='14.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='16.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='18.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']");
+    
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", docValuesField, "facet.range.start", "-10", "facet.range.end", "20", "facet.range.gap", "2", "facet.range.method", "dv"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='2.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='4.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='6.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='10.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='12.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='14.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='16.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='18.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']");
+    
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", docValuesField, "facet.range.start", "0", "facet.range.end", "20", "facet.range.gap", "100"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='10']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDocValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).getType() instanceof DoublePointField);
+    // Range Faceting with method = filter should work
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", nonDocValuesField, "facet.range.start", "-10", "facet.range.end", "20", "facet.range.gap", "2", "facet.range.method", "filter"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='2.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='4.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='6.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='10.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='12.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='14.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='16.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='18.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']");
+    
+    // this should actually use filter method instead of dv
+    assertQ(req("q", "*:*", "fl", "id", "facet", "true", "facet.range", nonDocValuesField, "facet.range.start", "-10", "facet.range.end", "20", "facet.range.gap", "2", "facet.range.method", "dv"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='2.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='4.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='6.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='10.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='12.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='14.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='16.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='18.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']");
+  }
+  
+  @Test
+  @Ignore("Enable once MultiValued docValues are supported in PointFields")
+  public void testDoublePointMultiValuedFunctionQuery() throws Exception {
+    testPointMultiValuedFunctionQuery("number_p_d_mv", "number_p_d_mv_dv", "double", getSequentialStringArrayWithDoubles(20));
+    testPointMultiValuedFunctionQuery("number_p_d_mv", "number_p_d_mv_dv", "double", getRandomStringArrayWithFloats(20, true));
+  }
+  
+  @Test
+  public void testDoublePointFieldsAtomicUpdates() throws Exception {
+    if (!Boolean.getBoolean("enable.update.log")) {
+      return;
+    }
+    doTestFloatPointFieldsAtomicUpdates("number_p_d", "double");
+    doTestFloatPointFieldsAtomicUpdates("number_p_d_dv", "double");
+    doTestFloatPointFieldsAtomicUpdates("number_p_d_dv_ns", "double");
+  }
+  
+  private void doTestFloatPointFieldsAtomicUpdates(String field, String type) throws Exception {
+    assertU(adoc(sdoc("id", "1", field, "1.1234")));
+    assertU(commit());
+
+    assertU(adoc(sdoc("id", "1", field, ImmutableMap.of("inc", 1.1F))));
+    assertU(commit());
+
+    assertQ(req("q", "id:1"),
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.='2.2234']");
+    
+    assertU(adoc(sdoc("id", "1", field, ImmutableMap.of("inc", -1.1F))));
+    assertU(commit());
+    
+    // TODO: can this test be better?
+    assertQ(req("q", "id:1"),
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.>'1.1233']",
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.<'1.1235']");
+    
+    assertU(adoc(sdoc("id", "1", field, ImmutableMap.of("set", 3.123F))));
+    assertU(commit());
+    
+    assertQ(req("q", "id:1"),
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.='3.123']");
+    
+    assertU(adoc(sdoc("id", "1", field, ImmutableMap.of("set", 3.14F))));
+    assertU(commit());
+    assertU(adoc(sdoc("id", "1", field, ImmutableMap.of("inc", 1F))));
+    assertU(commit());
+    assertQ(req("q", "id:1"),
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.>'4.13']",
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.<'4.15']");
+  }
+  
+  @Test
+  public void testDoublePointSetQuery() throws Exception {
+    doTestSetQueries("number_p_d", getRandomStringArrayWithDoubles(10, false), false);
+    doTestSetQueries("number_p_d_mv", getRandomStringArrayWithDoubles(10, false), true);
+    doTestSetQueries("number_p_d_ni_dv", getRandomStringArrayWithDoubles(10, false), false);
+  }
+  
+  // Float
+  
+
+  @Test
+  public void testFloatPointFieldExactQuery() throws Exception {
+    doTestFloatPointFieldExactQuery("number_p_f");
+    doTestFloatPointFieldExactQuery("number_p_f_mv");
+    doTestFloatPointFieldExactQuery("number_p_f_ni_dv");
+//    doTestFloatPointFieldExactQuery("number_p_f_ni_mv_dv");
+  }
+  
+  @Test
+  public void testFloatPointFieldReturn() throws Exception {
+    testPointFieldReturn("number_p_f", "float", new String[]{"0.0", "-1.2", "2.5", "3.02", "0.43", "5.2", "6.01", "74.0", "80.0", "9.9"});
+    clearIndex();
+    assertU(commit());
+    testPointFieldReturn("number_p_f_dv_ns", "float", new String[]{"0.0", "-1.2", "2.5", "3.02", "0.43", "5.2", "6.01", "74.0", "80.0", "9.9"});
+    clearIndex();
+    assertU(commit());
+    String[] arr = new String[atLeast(10)];
+    for (int i = 0; i < arr.length; i++) {
+      float rand = random().nextFloat() * 10;
+      arr[i] = String.valueOf(rand);
+    }
+    testPointFieldReturn("number_p_f", "float", arr);
+  }
+  
+  @Test
+  public void testFloatPointFieldRangeQuery() throws Exception {
+    doTestFloatPointFieldRangeQuery("number_p_f", "float", false);
+  }
+  
+  @Test
+  public void testFloatPointFieldSort() throws Exception {
+    String[] arr = getRandomStringArrayWithFloats(10, true);
+    doTestPointFieldSort("number_p_f", "number_p_f_dv", arr);
+  }
+  
+  @Test
+  public void testFloatPointFieldFacetField() throws Exception {
+    testPointFieldFacetField("number_p_f", "number_p_f_dv", getSequentialStringArrayWithDoubles(10));
+    clearIndex();
+    assertU(commit());
+    testPointFieldFacetField("number_p_f", "number_p_f_dv", getRandomStringArrayWithFloats(10, false));
+  }
+
+  @Test
+  public void testFloatPointFieldRangeFacet() throws Exception {
+    doTestFloatPointFieldRangeFacet("number_p_f_dv", "number_p_f");
+  }
+
+  @Test
+  public void testFloatPointFunctionQuery() throws Exception {
+    doTestFloatPointFunctionQuery("number_p_f_dv", "number_p_f", "float");
+  }
+  
+  @Test
+  public void testFloatPointStats() throws Exception {
+    testPointStats("number_p_f", "number_p_f_dv", new String[]{"-10.0", "1.1", "2.2", "3.3", "4.4", "5.5", "6.6", "7.7", "8.8", "9.9"},
+        -10D, 9.9D, "10", "1", 1E-6D);
+  }
+  
+  @Test
+  public void testFloatPointFieldMultiValuedExactQuery() throws Exception {
+    testPointFieldMultiValuedExactQuery("number_p_f_mv", getRandomStringArrayWithFloats(20, false));
+  }
+  
+  @Test
+  public void testFloatPointFieldMultiValuedReturn() throws Exception {
+    testPointFieldMultiValuedReturn("number_p_f_mv", "float", getSequentialStringArrayWithDoubles(20));
+  }
+  
+  @Test
+  public void testFloatPointFieldMultiValuedRangeQuery() throws Exception {
+    testPointFieldMultiValuedRangeQuery("number_p_f_mv", "float", getSequentialStringArrayWithDoubles(20));
+  }
+  
+  @Test
+  public void testFloatPointFieldsAtomicUpdates() throws Exception {
+    if (!Boolean.getBoolean("enable.update.log")) {
+      return;
+    }
+    doTestFloatPointFieldsAtomicUpdates("number_p_f", "float");
+    doTestFloatPointFieldsAtomicUpdates("number_p_f_dv", "float");
+    doTestFloatPointFieldsAtomicUpdates("number_p_f_dv_ns", "float");
+  }
+  
+
+  @Test
+  public void testFloatPointSetQuery() throws Exception {
+    doTestSetQueries("number_p_f", getRandomStringArrayWithFloats(10, false), false);
+    doTestSetQueries("number_p_f_mv", getRandomStringArrayWithFloats(10, false), true);
+    doTestSetQueries("number_p_f_ni_dv", getRandomStringArrayWithFloats(10, false), false);
+  }
+  
+  // Long
+  
+  @Test
+  public void testLongPointFieldExactQuery() throws Exception {
+    doTestIntPointFieldExactQuery("number_p_l", true);
+    doTestIntPointFieldExactQuery("number_p_l_mv", true);
+    doTestIntPointFieldExactQuery("number_p_l_ni_dv", true);
+//    doTestIntPointFieldExactQuery("number_p_l_ni_mv_dv", true);
+  }
+  
+  @Test
+  public void testLongPointFieldReturn() throws Exception {
+    testPointFieldReturn("number_p_l", "long", new String[]{"0", "-1", "2", "3", "43", "52", "-60", "74", "80", "99", String.valueOf(Long.MAX_VALUE)});
+    clearIndex();
+    assertU(commit());
+    testPointFieldReturn("number_p_l_dv_ns", "long", new String[]{"0", "-1", "2", "3", "43", "52", "-60", "74", "80", "99", String.valueOf(Long.MAX_VALUE)});
+  }
+  
+  @Test
+  public void testLongPointFieldRangeQuery() throws Exception {
+    doTestIntPointFieldRangeQuery("number_p_l", "long", true);
+  }
+  
+  @Test
+  public void testLongPointFieldSort() throws Exception {
+    doTestPointFieldSort("number_p_l", "number_p_l_dv", new String[]{String.valueOf(Integer.MIN_VALUE), 
+        "1", "2", "3", "4", "5", "6", "7", 
+        String.valueOf(Integer.MAX_VALUE), String.valueOf(Long.MAX_VALUE)});
+  }
+  
+  @Test
+  public void testLongPointFieldFacetField() throws Exception {
+    testPointFieldFacetField("number_p_l", "number_p_l_dv", getSequentialStringArrayWithInts(10));
+    clearIndex();
+    assertU(commit());
+    testPointFieldFacetField("number_p_l", "number_p_l_dv", getRandomStringArrayWithLongs(10, true));
+  }
+  
+  @Test
+  public void testLongPointFieldRangeFacet() throws Exception {
+    doTestIntPointFieldRangeFacet("number_p_l_dv", "number_p_l");
+  }
+  
+  @Test
+  public void testLongPointFunctionQuery() throws Exception {
+    doTestIntPointFunctionQuery("number_p_l_dv", "number_p_l", "long");
+  }
+  
+  @Test
+  public void testLongPointStats() throws Exception {
+    testPointStats("number_p_l", "number_p_l_dv", new String[]{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"},
+        0D, 9D, "10", "1", 0D);
+  }
+  
+  @Test
+  public void testLongPointFieldMultiValuedExactQuery() throws Exception {
+    testPointFieldMultiValuedExactQuery("number_p_l_mv", getSequentialStringArrayWithInts(20));
+  }
+  
+  @Test
+  public void testLongPointFieldMultiValuedReturn() throws Exception {
+    testPointFieldMultiValuedReturn("number_p_l_mv", "long", getSequentialStringArrayWithInts(20));
+  }
+  
+  @Test
+  public void testLongPointFieldMultiValuedRangeQuery() throws Exception {
+    testPointFieldMultiValuedRangeQuery("number_p_l_mv", "long", getSequentialStringArrayWithInts(20));
+  }
+  
+  @Test
+  public void testLongPointFieldsAtomicUpdates() throws Exception {
+    if (!Boolean.getBoolean("enable.update.log")) {
+      return;
+    }
+    testIntPointFieldsAtomicUpdates("number_p_l", "long");
+    testIntPointFieldsAtomicUpdates("number_p_l_dv", "long");
+    testIntPointFieldsAtomicUpdates("number_p_l_dv_ns", "long");
+  }
+  
+  @Test
+  public void testLongPointSetQuery() throws Exception {
+    doTestSetQueries("number_p_l", getRandomStringArrayWithLongs(10, false), false);
+    doTestSetQueries("number_p_l_mv", getRandomStringArrayWithLongs(10, false), true);
+    doTestSetQueries("number_p_l_ni_dv", getRandomStringArrayWithLongs(10, false), false);
+  }
+  
+  // Helper methods
+  
+  private String[] getRandomStringArrayWithDoubles(int length, boolean sorted) {
+    Set<Double> set;
+    if (sorted) {
+      set = new TreeSet<>();
+    } else {
+      set = new HashSet<>();
+    }
+    while (set.size() < length) {
+      double f = random().nextDouble() * (Double.MAX_VALUE/2);
+      if (random().nextBoolean()) {
+        f = f * -1;
+      }
+      set.add(f);
+    }
+    String[] stringArr = new String[length];
+    int i = 0;
+    for (double val:set) {
+      stringArr[i] = String.valueOf(val);
+      i++;
+    }
+    return stringArr;
+  }
+  
+  private String[] getRandomStringArrayWithFloats(int length, boolean sorted) {
+    Set<Float> set;
+    if (sorted) {
+      set = new TreeSet<>();
+    } else {
+      set = new HashSet<>();
+    }
+    while (set.size() < length) {
+      float f = random().nextFloat() * (Float.MAX_VALUE/2);
+      if (random().nextBoolean()) {
+        f = f * -1;
+      }
+      set.add(f);
+    }
+    String[] stringArr = new String[length];
+    int i = 0;
+    for (float val:set) {
+      stringArr[i] = String.valueOf(val);
+      i++;
+    }
+    return stringArr;
+  }
+  
+  private String[] getSequentialStringArrayWithInts(int length) {
+    String[] arr = new String[length];
+    for (int i = 0; i < length; i++) {
+      arr[i] = String.valueOf(i);
+    }
+    return arr;
+  }
+  
+  private String[] getSequentialStringArrayWithDoubles(int length) {
+    String[] arr = new String[length];
+    for (int i = 0; i < length; i++) {
+      arr[i] = String.format(Locale.ROOT, "%d.0", i);
+    }
+    return arr;
+  }
+  
+  private String[] getRandomStringArrayWithInts(int length, boolean sorted) {
+    Set<Integer> set;
+    if (sorted) {
+      set = new TreeSet<>();
+    } else {
+      set = new HashSet<>();
+    }
+    while (set.size() < length) {
+      int number = random().nextInt(100);
+      if (random().nextBoolean()) {
+        number = number * -1;
+      }
+      set.add(number);
+    }
+    String[] stringArr = new String[length];
+    int i = 0;
+    for (int val:set) {
+      stringArr[i] = String.valueOf(val);
+      i++;
+    }
+    return stringArr;
+  }
+  
+  private String[] getRandomStringArrayWithLongs(int length, boolean sorted) {
+    Set<Long> set;
+    if (sorted) {
+      set = new TreeSet<>();
+    } else {
+      set = new HashSet<>();
+    }
+    while (set.size() < length) {
+      long number = random().nextLong();
+      if (random().nextBoolean()) {
+        number = number * -1;
+      }
+      set.add(number);
+    }
+    String[] stringArr = new String[length];
+    int i = 0;
+    for (long val:set) {
+      stringArr[i] = String.valueOf(val);
+      i++;
+    }
+    return stringArr;
+  }
+  
+  private void doTestIntPointFieldExactQuery(String field, boolean testLong) throws Exception {
+    for (int i=0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), field, String.valueOf(i+1)));
+    }
+    assertU(commit());
+    for (int i = 0; i < 10; i++) {
+      assertQ(req("q", field + ":"+(i+1), "fl", "id, " + field), 
+          "//*[@numFound='1']");
+    }
+    
+    for (int i = 0; i < 10; i++) {
+      assertQ(req("q", field + ":" + (i+1) + " OR " + field + ":" + ((i+1)%10 + 1)), "//*[@numFound='2']");
+    }
+    
+    assertU(adoc("id", String.valueOf(Integer.MAX_VALUE), field, String.valueOf(Integer.MAX_VALUE)));
+    assertU(commit());
+    assertQ(req("q", field + ":"+Integer.MAX_VALUE, "fl", "id, " + field), 
+        "//*[@numFound='1']");
+    
+    if (testLong) {
+      for (long i = (long)Integer.MAX_VALUE; i < (long)Integer.MAX_VALUE + 10; i++) {
+        assertU(adoc("id", String.valueOf(i), field, String.valueOf(i+1)));
+      }
+      assertU(commit());
+      for (long i = (long)Integer.MAX_VALUE; i < (long)Integer.MAX_VALUE + 10; i++) {
+        assertQ(req("q", field + ":"+(i+1), "fl", "id, " + field), 
+            "//*[@numFound='1']");
+      }
+      assertU(adoc("id", String.valueOf(Long.MAX_VALUE), field, String.valueOf(Long.MAX_VALUE)));
+      assertU(commit());
+      assertQ(req("q", field + ":"+Long.MAX_VALUE, "fl", "id, " + field), 
+          "//*[@numFound='1']");
+    }
+    
+    clearIndex();
+    assertU(commit());
+  }
+
+  private void testPointFieldReturn(String field, String type, String[] values) throws Exception {
+    SchemaField sf = h.getCore().getLatestSchema().getField(field);
+    assert sf.stored() || (sf.hasDocValues() && sf.useDocValuesAsStored()): 
+      "Unexpected field definition for " + field; 
+    for (int i=0; i < values.length; i++) {
+      assertU(adoc("id", String.valueOf(i), field, values[i]));
+    }
+    assertU(commit());
+    String[] expected = new String[values.length + 1];
+    expected[0] = "//*[@numFound='" + values.length + "']"; 
+    for (int i = 1; i <= values.length; i++) {
+      expected[i] = "//result/doc[" + i + "]/" + type + "[@name='" + field + "'][.='" + values[i-1] + "']";
+    }
+    assertQ(req("q", "*:*", "fl", "id, " + field, "rows", String.valueOf(values.length)), expected);
+  }
+
+  private void doTestIntPointFieldRangeQuery(String fieldName, String type, boolean testLong) throws Exception {
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), fieldName, String.valueOf(i)));
+    }
+    assertU(commit());
+    assertQ(req("q", fieldName + ":[0 TO 3]", "fl", "id, " + fieldName), 
+        "//*[@numFound='4']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0']",
+        "//result/doc[2]/" + type + "[@name='" + fieldName + "'][.='1']",
+        "//result/doc[3]/" + type + "[@name='" + fieldName + "'][.='2']",
+        "//result/doc[4]/" + type + "[@name='" + fieldName + "'][.='3']");
+    
+    assertQ(req("q", fieldName + ":{0 TO 3]", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='1']",
+        "//result/doc[2]/" + type + "[@name='" + fieldName + "'][.='2']",
+        "//result/doc[3]/" + type + "[@name='" + fieldName + "'][.='3']");
+    
+    assertQ(req("q", fieldName + ":[0 TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0']",
+        "//result/doc[2]/" + type + "[@name='" + fieldName + "'][.='1']",
+        "//result/doc[3]/" + type + "[@name='" + fieldName + "'][.='2']");
+    
+    assertQ(req("q", fieldName + ":{0 TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='2']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='1']",
+        "//result/doc[2]/" + type + "[@name='" + fieldName + "'][.='2']");
+    
+    assertQ(req("q", fieldName + ":{0 TO *}", "fl", "id, " + fieldName), 
+        "//*[@numFound='9']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='1']");
+    
+    assertQ(req("q", fieldName + ":{* TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0']");
+    
+    assertQ(req("q", fieldName + ":[* TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0']");
+    
+    assertQ(req("q", fieldName + ":[* TO *}", "fl", "id, " + fieldName), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0']",
+        "//result/doc[10]/" + type + "[@name='" + fieldName + "'][.='9']");
+    
+    clearIndex();
+    assertU(commit());
+    
+    String[] arr;
+    if (testLong) {
+      arr = getRandomStringArrayWithLongs(10, true);
+    } else {
+      arr = getRandomStringArrayWithInts(10, true);
+    }
+    for (int i = 0; i < arr.length; i++) {
+      assertU(adoc("id", String.valueOf(i), fieldName, arr[i]));
+    }
+    assertU(commit());
+    for (int i = 0; i < arr.length; i++) {
+      assertQ(req("q", fieldName + ":[" + arr[0] + " TO " + arr[i] + "]", "fl", "id, " + fieldName), 
+          "//*[@numFound='" + (i + 1) + "']");
+      assertQ(req("q", fieldName + ":{" + arr[0] + " TO " + arr[i] + "}", "fl", "id, " + fieldName), 
+          "//*[@numFound='" + (Math.max(0,  i-1)) + "']");
+    }
+  }
+  
+  private void testPointFieldFacetField(String nonDocValuesField, String docValuesField, String[] numbers) throws Exception {
+    assert numbers != null && numbers.length == 10;
+    
+    assertFalse(h.getCore().getLatestSchema().getField(docValuesField).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).getType() instanceof PointField);
+    
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), docValuesField, numbers[i], nonDocValuesField, numbers[i]));
+    }
+    assertU(commit());
+    assertQ(req("q", "*:*", "fl", "id, " + docValuesField, "facet", "true", "facet.field", docValuesField), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + docValuesField +"']/int[@name='" + numbers[1] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + docValuesField +"']/int[@name='" + numbers[2] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + docValuesField +"']/int[@name='" + numbers[3] + "'][.='1']");
+    
+    assertU(adoc("id", "10", docValuesField, numbers[1], nonDocValuesField, numbers[1]));
+    
+    assertU(commit());
+    assertQ(req("q", "*:*", "fl", "id, " + docValuesField, "facet", "true", "facet.field", docValuesField), 
+        "//*[@numFound='11']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + docValuesField +"']/int[@name='" + numbers[1] + "'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + docValuesField +"']/int[@name='" + numbers[2] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + docValuesField +"']/int[@name='" + numbers[3] + "'][.='1']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDocValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).getType() instanceof PointField);
+    assertQEx("Expecting Exception", 
+        "Can't facet on a PointField without docValues", 
+        req("q", "*:*", "fl", "id, " + nonDocValuesField, "facet", "true", "facet.field", nonDocValuesField), 
+        SolrException.ErrorCode.BAD_REQUEST);
+  }
+  
+  private void doTestIntPointFieldRangeFacet(String docValuesField, String nonDocValuesField) throws Exception {
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), docValuesField, String.valueOf(i), nonDocValuesField, String.valueOf(i)));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).getType() instanceof PointField);
+    assertQ(req("q", "*:*", "facet", "true", "facet.range", docValuesField, "facet.range.start", "-10", "facet.range.end", "10", "facet.range.gap", "2"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='2'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='4'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='6'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']");
+    
+    assertQ(req("q", "*:*", "facet", "true", "facet.range", docValuesField, "facet.range.start", "-10", "facet.range.end", "10", "facet.range.gap", "2", "facet.range.method", "dv"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='2'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='4'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='6'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDocValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).getType() instanceof PointField);
+    // Range Faceting with method = filter should work
+    assertQ(req("q", "*:*", "facet", "true", "facet.range", nonDocValuesField, "facet.range.start", "-10", "facet.range.end", "10", "facet.range.gap", "2", "facet.range.method", "filter"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='2'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='4'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='6'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']");
+    
+    // this should actually use filter method instead of dv
+    assertQ(req("q", "*:*", "facet", "true", "facet.range", nonDocValuesField, "facet.range.start", "-10", "facet.range.end", "10", "facet.range.gap", "2", "facet.range.method", "dv"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='2'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='4'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='6'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10'][.='0']");
+  }
+  
+  private void doTestIntPointFunctionQuery(String dvFieldName, String nonDvFieldName, String type) throws Exception {
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), dvFieldName, String.valueOf(i), nonDvFieldName, String.valueOf(i)));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).getType() instanceof PointField);
+    assertQ(req("q", "*:*", "fl", "id, " + dvFieldName, "sort", "product(-1," + dvFieldName + ") asc"), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/" + type + "[@name='" + dvFieldName + "'][.='9']",
+        "//result/doc[2]/" + type + "[@name='" + dvFieldName + "'][.='8']",
+        "//result/doc[3]/" + type + "[@name='" + dvFieldName + "'][.='7']",
+        "//result/doc[10]/" + type + "[@name='" + dvFieldName + "'][.='0']");
+    
+    assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", product(-1," + dvFieldName + ")"), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/float[@name='product(-1," + dvFieldName + ")'][.='-0.0']",
+        "//result/doc[2]/float[@name='product(-1," + dvFieldName + ")'][.='-1.0']",
+        "//result/doc[3]/float[@name='product(-1," + dvFieldName + ")'][.='-2.0']",
+        "//result/doc[10]/float[@name='product(-1," + dvFieldName + ")'][.='-9.0']");
+    
+    assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", field(" + dvFieldName + ")"), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/" + type + "[@name='field(" + dvFieldName + ")'][.='0']",
+        "//result/doc[2]/" + type + "[@name='field(" + dvFieldName + ")'][.='1']",
+        "//result/doc[3]/" + type + "[@name='field(" + dvFieldName + ")'][.='2']",
+        "//result/doc[10]/" + type + "[@name='field(" + dvFieldName + ")'][.='9']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDvFieldName).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDvFieldName).getType() instanceof PointField);
+
+    assertQEx("Expecting Exception", 
+        "sort param could not be parsed as a query", 
+        req("q", "*:*", "fl", "id, " + nonDvFieldName, "sort", "product(-1," + nonDvFieldName + ") asc"), 
+        SolrException.ErrorCode.BAD_REQUEST);
+  }
+  
+  private void testPointStats(String field, String dvField, String[] numbers, double min, double max, String count, String missing, double delta) {
+    String minMin = String.valueOf(min - Math.abs(delta*min));
+    String maxMin = String.valueOf(min + Math.abs(delta*min));
+    String minMax = String.valueOf(max - Math.abs(delta*max));
+    String maxMax = String.valueOf(max + Math.abs(delta*max));
+    for (int i = 0; i < numbers.length; i++) {
+      assertU(adoc("id", String.valueOf(i), dvField, numbers[i], field, numbers[i]));
+    }
+    assertU(adoc("id", String.valueOf(numbers.length)));
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(dvField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(dvField).getType() instanceof PointField);
+    assertQ(req("q", "*:*", "fl", "id, " + dvField, "stats", "true", "stats.field", dvField), 
+        "//*[@numFound='11']",
+        "//lst[@name='stats']/lst[@name='stats_fields']/lst[@name='" + dvField+ "']/double[@name='min'][.>='" + minMin + "']",
+        "//lst[@name='stats']/lst[@name='stats_fields']/lst[@name='" + dvField+ "']/double[@name='min'][.<='" + maxMin+ "']",
+        "//lst[@name='stats']/lst[@name='stats_fields']/lst[@name='" + dvField+ "']/double[@name='max'][.>='" + minMax + "']",
+        "//lst[@name='stats']/lst[@name='stats_fields']/lst[@name='" + dvField+ "']/double[@name='max'][.<='" + maxMax + "']",
+        "//lst[@name='stats']/lst[@name='stats_fields']/lst[@name='" + dvField+ "']/long[@name='count'][.='" + count + "']",
+        "//lst[@name='stats']/lst[@name='stats_fields']/lst[@name='" + dvField+ "']/long[@name='missing'][.='" + missing + "']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(field).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(field).getType() instanceof PointField);
+    assertQEx("Expecting Exception", 
+        "Can't calculate stats on a PointField without docValues", 
+        req("q", "*:*", "fl", "id, " + field, "stats", "true", "stats.field", field), 
+        SolrException.ErrorCode.BAD_REQUEST);
+  }
+  
+  private void testPointFieldMultiValuedExactQuery(String fieldName, String[] numbers) throws Exception {
+    assert numbers != null && numbers.length == 20;
+    assertTrue(h.getCore().getLatestSchema().getField(fieldName).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(fieldName).getType() instanceof PointField);
+    for (int i=0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), fieldName, numbers[i], fieldName, numbers[i+10]));
+    }
+    assertU(commit());
+    for (int i = 0; i < 20; i++) {
+      assertQ(req("q", fieldName + ":" + numbers[i].replace("-", "\\-")), 
+          "//*[@numFound='1']");
+    }
+    
+    for (int i = 0; i < 20; i++) {
+      assertQ(req("q", fieldName + ":" + numbers[i].replace("-", "\\-") + " OR " + fieldName + ":" + numbers[(i+1)%10].replace("-", "\\-")), "//*[@numFound='2']");
+    }
+  }
+  
+  private void testPointFieldMultiValuedReturn(String fieldName, String type, String[] numbers) throws Exception {
+    assert numbers != null && numbers.length == 20;
+    assertTrue(h.getCore().getLatestSchema().getField(fieldName).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(fieldName).getType() instanceof PointField);
+    for (int i=0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), fieldName, numbers[i], fieldName, numbers[i+10]));
+    }
+    assertU(commit());
+    String[] expected = new String[11];
+    String[] expected2 = new String[11];
+    expected[0] = "//*[@numFound='10']"; 
+    expected2[0] = "//*[@numFound='10']"; 
+    for (int i = 1; i <= 10; i++) {
+      expected[i] = "//result/doc[" + i + "]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[i-1] + "']";
+      expected2[i] = "//result/doc[" + i + "]/arr[@name='" + fieldName + "']/" + type + "[2][.='" + numbers[i + 9] + "']";
+    }
+    assertQ(req("q", "*:*", "fl", "id, " + fieldName), expected);
+    assertQ(req("q", "*:*", "fl", "id, " + fieldName), expected2);
+  }
+  
+  private void testPointFieldMultiValuedRangeQuery(String fieldName, String type, String[] numbers) throws Exception {
+    assert numbers != null && numbers.length == 20;
+    assertTrue(h.getCore().getLatestSchema().getField(fieldName).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(fieldName).getType() instanceof PointField);
+    for (int i=0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), fieldName, String.valueOf(i), fieldName, String.valueOf(i+10)));
+    }
+    assertU(commit());
+    assertQ(req("q", fieldName + ":[0 TO 3]", "fl", "id, " + fieldName), 
+        "//*[@numFound='4']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[0] + "']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[2][.='" + numbers[10] + "']",
+        "//result/doc[2]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[1] + "']",
+        "//result/doc[2]/arr[@name='" + fieldName + "']/" + type + "[2][.='" + numbers[11] + "']",
+        "//result/doc[3]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[2] + "']",
+        "//result/doc[3]/arr[@name='" + fieldName + "']/" + type + "[2][.='" + numbers[12] + "']",
+        "//result/doc[4]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[3] + "']",
+        "//result/doc[4]/arr[@name='" + fieldName + "']/" + type + "[2][.='" + numbers[13] + "']");
+    
+    assertQ(req("q", fieldName + ":{0 TO 3]", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[1] + "']",
+        "//result/doc[2]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[2] + "']",
+        "//result/doc[3]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[3] + "']");
+    
+    assertQ(req("q", fieldName + ":[0 TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[0] + "']",
+        "//result/doc[2]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[1] + "']",
+        "//result/doc[3]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[2] + "']");
+    
+    assertQ(req("q", fieldName + ":{0 TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='2']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[1] + "']",
+        "//result/doc[2]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[2] + "']");
+    
+    assertQ(req("q", fieldName + ":{0 TO *}", "fl", "id, " + fieldName), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[0] + "']");
+    
+    assertQ(req("q", fieldName + ":{10 TO *}", "fl", "id, " + fieldName), 
+        "//*[@numFound='9']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[1] + "']");
+    
+    assertQ(req("q", fieldName + ":{* TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[0] + "']");
+    
+    assertQ(req("q", fieldName + ":[* TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[0] + "']");
+    
+    assertQ(req("q", fieldName + ":[* TO *}", "fl", "id, " + fieldName), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[0] + "']",
+        "//result/doc[10]/arr[@name='" + fieldName + "']/" + type + "[1][.='" + numbers[9] + "']");
+  }
+
+  private void testPointFieldMultiValuedFacetField(String nonDocValuesField, String dvFieldName, String[] numbers) throws Exception {
+    assert numbers != null && numbers.length == 20;
+    assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).getType() instanceof PointField);
+    
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), dvFieldName, numbers[i], dvFieldName, numbers[i + 10], 
+          nonDocValuesField, numbers[i], nonDocValuesField, numbers[i + 10]));
+    }
+    assertU(commit());
+    
+    assertQ(req("q", "*:*", "fl", "id, " + dvFieldName, "facet", "true", "facet.field", dvFieldName), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[1] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[2] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[3] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[10] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[11] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[12] + "'][.='1']");
+    
+    assertU(adoc("id", "10", dvFieldName, numbers[1], nonDocValuesField, numbers[1]));
+    
+    assertU(commit());
+    assertQ(req("q", "*:*", "fl", "id, " + dvFieldName, "facet", "true", "facet.field", dvFieldName), 
+        "//*[@numFound='11']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[1] + "'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[2] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[3] + "'][.='1']",
+        "//lst[@name='facet_counts']/lst[@name='facet_fields']/lst[@name='" + dvFieldName +"']/int[@name='" + numbers[10] + "'][.='1']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDocValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).getType() instanceof PointField);
+    assertQEx("Expecting Exception", 
+        "Can't facet on a PointField without docValues", 
+        req("q", "*:*", "fl", "id, " + nonDocValuesField, "facet", "true", "facet.field", nonDocValuesField), 
+        SolrException.ErrorCode.BAD_REQUEST);
+  }
+
+  private void testPointMultiValuedFunctionQuery(String nonDocValuesField, String docValuesField, String type, String[] numbers) throws Exception {
+    assert numbers != null && numbers.length == 20;
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), docValuesField, numbers[i], docValuesField, numbers[i+10], 
+          nonDocValuesField, numbers[i], nonDocValuesField, numbers[i+10]));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).getType() instanceof PointField);
+    String function = "field(" + docValuesField + ", min)";
+    
+    assertQ(req("q", "*:*", "fl", "id, " + function), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/" + type + "[@name='" + function + "'][.='" + numbers[0] + "']",
+        "//result/doc[2]/" + type + "[@name='" + function + "'][.='" + numbers[1] + "']",
+        "//result/doc[3]/" + type + "[@name='" + function + "'][.='" + numbers[2] + "']",
+        "//result/doc[10]/" + type + "[@name='" + function + "'][.='" + numbers[9] + "']");
+    
+//    if (dvIsRandomAccessOrds(docValuesField)) {
+//      function = "field(" + docValuesField + ", max)";
+//      assertQ(req("q", "*:*", "fl", "id, " + function), 
+//          "//*[@numFound='10']",
+//          "//result/doc[1]/int[@name='" + function + "'][.='10']",
+//          "//result/doc[2]/int[@name='" + function + "'][.='11']",
+//          "//result/doc[3]/int[@name='" + function + "'][.='12']",
+//          "//result/doc[10]/int[@name='" + function + "'][.='19']");
+//    }
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDocValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).multiValued());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).getType() instanceof PointField);
+
+    function = "field(" + nonDocValuesField + ",min)";
+    
+    assertQEx("Expecting Exception", 
+        "sort param could not be parsed as a query", 
+        req("q", "*:*", "fl", "id", "sort", function + " desc"), 
+        SolrException.ErrorCode.BAD_REQUEST);
+    
+    assertQEx("Expecting Exception", 
+        "docValues='true' is required to select 'min' value from multivalued field (" + nonDocValuesField + ") at query time", 
+        req("q", "*:*", "fl", "id, " + function), 
+        SolrException.ErrorCode.BAD_REQUEST);
+    
+    function = "field(" + docValuesField + ",foo)";
+    assertQEx("Expecting Exception", 
+        "Multi-Valued field selector 'foo' not supported", 
+        req("q", "*:*", "fl", "id, " + function), 
+        SolrException.ErrorCode.BAD_REQUEST);
+  }
+
+  private void testIntPointFieldsAtomicUpdates(String field, String type) throws Exception {
+    assertU(adoc(sdoc("id", "1", field, "1")));
+    assertU(commit());
+
+    assertU(adoc(sdoc("id", "1", field, ImmutableMap.of("inc", 1))));
+    assertU(commit());
+
+    assertQ(req("q", "id:1"),
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.='2']");
+    
+    assertU(adoc(sdoc("id", "1", field, ImmutableMap.of("inc", -1))));
+    assertU(commit());
+    
+    assertQ(req("q", "id:1"),
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.='1']");
+    
+    assertU(adoc(sdoc("id", "1", field, ImmutableMap.of("set", 3))));
+    assertU(commit());
+    
+    assertQ(req("q", "id:1"),
+        "//result/doc[1]/" + type + "[@name='" + field + "'][.='3']");
+  }
+
+  private void doTestFloatPointFieldExactQuery(String field) throws Exception {
+    for (int i=0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), field, String.valueOf(i + "." + i)));
+    }
+    assertU(commit());
+    for (int i = 0; i < 9; i++) {
+      assertQ(req("q", field + ":"+(i+1) + "." + (i+1), "fl", "id, " + field), 
+          "//*[@numFound='1']");
+    }
+    
+    for (int i = 0; i < 9; i++) {
+      String num1 = (i+1) + "." + (i+1);
+      String num2 = ((i+1)%9 + 1) + "." + ((i+1)%9 + 1);
+      assertQ(req("q", field + ":" + num1 + " OR " + field + ":" + num2), "//*[@numFound='2']");
+    }
+    
+    clearIndex();
+    assertU(commit());
+    for (int i = 0; i < atLeast(10); i++) {
+      float rand = random().nextFloat() * 10;
+      assertU(adoc("id", "random_number ", field, String.valueOf(rand))); //always the same id to override
+      assertU(commit());
+      assertQ(req("q", field + ":" + rand, "fl", "id, " + field), 
+          "//*[@numFound='1']");
+    }
+    clearIndex();
+    assertU(commit());
+  }
+  
+  private void doTestPointFieldSort(String field, String dvField, String[] arr) throws Exception {
+    assert arr != null && arr.length == 10;
+    for (int i = 0; i < arr.length; i++) {
+      assertU(adoc("id", String.valueOf(i), dvField, String.valueOf(arr[i]), field, String.valueOf(arr[i])));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(dvField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(dvField).getType() instanceof PointField);
+    assertQ(req("q", "*:*", "fl", "id", "sort", dvField + " desc"), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/str[@name='id'][.='9']",
+        "//result/doc[2]/str[@name='id'][.='8']",
+        "//result/doc[3]/str[@name='id'][.='7']",
+        "//result/doc[10]/str[@name='id'][.='0']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(field).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(field).getType() instanceof PointField);
+    assertQEx("Expecting Exception", 
+        "can not sort on a PointField without doc values: " + field, 
+        req("q", "*:*", "fl", "id", "sort", field + " desc"), 
+        SolrException.ErrorCode.BAD_REQUEST);
+    
+    //TODO: sort missing
+  }
+  
+  private void doTestFloatPointFieldRangeQuery(String fieldName, String type, boolean testDouble) throws Exception {
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), fieldName, String.valueOf(i)));
+    }
+    assertU(commit());
+    assertQ(req("q", fieldName + ":[0 TO 3]", "fl", "id, " + fieldName), 
+        "//*[@numFound='4']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0.0']",
+        "//result/doc[2]/" + type + "[@name='" + fieldName + "'][.='1.0']",
+        "//result/doc[3]/" + type + "[@name='" + fieldName + "'][.='2.0']",
+        "//result/doc[4]/" + type + "[@name='" + fieldName + "'][.='3.0']");
+    
+    assertQ(req("q", fieldName + ":{0 TO 3]", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='1.0']",
+        "//result/doc[2]/" + type + "[@name='" + fieldName + "'][.='2.0']",
+        "//result/doc[3]/" + type + "[@name='" + fieldName + "'][.='3.0']");
+    
+    assertQ(req("q", fieldName + ":[0 TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0.0']",
+        "//result/doc[2]/" + type + "[@name='" + fieldName + "'][.='1.0']",
+        "//result/doc[3]/" + type + "[@name='" + fieldName + "'][.='2.0']");
+    
+    assertQ(req("q", fieldName + ":{0 TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='2']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='1.0']",
+        "//result/doc[2]/" + type + "[@name='" + fieldName + "'][.='2.0']");
+    
+    assertQ(req("q", fieldName + ":{0 TO *}", "fl", "id, " + fieldName), 
+        "//*[@numFound='9']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='1.0']");
+    
+    assertQ(req("q", fieldName + ":{* TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0.0']");
+    
+    assertQ(req("q", fieldName + ":[* TO 3}", "fl", "id, " + fieldName), 
+        "//*[@numFound='3']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0.0']");
+    
+    assertQ(req("q", fieldName + ":[* TO *}", "fl", "id, " + fieldName), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='0.0']",
+        "//result/doc[10]/" + type + "[@name='" + fieldName + "'][.='9.0']");
+    
+    assertQ(req("q", fieldName + ":[0.9 TO 1.01]", "fl", "id, " + fieldName), 
+        "//*[@numFound='1']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='1.0']");
+    
+    assertQ(req("q", fieldName + ":{0.9 TO 1.01}", "fl", "id, " + fieldName), 
+        "//*[@numFound='1']",
+        "//result/doc[1]/" + type + "[@name='" + fieldName + "'][.='1.0']");
+    
+    clearIndex();
+    assertU(commit());
+    
+    String[] arr;
+    if (testDouble) {
+      arr = getRandomStringArrayWithDoubles(10, true);
+    } else {
+      arr = getRandomStringArrayWithFloats(10, true);
+    }
+    for (int i = 0; i < arr.length; i++) {
+      assertU(adoc("id", String.valueOf(i), fieldName, arr[i]));
+    }
+    assertU(commit());
+    for (int i = 0; i < arr.length; i++) {
+      assertQ(req("q", fieldName + ":[" + arr[0] + " TO " + arr[i] + "]", "fl", "id, " + fieldName), 
+          "//*[@numFound='" + (i + 1) + "']");
+      assertQ(req("q", fieldName + ":{" + arr[0] + " TO " + arr[i] + "}", "fl", "id, " + fieldName), 
+          "//*[@numFound='" + (Math.max(0,  i-1)) + "']");
+    }
+  }
+  
+  private void doTestFloatPointFieldRangeFacet(String docValuesField, String nonDocValuesField) throws Exception {
+    
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), docValuesField, String.format(Locale.ROOT, "%f", (float)i*1.1), nonDocValuesField, String.format(Locale.ROOT, "%f", (float)i*1.1)));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(docValuesField).getType() instanceof PointField);
+    assertQ(req("q", "*:*", "facet", "true", "facet.range", docValuesField, "facet.range.start", "-10", "facet.range.end", "10", "facet.range.gap", "2"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='2.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='4.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='6.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']");
+    
+    assertQ(req("q", "*:*", "facet", "true", "facet.range", docValuesField, "facet.range.start", "-10", "facet.range.end", "10", "facet.range.gap", "2", "facet.range.method", "dv"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='2.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='4.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='6.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + docValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDocValuesField).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDocValuesField).getType() instanceof PointField);
+    // Range Faceting with method = filter should work
+    assertQ(req("q", "*:*", "facet", "true", "facet.range", nonDocValuesField, "facet.range.start", "-10", "facet.range.end", "10", "facet.range.gap", "2", "facet.range.method", "filter"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='2.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='4.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='6.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']");
+    
+    // this should actually use filter method instead of dv
+    assertQ(req("q", "*:*", "facet", "true", "facet.range", nonDocValuesField, "facet.range.start", "-10", "facet.range.end", "10", "facet.range.gap", "2", "facet.range.method", "dv"), 
+        "//*[@numFound='10']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='0.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='2.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='4.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='6.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='8.0'][.='2']",
+        "//lst[@name='facet_counts']/lst[@name='facet_ranges']/lst[@name='" + nonDocValuesField + "']/lst[@name='counts']/int[@name='-10.0'][.='0']");
+  }
+  
+  private void doTestFloatPointFunctionQuery(String dvFieldName, String nonDvFieldName, String type) throws Exception {
+    for (int i = 0; i < 10; i++) {
+      assertU(adoc("id", String.valueOf(i), dvFieldName, String.format(Locale.ROOT, "%f", (float)i*1.1), nonDvFieldName, String.format(Locale.ROOT, "%f", (float)i*1.1)));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(dvFieldName).getType() instanceof PointField);
+    assertQ(req("q", "*:*", "fl", "id, " + dvFieldName, "sort", "product(-1," + dvFieldName + ") asc"), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/" + type + "[@name='" + dvFieldName + "'][.='9.9']",
+        "//result/doc[2]/" + type + "[@name='" + dvFieldName + "'][.='8.8']",
+        "//result/doc[3]/" + type + "[@name='" + dvFieldName + "'][.='7.7']",
+        "//result/doc[10]/" + type + "[@name='" + dvFieldName + "'][.='0.0']");
+    
+    assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", product(-1," + dvFieldName + ")"), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/float[@name='product(-1," + dvFieldName + ")'][.='-0.0']",
+        "//result/doc[2]/float[@name='product(-1," + dvFieldName + ")'][.='-1.1']",
+        "//result/doc[3]/float[@name='product(-1," + dvFieldName + ")'][.='-2.2']",
+        "//result/doc[10]/float[@name='product(-1," + dvFieldName + ")'][.='-9.9']");
+    
+    assertQ(req("q", "*:*", "fl", "id, " + dvFieldName + ", field(" + dvFieldName + ")"), 
+        "//*[@numFound='10']",
+        "//result/doc[1]/" + type + "[@name='field(" + dvFieldName + ")'][.='0.0']",
+        "//result/doc[2]/" + type + "[@name='field(" + dvFieldName + ")'][.='1.1']",
+        "//result/doc[3]/" + type + "[@name='field(" + dvFieldName + ")'][.='2.2']",
+        "//result/doc[10]/" + type + "[@name='field(" + dvFieldName + ")'][.='9.9']");
+    
+    assertFalse(h.getCore().getLatestSchema().getField(nonDvFieldName).hasDocValues());
+    assertTrue(h.getCore().getLatestSchema().getField(nonDvFieldName).getType() instanceof PointField);
+
+    assertQEx("Expecting Exception", 
+        "sort param could not be parsed as a query", 
+        req("q", "*:*", "fl", "id, " + nonDvFieldName, "sort", "product(-1," + nonDvFieldName + ") asc"), 
+        SolrException.ErrorCode.BAD_REQUEST);
+  }
+  
+  private void doTestSetQueries(String fieldName, String[] values, boolean multiValued) {
+    for (int i = 0; i < values.length; i++) {
+      assertU(adoc("id", String.valueOf(i), fieldName, values[i]));
+    }
+    assertU(commit());
+    assertTrue(h.getCore().getLatestSchema().getField(fieldName).getType() instanceof PointField);
+    
+    for (int i = 0; i < values.length; i++) {
+      assertQ(req("q", "{!term f='" + fieldName + "'}" + values[i], "fl", "id," + fieldName), 
+          "//*[@numFound='1']");
+    }
+    
+    for (int i = 0; i < values.length; i++) {
+      assertQ(req("q", "{!terms f='" + fieldName + "'}" + values[i] + "," + values[(i + 1)%values.length], "fl", "id," + fieldName), 
+          "//*[@numFound='2']");
+    }
+    
+    if (multiValued) {
+      clearIndex();
+      assertU(commit());
+      for (int i = 0; i < values.length; i++) {
+        assertU(adoc("id", String.valueOf(i), fieldName, values[i], fieldName, values[(i+1)%values.length]));
+      }
+      assertU(commit());
+      for (int i = 0; i < values.length; i++) {
+        assertQ(req("q", "{!term f='" + fieldName + "'}" + values[i], "fl", "id," + fieldName), 
+            "//*[@numFound='2']");
+      }
+      
+      for (int i = 0; i < values.length; i++) {
+        assertQ(req("q", "{!terms f='" + fieldName + "'}" + values[i] + "," + values[(i + 1)%values.length], "fl", "id," + fieldName), 
+            "//*[@numFound='3']");
+      }
+    }
+  }
+}
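
For readers skimming the patch: the exact, range, and set queries exercised
above are expected to map onto Lucene's point query factories rather than
term-based queries. A minimal standalone sketch of those factories (plain
Lucene; the field name and values are illustrative, not taken from the test
schema):

    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.search.Query;

    public class PointQuerySketch {
      public static void main(String[] args) {
        // exact match, analogous to q=number_p_i:5
        Query exact = IntPoint.newExactQuery("number_p_i", 5);
        // inclusive range, analogous to q=number_p_i:[0 TO 3]
        Query range = IntPoint.newRangeQuery("number_p_i", 0, 3);
        // set query, analogous to {!terms f=number_p_i}1,2,3
        Query set = IntPoint.newSetQuery("number_p_i", 1, 2, 3);
        System.out.println(exact + " | " + range + " | " + set);
      }
    }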

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/search/TestCollapseQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestCollapseQParserPlugin.java b/solr/core/src/test/org/apache/solr/search/TestCollapseQParserPlugin.java
index 7c5fc4a..2fca452 100644
--- a/solr/core/src/test/org/apache/solr/search/TestCollapseQParserPlugin.java
+++ b/solr/core/src/test/org/apache/solr/search/TestCollapseQParserPlugin.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
@@ -36,8 +37,9 @@ import org.junit.Test;
 
 //We want codecs that support DocValues, and ones supporting blank/empty values.
 @SuppressCodecs({"Appending","Lucene3x","Lucene40","Lucene41","Lucene42"})
+@SuppressPointFields
 public class TestCollapseQParserPlugin extends SolrTestCaseJ4 {
-
+  
   @BeforeClass
   public static void beforeClass() throws Exception {
     initCore("solrconfig-collapseqparser.xml", "schema11.xml");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
index 6059528..610e998 100644
--- a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
@@ -46,7 +46,7 @@ public class TestMaxScoreQueryParser extends AbstractSolrTestCase {
     assertEquals(new BoostQuery(new TermQuery(new Term("text", "foo")), 3f), q);
 
     q = parse("price:[0 TO 10]");
-    assertTrue(q instanceof LegacyNumericRangeQuery);
+    assertTrue(q instanceof LegacyNumericRangeQuery || q instanceof PointRangeQuery);
   }
 
   @Test
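
The relaxed assertion above reflects that the randomized test schema may give
"price" either a trie type or a point type, so the same range syntax parses to
a different query class in each case. A hedged sketch of the point-based case
(field name and bounds illustrative; assumes lucene-core on the classpath):

    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.search.PointRangeQuery;
    import org.apache.lucene.search.Query;

    public class RangeQuerySketch {
      public static void main(String[] args) {
        // [0 TO 10] on a point-typed int field yields a PointRangeQuery
        Query q = IntPoint.newRangeQuery("price", 0, 10);
        System.out.println(q instanceof PointRangeQuery); // true
        // exclusive bounds, i.e. {0 TO 10}, shift each bound by one step
        Query exclusive = IntPoint.newRangeQuery("price",
            Math.addExact(0, 1), Math.addExact(10, -1));
        System.out.println(exclusive);
      }
    }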

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java b/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java
index f4dd449..7d135e2 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRandomCollapseQParserPlugin.java
@@ -24,6 +24,7 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.CursorPagingTest;
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
 import org.apache.solr.client.solrj.response.QueryResponse;
@@ -38,6 +39,7 @@ import org.junit.BeforeClass;
 
 //We want codecs that support DocValues, and ones supporting blank/empty values.
 @SuppressCodecs({"Appending","Lucene3x","Lucene40","Lucene41","Lucene42"})
+@SuppressPointFields
 public class TestRandomCollapseQParserPlugin extends SolrTestCaseJ4 {
 
   /** Full SolrServer instance for arbitrary introspection of response data and adding fqs */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
index 20c1907..37462d9 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
@@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.PointInSetQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
@@ -231,6 +232,13 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
     qParser.setIsFilter(true); // this may change in the future
     q = qParser.getQuery();
     assertEquals(20, ((TermInSetQuery)q).getTermData().size());
+    
+    // for point fields, a large filter query should use PointInSetQuery
+    qParser = QParser.getParser("foo_pi:(1 2 3 4 5 6 7 8 9 10 20 19 18 17 16 15 14 13 12 11)", req);
+    qParser.setIsFilter(true); // this may change in the future
+    q = qParser.getQuery();
+    assertTrue(q instanceof PointInSetQuery);
+    assertEquals(20, ((PointInSetQuery)q).getPackedPoints().size());
 
     // a filter() clause inside a relevancy query should be able to use a TermsQuery
     qParser = QParser.getParser("foo_s:aaa filter(foo_s:(a b c d e f g h i j k l m n o p q r s t u v w x y z))", req);


[16/50] [abbrv] lucene-solr:apiv2: LUCENE-7619: don't let offsets go backwards

Posted by no...@apache.org.
LUCENE-7619: don't let offsets go backwards


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0bdcfc29
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0bdcfc29
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0bdcfc29

Branch: refs/heads/apiv2
Commit: 0bdcfc291fceab26e1c62a7e9791ce417671eacd
Parents: 39eec66
Author: Mike McCandless <mi...@apache.org>
Authored: Tue Jan 17 17:57:11 2017 -0500
Committer: Mike McCandless <mi...@apache.org>
Committed: Tue Jan 17 17:57:11 2017 -0500

----------------------------------------------------------------------
 .../miscellaneous/WordDelimiterGraphFilter.java   | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0bdcfc29/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
index ea6f6cd..fe8ed72 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
@@ -195,6 +195,7 @@ public final class WordDelimiterGraphFilter extends TokenFilter {
   private int savedStartOffset;
   private int savedEndOffset;
   private AttributeSource.State savedState;
+  private int lastStartOffset;
   
   // if length by start + end offsets doesn't match the term text then assume
   // this is a synonym and don't adjust the offsets.
@@ -373,12 +374,24 @@ public final class WordDelimiterGraphFilter extends TokenFilter {
         int endPart = bufferedParts[4*bufferedPos+3];
         bufferedPos++;
 
+        int startOffset;
+        int endOffset;
+
         if (hasIllegalOffsets) {
-          offsetAttribute.setOffset(savedStartOffset, savedEndOffset);
+          startOffset = savedStartOffset;
+          endOffset = savedEndOffset;
         } else {
-          offsetAttribute.setOffset(savedStartOffset + startPart, savedStartOffset + endPart);
+          startOffset = savedStartOffset + startPart;
+          endOffset = savedStartOffset + endPart;
         }
 
+        // never let offsets go backwards:
+        startOffset = Math.max(startOffset, lastStartOffset);
+        endOffset = Math.max(endOffset, lastStartOffset);
+
+        offsetAttribute.setOffset(startOffset, endOffset);
+        lastStartOffset = startOffset;
+
         if (termPart == null) {
           termAttribute.copyBuffer(savedTermBuffer, startPart, endPart - startPart);
         } else {
@@ -402,6 +415,7 @@ public final class WordDelimiterGraphFilter extends TokenFilter {
     super.reset();
     accumPosInc = 0;
     savedState = null;
+    lastStartOffset = 0;
     concat.clear();
     concatAll.clear();
   }
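
The change above boils down to clamping emitted offsets against a running
high-water mark, since the indexer rejects tokens whose offsets move
backwards. A minimal sketch of just the clamping idea, outside the filter
(names are illustrative where they go beyond what the diff shows):

    // keep token offsets monotonically non-decreasing
    final class OffsetClamp {
      private int lastStartOffset;

      /** Returns {start, end} adjusted so neither precedes the last start. */
      int[] clamp(int startOffset, int endOffset) {
        int start = Math.max(startOffset, lastStartOffset);
        int end = Math.max(endOffset, lastStartOffset);
        lastStartOffset = start;
        return new int[] {start, end};
      }

      /** Mirrors TokenStream#reset(), as in the patch. */
      void reset() {
        lastStartOffset = 0;
      }
    }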


[32/50] [abbrv] lucene-solr:apiv2: SOLR-9984: Deprecate GenericHadoopAuthPlugin in favor of HadoopAuthPlugin

Posted by no...@apache.org.
SOLR-9984: Deprecate GenericHadoopAuthPlugin in favor of HadoopAuthPlugin


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1a05d6f4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1a05d6f4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1a05d6f4

Branch: refs/heads/apiv2
Commit: 1a05d6f4f1a6e7c99662549c8f24a11727d86b2f
Parents: 9f58b6c
Author: Ishan Chattopadhyaya <ic...@gmail.com>
Authored: Thu Jan 19 09:35:59 2017 +0530
Committer: Ishan Chattopadhyaya <ic...@gmail.com>
Committed: Thu Jan 19 09:35:59 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   8 +
 .../solr/security/GenericHadoopAuthPlugin.java  | 245 +------------------
 .../apache/solr/security/HadoopAuthPlugin.java  |   2 +-
 3 files changed, 14 insertions(+), 241 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a05d6f4/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index aab5116..62b8818 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -94,6 +94,12 @@ Jetty 9.3.14.v20161028
 Detailed Change List
 ----------------------
 
+Upgrade Notes
+----------------------
+
+* SOLR-9984: GenericHadoopAuthPlugin is deprecated in favor of HadoopAuthPlugin. Simply changing the
+  name of the class in the security configurations should suffice while upgrading.
+
 New Features
 ----------------------
 
@@ -122,6 +128,8 @@ Other Changes
 ----------------------
 * SOLR-9980: Expose configVersion in core admin status (Jessica Cheng Mallet via Tomás Fernández Löbbe)
 
+* SOLR-9984: Deprecate GenericHadoopAuthPlugin in favor of HadoopAuthPlugin (Ishan Chattopadhyaya)
+
 ==================  6.4.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
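
By way of illustration, the upgrade described in the note above would
presumably amount to a one-word edit in security.json, changing

    "class": "solr.GenericHadoopAuthPlugin"

to

    "class": "solr.HadoopAuthPlugin"

inside the "authentication" section; all other parameters should carry over
unchanged (a hypothetical sketch of the rename, not an exhaustive config).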

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a05d6f4/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java b/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java
index e5fe349..3d63fd6 100644
--- a/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java
+++ b/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java
@@ -16,251 +16,16 @@
  */
 package org.apache.solr.security;
 
-import static org.apache.solr.security.RequestContinuesRecorderAuthenticationHandler.REQUEST_CONTINUES_ATTR;
-import static org.apache.solr.security.HadoopAuthFilter.DELEGATION_TOKEN_ZK_CLIENT;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.lang.invoke.MethodHandles;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletResponse;
-import javax.servlet.http.HttpServletResponseWrapper;
-
-import org.apache.commons.collections.iterators.IteratorEnumeration;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.solr.client.solrj.impl.HttpClientBuilderFactory;
-import org.apache.solr.client.solrj.impl.Krb5HttpClientBuilder;
-import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.util.SuppressForbidden;
 import org.apache.solr.core.CoreContainer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * This class implements a generic plugin which can use authentication schemes exposed by the
- * Hadoop framework. This plugin supports the following features:
- * - integration with authentication mechanisms (e.g. kerberos)
- * - Delegation token support
- * - Proxy users (or secure impersonation) support
- *
- * This plugin enables defining configuration parameters required by the underlying Hadoop authentication
- * mechanism. These configuration parameters can either be specified as a Java system property or the default
- * value can be specified as part of the plugin configuration.
- *
- * The proxy users are configured by specifying relevant Hadoop configuration parameters. Please note that
- * the delegation token support must be enabled for using the proxy users support.
- *
- * For Solr internal communication, this plugin enables configuring {@linkplain HttpClientBuilderFactory}
- * implementation (e.g. based on kerberos).
+ * @deprecated Use {@link HadoopAuthPlugin}. Kept for backcompat with Solr 6.4.
  */
-public class GenericHadoopAuthPlugin extends AuthenticationPlugin implements HttpClientBuilderPlugin {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  /**
-   * A property specifying the type of authentication scheme to be configured.
-   */
-  private static final String HADOOP_AUTH_TYPE = "type";
-
-  /**
-   * A property specifying the value of the prefix to be used to define the Java system property
-   * for configuring the authentication mechanism. The name of the Java system property is
-   * defined by appending the configuration parameter name to this prefix value, e.g. if prefix
-   * is 'solr' then the Java system property 'solr.kerberos.principal' defines the value of
-   * configuration parameter 'kerberos.principal'.
-   */
-  private static final String SYSPROP_PREFIX_PROPERTY = "sysPropPrefix";
-
-  /**
-   * A property specifying the configuration parameters required by the authentication scheme
-   * defined by {@linkplain #HADOOP_AUTH_TYPE} property.
-   */
-  private static final String AUTH_CONFIG_NAMES_PROPERTY = "authConfigs";
-
-  /**
-   * A property specifying the {@linkplain HttpClientBuilderFactory} used for the Solr internal
-   * communication.
-   */
-  private static final String HTTPCLIENT_BUILDER_FACTORY = "clientBuilderFactory";
-
-  /**
-   * A property specifying the default values for the configuration parameters specified by the
-   * {@linkplain #AUTH_CONFIG_NAMES_PROPERTY} property. The default values are specified as a
-   * collection of key-value pairs (i.e. property-name : default_value).
-   */
-  private static final String DEFAULT_AUTH_CONFIGS_PROPERTY = "defaultConfigs";
-
-  /**
-   * A property which enable (or disable) the delegation tokens functionality.
-   */
-  private static final String DELEGATION_TOKEN_ENABLED_PROPERTY = "enableDelegationToken";
-
-  /**
-   * A property which enables initialization of kerberos before connecting to Zookeeper.
-   */
-  private static final String INIT_KERBEROS_ZK = "initKerberosZk";
-
-  /**
-   * A property which configures proxy users for the underlying Hadoop authentication mechanism.
-   * This configuration is expressed as a collection of key-value pairs  (i.e. property-name : value).
-   */
-  public static final String PROXY_USER_CONFIGS = "proxyUserConfigs";
-
-  private AuthenticationFilter authFilter;
-  private HttpClientBuilderFactory factory = null;
-  private final CoreContainer coreContainer;
+@Deprecated
+public class GenericHadoopAuthPlugin extends HadoopAuthPlugin {
 
   public GenericHadoopAuthPlugin(CoreContainer coreContainer) {
-    this.coreContainer = coreContainer;
-  }
-
-  @SuppressWarnings("rawtypes")
-  @Override
-  public void init(Map<String,Object> pluginConfig) {
-    try {
-      String delegationTokenEnabled = (String)pluginConfig.getOrDefault(DELEGATION_TOKEN_ENABLED_PROPERTY, "false");
-      authFilter = (Boolean.parseBoolean(delegationTokenEnabled)) ? new HadoopAuthFilter() : new AuthenticationFilter();
-
-      // Initialize kerberos before initializing curator instance.
-      boolean initKerberosZk = Boolean.parseBoolean((String)pluginConfig.getOrDefault(INIT_KERBEROS_ZK, "false"));
-      if (initKerberosZk) {
-        (new Krb5HttpClientBuilder()).getBuilder();
-      }
-
-      FilterConfig conf = getInitFilterConfig(pluginConfig);
-      authFilter.init(conf);
-
-      String httpClientBuilderFactory = (String)pluginConfig.get(HTTPCLIENT_BUILDER_FACTORY);
-      if (httpClientBuilderFactory != null) {
-        Class c = Class.forName(httpClientBuilderFactory);
-        factory = (HttpClientBuilderFactory)c.newInstance();
-      }
-
-    } catch (ServletException | ClassNotFoundException | InstantiationException | IllegalAccessException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error initializing kerberos authentication plugin: "+e);
-    }
+    super(coreContainer);
   }
 
-  @SuppressWarnings("unchecked")
-  protected FilterConfig getInitFilterConfig(Map<String, Object> pluginConfig) {
-    Map<String, String> params = new HashMap<>();
-
-    String type = (String) Objects.requireNonNull(pluginConfig.get(HADOOP_AUTH_TYPE));
-    params.put(HADOOP_AUTH_TYPE, type);
-
-    String sysPropPrefix = (String) pluginConfig.getOrDefault(SYSPROP_PREFIX_PROPERTY, "solr.");
-    Collection<String> authConfigNames = (Collection<String>) pluginConfig.
-        getOrDefault(AUTH_CONFIG_NAMES_PROPERTY, Collections.emptyList());
-    Map<String,String> authConfigDefaults = (Map<String,String>) pluginConfig
-        .getOrDefault(DEFAULT_AUTH_CONFIGS_PROPERTY, Collections.emptyMap());
-    Map<String,String> proxyUserConfigs = (Map<String,String>) pluginConfig
-        .getOrDefault(PROXY_USER_CONFIGS, Collections.emptyMap());
-
-    for ( String configName : authConfigNames) {
-      String systemProperty = sysPropPrefix + configName;
-      String defaultConfigVal = authConfigDefaults.get(configName);
-      String configVal = System.getProperty(systemProperty, defaultConfigVal);
-      if (configVal != null) {
-        params.put(configName, configVal);
-      }
-    }
-
-    // Configure proxy user settings.
-    params.putAll(proxyUserConfigs);
-
-    final ServletContext servletContext = new AttributeOnlyServletContext();
-    log.info("Params: "+params);
-
-    ZkController controller = coreContainer.getZkController();
-    if (controller != null) {
-      servletContext.setAttribute(DELEGATION_TOKEN_ZK_CLIENT, controller.getZkClient());
-    }
-
-    FilterConfig conf = new FilterConfig() {
-      @Override
-      public ServletContext getServletContext() {
-        return servletContext;
-      }
-
-      @Override
-      public Enumeration<String> getInitParameterNames() {
-        return new IteratorEnumeration(params.keySet().iterator());
-      }
-
-      @Override
-      public String getInitParameter(String param) {
-        return params.get(param);
-      }
-
-      @Override
-      public String getFilterName() {
-        return "HadoopAuthFilter";
-      }
-    };
-
-    return conf;
-  }
-
-  @Override
-  public boolean doAuthenticate(ServletRequest request, ServletResponse response, FilterChain filterChain)
-      throws Exception {
-    final HttpServletResponse frsp = (HttpServletResponse)response;
-
-    // Workaround until HADOOP-13346 is fixed.
-    HttpServletResponse rspCloseShield = new HttpServletResponseWrapper(frsp) {
-      @SuppressForbidden(reason = "Hadoop DelegationTokenAuthenticationFilter uses response writer, this" +
-          "is providing a CloseShield on top of that")
-      @Override
-      public PrintWriter getWriter() throws IOException {
-        final PrintWriter pw = new PrintWriterWrapper(frsp.getWriter()) {
-          @Override
-          public void close() {};
-        };
-        return pw;
-      }
-    };
-    authFilter.doFilter(request, rspCloseShield, filterChain);
-
-    if (authFilter instanceof HadoopAuthFilter) { // delegation token mgmt.
-      String requestContinuesAttr = (String)request.getAttribute(REQUEST_CONTINUES_ATTR);
-      if (requestContinuesAttr == null) {
-        log.warn("Could not find " + REQUEST_CONTINUES_ATTR);
-        return false;
-      } else {
-        return Boolean.parseBoolean(requestContinuesAttr);
-      }
-    }
-
-    return true;
-  }
-
-  @Override
-  public SolrHttpClientBuilder getHttpClientBuilder(SolrHttpClientBuilder builder) {
-    return (factory != null) ? factory.getHttpClientBuilder(Optional.ofNullable(builder)) : builder;
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (authFilter != null) {
-      authFilter.destroy();
-    }
-    if (factory != null) {
-      factory.close();
-    }
-  }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a05d6f4/solr/core/src/java/org/apache/solr/security/HadoopAuthPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/security/HadoopAuthPlugin.java b/solr/core/src/java/org/apache/solr/security/HadoopAuthPlugin.java
index db0f639..1f0d5ad 100644
--- a/solr/core/src/java/org/apache/solr/security/HadoopAuthPlugin.java
+++ b/solr/core/src/java/org/apache/solr/security/HadoopAuthPlugin.java
@@ -135,7 +135,7 @@ public class HadoopAuthPlugin extends AuthenticationPlugin {
       authFilter.init(conf);
 
     } catch (ServletException e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Error initializing GenericHadoopAuthPlugin: "+e);
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Error initializing " + getClass().getName() + ": "+e);
     }
   }
 

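For upgraders, the Upgrade Notes entry above amounts to a one-word edit in
security.json. A minimal sketch, assuming a kerberos-style setup (all values
other than the class name are illustrative):

    {
      "authentication": {
        "class": "solr.HadoopAuthPlugin",
        "type": "kerberos"
      }
    }

Previously the same block would have named "solr.GenericHadoopAuthPlugin";
every other parameter is read identically by the superclass.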

[03/50] [abbrv] lucene-solr:apiv2: Remove four unnecessary @Override annotations in SolrQueryBuilder (test) classes.

Posted by no...@apache.org.
Remove four unnecessary @Override annotations in SolrQueryBuilder (test) classes.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/23019006
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/23019006
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/23019006

Branch: refs/heads/apiv2
Commit: 230190065ca96c6ecc45e581a56f856888c2e321
Parents: 649c58d
Author: Christine Poerschke <cp...@apache.org>
Authored: Mon Jan 16 18:14:36 2017 +0000
Committer: Christine Poerschke <cp...@apache.org>
Committed: Mon Jan 16 18:14:36 2017 +0000

----------------------------------------------------------------------
 .../org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java    | 1 -
 solr/core/src/test/org/apache/solr/search/GoodbyeQueryBuilder.java  | 1 -
 solr/core/src/test/org/apache/solr/search/HandyQueryBuilder.java    | 1 -
 solr/core/src/test/org/apache/solr/search/HelloQueryBuilder.java    | 1 -
 4 files changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23019006/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java b/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java
index 135ec45..bbc081a 100644
--- a/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java
+++ b/solr/core/src/test/org/apache/solr/search/ApacheLuceneSolrNearQueryBuilder.java
@@ -35,7 +35,6 @@ public class ApacheLuceneSolrNearQueryBuilder extends SolrQueryBuilder {
     super(defaultField, analyzer, req, queryFactory);
   }
 
-  @Override
   public Query getQuery(Element e) throws ParserException {
     final String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     final SpanQuery[] spanQueries = new SpanQuery[]{

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23019006/solr/core/src/test/org/apache/solr/search/GoodbyeQueryBuilder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/GoodbyeQueryBuilder.java b/solr/core/src/test/org/apache/solr/search/GoodbyeQueryBuilder.java
index af258d4..93f4a1b 100644
--- a/solr/core/src/test/org/apache/solr/search/GoodbyeQueryBuilder.java
+++ b/solr/core/src/test/org/apache/solr/search/GoodbyeQueryBuilder.java
@@ -31,7 +31,6 @@ public class GoodbyeQueryBuilder extends SolrQueryBuilder {
     super(defaultField, analyzer, req, queryFactory);
   }
 
-  @Override
   public Query getQuery(Element e) throws ParserException {
     return new MatchNoDocsQuery();
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23019006/solr/core/src/test/org/apache/solr/search/HandyQueryBuilder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/HandyQueryBuilder.java b/solr/core/src/test/org/apache/solr/search/HandyQueryBuilder.java
index 14a8aac..c38fb6b 100644
--- a/solr/core/src/test/org/apache/solr/search/HandyQueryBuilder.java
+++ b/solr/core/src/test/org/apache/solr/search/HandyQueryBuilder.java
@@ -35,7 +35,6 @@ public class HandyQueryBuilder extends SolrQueryBuilder {
     super(defaultField, analyzer, req, queryFactory);
   }
 
-  @Override
   public Query getQuery(Element e) throws ParserException {
     final BooleanQuery.Builder bq = new BooleanQuery.Builder();
     final Query lhsQ = getSubQuery(e, "Left");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23019006/solr/core/src/test/org/apache/solr/search/HelloQueryBuilder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/HelloQueryBuilder.java b/solr/core/src/test/org/apache/solr/search/HelloQueryBuilder.java
index 642047f..8ea98f1 100644
--- a/solr/core/src/test/org/apache/solr/search/HelloQueryBuilder.java
+++ b/solr/core/src/test/org/apache/solr/search/HelloQueryBuilder.java
@@ -31,7 +31,6 @@ public class HelloQueryBuilder extends SolrQueryBuilder {
     super(defaultField, analyzer, req, queryFactory);
   }
 
-  @Override
   public Query getQuery(Element e) throws ParserException {
     return new MatchAllDocsQuery();
   }
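
Removing these annotations changes no behavior: @Override is purely a
compile-time assertion. A minimal illustration (toy classes, unrelated to the
builders above):

    class Base {
      Object get() { return null; }
    }

    class Sub extends Base {
      Object get() { return "x"; } // compiles with or without @Override
    }

The annotation only earns its keep as a typo guard: had Sub declared get(int)
with an @Override on it, compilation would fail instead of silently
introducing an overload.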


[18/50] [abbrv] lucene-solr:apiv2: LUCENE-7641: Speed up range queries that match most documents.

Posted by no...@apache.org.
LUCENE-7641: Speed up range queries that match most documents.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/3404677e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/3404677e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/3404677e

Branch: refs/heads/apiv2
Commit: 3404677e57fcf7901813f7d7ccfc3e57db011993
Parents: 9ee48aa
Author: Adrien Grand <jp...@gmail.com>
Authored: Wed Jan 18 13:48:27 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Wed Jan 18 13:48:27 2017 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  6 ++
 .../org/apache/lucene/index/CheckIndex.java     |  4 +-
 .../apache/lucene/search/PointRangeQuery.java   | 74 ++++++++++++++++++++
 .../org/apache/lucene/util/bkd/BKDReader.java   |  8 ++-
 .../org/apache/lucene/util/bkd/BKDWriter.java   | 14 ++--
 .../apache/lucene/search/TestPointQueries.java  | 35 +++++++++
 6 files changed, 130 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3404677e/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 4df7a67..cee0335 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -93,6 +93,12 @@ Improvements
   should be run, eg. using points or doc values depending on costs of other
   parts of the query. (Adrien Grand)
 
+Optimizations
+
+* LUCENE-7641: Optimized point range queries to compute documents that do not
+  match the range on single-valued fields when more than half the documents in
+  the index would match. (Adrien Grand)
+
 ======================= Lucene 6.4.0 =======================
 
 API Changes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3404677e/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index 7611a7f..f3bdfb0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -1813,8 +1813,8 @@ public final class CheckIndex implements Closeable {
             int docCount = values.getDocCount();
 
             final long crossCost = values.estimatePointCount(new ConstantRelationIntersectVisitor(Relation.CELL_CROSSES_QUERY));
-            if (crossCost < size) {
-              throw new RuntimeException("estimatePointCount should return >= size when all cells match");
+            if (crossCost < size / 2) {
+              throw new RuntimeException("estimatePointCount should return >= size/2 when all cells match");
             }
             final long insideCost = values.estimatePointCount(new ConstantRelationIntersectVisitor(Relation.CELL_INSIDE_QUERY));
             if (insideCost < size) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3404677e/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
index 29c6e7f..7c997ca 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
@@ -26,7 +26,9 @@ import org.apache.lucene.index.PointValues.Relation;
 import org.apache.lucene.document.IntPoint;    // javadocs
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.BitSetIterator;
 import org.apache.lucene.util.DocIdSetBuilder;
+import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.StringHelper;
 
 /** 
@@ -163,6 +165,64 @@ public abstract class PointRangeQuery extends Query {
         };
       }
 
+      /**
+       * Create a visitor that clears documents that do NOT match the range.
+       */
+      private IntersectVisitor getInverseIntersectVisitor(FixedBitSet result, int[] cost) {
+        return new IntersectVisitor() {
+
+          @Override
+          public void visit(int docID) {
+            result.clear(docID);
+            cost[0]--;
+          }
+
+          @Override
+          public void visit(int docID, byte[] packedValue) {
+            for(int dim=0;dim<numDims;dim++) {
+              int offset = dim*bytesPerDim;
+              if (StringHelper.compare(bytesPerDim, packedValue, offset, lowerPoint, offset) < 0) {
+                // Doc's value is too low, in this dimension
+                result.clear(docID);
+                cost[0]--;
+                return;
+              }
+              if (StringHelper.compare(bytesPerDim, packedValue, offset, upperPoint, offset) > 0) {
+                // Doc's value is too high, in this dimension
+                result.clear(docID);
+                cost[0]--;
+                return;
+              }
+            }
+          }
+
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+
+            boolean crosses = false;
+
+            for(int dim=0;dim<numDims;dim++) {
+              int offset = dim*bytesPerDim;
+
+              if (StringHelper.compare(bytesPerDim, minPackedValue, offset, upperPoint, offset) > 0 ||
+                  StringHelper.compare(bytesPerDim, maxPackedValue, offset, lowerPoint, offset) < 0) {
+                // This dim is not in the range
+                return Relation.CELL_INSIDE_QUERY;
+              }
+
+              crosses |= StringHelper.compare(bytesPerDim, minPackedValue, offset, lowerPoint, offset) < 0 ||
+                  StringHelper.compare(bytesPerDim, maxPackedValue, offset, upperPoint, offset) > 0;
+            }
+
+            if (crosses) {
+              return Relation.CELL_CROSSES_QUERY;
+            } else {
+              return Relation.CELL_OUTSIDE_QUERY;
+            }
+          }
+        };
+      }
+
       @Override
       public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
         LeafReader reader = context.reader();
@@ -221,6 +281,20 @@ public abstract class PointRangeQuery extends Query {
 
             @Override
             public Scorer get(boolean randomAccess) throws IOException {
+              if (values.getDocCount() == reader.maxDoc()
+                  && values.getDocCount() == values.size()
+                  && cost() > reader.maxDoc() / 2) {
+                // If all docs have exactly one value and the cost is greater
+                // than half the leaf size then maybe we can make things faster
+                // by computing the set of documents that do NOT match the range
+                final FixedBitSet result = new FixedBitSet(reader.maxDoc());
+                result.set(0, reader.maxDoc());
+                int[] cost = new int[] { reader.maxDoc() };
+                values.intersect(getInverseIntersectVisitor(result, cost));
+                final DocIdSetIterator iterator = new BitSetIterator(result, cost[0]);
+                return new ConstantScoreScorer(weight, score(), iterator);
+              }
+
               values.intersect(visitor);
               DocIdSetIterator iterator = result.build().iterator();
               return new ConstantScoreScorer(weight, score(), iterator);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3404677e/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
index 14e1adb..4089d82 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
@@ -717,8 +717,12 @@ public final class BKDReader extends PointValues implements Accountable {
       // This cell is fully outside of the query shape: stop recursing
       return 0L;
     } else if (state.index.isLeafNode()) {
-      // Assume all points match and there are no dups
-      return maxPointsInLeafNode;
+      if (r == Relation.CELL_INSIDE_QUERY) {
+        return maxPointsInLeafNode;
+      } else {
+        // Assume half the points matched
+        return (maxPointsInLeafNode + 1) / 2;
+      }
     } else {
       
       // Non-leaf node: recurse on the split left and right nodes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3404677e/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
index 5e391f4..eeb40fa 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDWriter.java
@@ -487,7 +487,7 @@ public class BKDWriter implements Closeable {
     assert Arrays.equals(parentSplits, new int[numDims]);
 
     long indexFP = out.getFilePointer();
-    writeIndex(out, leafBlockFPs, splitPackedValues);
+    writeIndex(out, Math.toIntExact(countPerLeaf), leafBlockFPs, splitPackedValues);
     return indexFP;
   }
 
@@ -645,7 +645,7 @@ public class BKDWriter implements Closeable {
       for(int i=0;i<leafBlockFPs.size();i++) {
         arr[i] = leafBlockFPs.get(i);
       }
-      writeIndex(out, arr, index);
+      writeIndex(out, maxPointsInLeafNode, arr, index);
       return indexFP;
     }
 
@@ -1035,7 +1035,7 @@ public class BKDWriter implements Closeable {
 
     // Write index:
     long indexFP = out.getFilePointer();
-    writeIndex(out, leafBlockFPs, splitPackedValues);
+    writeIndex(out, Math.toIntExact(countPerLeaf), leafBlockFPs, splitPackedValues);
     return indexFP;
   }
 
@@ -1241,16 +1241,16 @@ public class BKDWriter implements Closeable {
     return result;
   }
 
-  private void writeIndex(IndexOutput out, long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
+  private void writeIndex(IndexOutput out, int countPerLeaf, long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
     byte[] packedIndex = packIndex(leafBlockFPs, splitPackedValues);
-    writeIndex(out, leafBlockFPs.length, packedIndex);
+    writeIndex(out, countPerLeaf, leafBlockFPs.length, packedIndex);
   }
   
-  private void writeIndex(IndexOutput out, int numLeaves, byte[] packedIndex) throws IOException {
+  private void writeIndex(IndexOutput out, int countPerLeaf, int numLeaves, byte[] packedIndex) throws IOException {
     
     CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
     out.writeVInt(numDims);
-    out.writeVInt(maxPointsInLeafNode);
+    out.writeVInt(countPerLeaf);
     out.writeVInt(bytesPerDim);
 
     assert numLeaves > 0;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3404677e/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
index 5c66478..8f7beaf 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
@@ -69,6 +69,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.bkd.BKDWriter;
 import org.junit.BeforeClass;
 
 public class TestPointQueries extends LuceneTestCase {
@@ -2080,4 +2081,38 @@ public class TestPointQueries extends LuceneTestCase {
     assertTrue(Float.compare(Float.NEGATIVE_INFINITY, FloatPoint.nextDown(Float.NEGATIVE_INFINITY)) == 0);
     assertTrue(Float.compare(Float.MAX_VALUE, FloatPoint.nextDown(Float.POSITIVE_INFINITY)) == 0);
   }
+
+  public void testInversePointRange() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    final int numDims = TestUtil.nextInt(random(), 1, 3);
+    final int numDocs = atLeast(10 * BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE); // we need multiple leaves to enable this optimization
+    for (int i = 0; i < numDocs; ++i) {
+      Document doc = new Document();
+      int[] values = new int[numDims];
+      Arrays.fill(values, i);
+      doc.add(new IntPoint("f", values));
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    IndexReader r = DirectoryReader.open(w);
+    w.close();
+
+    IndexSearcher searcher = newSearcher(r);
+    int[] low = new int[numDims];
+    int[] high = new int[numDims];
+    Arrays.fill(high, numDocs - 2);
+    assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
+    Arrays.fill(low, 1);
+    assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
+    Arrays.fill(high, numDocs - 1);
+    assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
+    Arrays.fill(low, BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE + 1);
+    assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
+    Arrays.fill(high, numDocs - BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE);
+    assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
+
+    r.close();
+    dir.close();
+  }
 }
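
Stripped of the BKD machinery, the new branch in PointRangeQuery is the
classic complement trick: when most documents match, start from "everything
matches" and clear the smaller non-matching set. A minimal sketch, assuming
only FixedBitSet (the single-valued-field guard above is what makes the
complement well defined):

    import org.apache.lucene.util.FixedBitSet;

    class ComplementMatch {
      static FixedBitSet matchByComplement(int maxDoc, int[] nonMatchingDocs) {
        FixedBitSet result = new FixedBitSet(maxDoc);
        result.set(0, maxDoc);           // optimistically mark every doc as matching
        for (int doc : nonMatchingDocs) {
          result.clear(doc);             // visit only the (smaller) complement
        }
        return result;
      }
    }

This also explains the BKDReader change: crossing leaves now estimate
(maxPointsInLeafNode + 1) / 2 matches rather than a full leaf, which feeds the
cost() that gets compared against reader.maxDoc() / 2 before the inverse
visitor is chosen.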


[15/50] [abbrv] lucene-solr:apiv2: SOLR-9976: Fix init bug in SegmentsInfoRequestHandlerTest

Posted by no...@apache.org.
SOLR-9976: Fix init bug in SegmentsInfoRequestHandlerTest


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/39eec660
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/39eec660
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/39eec660

Branch: refs/heads/apiv2
Commit: 39eec660ca79b62947321390e07e83d84be419e5
Parents: e816fbe
Author: Chris Hostetter <ho...@apache.org>
Authored: Tue Jan 17 14:42:41 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Tue Jan 17 14:42:41 2017 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  5 +++++
 .../admin/SegmentsInfoRequestHandlerTest.java   | 20 +++++++++++++-------
 2 files changed, 18 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/39eec660/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 5b96c20..42be8a2 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -90,6 +90,11 @@ Jetty 9.3.14.v20161028
 Detailed Change List
 ----------------------
 
+Bug Fixes
+----------------------
+
+* SOLR-9976: Fix init bug in SegmentsInfoRequestHandlerTest (hossman)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/39eec660/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java
index 885e419..1355e56 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java
@@ -16,9 +16,11 @@
  */
 package org.apache.solr.handler.admin;
 
+import org.apache.lucene.index.LogDocMergePolicy;
 import org.apache.lucene.util.Version;
+import org.apache.solr.index.LogDocMergePolicyFactory;
 import org.apache.solr.util.AbstractSolrTestCase;
-import org.junit.Before;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -32,13 +34,17 @@ public class SegmentsInfoRequestHandlerTest extends AbstractSolrTestCase {
   
   @BeforeClass
   public static void beforeClass() throws Exception {
-    System.setProperty("enable.update.log", "false");
-    System.setProperty("solr.tests.useMergePolicy", "false");
-    initCore("solrconfig.xml", "schema12.xml");
-  }
 
-  @Before
-  public void before() throws Exception {
+    // we need a consistent segmentation to ensure we don't get a random
+    // merge that reduces the total num docs in all segments, or the number of deletes
+    //
+    systemSetPropertySolrTestsMergePolicy(LogDocMergePolicy.class.getName());
+    systemSetPropertySolrTestsMergePolicyFactory(LogDocMergePolicyFactory.class.getName());
+    
+    System.setProperty("enable.update.log", "false"); // no _version_ in our schema
+    initCore("solrconfig.xml", "schema12.xml"); // segments API shouldn't depend on _version_ or ulog
+    
+    // build up an index with at least 2 segments and some deletes
     for (int i = 0; i < DOC_COUNT; i++) {
       assertU(adoc("id","SOLR100" + i, "name","Apache Solr:" + i));
     }
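
The same determinism trick applies outside the Solr test harness: pin a log
merge policy so segment counts don't depend on randomized merge decisions. A
minimal plain-Lucene sketch (dir and analyzer are assumed to exist):

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LogDocMergePolicy;

    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    iwc.setMergePolicy(new LogDocMergePolicy()); // merges by doc count, deterministically
    IndexWriter w = new IndexWriter(dir, iwc);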


[22/50] [abbrv] lucene-solr:apiv2: SOLR-9979: Macro expansion should not be done in shard requests

Posted by no...@apache.org.
SOLR-9979: Macro expansion should not be done in shard requests


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/68d246df
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/68d246df
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/68d246df

Branch: refs/heads/apiv2
Commit: 68d246df003278ba0c35ae5f43872340b676a02f
Parents: 57626c9
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Wed Jan 18 10:53:02 2017 -0800
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Wed Jan 18 10:53:02 2017 -0800

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../apache/solr/request/json/RequestUtil.java   | 16 ++++---
 .../org/apache/solr/TestDistributedSearch.java  | 46 ++++++++++++--------
 3 files changed, 40 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/68d246df/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 5fd8a9e..cfd7a4c 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -98,6 +98,8 @@ Bug Fixes
 * SOLR-9977: Fix config bug in DistribDocExpirationUpdateProcessorTest that allowed false assumptions
   about when index version changes (hossman)
 
+* SOLR-9979: Macro expansion should not be done in shard requests (Tomás Fernández Lóbbe)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/68d246df/solr/core/src/java/org/apache/solr/request/json/RequestUtil.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/json/RequestUtil.java b/solr/core/src/java/org/apache/solr/request/json/RequestUtil.java
index 20efdc3..2529e74 100644
--- a/solr/core/src/java/org/apache/solr/request/json/RequestUtil.java
+++ b/solr/core/src/java/org/apache/solr/request/json/RequestUtil.java
@@ -147,14 +147,16 @@ public class RequestUtil {
       newMap.putAll( MultiMapSolrParams.asMultiMap(invariants) );
     }
 
-    String[] doMacrosStr = newMap.get("expandMacros");
-    boolean doMacros = true;
-    if (doMacrosStr != null) {
-      doMacros = "true".equals(doMacrosStr[0]);
-    }
+    if (!isShard) { // Don't expand macros in shard requests
+      String[] doMacrosStr = newMap.get("expandMacros");
+      boolean doMacros = true;
+      if (doMacrosStr != null) {
+        doMacros = "true".equals(doMacrosStr[0]);
+      }
 
-    if (doMacros) {
-      newMap = MacroExpander.expand(newMap);
+      if (doMacros) {
+        newMap = MacroExpander.expand(newMap);
+      }
     }
     // Set these params as soon as possible so if there is an error processing later, things like
     // "wt=json" will take effect from the defaults.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/68d246df/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
index a5cc80c..24ab689 100644
--- a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
+++ b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
@@ -84,6 +84,7 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
   String tdate_b = "b_n_tdt";
   
   String oddField="oddField_s";
+  String s1="a_s";
   String missingField="ignore_exception__missing_but_valid_field_t";
   String invalidField="ignore_exception__invalid_field_not_in_schema";
 
@@ -111,44 +112,49 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
            "foo_sev_enum", "Medium",
            tdate_a, "2010-04-20T11:00:00Z",
            tdate_b, "2009-08-20T11:00:00Z",
-           "foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d);
+           "foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d, 
+           s1, "z${foo}");
     indexr(id,2, i1, 50 , tlong, 50,t1,"to come to the aid of their country.",
            "foo_sev_enum", "Medium",
            "foo_sev_enum", "High",
            tdate_a, "2010-05-02T11:00:00Z",
-           tdate_b, "2009-11-02T11:00:00Z");
+           tdate_b, "2009-11-02T11:00:00Z",
+           s1, "z${foo}");
     indexr(id,3, i1, 2, tlong, 2,t1,"how now brown cow",
-           tdate_a, "2010-05-03T11:00:00Z");
+           tdate_a, "2010-05-03T11:00:00Z",
+           s1, "z${foo}");
     indexr(id,4, i1, -100 ,tlong, 101,
            t1,"the quick fox jumped over the lazy dog", 
            tdate_a, "2010-05-03T11:00:00Z",
-           tdate_b, "2010-05-03T11:00:00Z");
+           tdate_b, "2010-05-03T11:00:00Z",
+           s1, "a");
     indexr(id,5, i1, 500, tlong, 500 ,
            t1,"the quick fox jumped way over the lazy dog", 
-           tdate_a, "2010-05-05T11:00:00Z");
-    indexr(id,6, i1, -600, tlong, 600 ,t1,"humpty dumpy sat on a wall");
-    indexr(id,7, i1, 123, tlong, 123 ,t1,"humpty dumpy had a great fall");
+           tdate_a, "2010-05-05T11:00:00Z",
+           s1, "b");
+    indexr(id,6, i1, -600, tlong, 600 ,t1,"humpty dumpy sat on a wall", s1, "c");
+    indexr(id,7, i1, 123, tlong, 123 ,t1,"humpty dumpy had a great fall", s1, "d");
     indexr(id,8, i1, 876, tlong, 876,
            tdate_b, "2010-01-05T11:00:00Z",
            "foo_sev_enum", "High",
-           t1,"all the kings horses and all the kings men");
-    indexr(id,9, i1, 7, tlong, 7,t1,"couldn't put humpty together again");
+           t1,"all the kings horses and all the kings men", s1, "e");
+    indexr(id,9, i1, 7, tlong, 7,t1,"couldn't put humpty together again", s1, "f");
 
     commit();  // try to ensure there's more than one segment
 
-    indexr(id,10, i1, 4321, tlong, 4321,t1,"this too shall pass");
+    indexr(id,10, i1, 4321, tlong, 4321,t1,"this too shall pass", s1, "g");
     indexr(id,11, i1, -987, tlong, 987,
            "foo_sev_enum", "Medium",
-           t1,"An eye for eye only ends up making the whole world blind.");
+           t1,"An eye for eye only ends up making the whole world blind.", s1, "h");
     indexr(id,12, i1, 379, tlong, 379,
-           t1,"Great works are performed, not by strength, but by perseverance.");
+           t1,"Great works are performed, not by strength, but by perseverance.", s1, "i");
     indexr(id,13, i1, 232, tlong, 232,
            t1,"no eggs on wall, lesson learned", 
-           oddField, "odd man out");
+           oddField, "odd man out", s1, "j");
 
-    indexr(id, "1001", "lowerfilt", "toyota"); // for spellcheck
+    indexr(id, "1001", "lowerfilt", "toyota", s1, "k"); // for spellcheck
 
-    indexr(id, 14, "SubjectTerms_mfacet", new String[]  {"mathematical models", "mathematical analysis"});
+    indexr(id, 14, "SubjectTerms_mfacet", new String[]  {"mathematical models", "mathematical analysis"}, s1, "l");
     indexr(id, 15, "SubjectTerms_mfacet", new String[]  {"test 1", "test 2", "test3"});
     indexr(id, 16, "SubjectTerms_mfacet", new String[]  {"test 1", "test 2", "test3"});
     String[] vals = new String[100];
@@ -867,13 +873,19 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
     // Try to get better coverage for refinement queries by turning off over requesting.
     // This makes it much more likely that we may not get the top facet values and hence
     // we turn off that checking.
-    handle.put("facet_fields", SKIPVAL);    
+    handle.put("facet_fields", SKIPVAL);
     query("q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5);
     // check a complex key name
     query("q","*:*", "rows",0, "facet","true", "facet.field","{!key='$a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5);
     query("q","*:*", "rows",0, "facet","true", "facet.field","{!key='$a'}"+t1,"facet.limit",5, "facet.shard.limit",5);
     handle.remove("facet_fields");
-
+    // Make sure there is no macro expansion for field values
+    query("q","*:*", "rows",0, "facet","true", "facet.field",s1,"facet.limit",5, "facet.shard.limit",5);
+    query("q","*:*", "rows",0, "facet","true", "facet.field",s1,"facet.limit",5, "facet.shard.limit",5, "expandMacros", "true");
+    query("q","*:*", "rows",0, "facet","true", "facet.field",s1,"facet.limit",5, "facet.shard.limit",5, "expandMacros", "false");
+    // Macro expansion should still work for the parameters
+    query("q","*:*", "rows",0, "facet","true", "facet.field","${foo}", "f.${foo}.mincount", 1, "foo", s1);
+    query("q","*:*", "rows",0, "facet","true", "facet.field","${foo}", "f.${foo}.mincount", 1, "foo", s1, "expandMacros", "true");
 
     // index the same document to two servers and make sure things
     // don't blow up.
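
For context on the facility being guarded: parameter values containing
${name} are rewritten from other request parameters before a request is
processed. A minimal sketch using the same MacroExpander entry point as the
RequestUtil hunk above (map contents are illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.solr.request.macro.MacroExpander;

    Map<String,String[]> params = new HashMap<>();
    params.put("q", new String[] {"${qq}"});
    params.put("qq", new String[] {"solr"});
    params = MacroExpander.expand(params); // "q" is now "solr"

With the fix, this expansion is skipped when isShard is true, so refinement
requests that embed indexed values such as "z${foo}" reach the shards
verbatim rather than being expanded a second time.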


[50/50] [abbrv] lucene-solr:apiv2: Merge remote-tracking branch 'remotes/origin/master' into apiv2

Posted by no...@apache.org.
Merge remote-tracking branch 'remotes/origin/master' into apiv2


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a6e77729
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a6e77729
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a6e77729

Branch: refs/heads/apiv2
Commit: a6e77729429ecfd7f401b6f4482a1babe26e0014
Parents: 4841ce1 ee5a360
Author: Noble Paul <no...@apache.org>
Authored: Tue Jan 24 14:02:12 2017 +1030
Committer: Noble Paul <no...@apache.org>
Committed: Tue Jan 24 14:02:12 2017 +1030

----------------------------------------------------------------------
 .gitignore                                      |    1 +
 build.xml                                       |   42 +-
 .../dot.settings/org.eclipse.jdt.core.prefs     |    7 +-
 dev-tools/idea/lucene/suggest/suggest.iml       |    2 -
 .../idea/solr/contrib/analytics/analytics.iml   |    1 +
 .../lucene/analysis/common/pom.xml.template     |   36 +-
 .../maven/lucene/analysis/icu/pom.xml.template  |   42 +-
 .../lucene/analysis/kuromoji/pom.xml.template   |   38 +-
 .../lucene/analysis/morfologik/pom.xml.template |   38 +-
 .../lucene/analysis/phonetic/pom.xml.template   |   38 +-
 .../maven/lucene/analysis/pom.xml.template      |   36 +-
 .../lucene/analysis/smartcn/pom.xml.template    |   36 +-
 .../lucene/analysis/stempel/pom.xml.template    |   36 +-
 .../maven/lucene/analysis/uima/pom.xml.template |   36 +-
 .../lucene/backward-codecs/pom.xml.template     |   36 +-
 .../maven/lucene/benchmark/pom.xml.template     |   36 +-
 .../lucene/classification/pom.xml.template      |   36 +-
 dev-tools/maven/lucene/codecs/pom.xml.template  |   36 +-
 .../lucene/codecs/src/java/pom.xml.template     |   36 +-
 .../lucene/codecs/src/test/pom.xml.template     |   36 +-
 dev-tools/maven/lucene/core/pom.xml.template    |   36 +-
 .../maven/lucene/core/src/java/pom.xml.template |   36 +-
 .../maven/lucene/core/src/test/pom.xml.template |   36 +-
 dev-tools/maven/lucene/demo/pom.xml.template    |   36 +-
 .../maven/lucene/expressions/pom.xml.template   |   36 +-
 dev-tools/maven/lucene/facet/pom.xml.template   |   36 +-
 .../maven/lucene/grouping/pom.xml.template      |   36 +-
 .../maven/lucene/highlighter/pom.xml.template   |   36 +-
 dev-tools/maven/lucene/join/pom.xml.template    |   36 +-
 dev-tools/maven/lucene/memory/pom.xml.template  |   36 +-
 dev-tools/maven/lucene/misc/pom.xml.template    |   36 +-
 dev-tools/maven/lucene/pom.xml.template         |   36 +-
 dev-tools/maven/lucene/queries/pom.xml.template |   36 +-
 .../maven/lucene/queryparser/pom.xml.template   |   36 +-
 .../maven/lucene/replicator/pom.xml.template    |   38 +-
 dev-tools/maven/lucene/sandbox/pom.xml.template |   36 +-
 .../lucene/spatial-extras/pom.xml.template      |   36 +-
 dev-tools/maven/lucene/spatial/pom.xml.template |   38 +-
 .../maven/lucene/spatial3d/pom.xml.template     |   36 +-
 dev-tools/maven/lucene/suggest/pom.xml.template |   36 +-
 .../lucene/test-framework/pom.xml.template      |   36 +-
 dev-tools/maven/pom.xml.template                |   36 +-
 .../contrib/analysis-extras/pom.xml.template    |   36 +-
 .../solr/contrib/analytics/pom.xml.template     |   36 +-
 .../solr/contrib/clustering/pom.xml.template    |   36 +-
 .../dataimporthandler-extras/pom.xml.template   |   36 +-
 .../contrib/dataimporthandler/pom.xml.template  |   36 +-
 .../solr/contrib/extraction/pom.xml.template    |   36 +-
 .../maven/solr/contrib/langid/pom.xml.template  |   36 +-
 .../maven/solr/contrib/ltr/pom.xml.template     |   36 +-
 .../solr/contrib/map-reduce/pom.xml.template    |   36 +-
 .../contrib/morphlines-cell/pom.xml.template    |   36 +-
 .../contrib/morphlines-core/pom.xml.template    |   36 +-
 dev-tools/maven/solr/contrib/pom.xml.template   |   36 +-
 .../maven/solr/contrib/uima/pom.xml.template    |   36 +-
 .../solr/contrib/velocity/pom.xml.template      |   36 +-
 dev-tools/maven/solr/core/pom.xml.template      |   36 +-
 .../maven/solr/core/src/java/pom.xml.template   |   36 +-
 .../maven/solr/core/src/test/pom.xml.template   |   36 +-
 dev-tools/maven/solr/pom.xml.template           |   36 +-
 dev-tools/maven/solr/solrj/pom.xml.template     |   36 +-
 .../maven/solr/solrj/src/java/pom.xml.template  |   36 +-
 .../maven/solr/solrj/src/test/pom.xml.template  |   36 +-
 .../maven/solr/test-framework/pom.xml.template  |   36 +-
 lucene/CHANGES.txt                              |  187 +-
 .../analysis/charfilter/BaseCharFilter.java     |   26 +-
 .../analysis/core/FlattenGraphFilter.java       |  418 ++++
 .../core/FlattenGraphFilterFactory.java         |   44 +
 .../lucene/analysis/custom/CustomAnalyzer.java  |    2 +-
 .../miscellaneous/FixBrokenOffsetsFilter.java   |   78 +
 .../FixBrokenOffsetsFilterFactory.java          |   39 +
 .../miscellaneous/WordDelimiterFilter.java      |    9 +-
 .../WordDelimiterFilterFactory.java             |    6 +
 .../miscellaneous/WordDelimiterGraphFilter.java |  706 +++++++
 .../WordDelimiterGraphFilterFactory.java        |  199 ++
 .../miscellaneous/WordDelimiterIterator.java    |   59 +-
 .../analysis/ngram/EdgeNGramTokenFilter.java    |   16 +-
 .../lucene/analysis/ngram/NGramTokenFilter.java |   18 +-
 .../lucene/analysis/synonym/SynonymFilter.java  |    5 +
 .../analysis/synonym/SynonymFilterFactory.java  |    5 +
 .../analysis/synonym/SynonymGraphFilter.java    |  587 ++++++
 .../synonym/SynonymGraphFilterFactory.java      |  204 ++
 .../lucene/analysis/synonym/SynonymMap.java     |    7 +-
 .../lucene/analysis/util/CharTokenizer.java     |    6 +-
 ...ache.lucene.analysis.util.TokenFilterFactory |    4 +
 .../analysis/core/TestFlattenGraphFilter.java   |  284 +++
 .../lucene/analysis/core/TestRandomChains.java  |  180 +-
 .../analysis/custom/TestCustomAnalyzer.java     |   21 +
 .../apache/lucene/analysis/custom/mapping1.txt  |    1 +
 .../apache/lucene/analysis/custom/mapping2.txt  |    1 +
 .../TestFixBrokenOffsetsFilter.java             |   50 +
 .../miscellaneous/TestWordDelimiterFilter.java  |  125 +-
 .../TestWordDelimiterGraphFilter.java           |  897 ++++++++
 .../lucene/analysis/ngram/TestNGramFilters.java |   47 +
 .../synonym/TestSynonymGraphFilter.java         | 1925 ++++++++++++++++++
 .../apache/lucene/index/FixBrokenOffsets.java   |  125 ++
 .../java/org/apache/lucene/index/package.html   |   27 +
 .../lucene/index/TestFixBrokenOffsets.java      |  115 ++
 .../lucene/index/index.630.brokenoffsets.zip    |  Bin 0 -> 3203 bytes
 .../classification/utils/DatasetSplitter.java   |    3 +-
 .../codecs/simpletext/SimpleTextBKDReader.java  |   50 +
 .../simpletext/SimpleTextPointsWriter.java      |    2 +-
 lucene/common-build.xml                         |   27 +-
 .../lucene/analysis/TokenStreamToAutomaton.java |   50 +-
 .../tokenattributes/OffsetAttributeImpl.java    |    2 +-
 .../PackedTokenAttributeImpl.java               |   11 +-
 .../PositionIncrementAttributeImpl.java         |    3 +-
 .../PositionLengthAttributeImpl.java            |    3 +-
 .../apache/lucene/codecs/DocValuesConsumer.java |   10 +-
 .../org/apache/lucene/codecs/NormsConsumer.java |    2 +-
 .../org/apache/lucene/codecs/PointsWriter.java  |    5 +
 .../lucene/codecs/StoredFieldsWriter.java       |    2 +-
 .../apache/lucene/codecs/TermVectorsWriter.java |    2 +-
 .../CompressingStoredFieldsWriter.java          |   87 +-
 .../CompressingTermVectorsWriter.java           |   11 +-
 .../GrowableByteArrayDataOutput.java            |   83 -
 .../lucene50/Lucene50StoredFieldsFormat.java    |    2 +-
 .../codecs/lucene60/Lucene60PointsWriter.java   |   11 +-
 .../lucene70/Lucene70DocValuesConsumer.java     |  163 +-
 .../lucene70/Lucene70DocValuesFormat.java       |    5 +-
 .../lucene70/Lucene70DocValuesProducer.java     |  220 +-
 .../org/apache/lucene/document/DoublePoint.java |   30 +-
 .../org/apache/lucene/document/FloatPoint.java  |   30 +-
 .../lucene/document/NumericDocValuesField.java  |   48 +
 .../lucene/document/SortedDocValuesField.java   |   42 +
 .../document/SortedNumericDocValuesField.java   |   54 +
 .../SortedNumericDocValuesRangeQuery.java       |  145 ++
 .../document/SortedSetDocValuesField.java       |   43 +
 .../document/SortedSetDocValuesRangeQuery.java  |  188 ++
 .../apache/lucene/index/AutomatonTermsEnum.java |    3 +
 .../lucene/index/BinaryDocValuesWriter.java     |   39 +-
 .../org/apache/lucene/index/CheckIndex.java     |   66 +-
 .../lucene/index/DefaultIndexingChain.java      |  133 +-
 .../org/apache/lucene/index/DocConsumer.java    |    2 +-
 .../org/apache/lucene/index/DocIDMerger.java    |  211 +-
 .../apache/lucene/index/DocValuesWriter.java    |    4 +-
 .../lucene/index/DocumentsWriterPerThread.java  |   31 +-
 .../lucene/index/FreqProxTermsWriter.java       |    8 +-
 .../org/apache/lucene/index/IndexWriter.java    |   42 +-
 .../apache/lucene/index/IndexWriterConfig.java  |    3 +
 .../org/apache/lucene/index/LeafReader.java     |    2 +-
 .../lucene/index/LiveIndexWriterConfig.java     |   13 +
 .../lucene/index/MappingMultiPostingsEnum.java  |    2 +-
 .../org/apache/lucene/index/MergeState.java     |    8 +-
 .../apache/lucene/index/NormValuesWriter.java   |   17 +-
 .../lucene/index/NumericDocValuesWriter.java    |   51 +-
 .../org/apache/lucene/index/PointValues.java    |    7 +
 .../apache/lucene/index/PointValuesWriter.java  |  106 +-
 .../apache/lucene/index/PrefixCodedTerms.java   |    4 +-
 .../apache/lucene/index/SegmentCoreReaders.java |    8 +
 .../org/apache/lucene/index/SegmentInfos.java   |    7 +-
 .../apache/lucene/index/SortedDocValues.java    |   22 +
 .../lucene/index/SortedDocValuesWriter.java     |   76 +-
 .../index/SortedNumericDocValuesWriter.java     |   64 +-
 .../apache/lucene/index/SortedSetDocValues.java |   22 +
 .../lucene/index/SortedSetDocValuesWriter.java  |   98 +-
 .../java/org/apache/lucene/index/Sorter.java    |   57 +-
 .../apache/lucene/index/SortingLeafReader.java  |   26 +-
 .../index/SortingStoredFieldsConsumer.java      |  206 ++
 .../index/SortingTermVectorsConsumer.java       |  181 ++
 .../lucene/index/StoredFieldsConsumer.java      |   85 +
 .../lucene/index/TermVectorsConsumer.java       |    7 +-
 .../java/org/apache/lucene/index/TermsHash.java |    4 +-
 .../TrackingTmpOutputDirectoryWrapper.java      |   53 +
 .../lucene/search/Boolean2ScorerSupplier.java   |  217 ++
 .../org/apache/lucene/search/BooleanWeight.java |  136 +-
 .../apache/lucene/search/ConjunctionDISI.java   |    2 +-
 .../apache/lucene/search/ConjunctionScorer.java |    3 +-
 .../lucene/search/ConstantScoreQuery.java       |   46 +-
 .../org/apache/lucene/search/DoubleValues.java  |   38 +
 .../lucene/search/DoubleValuesSource.java       |  400 ++++
 .../lucene/search/FieldComparatorSource.java    |    8 +-
 .../lucene/search/FieldValueHitQueue.java       |   11 +-
 .../org/apache/lucene/search/GraphQuery.java    |  136 ++
 .../lucene/search/IndexOrDocValuesQuery.java    |  166 ++
 .../lucene/search/LeafFieldComparator.java      |    2 +-
 .../org/apache/lucene/search/LongValues.java    |   38 +
 .../apache/lucene/search/LongValuesSource.java  |  244 +++
 .../lucene/search/MinShouldMatchSumScorer.java  |   22 +-
 .../lucene/search/MultiCollectorManager.java    |  107 +
 .../apache/lucene/search/PointRangeQuery.java   |  214 +-
 .../apache/lucene/search/ScorerSupplier.java    |   47 +
 .../lucene/search/SimpleFieldComparator.java    |    2 +-
 .../org/apache/lucene/search/SortField.java     |    2 +-
 .../lucene/search/SortedNumericSortField.java   |    2 +-
 .../lucene/search/SortedSetSortField.java       |    2 +-
 .../apache/lucene/search/TermInSetQuery.java    |  322 +++
 .../java/org/apache/lucene/search/TopDocs.java  |   14 +-
 .../apache/lucene/search/TopFieldCollector.java |    4 +-
 .../search/UsageTrackingQueryCachingPolicy.java |    5 +-
 .../java/org/apache/lucene/search/Weight.java   |   25 +
 .../store/GrowableByteArrayDataOutput.java      |  103 +
 .../org/apache/lucene/store/MMapDirectory.java  |  128 +-
 .../java/org/apache/lucene/util/LongValues.java |    9 +
 .../org/apache/lucene/util/QueryBuilder.java    |  113 +-
 .../java/org/apache/lucene/util/Version.java    |    7 +
 .../apache/lucene/util/automaton/Automaton.java |    5 +-
 .../lucene/util/automaton/Operations.java       |    6 +-
 .../apache/lucene/util/automaton/StatePair.java |    4 +-
 .../org/apache/lucene/util/bkd/BKDReader.java   |   96 +
 .../org/apache/lucene/util/bkd/BKDWriter.java   |  150 +-
 .../apache/lucene/util/bkd/DocIdsWriter.java    |    4 +-
 .../graph/GraphTokenStreamFiniteStrings.java    |  230 +++
 .../apache/lucene/util/graph/package-info.java  |   21 +
 .../apache/lucene/util/packed/DirectWriter.java |    8 +-
 .../lucene/analysis/TestGraphTokenizers.java    |  645 ++++++
 .../TestGrowableByteArrayDataOutput.java        |   80 -
 .../lucene60/TestLucene60PointsFormat.java      |  200 +-
 .../lucene70/TestLucene70DocValuesFormat.java   |  152 ++
 .../lucene/index/Test4GBStoredFields.java       |    2 +
 .../org/apache/lucene/index/TestCheckIndex.java |    5 -
 .../apache/lucene/index/TestDocIDMerger.java    |    4 +-
 .../apache/lucene/index/TestIndexSorting.java   |   32 +-
 .../apache/lucene/index/TestIndexWriter.java    |    3 +-
 .../org/apache/lucene/index/TestTermsEnum.java  |    8 +
 .../index/TestTragicIndexWriterDeadlock.java    |   69 +-
 .../lucene/search/TermInSetQueryTest.java       |  291 +++
 .../search/TestBoolean2ScorerSupplier.java      |  332 +++
 .../search/TestBooleanQueryVisitSubscorers.java |    4 +-
 .../lucene/search/TestDocValuesQueries.java     |  271 +++
 .../lucene/search/TestDoubleValuesSource.java   |  167 ++
 .../lucene/search/TestElevationComparator.java  |   22 +-
 .../apache/lucene/search/TestFilterWeight.java  |    3 +-
 .../apache/lucene/search/TestGraphQuery.java    |   79 +
 .../search/TestIndexOrDocValuesQuery.java       |   89 +
 .../apache/lucene/search/TestLRUQueryCache.java |    7 +
 .../lucene/search/TestLongValuesSource.java     |  149 ++
 .../apache/lucene/search/TestPointQueries.java  |   63 +
 .../org/apache/lucene/store/TestDirectory.java  |   13 +-
 .../store/TestGrowableByteArrayDataOutput.java  |   80 +
 .../apache/lucene/store/TestMmapDirectory.java  |    3 +-
 .../org/apache/lucene/store/TestMultiMMap.java  |    3 +-
 .../apache/lucene/util/TestDocIdSetBuilder.java |    5 +
 .../apache/lucene/util/TestQueryBuilder.java    |   15 +-
 .../org/apache/lucene/util/bkd/TestBKD.java     |  138 +-
 .../util/bkd/TestMutablePointsReaderUtils.java  |    5 +
 .../org/apache/lucene/util/fst/Test2BFST.java   |    2 +
 .../TestGraphTokenStreamFiniteStrings.java      |  217 ++
 .../demo/facet/DistanceFacetsExample.java       |   18 +-
 .../ExpressionAggregationFacetsExample.java     |    2 +-
 lucene/expressions/build.xml                    |   11 -
 .../org/apache/lucene/expressions/Bindings.java |   14 +-
 .../apache/lucene/expressions/Expression.java   |   21 +-
 .../expressions/ExpressionComparator.java       |  100 -
 .../expressions/ExpressionFunctionValues.java   |   35 +-
 .../lucene/expressions/ExpressionRescorer.java  |   33 +-
 .../lucene/expressions/ExpressionSortField.java |   77 -
 .../expressions/ExpressionValueSource.java      |   70 +-
 .../lucene/expressions/ScoreFunctionValues.java |   46 -
 .../lucene/expressions/ScoreValueSource.java    |   61 -
 .../lucene/expressions/SimpleBindings.java      |   30 +-
 .../expressions/js/JavascriptCompiler.java      |   15 +-
 .../apache/lucene/expressions/package-info.java |    5 +-
 .../lucene/expressions/TestDemoExpressions.java |   44 +-
 .../expressions/TestExpressionSortField.java    |    2 +-
 .../expressions/TestExpressionValueSource.java  |  111 +-
 .../expressions/js/TestCustomFunctions.java     |   20 +-
 .../expressions/js/TestJavascriptFunction.java  |    2 +-
 .../js/TestJavascriptOperations.java            |    2 +-
 .../org/apache/lucene/facet/DrillSideways.java  |  331 ++-
 .../org/apache/lucene/facet/FacetQuery.java     |   52 +
 .../lucene/facet/FacetsCollectorManager.java    |   55 +
 .../apache/lucene/facet/MultiFacetQuery.java    |   60 +
 .../org/apache/lucene/facet/package-info.java   |    2 +-
 .../apache/lucene/facet/range/DoubleRange.java  |   28 +-
 .../facet/range/DoubleRangeFacetCounts.java     |   63 +-
 .../apache/lucene/facet/range/LongRange.java    |   28 +-
 .../facet/range/LongRangeFacetCounts.java       |   26 +-
 .../org/apache/lucene/facet/range/Range.java    |   31 -
 .../lucene/facet/taxonomy/FakeScorer.java       |   53 -
 .../taxonomy/TaxonomyFacetSumValueSource.java   |  114 +-
 .../apache/lucene/facet/TestDrillSideways.java  |  315 +--
 .../org/apache/lucene/facet/TestFacetQuery.java |  103 +
 .../lucene/facet/TestParallelDrillSideways.java |   90 +
 .../facet/range/TestRangeFacetCounts.java       |   61 +-
 .../TestTaxonomyFacetSumValueSource.java        |   54 +-
 .../AbstractAllGroupHeadsCollector.java         |  176 --
 .../grouping/AbstractAllGroupsCollector.java    |   67 -
 .../AbstractDistinctValuesCollector.java        |   59 -
 .../AbstractFirstPassGroupingCollector.java     |  354 ----
 .../grouping/AbstractGroupFacetCollector.java   |  319 ---
 .../AbstractSecondPassGroupingCollector.java    |  162 --
 .../search/grouping/AllGroupHeadsCollector.java |  176 ++
 .../search/grouping/AllGroupsCollector.java     |   67 +
 .../search/grouping/BlockGroupingCollector.java |    2 +-
 .../search/grouping/CollectedSearchGroup.java   |    2 +-
 .../grouping/DistinctValuesCollector.java       |   59 +
 .../grouping/FirstPassGroupingCollector.java    |  362 ++++
 .../lucene/search/grouping/GroupDocs.java       |    8 +-
 .../search/grouping/GroupFacetCollector.java    |  324 +++
 .../apache/lucene/search/grouping/Grouper.java  |   56 +
 .../lucene/search/grouping/GroupingSearch.java  |  130 +-
 .../lucene/search/grouping/SearchGroup.java     |   29 +-
 .../grouping/SecondPassGroupingCollector.java   |  169 ++
 .../lucene/search/grouping/TopGroups.java       |   13 +-
 .../FunctionAllGroupHeadsCollector.java         |   32 +-
 .../function/FunctionAllGroupsCollector.java    |    4 +-
 .../FunctionDistinctValuesCollector.java        |   35 +-
 .../FunctionFirstPassGroupingCollector.java     |    6 +-
 .../grouping/function/FunctionGrouper.java      |   69 +
 .../FunctionSecondPassGroupingCollector.java    |    6 +-
 .../term/TermAllGroupHeadsCollector.java        |   26 +-
 .../grouping/term/TermAllGroupsCollector.java   |    8 +-
 .../term/TermDistinctValuesCollector.java       |   26 +-
 .../term/TermFirstPassGroupingCollector.java    |    6 +-
 .../grouping/term/TermGroupFacetCollector.java  |   10 +-
 .../search/grouping/term/TermGrouper.java       |   81 +
 .../term/TermSecondPassGroupingCollector.java   |    6 +-
 .../grouping/AllGroupHeadsCollectorTest.java    |   14 +-
 .../search/grouping/AllGroupsCollectorTest.java |    6 +-
 .../grouping/DistinctValuesCollectorTest.java   |   46 +-
 .../grouping/GroupFacetCollectorTest.java       |   10 +-
 .../lucene/search/grouping/TestGrouping.java    |   79 +-
 .../search/uhighlight/FieldHighlighter.java     |    4 +-
 .../uhighlight/LengthGoalBreakIterator.java     |  185 ++
 .../lucene/search/uhighlight/OffsetsEnum.java   |   45 +-
 .../lucene/search/uhighlight/Passage.java       |    1 +
 .../search/highlight/TokenSourcesTest.java      |    2 +-
 .../uhighlight/LengthGoalBreakIteratorTest.java |  104 +
 .../TestUnifiedHighlighterExtensibility.java    |   57 +-
 lucene/ivy-versions.properties                  |   10 +-
 .../search/join/ToParentBlockJoinCollector.java |    2 +-
 .../search/join/ToParentBlockJoinSortField.java |    2 +-
 .../apache/lucene/index/memory/MemoryIndex.java |    5 +
 .../apache/lucene/search/DocValuesStats.java    |  409 ++++
 .../lucene/search/DocValuesStatsCollector.java  |   64 +
 .../search/TestDocValuesStatsCollector.java     |  463 +++++
 .../org/apache/lucene/queries/TermsQuery.java   |  381 ----
 .../queries/function/FunctionMatchQuery.java    |   99 +
 .../queries/function/FunctionScoreQuery.java    |  151 ++
 .../lucene/queries/function/ValueSource.java    |  113 +-
 .../apache/lucene/queries/TermsQueryTest.java   |  339 ---
 .../function/TestFunctionMatchQuery.java        |   61 +
 .../function/TestFunctionScoreExplanations.java |  105 +
 .../function/TestFunctionScoreQuery.java        |  114 ++
 .../queryparser/classic/QueryParserBase.java    |   42 +-
 .../complexPhrase/ComplexPhraseQueryParser.java |    4 +-
 .../lucene/queryparser/xml/CoreParser.java      |    2 -
 .../classic/TestMultiFieldQueryParser.java      |   11 +-
 .../queryparser/classic/TestQueryParser.java    |  131 +-
 .../complexPhrase/TestComplexPhraseQuery.java   |    6 +
 .../lucene/document/LatLonPointSortField.java   |    6 +-
 .../apache/lucene/document/RangeFieldQuery.java |    1 +
 .../lucene/search/DocValuesNumbersQuery.java    |   26 +-
 .../lucene/search/DocValuesRangeQuery.java      |  273 ---
 .../lucene/search/DocValuesTermsQuery.java      |   49 +-
 .../org/apache/lucene/search/LongHashSet.java   |  156 ++
 .../lucene/document/TestHalfFloatPoint.java     |    2 +
 .../search/BaseRangeFieldQueryTestCase.java     |    2 +-
 .../apache/lucene/search/LongHashSetTests.java  |  100 +
 .../lucene/search/TestDocValuesRangeQuery.java  |  307 ---
 .../lucene/search/TestDocValuesTermsQuery.java  |    1 +
 .../lucene/search/TestTermAutomatonQuery.java   |    3 +
 lucene/site/changes/changes2html.pl             |    3 +-
 .../prefix/TermQueryPrefixTreeStrategy.java     |   10 +-
 .../spatial/prefix/NumberRangeFacetsTest.java   |   12 +-
 .../spatial3d/Geo3DPointOutsideSortField.java   |    5 +-
 .../lucene/spatial3d/Geo3DPointSortField.java   |    5 +-
 .../lucene/spatial3d/geom/GeoBBoxFactory.java   |   34 +-
 .../lucene/spatial3d/geom/GeoCircleFactory.java |    2 +-
 .../spatial3d/geom/GeoPolygonFactory.java       |    6 +-
 .../apache/lucene/spatial3d/geom/Vector.java    |    4 +
 lucene/suggest/build.xml                        |    8 +-
 .../suggest/DocumentValueSourceDictionary.java  |   58 +-
 .../analyzing/AnalyzingInfixSuggester.java      |  137 +-
 .../suggest/analyzing/AnalyzingSuggester.java   |    3 +-
 .../suggest/analyzing/FreeTextSuggester.java    |   45 -
 .../DocumentValueSourceDictionaryTest.java      |  287 ++-
 .../analysis/BaseTokenStreamTestCase.java       |  151 +-
 .../apache/lucene/analysis/MockTokenizer.java   |    3 +-
 .../lucene/analysis/TokenStreamToDot.java       |    5 +-
 .../codecs/cranky/CrankyPointsFormat.java       |    5 +
 .../lucene/index/AssertingLeafReader.java       |    7 +
 .../index/BaseDocValuesFormatTestCase.java      |   32 +
 .../index/BaseStoredFieldsFormatTestCase.java   |    2 +
 .../index/BaseTermVectorsFormatTestCase.java    |   17 +-
 .../apache/lucene/index/BaseTestCheckIndex.java |   19 -
 .../org/apache/lucene/index/RandomCodec.java    |    2 +-
 .../apache/lucene/search/AssertingWeight.java   |   42 +-
 .../org/apache/lucene/search/QueryUtils.java    |    4 -
 .../org/apache/lucene/util/LuceneTestCase.java  |  110 +-
 .../org/apache/lucene/util/RamUsageTester.java  |  102 +-
 .../java/org/apache/lucene/util/TestUtil.java   |    4 +-
 .../lucene/analysis/TestGraphTokenizers.java    |  588 ------
 lucene/tools/javadoc/ecj.javadocs.prefs         |    2 +-
 lucene/tools/junit4/tests.policy                |    1 -
 lucene/tools/prettify/inject-javadocs.js        |   27 +
 lucene/tools/prettify/lang-apollo.js            |   18 -
 lucene/tools/prettify/lang-css.js               |   18 -
 lucene/tools/prettify/lang-hs.js                |   18 -
 lucene/tools/prettify/lang-lisp.js              |   19 -
 lucene/tools/prettify/lang-lua.js               |   18 -
 lucene/tools/prettify/lang-ml.js                |   18 -
 lucene/tools/prettify/lang-proto.js             |   17 -
 lucene/tools/prettify/lang-sql.js               |   18 -
 lucene/tools/prettify/lang-vb.js                |   18 -
 lucene/tools/prettify/lang-wiki.js              |   18 -
 lucene/tools/prettify/prettify.css              |   30 +-
 lucene/tools/prettify/prettify.js               |   90 +-
 solr/CHANGES.txt                                |  239 ++-
 solr/bin/solr                                   |   37 +-
 solr/bin/solr.cmd                               |   49 +-
 solr/bin/solr.in.cmd                            |    5 +
 solr/bin/solr.in.sh                             |    5 +
 .../apache/solr/schema/ICUCollationField.java   |   10 +-
 .../plugin/AnalyticsStatisticsCollector.java    |    4 +-
 .../solr/collection1/conf/solrconfig.xml        |    5 -
 .../solr/handler/dataimport/JdbcDataSource.java |    4 +-
 .../handler/dataimport/SolrEntityProcessor.java |  192 +-
 .../dataimport/MockSolrEntityProcessor.java     |   18 +-
 .../handler/dataimport/TestJdbcDataSource.java  |   50 +-
 .../TestPlainTextEntityProcessor.java           |  106 +
 .../TestSolrEntityProcessorEndToEnd.java        |   27 +-
 .../dataimport/TestSolrEntityProcessorUnit.java |   70 +
 solr/contrib/ltr/README.md                      |  406 +---
 solr/contrib/ltr/example/README.md              |  132 ++
 solr/contrib/ltr/example/config.json            |   13 +-
 solr/contrib/ltr/example/exampleFeatures.json   |   26 +
 solr/contrib/ltr/example/libsvm_formatter.py    |   12 +-
 solr/contrib/ltr/example/solrconfig.xml         | 1722 ----------------
 .../ltr/example/techproducts-features.json      |   26 -
 .../contrib/ltr/example/techproducts-model.json |   18 -
 .../ltr/example/train_and_upload_demo_model.py  |  143 +-
 solr/contrib/ltr/example/user_queries.txt       |   12 +-
 .../org/apache/solr/ltr/CSVFeatureLogger.java   |   62 +
 .../java/org/apache/solr/ltr/FeatureLogger.java |  132 +-
 .../java/org/apache/solr/ltr/LTRRescorer.java   |    2 +-
 .../org/apache/solr/ltr/LTRScoringQuery.java    |   24 +-
 .../org/apache/solr/ltr/LTRThreadModule.java    |   29 +
 .../solr/ltr/SolrQueryRequestContextUtils.java  |    6 +-
 .../org/apache/solr/ltr/feature/Feature.java    |    3 +-
 .../solr/ltr/feature/FieldValueFeature.java     |   18 +-
 .../solr/ltr/feature/OriginalScoreFeature.java  |   12 +-
 .../apache/solr/ltr/feature/SolrFeature.java    |   17 +-
 .../org/apache/solr/ltr/model/LinearModel.java  |   10 +
 .../ltr/model/MultipleAdditiveTreesModel.java   |    5 +
 .../LTRFeatureLoggerTransformerFactory.java     |  100 +-
 .../ltr/store/rest/ManagedFeatureStore.java     |    4 -
 .../solr/ltr/store/rest/ManagedModelStore.java  |   38 +-
 solr/contrib/ltr/src/java/overview.html         |    2 +-
 .../multipleadditivetreesmodel.json             |    2 +-
 .../test-files/solr/collection1/conf/schema.xml |    2 +
 .../solr/collection1/conf/solrconfig-ltr.xml    |    6 +-
 .../collection1/conf/solrconfig-ltr_Th10_10.xml |    5 +-
 .../collection1/conf/solrconfig-multiseg.xml    |   12 +-
 .../apache/solr/ltr/FeatureLoggerTestUtils.java |   44 +
 .../org/apache/solr/ltr/TestLTROnSolrCloud.java |   17 +-
 .../apache/solr/ltr/TestLTRQParserExplain.java  |    2 +-
 .../apache/solr/ltr/TestLTRQParserPlugin.java   |    5 +-
 .../org/apache/solr/ltr/TestLTRWithFacet.java   |    2 +-
 .../org/apache/solr/ltr/TestLTRWithSort.java    |    2 +-
 .../org/apache/solr/ltr/TestRerankBase.java     |   63 +-
 .../solr/ltr/TestSelectiveWeightCreation.java   |   13 +-
 .../ltr/feature/TestEdisMaxSolrFeature.java     |    2 +-
 .../solr/ltr/feature/TestExternalFeatures.java  |   39 +-
 .../ltr/feature/TestExternalValueFeatures.java  |   23 +-
 .../solr/ltr/feature/TestFeatureLogging.java    |  115 +-
 .../ltr/feature/TestFeatureLtrScoringModel.java |    2 +-
 .../solr/ltr/feature/TestFeatureStore.java      |    2 +-
 .../ltr/feature/TestFieldLengthFeature.java     |    2 +-
 .../solr/ltr/feature/TestFieldValueFeature.java |   57 +-
 .../solr/ltr/feature/TestFilterSolrFeature.java |    8 +-
 .../ltr/feature/TestNoMatchSolrFeature.java     |   94 +-
 .../ltr/feature/TestOriginalScoreFeature.java   |   11 +-
 .../ltr/feature/TestOriginalScoreScorer.java    |   47 +
 .../solr/ltr/feature/TestRankingFeature.java    |    2 +-
 .../ltr/feature/TestUserTermScoreWithQ.java     |    2 +-
 .../ltr/feature/TestUserTermScorerQuery.java    |    2 +-
 .../ltr/feature/TestUserTermScorereQDF.java     |    2 +-
 .../solr/ltr/feature/TestValueFeature.java      |    2 +-
 .../apache/solr/ltr/model/TestLinearModel.java  |    2 +-
 .../model/TestMultipleAdditiveTreesModel.java   |   55 +-
 .../solr/ltr/store/rest/TestModelManager.java   |   49 +-
 .../store/rest/TestModelManagerPersistence.java |    6 +-
 solr/contrib/morphlines-core/ivy.xml            |    8 +-
 .../solr/solrcloud/conf/solrconfig.xml          |   11 -
 .../solr/SolrMorphlineZkAliasTest.java          |    7 -
 solr/core/ivy.xml                               |    5 +-
 .../solr/cloud/CloudConfigSetService.java       |   24 +-
 .../org/apache/solr/cloud/CloudDescriptor.java  |    2 +-
 .../org/apache/solr/cloud/CreateAliasCmd.java   |   21 +-
 .../apache/solr/cloud/CreateCollectionCmd.java  |  146 +-
 .../org/apache/solr/cloud/ElectionContext.java  |   20 +-
 .../org/apache/solr/cloud/LeaderElector.java    |    9 +-
 .../java/org/apache/solr/cloud/Overseer.java    |    2 +-
 .../OverseerAutoReplicaFailoverThread.java      |    9 +-
 .../apache/solr/cloud/OverseerStatusCmd.java    |    4 +-
 .../org/apache/solr/cloud/ZkController.java     |  128 +-
 .../apache/solr/cloud/ZkSolrResourceLoader.java |    2 +-
 .../apache/solr/cloud/rule/ReplicaAssigner.java |   31 +-
 .../java/org/apache/solr/cloud/rule/Rule.java   |    4 +-
 .../org/apache/solr/core/ConfigSetService.java  |    7 +-
 .../org/apache/solr/core/CoreContainer.java     |  195 +-
 .../org/apache/solr/core/DirectoryFactory.java  |   54 +
 .../apache/solr/core/HdfsDirectoryFactory.java  |   29 +
 .../org/apache/solr/core/JmxMonitoredMap.java   |   63 +-
 .../solr/core/MetricsDirectoryFactory.java      |  505 +++++
 .../java/org/apache/solr/core/NodeConfig.java   |   18 +-
 .../java/org/apache/solr/core/PluginInfo.java   |    4 +-
 .../java/org/apache/solr/core/SolrConfig.java   |    2 +-
 .../src/java/org/apache/solr/core/SolrCore.java |  297 ++-
 .../apache/solr/core/SolrDeletionPolicy.java    |    6 +
 .../org/apache/solr/core/SolrInfoMBean.java     |   11 +-
 .../org/apache/solr/core/SolrXmlConfig.java     |   23 +-
 .../solr/core/StandardDirectoryFactory.java     |   46 +-
 .../apache/solr/handler/CdcrRequestHandler.java |    5 +
 .../org/apache/solr/handler/GraphHandler.java   |    4 +-
 .../org/apache/solr/handler/IndexFetcher.java   |   71 +-
 .../apache/solr/handler/PingRequestHandler.java |    5 +
 .../apache/solr/handler/ReplicationHandler.java |   21 +-
 .../apache/solr/handler/RequestHandlerBase.java |   56 +-
 .../org/apache/solr/handler/RestoreCore.java    |    2 +-
 .../org/apache/solr/handler/SchemaHandler.java  |    5 +
 .../org/apache/solr/handler/SnapShooter.java    |    6 +-
 .../apache/solr/handler/SolrConfigHandler.java  |    2 +-
 .../org/apache/solr/handler/StreamHandler.java  |   24 +-
 .../solr/handler/UpdateRequestHandler.java      |    5 +
 .../solr/handler/admin/CollectionsHandler.java  |    9 +-
 .../solr/handler/admin/ConfigSetsHandler.java   |    6 +-
 .../solr/handler/admin/CoreAdminHandler.java    |   14 +
 .../solr/handler/admin/CoreAdminOperation.java  |    1 +
 .../apache/solr/handler/admin/InfoHandler.java  |    5 +
 .../solr/handler/admin/LoggingHandler.java      |    6 +
 .../solr/handler/admin/LukeRequestHandler.java  |   30 +-
 .../solr/handler/admin/MetricsHandler.java      |  207 ++
 .../solr/handler/admin/PluginInfoHandler.java   |    5 +
 .../handler/admin/PropertiesRequestHandler.java |    5 +
 .../solr/handler/admin/SecurityConfHandler.java |    5 +
 .../admin/SegmentsInfoRequestHandler.java       |    6 +
 .../handler/admin/ShowFileRequestHandler.java   |    4 +
 .../handler/admin/SolrInfoMBeanHandler.java     |    5 +
 .../solr/handler/admin/SystemInfoHandler.java   |   58 +-
 .../solr/handler/admin/ThreadDumpHandler.java   |    5 +
 .../handler/admin/ZookeeperInfoHandler.java     |    5 +
 .../solr/handler/component/DebugComponent.java  |    5 +
 .../solr/handler/component/ExpandComponent.java |   30 +-
 .../solr/handler/component/FacetComponent.java  |   17 +-
 .../handler/component/HighlightComponent.java   |    5 +
 .../handler/component/HttpShardHandler.java     |   48 +-
 .../component/HttpShardHandlerFactory.java      |  129 +-
 .../component/MoreLikeThisComponent.java        |    7 +-
 .../solr/handler/component/QueryComponent.java  |   48 +-
 .../component/QueryElevationComponent.java      |   64 +-
 .../handler/component/RangeFacetProcessor.java  |    3 +-
 .../handler/component/RangeFacetRequest.java    |   31 +-
 .../handler/component/RealTimeGetComponent.java |    5 +
 .../solr/handler/component/ResponseBuilder.java |    1 -
 .../solr/handler/component/SearchHandler.java   |    2 +-
 .../component/ShardFieldSortedHitQueue.java     |    8 +-
 .../solr/handler/component/ShardHandler.java    |    5 +-
 .../handler/component/SpellCheckComponent.java  |    5 +
 .../solr/handler/component/StatsComponent.java  |    6 +
 .../handler/component/StatsValuesFactory.java   |    2 +-
 .../handler/component/TermVectorComponent.java  |    5 +
 .../solr/handler/component/TermsComponent.java  |    5 +
 .../solr/highlight/HighlightingPluginBase.java  |    2 +-
 .../solr/highlight/LuceneRegexFragmenter.java   |  217 ++
 .../apache/solr/highlight/RegexFragmenter.java  |  196 --
 .../solr/highlight/UnifiedSolrHighlighter.java  |  473 +++--
 .../solr/index/SlowCompositeReaderWrapper.java  |    3 -
 .../solr/metrics/OperatingSystemMetricSet.java  |   92 +
 .../solr/metrics/SolrCoreMetricManager.java     |  148 ++
 .../org/apache/solr/metrics/SolrMetricInfo.java |  104 +
 .../apache/solr/metrics/SolrMetricManager.java  |  675 ++++++
 .../apache/solr/metrics/SolrMetricProducer.java |   32 +
 .../apache/solr/metrics/SolrMetricReporter.java |   83 +
 .../org/apache/solr/metrics/package-info.java   |   23 +
 .../metrics/reporters/JmxObjectNameFactory.java |  155 ++
 .../metrics/reporters/SolrGangliaReporter.java  |  144 ++
 .../metrics/reporters/SolrGraphiteReporter.java |  129 ++
 .../solr/metrics/reporters/SolrJmxReporter.java |  179 ++
 .../metrics/reporters/SolrSlf4jReporter.java    |  127 ++
 .../solr/metrics/reporters/package-info.java    |   22 +
 .../apache/solr/parser/SolrQueryParserBase.java |   16 +-
 .../org/apache/solr/request/IntervalFacets.java |    4 +
 .../request/PerSegmentSingleValuedFaceting.java |    9 +-
 .../org/apache/solr/request/SimpleFacets.java   |   78 +-
 .../apache/solr/request/json/RequestUtil.java   |   16 +-
 .../org/apache/solr/response/DocsStreamer.java  |    8 +
 .../solr/response/JSONResponseWriter.java       |   86 +-
 .../java/org/apache/solr/schema/BoolField.java  |    4 +-
 .../org/apache/solr/schema/CollationField.java  |    3 +-
 .../apache/solr/schema/DoublePointField.java    |  186 ++
 .../java/org/apache/solr/schema/EnumField.java  |   20 +-
 .../java/org/apache/solr/schema/FieldType.java  |   29 +-
 .../org/apache/solr/schema/FloatPointField.java |  186 ++
 .../org/apache/solr/schema/IntPointField.java   |  186 ++
 .../org/apache/solr/schema/LongPointField.java  |  185 ++
 .../apache/solr/schema/NumericFieldType.java    |  151 ++
 .../java/org/apache/solr/schema/PointField.java |  233 +++
 .../apache/solr/schema/PreAnalyzedField.java    |   11 +
 .../org/apache/solr/schema/SchemaField.java     |   10 +
 .../org/apache/solr/schema/SchemaManager.java   |   99 +-
 .../org/apache/solr/schema/TrieDateField.java   |    2 +-
 .../org/apache/solr/schema/TrieDoubleField.java |    2 +-
 .../java/org/apache/solr/schema/TrieField.java  |  150 +-
 .../org/apache/solr/schema/TrieFloatField.java  |    2 +-
 .../org/apache/solr/schema/TrieIntField.java    |    2 +-
 .../org/apache/solr/schema/TrieLongField.java   |    2 +-
 .../org/apache/solr/search/CacheConfig.java     |    2 +-
 .../solr/search/CollapsingQParserPlugin.java    |   16 +-
 .../solr/search/ComplexPhraseQParserPlugin.java |   70 +-
 .../org/apache/solr/search/FastLRUCache.java    |    2 +-
 .../java/org/apache/solr/search/Grouping.java   |   14 +-
 .../apache/solr/search/SolrFieldCacheMBean.java |    4 +-
 .../apache/solr/search/SolrIndexSearcher.java   |   45 +-
 .../apache/solr/search/TermQParserPlugin.java   |   10 +-
 .../apache/solr/search/TermsQParserPlugin.java  |   14 +-
 .../facet/FacetFieldProcessorByArrayDV.java     |   49 +-
 .../apache/solr/search/facet/FacetModule.java   |    5 +
 .../apache/solr/search/facet/FacetRange.java    |   28 +-
 .../org/apache/solr/search/facet/FieldUtil.java |  147 ++
 .../org/apache/solr/search/facet/HLLAgg.java    |   13 +-
 .../apache/solr/search/facet/PercentileAgg.java |    7 +-
 .../org/apache/solr/search/facet/SlotAcc.java   |    5 +-
 .../apache/solr/search/facet/UniqueSlotAcc.java |    2 +-
 .../solr/search/grouping/CommandHandler.java    |    4 +-
 .../search/grouping/GroupingSpecification.java  |   18 +-
 .../distributed/command/GroupConverter.java     |    2 +-
 .../command/SearchGroupsFieldCommand.java       |    8 +-
 .../command/TopGroupsFieldCommand.java          |    4 +-
 .../SearchGroupShardResponseProcessor.java      |  160 +-
 .../TopGroupsShardResponseProcessor.java        |  114 +-
 .../org/apache/solr/search/join/GraphQuery.java |    4 +-
 .../apache/solr/search/mlt/CloudMLTQParser.java |   49 +-
 .../solr/search/mlt/SimpleMLTQParser.java       |   30 +-
 .../security/AttributeOnlyServletContext.java   |  291 +++
 .../ConfigurableInternodeAuthHadoopPlugin.java  |   68 +
 .../security/DelegationTokenKerberosFilter.java |    6 +-
 .../apache/solr/security/HadoopAuthFilter.java  |  198 ++
 .../apache/solr/security/HadoopAuthPlugin.java  |  241 +++
 .../apache/solr/security/KerberosFilter.java    |    6 +-
 .../apache/solr/security/KerberosPlugin.java    |  314 +--
 .../solr/security/PKIAuthenticationPlugin.java  |    6 +
 .../solr/security/PermissionNameProvider.java   |    1 +
 ...tContinuesRecorderAuthenticationHandler.java |   71 +
 .../apache/solr/servlet/SolrDispatchFilter.java |   46 +-
 .../apache/solr/servlet/SolrRequestParsers.java |   31 +-
 .../solr/spelling/DirectSolrSpellChecker.java   |   18 +-
 .../DocumentExpressionDictionaryFactory.java    |   18 +-
 .../org/apache/solr/uninverting/FieldCache.java |   16 +-
 .../apache/solr/uninverting/FieldCacheImpl.java |  182 +-
 .../uninverting/FieldCacheSanityChecker.java    |    3 +-
 .../solr/uninverting/UninvertingReader.java     |   23 +-
 .../solr/update/DirectUpdateHandler2.java       |    5 -
 .../java/org/apache/solr/update/PeerSync.java   |   50 +-
 .../org/apache/solr/update/SolrIndexConfig.java |   16 +-
 .../org/apache/solr/update/SolrIndexWriter.java |  144 ++
 .../org/apache/solr/update/UpdateHandler.java   |    5 +
 .../java/org/apache/solr/update/UpdateLog.java  |   74 +-
 .../apache/solr/update/UpdateShardHandler.java  |   81 +-
 .../solr/update/UpdateShardHandlerConfig.java   |   14 +-
 .../AddSchemaFieldsUpdateProcessorFactory.java  |    3 +-
 .../processor/DistributedUpdateProcessor.java   |    8 +-
 .../SkipExistingDocumentsProcessorFactory.java  |  255 +++
 .../src/java/org/apache/solr/util/DOMUtil.java  |    5 +
 .../src/java/org/apache/solr/util/JmxUtil.java  |   78 +
 .../src/java/org/apache/solr/util/SolrCLI.java  |   65 +-
 .../solr/util/SolrFileCleaningTracker.java      |  147 ++
 .../src/java/org/apache/solr/util/TimeOut.java  |   13 +-
 .../stats/HttpClientMetricNameStrategy.java     |   28 +
 .../stats/InstrumentedHttpRequestExecutor.java  |  136 ++
 ...entedPoolingHttpClientConnectionManager.java |   72 +
 .../org/apache/solr/util/stats/MetricUtils.java |  203 ++
 .../org/apache/solr/util/stats/TimerUtils.java  |   58 -
 .../resources/EditableSolrConfigAttributes.json |   16 +-
 .../conf/schema-distrib-interval-faceting.xml   |   14 +-
 .../conf/schema-docValuesFaceting.xml           |   12 +
 .../schema-minimal-with-another-uniqkey.xml     |   23 +
 .../solr/collection1/conf/schema-point.xml      |   92 +
 .../solr/collection1/conf/schema-sorts.xml      |   44 +-
 .../test-files/solr/collection1/conf/schema.xml |   26 +-
 .../solr/collection1/conf/schema11.xml          |   19 +-
 .../solr/collection1/conf/schema12.xml          |   15 +-
 .../solr/collection1/conf/schema_latest.xml     |   21 +-
 ...dd-schema-fields-update-processor-chains.xml |    8 +-
 .../conf/solrconfig-cache-enable-disable.xml    |   80 +
 .../conf/solrconfig-indexmetrics.xml            |   57 +
 .../collection1/conf/solrconfig-schemaless.xml  |   45 +-
 .../solr/collection1/conf/solrconfig_perf.xml   |    1 -
 .../configsets/cloud-hdfs/conf/solrconfig.xml   |    2 +
 .../solrconfig.snippet.randomindexconfig.xml    |   47 -
 .../configsets/doc-expiry/conf/solrconfig.xml   |   15 +-
 .../solr/security/hadoop_kerberos_config.json   |   16 +
 .../hadoop_simple_auth_with_delegation.json     |   29 +
 .../test-files/solr/solr-gangliareporter.xml    |   32 +
 .../test-files/solr/solr-graphitereporter.xml   |   31 +
 .../src/test-files/solr/solr-metricreporter.xml |   57 +
 .../src/test-files/solr/solr-slf4jreporter.xml  |   35 +
 .../apache/solr/TestDistributedGrouping.java    |   10 +-
 .../org/apache/solr/TestDistributedSearch.java  |   46 +-
 .../core/src/test/org/apache/solr/TestJoin.java |    6 +-
 .../org/apache/solr/TestRandomDVFaceting.java   |    8 +
 .../org/apache/solr/TestRandomFaceting.java     |   12 +-
 .../TestReversedWildcardFilterFactory.java      |   56 +-
 .../apache/solr/cloud/AliasIntegrationTest.java |   50 +-
 .../solr/cloud/BasicDistributedZkTest.java      |    2 +-
 .../apache/solr/cloud/CdcrBootstrapTest.java    |   25 +-
 .../org/apache/solr/cloud/ClusterStateTest.java |    8 +
 .../solr/cloud/CollectionsAPISolrJTest.java     |  466 ++---
 .../DeleteLastCustomShardedReplicaTest.java     |  104 +-
 .../org/apache/solr/cloud/DeleteShardTest.java  |  211 +-
 .../solr/cloud/DocValuesNotIndexedTest.java     |   54 +-
 .../apache/solr/cloud/LeaderElectionTest.java   |    2 +
 .../cloud/LeaderFailoverAfterPartitionTest.java |    2 +-
 .../cloud/LeaderFailureAfterFreshStartTest.java |   68 +-
 .../solr/cloud/MissingSegmentRecoveryTest.java  |  123 ++
 ...verseerCollectionConfigSetProcessorTest.java |   37 +-
 .../cloud/OverseerModifyCollectionTest.java     |   92 +-
 .../apache/solr/cloud/OverseerRolesTest.java    |  252 +--
 .../apache/solr/cloud/OverseerStatusTest.java   |   55 +-
 .../solr/cloud/PeerSyncReplicationTest.java     |   70 +-
 .../apache/solr/cloud/RemoteQueryErrorTest.java |   53 +-
 .../cloud/SegmentTerminateEarlyTestState.java   |   12 +-
 .../apache/solr/cloud/TestCloudPivotFacet.java  |    2 +
 .../apache/solr/cloud/TestCloudRecovery.java    |   26 +
 .../solr/cloud/TestDownShardTolerantSearch.java |   40 +-
 .../TestExclusionRuleCollectionAccess.java      |   38 +-
 .../solr/cloud/TestMiniSolrCloudCluster.java    |   50 -
 .../apache/solr/cloud/TestSegmentSorting.java   |  133 ++
 .../TestSolrCloudWithSecureImpersonation.java   |    8 +-
 .../apache/solr/cloud/TestStressLiveNodes.java  |    2 +-
 .../org/apache/solr/cloud/ZkSolrClientTest.java |   54 +
 .../HdfsCollectionsAPIDistributedZkTest.java    |    1 +
 .../apache/solr/cloud/rule/RuleEngineTest.java  |    2 +-
 .../solr/core/BlobRepositoryMockingTest.java    |    9 +-
 .../org/apache/solr/core/CoreSorterTest.java    |    7 +
 .../solr/core/MockShardHandlerFactory.java      |    2 +-
 .../test/org/apache/solr/core/TestConfig.java   |   34 +
 .../solr/handler/TestReplicationHandler.java    |   20 +-
 .../handler/XsltUpdateRequestHandlerTest.java   |    2 +-
 .../admin/CoreMergeIndexesAdminHandlerTest.java |   10 +-
 .../handler/admin/LukeRequestHandlerTest.java   |    8 +-
 .../solr/handler/admin/MBeansHandlerTest.java   |    4 +-
 .../solr/handler/admin/MetricsHandlerTest.java  |  149 ++
 .../admin/SegmentsInfoRequestHandlerTest.java   |   35 +-
 .../solr/handler/admin/StatsReloadRaceTest.java |   82 +-
 .../component/SpellCheckComponentTest.java      |   36 +
 .../handler/component/TestExpandComponent.java  |    8 +-
 .../highlight/TestUnifiedSolrHighlighter.java   |   39 +-
 .../solr/index/hdfs/CheckHdfsIndexTest.java     |    5 -
 .../org/apache/solr/metrics/JvmMetricsTest.java |   68 +
 .../solr/metrics/SolrCoreMetricManagerTest.java |  171 ++
 .../solr/metrics/SolrMetricManagerTest.java     |  273 +++
 .../solr/metrics/SolrMetricReporterTest.java    |   69 +
 .../solr/metrics/SolrMetricTestUtils.java       |  103 +
 .../metrics/SolrMetricsIntegrationTest.java     |  143 ++
 .../metrics/reporters/MockMetricReporter.java   |   80 +
 .../reporters/SolrGangliaReporterTest.java      |   82 +
 .../reporters/SolrGraphiteReporterTest.java     |  116 ++
 .../metrics/reporters/SolrJmxReporterTest.java  |  156 ++
 .../reporters/SolrSlf4jReporterTest.java        |   77 +
 .../apache/solr/request/SimpleFacetsTest.java   |   12 +-
 .../apache/solr/request/TestFacetMethods.java   |   12 +
 .../apache/solr/response/JSONWriterTest.java    |   20 +-
 .../TestSubQueryTransformerDistrib.java         |   55 +-
 .../apache/solr/schema/SortableBinaryField.java |    3 +-
 .../org/apache/solr/schema/TestPointFields.java | 1495 ++++++++++++++
 .../schema/TestSchemalessBufferedUpdates.java   |  160 ++
 .../ApacheLuceneSolrNearQueryBuilder.java       |   50 +
 .../apache/solr/search/GoodbyeQueryBuilder.java |    1 -
 .../apache/solr/search/HandyQueryBuilder.java   |    1 -
 .../apache/solr/search/HelloQueryBuilder.java   |    1 -
 .../solr/search/TestCollapseQParserPlugin.java  |    4 +-
 .../TestComplexPhraseLeadingWildcard.java       |  113 +
 .../solr/search/TestMaxScoreQueryParser.java    |    2 +-
 .../org/apache/solr/search/TestRTGBase.java     |   12 -
 .../search/TestRandomCollapseQParserPlugin.java |    2 +
 .../org/apache/solr/search/TestRealTimeGet.java |    2 +-
 .../org/apache/solr/search/TestRecovery.java    |  179 +-
 .../apache/solr/search/TestRecoveryHdfs.java    |   11 -
 .../apache/solr/search/TestSolrCoreParser.java  |  110 +
 .../solr/search/TestSolrFieldCacheMBean.java    |    2 +
 .../apache/solr/search/TestSolrQueryParser.java |   35 +-
 .../apache/solr/search/TestStressLucene.java    |    2 +-
 .../apache/solr/search/TestStressRecovery.java  |    2 +-
 .../apache/solr/search/TestStressReorder.java   |    5 +-
 .../solr/search/TestStressUserVersions.java     |    4 +-
 .../apache/solr/search/TestStressVersions.java  |    2 +-
 .../solr/search/facet/TestJsonFacets.java       |   73 +-
 .../solr/search/mlt/CloudMLTQParserTest.java    |   23 +-
 .../solr/search/mlt/SimpleMLTQParserTest.java   |   33 +-
 .../PKIAuthenticationIntegrationTest.java       |   40 +-
 .../security/TestPKIAuthenticationPlugin.java   |    8 +
 .../solr/security/hadoop/ImpersonationUtil.java |   73 +
 .../hadoop/ImpersonatorCollectionsHandler.java  |   60 +
 .../hadoop/TestDelegationWithHadoopAuth.java    |  400 ++++
 .../hadoop/TestImpersonationWithHadoopAuth.java |  215 ++
 .../TestSolrCloudWithHadoopAuthPlugin.java      |  138 ++
 .../solr/servlet/SolrRequestParserTest.java     |    3 +
 .../org/apache/solr/update/AutoCommitTest.java  |    2 +-
 .../apache/solr/update/CdcrUpdateLogTest.java   |   17 -
 .../apache/solr/update/SoftAutoCommitTest.java  |    2 +-
 .../apache/solr/update/SolrIndexConfigTest.java |    1 +
 .../solr/update/SolrIndexMetricsTest.java       |   94 +
 ...ipExistingDocumentsProcessorFactoryTest.java |  335 +++
 .../apache/solr/util/stats/MetricUtilsTest.java |   56 +
 .../apache/solr/util/stats/TimerUtilsTest.java  |   58 -
 .../example-DIH/solr/db/conf/solrconfig.xml     |   11 -
 .../example-DIH/solr/mail/conf/solrconfig.xml   |   11 -
 .../example-DIH/solr/rss/conf/solrconfig.xml    |   11 -
 .../example-DIH/solr/solr/conf/solrconfig.xml   |   11 -
 .../example-DIH/solr/tika/conf/solrconfig.xml   |   11 -
 solr/example/files/conf/solrconfig.xml          |   16 +-
 solr/licenses/byte-buddy-1.6.2.jar.sha1         |    1 +
 solr/licenses/byte-buddy-LICENSE-ASL.txt        |  202 ++
 solr/licenses/byte-buddy-NOTICE.txt             |    4 +
 solr/licenses/gmetric4j-1.0.7.jar.sha1          |    1 +
 solr/licenses/gmetric4j-LICENSE-BSD.txt         |   31 +
 solr/licenses/gmetric4j-NOTICE.txt              |    0
 solr/licenses/metrics-ganglia-3.1.2.jar.sha1    |    1 +
 solr/licenses/metrics-ganglia-LICENSE-ASL.txt   |  203 ++
 solr/licenses/metrics-ganglia-NOTICE.txt        |   12 +
 solr/licenses/metrics-graphite-3.1.2.jar.sha1   |    1 +
 solr/licenses/metrics-graphite-LICENSE-ASL.txt  |  203 ++
 solr/licenses/metrics-graphite-NOTICE.txt       |   12 +
 solr/licenses/metrics-jetty-LICENSE-ASL.txt     |  203 ++
 solr/licenses/metrics-jetty-NOTICE.txt          |   12 +
 solr/licenses/metrics-jetty9-3.1.2.jar.sha1     |    1 +
 solr/licenses/metrics-json-LICENSE-ASL.txt      |  203 ++
 solr/licenses/metrics-json-NOTICE.txt           |   12 +
 solr/licenses/metrics-jvm-3.1.2.jar.sha1        |    1 +
 solr/licenses/metrics-jvm-LICENSE-ASL.txt       |  203 ++
 solr/licenses/metrics-jvm-NOTICE.txt            |   12 +
 solr/licenses/metrics-servlets-LICENSE-ASL.txt  |  203 ++
 solr/licenses/metrics-servlets-NOTICE.txt       |   12 +
 solr/licenses/mockito-core-1.9.5.jar.sha1       |    1 -
 solr/licenses/mockito-core-2.6.2.jar.sha1       |    1 +
 solr/licenses/objenesis-1.2.jar.sha1            |    1 -
 solr/licenses/objenesis-2.5.jar.sha1            |    1 +
 solr/server/build.xml                           |    4 +-
 solr/server/etc/jetty.xml                       |   20 +-
 solr/server/ivy.xml                             |   14 +-
 solr/server/scripts/cloud-scripts/zkcli.bat     |    2 +-
 solr/server/scripts/cloud-scripts/zkcli.sh      |    2 +-
 .../basic_configs/conf/solrconfig.xml           |   17 +-
 .../conf/solrconfig.xml                         |   16 +-
 .../conf/solrconfig.xml                         |   65 +-
 .../impl/DelegationTokenHttpSolrClient.java     |   34 +-
 .../solrj/impl/HttpClientBuilderFactory.java    |   41 +
 .../solr/client/solrj/impl/HttpClientUtil.java  |   35 +-
 .../solr/client/solrj/impl/HttpSolrClient.java  |   56 +-
 .../solrj/impl/Krb5HttpClientBuilder.java       |   10 +-
 .../solr/client/solrj/io/ops/AndOperation.java  |  101 +
 .../client/solrj/io/ops/BooleanOperation.java   |   26 +
 .../client/solrj/io/ops/EqualsOperation.java    |   70 +
 .../io/ops/GreaterThanEqualToOperation.java     |   70 +
 .../solrj/io/ops/GreaterThanOperation.java      |   70 +
 .../solr/client/solrj/io/ops/LeafOperation.java |   67 +
 .../solrj/io/ops/LessThanEqualToOperation.java  |   70 +
 .../client/solrj/io/ops/LessThanOperation.java  |   70 +
 .../solr/client/solrj/io/ops/NotOperation.java  |   87 +
 .../solr/client/solrj/io/ops/OrOperation.java   |   71 +
 .../client/solrj/io/stream/CloudSolrStream.java |    4 +
 .../client/solrj/io/stream/HavingStream.java    |  173 ++
 .../solr/client/solrj/io/stream/NullStream.java |  155 ++
 .../client/solrj/io/stream/PriorityStream.java  |  161 ++
 .../solrj/io/stream/expr/StreamFactory.java     |    2 +
 .../solrj/request/CollectionAdminRequest.java   |    2 +
 .../apache/solr/common/cloud/SolrZkClient.java  |   29 +-
 .../apache/solr/common/cloud/ZkCmdExecutor.java |   15 +-
 .../apache/solr/common/params/CommonParams.java |    4 +-
 .../solr/common/params/HighlightParams.java     |    4 +-
 .../solr/client/solrj/LargeVolumeTestBase.java  |    3 +-
 .../solr/client/solrj/SolrExampleTestBase.java  |    1 -
 .../apache/solr/client/solrj/SolrQueryTest.java |    2 +-
 .../solrj/impl/BasicHttpSolrClientTest.java     |   31 +
 .../solrj/impl/CloudSolrClientCacheTest.java    |    7 +
 .../client/solrj/impl/CloudSolrClientTest.java  |    8 +-
 .../solrj/io/graph/GraphExpressionTest.java     |    3 +-
 .../solr/client/solrj/io/sql/JdbcTest.java      |    7 +-
 .../client/solrj/io/stream/JDBCStreamTest.java  |    9 +-
 .../solrj/io/stream/StreamExpressionTest.java   |  467 ++++-
 .../client/solrj/io/stream/StreamingTest.java   |   10 +-
 .../solr/common/params/CommonParamsTest.java    |    2 +
 .../java/org/apache/solr/SolrJettyTestBase.java |    2 -
 .../java/org/apache/solr/SolrTestCaseHS.java    |    4 +
 .../java/org/apache/solr/SolrTestCaseJ4.java    |   68 +-
 .../solr/cloud/AbstractDistribZkTestBase.java   |   30 +-
 .../java/org/apache/solr/cloud/ChaosMonkey.java |    2 -
 .../apache/solr/cloud/MiniSolrCloudCluster.java |   29 +-
 .../apache/solr/cloud/SolrCloudTestCase.java    |   30 +-
 .../component/TrackingShardHandlerFactory.java  |    2 +-
 .../java/org/apache/solr/util/TestHarness.java  |   12 +-
 solr/webapp/web/js/angular/services.js          |   46 +-
 885 files changed, 43196 insertions(+), 13840 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 5268bb6,023e7b1..23df22b
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@@ -48,7 -55,8 +55,9 @@@ import org.apache.solr.cloud.Overseer
  import org.apache.solr.cloud.ZkController;
  import org.apache.solr.common.SolrException;
  import org.apache.solr.common.SolrException.ErrorCode;
+ import org.apache.solr.common.cloud.Replica;
+ import org.apache.solr.common.cloud.Replica.State;
 +import org.apache.solr.common.cloud.ZkStateReader;
  import org.apache.solr.common.util.ExecutorUtil;
  import org.apache.solr.common.util.IOUtils;
  import org.apache.solr.common.util.Utils;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/CdcrRequestHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
index 8bf00ea,04b930a..8230bf5
--- a/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/PingRequestHandler.java
@@@ -330,7 -330,7 +330,12 @@@ public class PingRequestHandler extend
    }
  
    @Override
 +  public Boolean registerV2() {
 +    return Boolean.TRUE;
 +  }
++
++  @Override
+   public Category getCategory() {
+     return Category.ADMIN;
+   }
  }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
index 4ec1985,b70c096..3c6f5fa
--- a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
+++ b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
@@@ -18,10 -18,9 +18,11 @@@ package org.apache.solr.handler
  
  import java.lang.invoke.MethodHandles;
  import java.net.URL;
 +import java.util.Collection;
- import java.util.concurrent.atomic.LongAdder;
  
 +import com.google.common.collect.ImmutableList;
+ import com.codahale.metrics.Counter;
+ import com.codahale.metrics.Meter;
  import com.codahale.metrics.Timer;
  import org.apache.solr.common.SolrException;
  import org.apache.solr.common.params.SolrParams;
@@@ -36,10 -37,7 +39,10 @@@ import org.apache.solr.request.SolrRequ
  import org.apache.solr.response.SolrQueryResponse;
  import org.apache.solr.search.SyntaxError;
  import org.apache.solr.util.SolrPluginUtils;
- import org.apache.solr.util.stats.TimerUtils;
 +import org.apache.solr.api.Api;
 +import org.apache.solr.api.ApiBag;
 +import org.apache.solr.api.ApiSupport;
+ import org.apache.solr.util.stats.MetricUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
@@@ -48,7 -46,7 +51,7 @@@ import static org.apache.solr.core.Requ
  /**
   *
   */
- public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfoMBean, NestedRequestHandler, ApiSupport {
 -public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfoMBean, SolrMetricProducer, NestedRequestHandler {
++public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfoMBean, SolrMetricProducer, NestedRequestHandler, ApiSupport {
  
    protected NamedList initArgs = null;
    protected SolrParams defaults;
@@@ -273,19 -282,14 +287,19 @@@
    public NamedList<Object> getStatistics() {
      NamedList<Object> lst = new SimpleOrderedMap<>();
      lst.add("handlerStart",handlerStart);
-     lst.add("requests", numRequests.longValue());
-     lst.add("errors", numServerErrors.longValue() + numClientErrors.longValue());
-     lst.add("serverErrors", numServerErrors.longValue());
-     lst.add("clientErrors", numClientErrors.longValue());
-     lst.add("timeouts", numTimeouts.longValue());
-     TimerUtils.addMetrics(lst, requestTimes);
+     lst.add("requests", requests.getCount());
+     lst.add("errors", numErrors.getCount());
+     lst.add("serverErrors", numServerErrors.getCount());
+     lst.add("clientErrors", numClientErrors.getCount());
+     lst.add("timeouts", numTimeouts.getCount());
+     MetricUtils.addMetrics(lst, requestTimes);
      return lst;
    }
 +
 +  @Override
 +  public Collection<Api> getApis() {
 +    return ImmutableList.of(new ApiBag.ReqHandlerToApi(this, ApiBag.constructSpec(pluginInfo)));
 +  }
  }
  
  

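For context on the RequestHandlerBase hunk above: the handler statistics move from plain LongAdder counters (formatted via TimerUtils) to Dropwizard Metrics Counter/Meter/Timer objects read via MetricUtils. A minimal sketch of that counter/timer pattern, assuming the Dropwizard Metrics 3.x API; the registry, class, and metric names here are illustrative, not taken from the commit:

    import com.codahale.metrics.Counter;
    import com.codahale.metrics.MetricRegistry;
    import com.codahale.metrics.Timer;

    public class HandlerStatsSketch {
      // Solr shares a registry per core/node; a fresh one here for illustration.
      private final MetricRegistry registry = new MetricRegistry();
      private final Counter requests = registry.counter("requests");
      private final Timer requestTimes = registry.timer("requestTimes");

      public void handleRequest(Runnable body) {
        requests.inc();                                  // counts every request
        try (Timer.Context ignored = requestTimes.time()) {
          body.run();                                    // latency recorded when the context closes
        }
      }

      public long requestCount() {
        return requests.getCount();                      // what getStatistics() reports
      }
    }
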
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/SchemaHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
index 28092f2,f3a8dd2..5d6f02c
--- a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
@@@ -168,8 -160,13 +168,12 @@@ public class ConfigSetsHandler extends 
    public String getDescription() {
      return "Manage SolrCloud ConfigSets";
    }
 -
+   @Override
+   public Category getCategory() {
+     return Category.ADMIN;
+   }
  
-   public enum ConfigSetOperation {
+   enum ConfigSetOperation {
      CREATE_OP(CREATE) {
        @Override
        Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetsHandler h) throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
index 3e9b447,a415d8a..af782f8
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
@@@ -48,7 -48,7 +49,8 @@@ import org.apache.solr.response.SolrQue
  import org.apache.solr.security.AuthorizationContext;
  import org.apache.solr.security.PermissionNameProvider;
  import org.apache.solr.util.DefaultSolrThreadFactory;
 +import org.apache.solr.api.Api;
+ import org.apache.solr.util.stats.MetricUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  import org.slf4j.MDC;
@@@ -117,10 -114,12 +119,17 @@@ public class CoreAdminHandler extends R
    }
  
    @Override
 +  public Boolean registerV2() {
 +    return Boolean.TRUE;
 +  }
 +
++  @Override
+   public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) {
+     super.initializeMetrics(manager, registryName, scope);
+     parallelExecutor = MetricUtils.instrumentedExecutorService(parallelExecutor, manager.registry(registryName),
+         SolrMetricManager.mkName("parallelCoreAdminExecutor", getCategory().name(),scope, "threadPool"));
+   }
+ 
    /**
     * The instance of CoreContainer this handler handles. This should be the CoreContainer instance that created this
     * handler.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
index 326eb9a,8fdac21..e2dc337
--- a/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/InfoHandler.java
@@@ -104,9 -103,13 +104,14 @@@ public class InfoHandler extends Reques
      return "System Information";
    }
  
+   @Override
+   public Category getCategory() {
+     return Category.ADMIN;
+   }
+ 
    protected PropertiesRequestHandler getPropertiesHandler() {
 -    return propertiesHandler;
 +    return (PropertiesRequestHandler) handlers.get("properties");
 +
    }
  
    protected ThreadDumpHandler getThreadDumpHandler() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/handler/admin/SecurityConfHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/schema/SchemaManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
index ed81466,a411bb3..5c7783f
--- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
@@@ -62,7 -73,7 +73,8 @@@ import org.apache.solr.metrics.SolrMetr
  import org.apache.solr.request.SolrRequestInfo;
  import org.apache.solr.security.AuthenticationPlugin;
  import org.apache.solr.security.PKIAuthenticationPlugin;
+ import org.apache.solr.util.SolrFileCleaningTracker;
 +import org.apache.solr.api.V2HttpCall;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java
index 94a8e7d,968320e..737a821
--- a/solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java
@@@ -43,9 -42,9 +43,10 @@@ import java.util.Map
  import org.apache.commons.fileupload.FileItem;
  import org.apache.commons.fileupload.disk.DiskFileItemFactory;
  import org.apache.commons.fileupload.servlet.ServletFileUpload;
+ import org.apache.commons.io.FileCleaningTracker;
  import org.apache.commons.io.input.CloseShieldInputStream;
  import org.apache.lucene.util.IOUtils;
 +import org.apache.solr.api.V2HttpCall;
  import org.apache.solr.common.SolrException;
  import org.apache.solr.common.SolrException.ErrorCode;
  import org.apache.solr.common.params.CommonParams;
@@@ -59,8 -58,8 +60,9 @@@ import org.apache.solr.core.SolrConfig
  import org.apache.solr.core.SolrCore;
  import org.apache.solr.request.SolrQueryRequest;
  import org.apache.solr.request.SolrQueryRequestBase;
 +import org.apache.solr.util.CommandOperation;
  import org.apache.solr.util.RTimerTree;
+ import org.apache.solr.util.SolrFileCleaningTracker;
  
  import static org.apache.solr.common.params.CommonParams.PATH;
  

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/core/src/test/org/apache/solr/servlet/SolrRequestParserTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6e77729/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
----------------------------------------------------------------------


[12/50] [abbrv] lucene-solr:apiv2: LUCENE-7619: add WordDelimiterGraphFilter (replacing WordDelimiterFilter) to produce a correct token stream graph when splitting words

Posted by no...@apache.org.
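
A quick sketch of how the new filter is meant to be wired up at index time (illustrative only, not part of the commit; it assumes the WordDelimiterGraphFilter flag constants and the (TokenStream, int flags, CharArraySet protectedWords) constructor from the miscellaneous analysis module):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.FlattenGraphFilter;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;

    public class WordDelimiterGraphSketch {
      // Index-time chain: the index cannot store posLength, so the graph
      // produced by WordDelimiterGraphFilter must be flattened before
      // indexing. At query time the FlattenGraphFilter step would be
      // omitted so the query parser can see the full graph.
      static Analyzer indexTimeAnalyzer() {
        return new Analyzer() {
          @Override
          protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new WhitespaceTokenizer();
            TokenStream ts = new WordDelimiterGraphFilter(tokenizer,
                WordDelimiterGraphFilter.GENERATE_WORD_PARTS
                    | WordDelimiterGraphFilter.CATENATE_WORDS,
                null); // no protected words
            ts = new FlattenGraphFilter(ts);
            return new TokenStreamComponents(tokenizer, ts);
          }
        };
      }
    }

With these flags, analyzing text like "wi-fi" should yield the split parts plus the catenated form on a flat stream, roughly wi/wifi fi, though exact output depends on the flags chosen.
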
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java
new file mode 100644
index 0000000..c69bcca
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java
@@ -0,0 +1,284 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis.core;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+
+public class TestFlattenGraphFilter extends BaseTokenStreamTestCase {
+  
+  private static Token token(String term, int posInc, int posLength, int startOffset, int endOffset) {
+    final Token t = new Token(term, startOffset, endOffset);
+    t.setPositionIncrement(posInc);
+    t.setPositionLength(posLength);
+    return t;
+  }
+
+  public void testSimpleMock() throws Exception {
+    Analyzer a = new Analyzer() {
+        @Override
+        protected TokenStreamComponents createComponents(String fieldName) {
+          Tokenizer tokenizer = new MockTokenizer(MockTokenizer.SIMPLE, true);
+          TokenStream ts = new FlattenGraphFilter(tokenizer);
+          return new TokenStreamComponents(tokenizer, ts);
+        }
+      };
+
+    assertAnalyzesTo(a, "wtf happened",
+                     new String[] {"wtf", "happened"},
+                     new int[]    {    0,          4},
+                     new int[]    {    3,         12},
+                     null,
+                     new int[]    {    1,          1},
+                     new int[]    {    1,          1},
+                     true);
+  }
+
+  // Make sure graph is unchanged if it's already flat
+  public void testAlreadyFlatten() throws Exception {
+    TokenStream in = new CannedTokenStream(0, 12, new Token[] {
+        token("wtf", 1, 1, 0, 3),
+        token("what", 0, 1, 0, 3),
+        token("wow", 0, 1, 0, 3),
+        token("the", 1, 1, 0, 3),
+        token("that's", 0, 1, 0, 3),
+        token("fudge", 1, 1, 0, 3),
+        token("funny", 0, 1, 0, 3),
+        token("happened", 1, 1, 4, 12)
+      });
+
+    TokenStream out = new FlattenGraphFilter(in);
+
+    // The input is already flat, so the output is unchanged: wtf/what/wow the/that's fudge/funny happened:
+    assertTokenStreamContents(out,
+                              new String[] {"wtf", "what", "wow", "the", "that's", "fudge", "funny", "happened"},
+                              new int[] {0, 0, 0, 0, 0, 0, 0, 4},
+                              new int[] {3, 3, 3, 3, 3, 3, 3, 12},
+                              new int[] {1, 0, 0, 1, 0, 1, 0, 1},
+                              new int[] {1, 1, 1, 1, 1, 1, 1, 1},
+                              12);
+  }
+
+  public void testWTF1() throws Exception {
+
+    // "wow that's funny" and "what the fudge" are separate side paths, in parallel with "wtf", on input:
+    TokenStream in = new CannedTokenStream(0, 12, new Token[] {
+        token("wtf", 1, 5, 0, 3),
+        token("what", 0, 1, 0, 3),
+        token("wow", 0, 3, 0, 3),
+        token("the", 1, 1, 0, 3),
+        token("fudge", 1, 3, 0, 3),
+        token("that's", 1, 1, 0, 3),
+        token("funny", 1, 1, 0, 3),
+        token("happened", 1, 1, 4, 12)
+      });
+
+
+    TokenStream out = new FlattenGraphFilter(in);
+
+    // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
+    assertTokenStreamContents(out,
+                              new String[] {"wtf", "what", "wow", "the", "that's", "fudge", "funny", "happened"},
+                              new int[] {0, 0, 0, 0, 0, 0, 0, 4},
+                              new int[] {3, 3, 3, 3, 3, 3, 3, 12},
+                              new int[] {1, 0, 0, 1, 0, 1, 0, 1},
+                              new int[] {3, 1, 1, 1, 1, 1, 1, 1},
+                              12);
+    
+  }
+
+  /** Same as testWTF1 except the "wtf" token comes out later */
+  public void testWTF2() throws Exception {
+
+    // "wow that's funny" and "what the fudge" are separate side paths, in parallel with "wtf", on input:
+    TokenStream in = new CannedTokenStream(0, 12, new Token[] {
+        token("what", 1, 1, 0, 3),
+        token("wow", 0, 3, 0, 3),
+        token("wtf", 0, 5, 0, 3),
+        token("the", 1, 1, 0, 3),
+        token("fudge", 1, 3, 0, 3),
+        token("that's", 1, 1, 0, 3),
+        token("funny", 1, 1, 0, 3),
+        token("happened", 1, 1, 4, 12)
+      });
+
+
+    TokenStream out = new FlattenGraphFilter(in);
+
+    // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
+    assertTokenStreamContents(out,
+                              new String[] {"what", "wow", "wtf", "the", "that's", "fudge", "funny", "happened"},
+                              new int[] {0, 0, 0, 0, 0, 0, 0, 4},
+                              new int[] {3, 3, 3, 3, 3, 3, 3, 12},
+                              new int[] {1, 0, 0, 1, 0, 1, 0, 1},
+                              new int[] {1, 1, 3, 1, 1, 1, 1, 1},
+                              12);
+    
+  }
+
+  public void testNonGreedySynonyms() throws Exception {
+    // This is just "hypothetical" for Lucene today, because SynFilter is
+    // greedy: when two syn rules match on overlapping tokens, only one
+    // (greedily) wins.  This test pretends all syn matches could match:
+
+    TokenStream in = new CannedTokenStream(0, 20, new Token[] {
+        token("wizard", 1, 1, 0, 6),
+        token("wizard_of_oz", 0, 3, 0, 12),
+        token("of", 1, 1, 7, 9),
+        token("oz", 1, 1, 10, 12),
+        token("oz_screams", 0, 2, 10, 20),
+        token("screams", 1, 1, 13, 20),
+      });
+
+
+    TokenStream out = new FlattenGraphFilter(in);
+
+    // ... on output the stream stays wizard/wizard_of_oz of oz/oz_screams screams:
+    assertTokenStreamContents(out,
+                              new String[] {"wizard", "wizard_of_oz", "of", "oz", "oz_screams", "screams"},
+                              new int[] {0, 0, 7, 10, 10, 13},
+                              new int[] {6, 12, 9, 12, 20, 20},
+                              new int[] {1, 0, 1, 1, 0, 1},
+                              new int[] {1, 3, 1, 1, 2, 1},
+                              20);
+    
+  }
+
+  public void testNonGraph() throws Exception {
+    TokenStream in = new CannedTokenStream(0, 22, new Token[] {
+        token("hello", 1, 1, 0, 5),
+        token("pseudo", 1, 1, 6, 12),
+        token("world", 1, 1, 13, 18),
+        token("fun", 1, 1, 19, 22),
+      });
+
+
+    TokenStream out = new FlattenGraphFilter(in);
+
+    // ... a linear (non-graph) stream passes through unchanged:
+    assertTokenStreamContents(out,
+                              new String[] {"hello", "pseudo", "world", "fun"},
+                              new int[] {0, 6, 13, 19},
+                              new int[] {5, 12, 18, 22},
+                              new int[] {1, 1, 1, 1},
+                              new int[] {1, 1, 1, 1},
+                              22);
+  }
+
+  public void testSimpleHole() throws Exception {
+    TokenStream in = new CannedTokenStream(0, 13, new Token[] {
+        token("hello", 1, 1, 0, 5),
+        token("hole", 2, 1, 6, 10),
+        token("fun", 1, 1, 11, 13),
+      });
+
+
+    TokenStream out = new FlattenGraphFilter(in);
+
+    // ... the hole before "hole" (posInc=2) is preserved on output:
+    assertTokenStreamContents(out,
+                              new String[] {"hello", "hole", "fun"},
+                              new int[] {0, 6, 11},
+                              new int[] {5, 10, 13},
+                              new int[] {1, 2, 1},
+                              new int[] {1, 1, 1},
+                              13);
+  }
+
+  public void testHoleUnderSyn() throws Exception {
+    // Tests a StopFilter after SynFilter where a stopword in a syn is removed
+    //
+    //   wizard of oz -> woz syn, but then "of" becomes a hole
+
+    TokenStream in = new CannedTokenStream(0, 12, new Token[] {
+        token("wizard", 1, 1, 0, 6),
+        token("woz", 0, 3, 0, 12),
+        token("oz", 2, 1, 10, 12),
+      });
+
+
+    TokenStream out = new FlattenGraphFilter(in);
+
+    assertTokenStreamContents(out,
+                              new String[] {"wizard", "woz", "oz"},
+                              new int[] {0, 0, 10},
+                              new int[] {6, 12, 12},
+                              new int[] {1, 0, 2},
+                              new int[] {1, 3, 1},
+                              12);
+  }
+
+  public void testStrangelyNumberedNodes() throws Exception {
+
+    // Uses only nodes 0, 2, 3, i.e. 1 is just never used (it is not a hole!!)
+    TokenStream in = new CannedTokenStream(0, 27, new Token[] {
+        token("dog", 1, 3, 0, 5),
+        token("puppy", 0, 3, 0, 5),
+        token("flies", 3, 1, 6, 11),
+      });
+
+    TokenStream out = new FlattenGraphFilter(in);
+
+    assertTokenStreamContents(out,
+                              new String[] {"dog", "puppy", "flies"},
+                              new int[] {0, 0, 6},
+                              new int[] {5, 5, 11},
+                              new int[] {1, 0, 1},
+                              new int[] {1, 1, 1},
+                              27);
+  }
+
+  public void testTwoLongParallelPaths() throws Exception {
+
+    // "a a a a a a" in parallel with "b b b b b b"
+    TokenStream in = new CannedTokenStream(0, 11, new Token[] {
+        token("a", 1, 1, 0, 1),
+        token("b", 0, 2, 0, 1),
+        token("a", 1, 2, 2, 3),
+        token("b", 1, 2, 2, 3),
+        token("a", 1, 2, 4, 5),
+        token("b", 1, 2, 4, 5),
+        token("a", 1, 2, 6, 7),
+        token("b", 1, 2, 6, 7),
+        token("a", 1, 2, 8, 9),
+        token("b", 1, 2, 8, 9),
+        token("a", 1, 2, 10, 11),
+        token("b", 1, 2, 10, 11),
+      });
+
+
+    TokenStream out = new FlattenGraphFilter(in);
+    
+    // ... becomes flattened to a single path with overlapping a/b token between each node:
+    assertTokenStreamContents(out,
+                              new String[] {"a", "b", "a", "b", "a", "b", "a", "b", "a", "b", "a", "b"},
+                              new int[] {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10},
+                              new int[] {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11},
+                              new int[] {1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0},
+                              new int[] {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+                              11);
+    
+  }
+
+  // NOTE: TestSynonymGraphFilter's testRandomSyns also tests FlattenGraphFilter
+}

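A note for readers of the tests above: a Lucene TokenStream encodes a graph
through two attributes -- the position increment (how many position nodes to
advance before a token) and the position length (how many nodes the token
spans) -- so each token is an edge from node pos to node pos + posLength.
The standalone sketch below is not part of this patch; it assumes the
lucene-test-framework jar (for CannedTokenStream/Token) and the class name
DumpGraph is made up. It just prints the edges of a small graph shaped like
the ones asserted above:

import java.io.IOException;

import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;

public class DumpGraph {
  public static void main(String[] args) throws IOException {
    // "wtf" spans 3 positions, in parallel with "what the fudge":
    TokenStream ts = new CannedTokenStream(
        token("wtf", 1, 3, 0, 3),
        token("what", 0, 1, 0, 3),
        token("the", 1, 1, 0, 3),
        token("fudge", 1, 1, 0, 3));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    PositionIncrementAttribute posInc = ts.addAttribute(PositionIncrementAttribute.class);
    PositionLengthAttribute posLen = ts.addAttribute(PositionLengthAttribute.class);
    ts.reset();
    int pos = -1;
    while (ts.incrementToken()) {
      pos += posInc.getPositionIncrement();
      // each token is an edge pos -> pos + posLength:
      System.out.println(term + ": " + pos + " -> " + (pos + posLen.getPositionLength()));
    }
    ts.end();
    ts.close();
  }

  private static Token token(String term, int posInc, int posLength, int startOffset, int endOffset) {
    Token t = new Token(term, startOffset, endOffset);
    t.setPositionIncrement(posInc);
    t.setPositionLength(posLength);
    return t;
  }
}

This prints wtf: 0 -> 3, what: 0 -> 1, the: 1 -> 2, fudge: 2 -> 3 -- exactly
the kind of side path that FlattenGraphFilter squashes back onto one chain.
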
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
index 7f35298..7f0481f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
@@ -446,4 +446,73 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
       a.close();
     }
   }
+
+  /*
+  public void testToDot() throws Exception {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE | PRESERVE_ORIGINAL | CATENATE_WORDS | CATENATE_NUMBERS;
+    String text = "PowerSystem2000-5-Shot's";
+    WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token(text, 0, text.length())), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    //StringWriter sw = new StringWriter();
+    // TokenStreamToDot toDot = new TokenStreamToDot(text, wdf, new PrintWriter(sw));
+    PrintWriter pw = new PrintWriter("/x/tmp/before.dot");
+    TokenStreamToDot toDot = new TokenStreamToDot(text, wdf, pw);
+    toDot.toDot();
+    pw.close();
+    System.out.println("TEST DONE");
+    //System.out.println("DOT:\n" + sw.toString());
+  }
+  */
+
+  public void testOnlyNumbers() throws Exception {
+    int flags = GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS;
+    Analyzer a = new Analyzer() {
+        
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, null));
+      }
+    };
+
+    assertAnalyzesTo(a, "7-586", 
+                     new String[] {},
+                     new int[] {},
+                     new int[] {},
+                     null,
+                     new int[] {},
+                     null,
+                     false);
+  }
+
+  public void testNumberPunct() throws Exception {
+    int flags = GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS;
+    Analyzer a = new Analyzer() {
+        
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, null));
+      }
+    };
+
+    assertAnalyzesTo(a, "6-", 
+                     new String[] {"6"},
+                     new int[] {0},
+                     new int[] {1},
+                     null,
+                     new int[] {1},
+                     null,
+                     false);
+  }
+
+  private Analyzer getAnalyzer(final int flags) {
+    return new Analyzer() {
+        
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, null));
+      }
+    };
+  }
 }

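One more note on the flag ints threaded through these constructors: they are
plain bit flags defined on WordDelimiterFilter, composed with | and queried
with &. A minimal sketch (not from this patch; the class name FlagsDemo is
made up), mirroring the testOnlyNumbers configuration:

import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.GENERATE_NUMBER_PARTS;
import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.GENERATE_WORD_PARTS;
import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.SPLIT_ON_NUMERICS;

public class FlagsDemo {
  public static void main(String[] args) {
    // Numbers get split off, but with GENERATE_NUMBER_PARTS absent they are
    // never emitted -- which is why "7-586" analyzes to zero tokens above.
    int flags = GENERATE_WORD_PARTS | SPLIT_ON_NUMERICS;
    System.out.println("emits number parts: " + ((flags & GENERATE_NUMBER_PARTS) != 0)); // false
  }
}
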
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
new file mode 100644
index 0000000..2daf886
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
@@ -0,0 +1,897 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.miscellaneous;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.analysis.core.KeywordTokenizer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.TestUtil;
+
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.*;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE;
+
+/**
+ * New WordDelimiterGraphFilter tests... most of the tests are in ConvertedLegacyTest
+ * TODO: should explicitly test things like protWords and not rely on
+ * the factory tests in Solr.
+ */
+public class TestWordDelimiterGraphFilter extends BaseTokenStreamTestCase {
+
+  public void testOffsets() throws IOException {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
+    // test that subwords and catenated subwords have
+    // the correct offsets.
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(new CannedTokenStream(new Token("foo-bar", 5, 12)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+
+    assertTokenStreamContents(wdf, 
+                              new String[] { "foobar", "foo", "bar" },
+                              new int[] { 5, 5, 9 }, 
+                              new int[] { 12, 8, 12 });
+
+    // with illegal offsets:
+    wdf = new WordDelimiterGraphFilter(new CannedTokenStream(new Token("foo-bar", 5, 6)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    assertTokenStreamContents(wdf,
+                              new String[] { "foobar", "foo", "bar" },
+                              new int[] { 5, 5, 5 },
+                              new int[] { 6, 6, 6 });
+  }
+  
+  public void testOffsetChange() throws Exception {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(new CannedTokenStream(new Token("übelkeit)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    
+    assertTokenStreamContents(wdf,
+        new String[] { "�belkeit" },
+        new int[] { 7 },
+        new int[] { 15 });
+  }
+  
+  public void testOffsetChange2() throws Exception {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(new CannedTokenStream(new Token("(übelkeit", 7, 17)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    // illegal offsets:
+    assertTokenStreamContents(wdf,
+                              new String[] { "�belkeit" },
+                              new int[] { 7 },
+                              new int[] { 17 });
+  }
+  
+  public void testOffsetChange3() throws Exception {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(new CannedTokenStream(new Token("(übelkeit", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    assertTokenStreamContents(wdf,
+                              new String[] { "�belkeit" },
+                              new int[] { 8 },
+                              new int[] { 16 });
+  }
+  
+  public void testOffsetChange4() throws Exception {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(new CannedTokenStream(new Token("(foo,bar)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    
+    assertTokenStreamContents(wdf,
+        new String[] { "foobar", "foo", "bar"},
+        new int[] { 8, 8, 12 },
+        new int[] { 15, 11, 15 });
+  }
+
+  public void doSplit(final String input, String... output) throws Exception {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(keywordMockTokenizer(input),
+        WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, flags, null);
+    
+    assertTokenStreamContents(wdf, output);
+  }
+
+  public void testSplits() throws Exception {
+    doSplit("basic-split","basic","split");
+    doSplit("camelCase","camel","Case");
+
+    // non-space marking symbol shouldn't cause split
+    // this is an example in Thai    
+    doSplit("\u0e1a\u0e49\u0e32\u0e19","\u0e1a\u0e49\u0e32\u0e19");
+    // possessive followed by delimiter
+    doSplit("test's'", "test");
+
+    // some russian upper and lowercase
+    doSplit("\u0420\u043e\u0431\u0435\u0440\u0442", "\u0420\u043e\u0431\u0435\u0440\u0442");
+    // now cause a split (russian camelCase)
+    doSplit("\u0420\u043e\u0431\u0415\u0440\u0442", "\u0420\u043e\u0431", "\u0415\u0440\u0442");
+
+    // a composed titlecase character, don't split
+    doSplit("a\u01c5ungla", "a\u01c5ungla");
+    
+    // a modifier letter, don't split
+    doSplit("\u0633\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0644\u0627\u0645", "\u0633\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0640\u0644\u0627\u0645");
+    
+    // enclosing mark, don't split
+    doSplit("test\u20dd", "test\u20dd");
+    
+    // combining spacing mark (the virama), don't split
+    doSplit("\u0939\u093f\u0928\u094d\u0926\u0940", "\u0939\u093f\u0928\u094d\u0926\u0940");
+    
+    // don't split non-ascii digits
+    doSplit("\u0661\u0662\u0663\u0664", "\u0661\u0662\u0663\u0664");
+    
+    // don't split supplementaries into unpaired surrogates
+    doSplit("\U00020000\U00020000", "\U00020000\U00020000");
+  }
+  
+  public void doSplitPossessive(int stemPossessive, final String input, final String... output) throws Exception {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS;
+    flags |= (stemPossessive == 1) ? STEM_ENGLISH_POSSESSIVE : 0;
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(keywordMockTokenizer(input), flags, null);
+
+    assertTokenStreamContents(wdf, output);
+  }
+  
+  /*
+   * Test option that allows disabling the special "'s" stemming, instead treating the single quote like other delimiters. 
+   */
+  public void testPossessives() throws Exception {
+    doSplitPossessive(1, "ra's", "ra");
+    doSplitPossessive(0, "ra's", "ra", "s");
+  }
+  
+  /*
+   * Set a large position increment gap of 10 if the token is "largegap" or "/"
+   */
+  private final class LargePosIncTokenFilter extends TokenFilter {
+    private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+    private PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+    
+    protected LargePosIncTokenFilter(TokenStream input) {
+      super(input);
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (input.incrementToken()) {
+        if (termAtt.toString().equals("largegap") || termAtt.toString().equals("/"))
+          posIncAtt.setPositionIncrement(10);
+        return true;
+      } else {
+        return false;
+      }
+    }  
+  }
+  
+  public void testPositionIncrements() throws Exception {
+    final int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
+    final CharArraySet protWords = new CharArraySet(new HashSet<>(Arrays.asList("NUTCH")), false);
+    
+    /* analyzer that uses whitespace + wdf */
+    Analyzer a = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String field) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterGraphFilter(
+            tokenizer,
+            flags, protWords));
+      }
+    };
+
+    /* in this case, works as expected. */
+    assertAnalyzesTo(a, "LUCENE / SOLR", new String[] { "LUCENE", "SOLR" },
+        new int[] { 0, 9 },
+        new int[] { 6, 13 },
+        null,
+        new int[] { 1, 2 },
+        null,
+        false);
+
+    /* only in this case, posInc of 2 ?! */
+    assertAnalyzesTo(a, "LUCENE / solR", new String[] { "LUCENE", "solR", "sol", "R" },
+        new int[] { 0, 9, 9, 12 },
+        new int[] { 6, 13, 12, 13 },
+        null,                     
+        new int[] { 1, 2, 0, 1 },
+        null,
+        false);
+    
+    assertAnalyzesTo(a, "LUCENE / NUTCH SOLR", new String[] { "LUCENE", "NUTCH", "SOLR" },
+        new int[] { 0, 9, 15 },
+        new int[] { 6, 14, 19 },
+        null,
+        new int[] { 1, 2, 1 },
+        null,
+        false);
+    
+    /* analyzer that will consume tokens with large position increments */
+    Analyzer a2 = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String field) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterGraphFilter(
+            new LargePosIncTokenFilter(tokenizer),
+            flags, protWords));
+      }
+    };
+    
+    /* increment of "largegap" is preserved */
+    assertAnalyzesTo(a2, "LUCENE largegap SOLR", new String[] { "LUCENE", "largegap", "SOLR" },
+        new int[] { 0, 7, 16 },
+        new int[] { 6, 15, 20 },
+        null,
+        new int[] { 1, 10, 1 },
+        null,
+        false);
+    
+    /* the "/" had a position increment of 10, where did it go?!?!! */
+    assertAnalyzesTo(a2, "LUCENE / SOLR", new String[] { "LUCENE", "SOLR" },
+        new int[] { 0, 9 },
+        new int[] { 6, 13 },
+        null,
+        new int[] { 1, 11 },
+        null,
+        false);
+    
+    /* in this case, the increment of 10 from the "/" is carried over */
+    assertAnalyzesTo(a2, "LUCENE / solR", new String[] { "LUCENE", "solR", "sol", "R" },
+        new int[] { 0, 9, 9, 12 },
+        new int[] { 6, 13, 12, 13 },
+        null,
+        new int[] { 1, 11, 0, 1 },
+        null,
+        false);
+    
+    assertAnalyzesTo(a2, "LUCENE / NUTCH SOLR", new String[] { "LUCENE", "NUTCH", "SOLR" },
+        new int[] { 0, 9, 15 },
+        new int[] { 6, 14, 19 },
+        null,
+        new int[] { 1, 11, 1 },
+        null,
+        false);
+
+    Analyzer a3 = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String field) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        StopFilter filter = new StopFilter(tokenizer, StandardAnalyzer.STOP_WORDS_SET);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterGraphFilter(filter, flags, protWords));
+      }
+    };
+
+    assertAnalyzesTo(a3, "lucene.solr", 
+        new String[] { "lucenesolr", "lucene", "solr" },
+        new int[] { 0, 0, 7 },
+        new int[] { 11, 6, 11 },
+        null,
+        new int[] { 1, 0, 1 },
+        null,
+        false);
+
+    /* the stopword should add a gap here */
+    assertAnalyzesTo(a3, "the lucene.solr", 
+        new String[] { "lucenesolr", "lucene", "solr" }, 
+        new int[] { 4, 4, 11 }, 
+        new int[] { 15, 10, 15 },
+        null,
+        new int[] { 2, 0, 1 },
+        null,
+        false);
+
+    IOUtils.close(a, a2, a3);
+  }
+  
+  /** concat numbers + words + all */
+  public void testLotsOfConcatenating() throws Exception {
+    final int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_WORDS | CATENATE_NUMBERS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;    
+
+    /* analyzer that uses whitespace + wdf */
+    Analyzer a = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String field) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterGraphFilter(tokenizer, flags, null));
+      }
+    };
+    
+    assertAnalyzesTo(a, "abc-def-123-456", 
+        new String[] { "abcdef123456", "abcdef", "abc", "def", "123456", "123", "456" }, 
+        new int[] { 0, 0, 0, 4, 8, 8, 12 }, 
+        new int[] { 15, 7, 3, 7, 15, 11, 15 },
+        null,
+        new int[] { 1, 0, 0, 1, 1, 0, 1 },
+        null,
+        false);
+    a.close();
+  }
+  
+  /** concat numbers + words + all + preserve original */
+  public void testLotsOfConcatenating2() throws Exception {
+    final int flags = PRESERVE_ORIGINAL | GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_WORDS | CATENATE_NUMBERS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;    
+
+    /* analyzer that uses whitespace + wdf */
+    Analyzer a = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String field) {
+        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterGraphFilter(tokenizer, flags, null));
+      }
+    };
+    
+    assertAnalyzesTo(a, "abc-def-123-456", 
+                     new String[] { "abcdef123456", "abc-def-123-456", "abcdef", "abc", "def", "123456", "123", "456" }, 
+                     new int[] { 0, 0, 0, 0, 4, 8, 8, 12 }, 
+                     new int[] { 15, 15, 7, 3, 7, 15, 11, 15 },
+                     null,
+                     new int[] { 1, 0, 0, 0, 1, 1, 0, 1 },
+                     null,
+                     false);
+    a.close();
+  }
+  
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    int numIterations = atLeast(5);
+    for (int i = 0; i < numIterations; i++) {
+      final int flags = random().nextInt(512);
+      final CharArraySet protectedWords;
+      if (random().nextBoolean()) {
+        protectedWords = new CharArraySet(new HashSet<>(Arrays.asList("a", "b", "cd")), false);
+      } else {
+        protectedWords = null;
+      }
+      
+      Analyzer a = new Analyzer() {
+        
+        @Override
+        protected TokenStreamComponents createComponents(String fieldName) {
+          Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+          return new TokenStreamComponents(tokenizer, new WordDelimiterGraphFilter(tokenizer, flags, protectedWords));
+        }
+      };
+      // TODO: properly support positionLengthAttribute
+      checkRandomData(random(), a, 200*RANDOM_MULTIPLIER, 20, false, false);
+      a.close();
+    }
+  }
+  
+  /** blast some enormous random strings through the analyzer */
+  public void testRandomHugeStrings() throws Exception {
+    int numIterations = atLeast(5);
+    for (int i = 0; i < numIterations; i++) {
+      final int flags = random().nextInt(512);
+      final CharArraySet protectedWords;
+      if (random().nextBoolean()) {
+        protectedWords = new CharArraySet(new HashSet<>(Arrays.asList("a", "b", "cd")), false);
+      } else {
+        protectedWords = null;
+      }
+      
+      Analyzer a = new Analyzer() {
+        
+        @Override
+        protected TokenStreamComponents createComponents(String fieldName) {
+          Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+          TokenStream wdgf = new WordDelimiterGraphFilter(tokenizer, flags, protectedWords);
+          return new TokenStreamComponents(tokenizer, wdgf);
+        }
+      };
+      // TODO: properly support positionLengthAttribute
+      checkRandomData(random(), a, 20*RANDOM_MULTIPLIER, 8192, false, false);
+      a.close();
+    }
+  }
+  
+  public void testEmptyTerm() throws IOException {
+    Random random = random();
+    for (int i = 0; i < 512; i++) {
+      final int flags = i;
+      final CharArraySet protectedWords;
+      if (random.nextBoolean()) {
+        protectedWords = new CharArraySet(new HashSet<>(Arrays.asList("a", "b", "cd")), false);
+      } else {
+        protectedWords = null;
+      }
+    
+      Analyzer a = new Analyzer() { 
+        @Override
+        protected TokenStreamComponents createComponents(String fieldName) {
+          Tokenizer tokenizer = new KeywordTokenizer();
+          return new TokenStreamComponents(tokenizer, new WordDelimiterGraphFilter(tokenizer, flags, protectedWords));
+        }
+      };
+      // depending upon options, this thing may or may not preserve the empty term
+      checkAnalysisConsistency(random, a, random.nextBoolean(), "");
+      a.close();
+    }
+  }
+
+  private Analyzer getAnalyzer(int flags) {
+    return getAnalyzer(flags, null);
+  }
+  
+  private Analyzer getAnalyzer(int flags, CharArraySet protectedWords) {
+    return new Analyzer() { 
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName) {
+        Tokenizer tokenizer = new KeywordTokenizer();
+        return new TokenStreamComponents(tokenizer, new WordDelimiterGraphFilter(tokenizer, flags, protectedWords));
+      }
+    };
+  }
+
+  private static boolean has(int flags, int flag) {
+    return (flags & flag) != 0;
+  }
+
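+  // NOTE: the substring result below is discarded and the method always
+  // returns true; it appears to be unused leftover in this test class.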
+  private static boolean isEnglishPossessive(String text, int pos) {
+    if (pos > 2) {
+      if ((text.charAt(pos-1) == 's' || text.charAt(pos-1) == 'S') &&
+          (pos == text.length() || text.charAt(pos) != '-')) {
+        text = text.substring(0, text.length()-2);
+      }
+    }
+    return true;
+  }
+
+  private static class WordPart {
+    final String part;
+    final int startOffset;
+    final int endOffset;
+    final int type;
+
+    public WordPart(String text, int startOffset, int endOffset) {
+      this.part = text.substring(startOffset, endOffset);
+      this.startOffset = startOffset;
+      this.endOffset = endOffset;
+      this.type = toType(part.charAt(0));
+    }
+
+    @Override
+    public String toString() {
+      return "WordPart(" + part + " " + startOffset + "-" + endOffset + ")";
+    }
+  }
+
+  private static final int NUMBER = 0;
+  private static final int LETTER = 1;
+  private static final int DELIM = 2;
+
+  private static int toType(char ch) {
+    if (Character.isDigit(ch)) {
+      // numbers
+      return NUMBER;
+    } else if (Character.isLetter(ch)) {
+      // letters
+      return LETTER;
+    } else {
+      // delimiter
+      return DELIM;
+    }
+  }
+
+  /** Does (hopefully) the same thing as WordDelimiterGraphFilter, according to the flags, but more slowly, returning all possible path strings through the graph. */
+  private Set<String> slowWDF(String text, int flags) {
+
+    // first make word parts:
+    List<WordPart> wordParts = new ArrayList<>();
+    int lastCH = -1;
+    int wordPartStart = 0;
+    boolean inToken = false;
+
+    for(int i=0;i<text.length();i++) {
+      char ch = text.charAt(i);
+      if (toType(ch) == DELIM) {
+        // delimiter
+        if (inToken) {
+          // end current token
+          wordParts.add(new WordPart(text, wordPartStart, i));
+          inToken = false;
+        }
+
+        // strip english possessive at the end of this token?:
+        if (has(flags, STEM_ENGLISH_POSSESSIVE) &&
+            ch == '\'' && i > 0 &&
+            i < text.length()-1 &&
+            (text.charAt(i+1) == 's' || text.charAt(i+1) == 'S') &&
+            toType(text.charAt(i-1)) == LETTER &&
+            (i+2 == text.length() || toType(text.charAt(i+2)) == DELIM)) {
+          i += 2;
+        }
+    
+      } else if (inToken == false) {
+        // start new token
+        inToken = true;
+        wordPartStart = i;
+      } else {
+        boolean newToken = false;
+        if (Character.isLetter(lastCH)) {
+          if (Character.isLetter(ch)) {
+            if (has(flags, SPLIT_ON_CASE_CHANGE) && Character.isLowerCase(lastCH) && Character.isLowerCase(ch) == false) {
+              // start new token on lower -> UPPER case change (but not vice versa!)
+              newToken = true;
+            }
+          } else if (has(flags, SPLIT_ON_NUMERICS) && Character.isDigit(ch)) {
+            // start new token on letter -> number change
+            newToken = true;
+          }
+        } else {
+          assert Character.isDigit(lastCH);
+          if (Character.isLetter(ch) && has(flags, SPLIT_ON_NUMERICS) ) {
+            // start new token on number -> letter change
+            newToken = true;
+          }
+        }
+        if (newToken) {
+          wordParts.add(new WordPart(text, wordPartStart, i));
+          wordPartStart = i;
+        }
+      }
+      lastCH = ch;
+    }
+
+    if (inToken) {
+      // add last token
+      wordParts.add(new WordPart(text, wordPartStart, text.length()));
+    }
+    
+    Set<String> paths = new HashSet<>();
+    if (wordParts.isEmpty() == false) {
+      enumerate(flags, 0, text, wordParts, paths, new StringBuilder());
+    }
+
+    if (has(flags, PRESERVE_ORIGINAL)) {
+      paths.add(text);
+    }
+
+    if (has(flags, CATENATE_ALL) && wordParts.isEmpty() == false) {
+      StringBuilder b = new StringBuilder();
+      for(WordPart wordPart : wordParts) {
+        b.append(wordPart.part);
+      }
+      paths.add(b.toString());
+    }
+    
+    return paths;
+  }
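+  // For example, slowWDF("abc-def", GENERATE_WORD_PARTS | CATENATE_WORDS)
+  // produces the two paths {"abc def", "abcdef"}, while CATENATE_WORDS alone
+  // produces only {"abcdef"}.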
+
+  private void add(StringBuilder path, String part) {
+    if (path.length() != 0) {
+      path.append(' ');
+    }
+    path.append(part);
+  }
+
+  private void add(StringBuilder path, List<WordPart> wordParts, int from, int to) {
+    if (path.length() != 0) {
+      path.append(' ');
+    }
+    // no spaces:
+    for(int i=from;i<to;i++) {
+      path.append(wordParts.get(i).part);
+    }
+  }
+
+  private void addWithSpaces(StringBuilder path, List<WordPart> wordParts, int from, int to) {
+    for(int i=from;i<to;i++) {
+      add(path, wordParts.get(i).part);
+    }
+  }
+
+  /** Finds the end (exclusive) of the run of parts with the same type */
+  private int endOfRun(List<WordPart> wordParts, int start) {
+    int upto = start+1;
+    while(upto < wordParts.size() && wordParts.get(upto).type == wordParts.get(start).type) {
+      upto++;
+    }
+    return upto;
+  }
+
+  /** Recursively enumerates all paths through the word parts */
+  private void enumerate(int flags, int upto, String text, List<WordPart> wordParts, Set<String> paths, StringBuilder path) {
+    if (upto == wordParts.size()) {
+      if (path.length() > 0) {
+        paths.add(path.toString());
+      }
+    } else {
+      int savLength = path.length();
+      int end = endOfRun(wordParts, upto);
+
+      if (wordParts.get(upto).type == NUMBER) {
+        // always output single word, optionally surrounded by delims:
+        if (has(flags, GENERATE_NUMBER_PARTS) || wordParts.size() == 1) {
+          addWithSpaces(path, wordParts, upto, end);
+          if (has(flags, CATENATE_NUMBERS)) {
+            // recurse first with the parts
+            enumerate(flags, end, text, wordParts, paths, path);
+            path.setLength(savLength);
+            // .. and second with the concat
+            add(path, wordParts, upto, end);
+          }
+        } else if (has(flags, CATENATE_NUMBERS)) {
+          add(path, wordParts, upto, end);
+        }
+        enumerate(flags, end, text, wordParts, paths, path);
+        path.setLength(savLength);
+      } else {
+        assert wordParts.get(upto).type == LETTER;
+        // always output single word, optionally surrounded by delims:
+        if (has(flags, GENERATE_WORD_PARTS) || wordParts.size() == 1) {
+          addWithSpaces(path, wordParts, upto, end);
+          if (has(flags, CATENATE_WORDS)) {
+            // recurse first with the parts
+            enumerate(flags, end, text, wordParts, paths, path);
+            path.setLength(savLength);
+            // .. and second with the concat
+            add(path, wordParts, upto, end);
+          }
+        } else if (has(flags, CATENATE_WORDS)) {
+          add(path, wordParts, upto, end);
+        }
+        enumerate(flags, end, text, wordParts, paths, path);
+        path.setLength(savLength);
+      }
+    }
+  }
+
+  public void testBasicGraphSplits() throws Exception {
+    assertGraphStrings(getAnalyzer(0),
+                       "PowerShotPlus",
+                       "PowerShotPlus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS),
+                       "PowerShotPlus",
+                       "PowerShotPlus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE),
+                       "PowerShotPlus",
+                       "Power Shot Plus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE | PRESERVE_ORIGINAL),
+                       "PowerShotPlus",
+                       "PowerShotPlus",
+                       "Power Shot Plus");
+
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS),
+                       "Power-Shot-Plus",
+                       "Power Shot Plus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE),
+                       "Power-Shot-Plus",
+                       "Power Shot Plus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE | PRESERVE_ORIGINAL),
+                       "Power-Shot-Plus",
+                       "Power-Shot-Plus",
+                       "Power Shot Plus");
+
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE),
+                       "PowerShotPlus",
+                       "Power Shot Plus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE),
+                       "PowerShot1000Plus",
+                       "Power Shot1000Plus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE),
+                       "Power-Shot-Plus",
+                       "Power Shot Plus");
+
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE | CATENATE_WORDS),
+                       "PowerShotPlus",
+                       "Power Shot Plus",
+                       "PowerShotPlus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE | CATENATE_WORDS),
+                       "PowerShot1000Plus",
+                       "Power Shot1000Plus",
+                       "PowerShot1000Plus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | SPLIT_ON_CASE_CHANGE | CATENATE_WORDS | CATENATE_NUMBERS),
+                       "Power-Shot-1000-17-Plus",
+                       "Power Shot 1000 17 Plus",
+                       "Power Shot 100017 Plus",
+                       "PowerShot 1000 17 Plus",
+                       "PowerShot 100017 Plus");
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | SPLIT_ON_CASE_CHANGE | CATENATE_WORDS | CATENATE_NUMBERS | PRESERVE_ORIGINAL),
+                       "Power-Shot-1000-17-Plus",
+                       "Power-Shot-1000-17-Plus",
+                       "Power Shot 1000 17 Plus",
+                       "Power Shot 100017 Plus",
+                       "PowerShot 1000 17 Plus",
+                       "PowerShot 100017 Plus");
+  }
+
+  /*
+  public void testToDot() throws Exception {
+    int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE | PRESERVE_ORIGINAL | CATENATE_WORDS | CATENATE_NUMBERS;
+    String text = "PowerSystem2000-5-Shot's";
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(new CannedTokenStream(new Token(text, 0, text.length())), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    //StringWriter sw = new StringWriter();
+    // TokenStreamToDot toDot = new TokenStreamToDot(text, wdf, new PrintWriter(sw));
+    PrintWriter pw = new PrintWriter("/tmp/foo2.dot");
+    TokenStreamToDot toDot = new TokenStreamToDot(text, wdf, pw);
+    toDot.toDot();
+    pw.close();
+    //System.out.println("DOT:\n" + sw.toString());
+  }
+  */
+
+  private String randomWDFText() {
+    StringBuilder b = new StringBuilder();
+    int length = TestUtil.nextInt(random(), 1, 50);
+    for(int i=0;i<length;i++) {
+      int surpriseMe = random().nextInt(37);
+      int lower = -1;
+      int upper = -1;
+      if (surpriseMe < 10) {
+        // lowercase letter
+        lower = 'a';
+        upper = 'z';
+      } else if (surpriseMe < 20) {
+        // uppercase letter
+        lower = 'A';
+        upper = 'Z';
+      } else if (surpriseMe < 30) {
+        // digit
+        lower = '0';
+        upper = '9';
+      } else if (surpriseMe < 35) {
+        // punct
+        lower = '-';
+        upper = '-';
+      } else {
+        b.append("'s");
+      }
+
+      if (lower != -1) {
+        b.append((char) TestUtil.nextInt(random(), lower, upper));
+      }
+    }
+
+    return b.toString();
+  }
+
+  public void testInvalidFlag() throws Exception {
+    expectThrows(IllegalArgumentException.class,
+                 () -> {
+                   new WordDelimiterGraphFilter(new CannedTokenStream(), 1 << 31, null);
+                 });
+  }
+
+  public void testRandomPaths() throws Exception {
+    int iters = atLeast(100);
+    for(int iter=0;iter<iters;iter++) {
+      String text = randomWDFText();
+      if (VERBOSE) {
+        System.out.println("\nTEST: text=" + text + " len=" + text.length());
+      }
+
+      int flags = 0;
+      if (random().nextBoolean()) {
+        flags |= GENERATE_WORD_PARTS;
+      }
+      if (random().nextBoolean()) {
+        flags |= GENERATE_NUMBER_PARTS;
+      }
+      if (random().nextBoolean()) {
+        flags |= CATENATE_WORDS;
+      }
+      if (random().nextBoolean()) {
+        flags |= CATENATE_NUMBERS;
+      }
+      if (random().nextBoolean()) {
+        flags |= CATENATE_ALL;
+      }
+      if (random().nextBoolean()) {
+        flags |= PRESERVE_ORIGINAL;
+      }
+      if (random().nextBoolean()) {
+        flags |= SPLIT_ON_CASE_CHANGE;
+      }
+      if (random().nextBoolean()) {
+        flags |= SPLIT_ON_NUMERICS;
+      }
+      if (random().nextBoolean()) {
+        flags |= STEM_ENGLISH_POSSESSIVE;
+      }
+
+      verify(text, flags);
+    }
+  }
+
+  /** Runs normal and slow WDGF and compares results */
+  private void verify(String text, int flags) throws IOException {
+
+    Set<String> expected = slowWDF(text, flags);
+    if (VERBOSE) {
+      for(String path : expected) {
+        System.out.println("  " + path);
+      }
+    }
+
+    Set<String> actual = getGraphStrings(getAnalyzer(flags), text);
+    if (actual.equals(expected) == false) {
+      StringBuilder b = new StringBuilder();
+      b.append("\n\nFAIL: text=");
+      b.append(text);
+      b.append(" flags=");
+      b.append(WordDelimiterGraphFilter.flagsToString(flags));
+      b.append('\n');
+      b.append("  expected paths:\n");
+      for (String s : expected) {
+        b.append("    ");
+        b.append(s);
+        if (actual.contains(s) == false) {
+          b.append(" [missing!]");
+        }
+        b.append('\n');
+      }
+
+      b.append("  actual paths:\n");
+      for (String s : actual) {
+        b.append("    ");
+        b.append(s);
+        if (expected.contains(s) == false) {
+          b.append(" [unexpected!]");
+        }
+        b.append('\n');
+      }
+
+      fail(b.toString());
+    }
+  }
+
+  public void testOnlyNumbers() throws Exception {
+    // no token should be produced
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS), "7-586");
+  }
+
+  public void testNoCatenate() throws Exception {
+    // every part is generated, nothing is catenated
+    assertGraphStrings(getAnalyzer(GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS), "a-b-c-9-d", "a b c 9 d");
+  }
+
+  public void testCuriousCase1() throws Exception {
+    verify("u-0L-4836-ip4Gw--13--q7--L07E1", CATENATE_WORDS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE);
+  }
+
+  public void testCuriousCase2() throws Exception {
+    verify("u-l-p", CATENATE_ALL);
+  }
+
+  public void testOriginalPosLength() throws Exception {
+    verify("Foo-Bar-Baz", CATENATE_WORDS | SPLIT_ON_CASE_CHANGE | PRESERVE_ORIGINAL);
+  }
+
+  public void testCuriousCase3() throws Exception {
+    verify("cQzk4-GL0izl0mKM-J8--4m-'s", GENERATE_NUMBER_PARTS | CATENATE_NUMBERS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS);
+  }
+
+  public void testEmptyString() throws Exception {
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(new CannedTokenStream(new Token("", 0, 0)), DEFAULT_WORD_DELIM_TABLE, GENERATE_WORD_PARTS | CATENATE_ALL | PRESERVE_ORIGINAL, null);
+    wdf.reset();
+    assertTrue(wdf.incrementToken());
+    assertFalse(wdf.incrementToken());
+    wdf.end();
+    wdf.close();
+  }
+
+  public void testProtectedWords() throws Exception {
+    TokenStream tokens = new CannedTokenStream(new Token("foo17-bar", 0, 9),
+                                               new Token("foo-bar", 0, 7));
+
+    CharArraySet protectedWords = new CharArraySet(new HashSet<>(Arrays.asList("foo17-BAR")), true);
+    WordDelimiterGraphFilter wdf = new WordDelimiterGraphFilter(tokens, DEFAULT_WORD_DELIM_TABLE, GENERATE_WORD_PARTS | PRESERVE_ORIGINAL | CATENATE_ALL, protectedWords);
+    assertGraphStrings(wdf,
+                       "foo17-bar foo bar",
+                       "foo17-bar foo-bar",
+                       "foo17-bar foobar");
+  }
+}

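Unlike the older WordDelimiterFilter, WordDelimiterGraphFilter emits a true
token graph, and a Lucene index stores positions but not position lengths, so
graph streams need to be flattened before indexing. A sketch of that chain
(not part of this patch; the WhitespaceTokenizer and flag choices are
illustrative, and the class name is made up):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.FlattenGraphFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;

import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.CATENATE_WORDS;
import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.GENERATE_WORD_PARTS;

public class WdgfIndexAnalyzer extends Analyzer {
  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer tokenizer = new WhitespaceTokenizer();
    TokenStream ts = new WordDelimiterGraphFilter(tokenizer, GENERATE_WORD_PARTS | CATENATE_WORDS, null);
    // squash posLength so the positions written to the index stay consistent:
    ts = new FlattenGraphFilter(ts);
    return new TokenStreamComponents(tokenizer, ts);
  }
}

At search time the flattening step can be dropped, letting the query parser
see the whole graph.
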
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestFlattenGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestFlattenGraphFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestFlattenGraphFilter.java
deleted file mode 100644
index d61fa96..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestFlattenGraphFilter.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.lucene.analysis.synonym;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.CannedTokenStream;
-import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-
-public class TestFlattenGraphFilter extends BaseTokenStreamTestCase {
-  
-  private static Token token(String term, int posInc, int posLength, int startOffset, int endOffset) {
-    final Token t = new Token(term, startOffset, endOffset);
-    t.setPositionIncrement(posInc);
-    t.setPositionLength(posLength);
-    return t;
-  }
-
-  public void testSimpleMock() throws Exception {
-    Analyzer a = new Analyzer() {
-        @Override
-        protected TokenStreamComponents createComponents(String fieldName) {
-          Tokenizer tokenizer = new MockTokenizer(MockTokenizer.SIMPLE, true);
-          TokenStream ts = new FlattenGraphFilter(tokenizer);
-          return new TokenStreamComponents(tokenizer, ts);
-        }
-      };
-
-    assertAnalyzesTo(a, "wtf happened",
-                     new String[] {"wtf", "happened"},
-                     new int[]    {    0,          4},
-                     new int[]    {    3,         12},
-                     null,
-                     new int[]    {    1,          1},
-                     new int[]    {    1,          1},
-                     true);
-  }
-
-  // Make sure graph is unchanged if it's already flat
-  public void testAlreadyFlatten() throws Exception {
-    TokenStream in = new CannedTokenStream(0, 12, new Token[] {
-        token("wtf", 1, 1, 0, 3),
-        token("what", 0, 1, 0, 3),
-        token("wow", 0, 1, 0, 3),
-        token("the", 1, 1, 0, 3),
-        token("that's", 0, 1, 0, 3),
-        token("fudge", 1, 1, 0, 3),
-        token("funny", 0, 1, 0, 3),
-        token("happened", 1, 1, 4, 12)
-      });
-
-    TokenStream out = new FlattenGraphFilter(in);
-
-    // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
-    assertTokenStreamContents(out,
-                              new String[] {"wtf", "what", "wow", "the", "that's", "fudge", "funny", "happened"},
-                              new int[] {0, 0, 0, 0, 0, 0, 0, 4},
-                              new int[] {3, 3, 3, 3, 3, 3, 3, 12},
-                              new int[] {1, 0, 0, 1, 0, 1, 0, 1},
-                              new int[] {1, 1, 1, 1, 1, 1, 1, 1},
-                              12);
-  }
-
-  public void testWTF1() throws Exception {
-
-    // "wow that's funny" and "what the fudge" are separate side paths, in parallel with "wtf", on input:
-    TokenStream in = new CannedTokenStream(0, 12, new Token[] {
-        token("wtf", 1, 5, 0, 3),
-        token("what", 0, 1, 0, 3),
-        token("wow", 0, 3, 0, 3),
-        token("the", 1, 1, 0, 3),
-        token("fudge", 1, 3, 0, 3),
-        token("that's", 1, 1, 0, 3),
-        token("funny", 1, 1, 0, 3),
-        token("happened", 1, 1, 4, 12)
-      });
-
-
-    TokenStream out = new FlattenGraphFilter(in);
-
-    // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
-    assertTokenStreamContents(out,
-                              new String[] {"wtf", "what", "wow", "the", "that's", "fudge", "funny", "happened"},
-                              new int[] {0, 0, 0, 0, 0, 0, 0, 4},
-                              new int[] {3, 3, 3, 3, 3, 3, 3, 12},
-                              new int[] {1, 0, 0, 1, 0, 1, 0, 1},
-                              new int[] {3, 1, 1, 1, 1, 1, 1, 1},
-                              12);
-    
-  }
-
-  /** Same as testWTF1 except the "wtf" token comes out later */
-  public void testWTF2() throws Exception {
-
-    // "wow that's funny" and "what the fudge" are separate side paths, in parallel with "wtf", on input:
-    TokenStream in = new CannedTokenStream(0, 12, new Token[] {
-        token("what", 1, 1, 0, 3),
-        token("wow", 0, 3, 0, 3),
-        token("wtf", 0, 5, 0, 3),
-        token("the", 1, 1, 0, 3),
-        token("fudge", 1, 3, 0, 3),
-        token("that's", 1, 1, 0, 3),
-        token("funny", 1, 1, 0, 3),
-        token("happened", 1, 1, 4, 12)
-      });
-
-
-    TokenStream out = new FlattenGraphFilter(in);
-
-    // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
-    assertTokenStreamContents(out,
-                              new String[] {"what", "wow", "wtf", "the", "that's", "fudge", "funny", "happened"},
-                              new int[] {0, 0, 0, 0, 0, 0, 0, 4},
-                              new int[] {3, 3, 3, 3, 3, 3, 3, 12},
-                              new int[] {1, 0, 0, 1, 0, 1, 0, 1},
-                              new int[] {1, 1, 3, 1, 1, 1, 1, 1},
-                              12);
-    
-  }
-
-  public void testNonGreedySynonyms() throws Exception {
-    // This is just "hypothetical" for Lucene today, because SynFilter is
-    // greedy: when two syn rules match on overlapping tokens, only one
-    // (greedily) wins.  This test pretends all syn matches could match:
-
-    TokenStream in = new CannedTokenStream(0, 20, new Token[] {
-        token("wizard", 1, 1, 0, 6),
-        token("wizard_of_oz", 0, 3, 0, 12),
-        token("of", 1, 1, 7, 9),
-        token("oz", 1, 1, 10, 12),
-        token("oz_screams", 0, 2, 10, 20),
-        token("screams", 1, 1, 13, 20),
-      });
-
-
-    TokenStream out = new FlattenGraphFilter(in);
-
-    // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
-    assertTokenStreamContents(out,
-                              new String[] {"wizard", "wizard_of_oz", "of", "oz", "oz_screams", "screams"},
-                              new int[] {0, 0, 7, 10, 10, 13},
-                              new int[] {6, 12, 9, 12, 20, 20},
-                              new int[] {1, 0, 1, 1, 0, 1},
-                              new int[] {1, 3, 1, 1, 2, 1},
-                              20);
-    
-  }
-
-  public void testNonGraph() throws Exception {
-    TokenStream in = new CannedTokenStream(0, 22, new Token[] {
-        token("hello", 1, 1, 0, 5),
-        token("pseudo", 1, 1, 6, 12),
-        token("world", 1, 1, 13, 18),
-        token("fun", 1, 1, 19, 22),
-      });
-
-
-    TokenStream out = new FlattenGraphFilter(in);
-
-    // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
-    assertTokenStreamContents(out,
-                              new String[] {"hello", "pseudo", "world", "fun"},
-                              new int[] {0, 6, 13, 19},
-                              new int[] {5, 12, 18, 22},
-                              new int[] {1, 1, 1, 1},
-                              new int[] {1, 1, 1, 1},
-                              22);
-  }
-
-  public void testSimpleHole() throws Exception {
-    TokenStream in = new CannedTokenStream(0, 13, new Token[] {
-        token("hello", 1, 1, 0, 5),
-        token("hole", 2, 1, 6, 10),
-        token("fun", 1, 1, 11, 13),
-      });
-
-
-    TokenStream out = new FlattenGraphFilter(in);
-
-    // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
-    assertTokenStreamContents(out,
-                              new String[] {"hello", "hole", "fun"},
-                              new int[] {0, 6, 11},
-                              new int[] {5, 10, 13},
-                              new int[] {1, 2, 1},
-                              new int[] {1, 1, 1},
-                              13);
-  }
-
-  public void testHoleUnderSyn() throws Exception {
-    // Tests a StopFilter after SynFilter where a stopword in a syn is removed
-    //
-    //   wizard of oz -> woz syn, but then "of" becomes a hole
-
-    TokenStream in = new CannedTokenStream(0, 12, new Token[] {
-        token("wizard", 1, 1, 0, 6),
-        token("woz", 0, 3, 0, 12),
-        token("oz", 2, 1, 10, 12),
-      });
-
-
-    TokenStream out = new FlattenGraphFilter(in);
-
-    assertTokenStreamContents(out,
-                              new String[] {"wizard", "woz", "oz"},
-                              new int[] {0, 0, 10},
-                              new int[] {6, 12, 12},
-                              new int[] {1, 0, 2},
-                              new int[] {1, 3, 1},
-                              12);
-  }
-
-  public void testStrangelyNumberedNodes() throws Exception {
-
-    // Uses only nodes 0, 2, 3, i.e. 1 is just never used (it is not a hole!!)
-    TokenStream in = new CannedTokenStream(0, 27, new Token[] {
-        token("dog", 1, 3, 0, 5),
-        token("puppy", 0, 3, 0, 5),
-        token("flies", 3, 1, 6, 11),
-      });
-
-    TokenStream out = new FlattenGraphFilter(in);
-
-    assertTokenStreamContents(out,
-                              new String[] {"dog", "puppy", "flies"},
-                              new int[] {0, 0, 6},
-                              new int[] {5, 5, 11},
-                              new int[] {1, 0, 1},
-                              new int[] {1, 1, 1},
-                              27);
-  }
-
-  public void testTwoLongParallelPaths() throws Exception {
-
-    // "a a a a a a" in parallel with "b b b b b b"
-    TokenStream in = new CannedTokenStream(0, 11, new Token[] {
-        token("a", 1, 1, 0, 1),
-        token("b", 0, 2, 0, 1),
-        token("a", 1, 2, 2, 3),
-        token("b", 1, 2, 2, 3),
-        token("a", 1, 2, 4, 5),
-        token("b", 1, 2, 4, 5),
-        token("a", 1, 2, 6, 7),
-        token("b", 1, 2, 6, 7),
-        token("a", 1, 2, 8, 9),
-        token("b", 1, 2, 8, 9),
-        token("a", 1, 2, 10, 11),
-        token("b", 1, 2, 10, 11),
-      });
-
-
-    TokenStream out = new FlattenGraphFilter(in);
-    
-    // ... becomes flattened to a single path with overlapping a/b token between each node:
-    assertTokenStreamContents(out,
-                              new String[] {"a", "b", "a", "b", "a", "b", "a", "b", "a", "b", "a", "b"},
-                              new int[] {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10},
-                              new int[] {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11},
-                              new int[] {1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0},
-                              new int[] {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
-                              11);
-    
-  }
-
-  // NOTE: TestSynonymGraphFilter's testRandomSyns also tests FlattenGraphFilter
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymGraphFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymGraphFilter.java
index edf2d2a..e00a165 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymGraphFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymGraphFilter.java
@@ -17,14 +17,22 @@
 
 package org.apache.lucene.analysis.synonym;
 
+import java.io.IOException;
+import java.io.StringReader;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockGraphTokenFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.TokenStreamToAutomaton;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.FlattenGraphFilter;
 import org.apache.lucene.analysis.tokenattributes.*;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -35,7 +43,6 @@ import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.IOUtils;
@@ -49,15 +56,6 @@ import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
 import org.apache.lucene.util.automaton.Transition;
 import org.apache.lucene.util.fst.Util;
 
-import java.io.IOException;
-import java.io.StringReader;
-import java.text.ParseException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
 public class TestSynonymGraphFilter extends BaseTokenStreamTestCase {
 
   /** Set as a side effect by {@link #getAnalyzer} and {@link #getFlattenAnalyzer}. */
@@ -1832,7 +1830,7 @@ public class TestSynonymGraphFilter extends BaseTokenStreamTestCase {
                      new int[]      {1,        1,   0,        0,     0,        1,   0,        0,   1,    0,         1,    1,         1},
                      new int[]      {1,        1,   1,        1,     4,        3,   1,        1,   2,    1,         1,    1,         1});
     
-    assertAllStrings(analyzer, "the usa is wealthy", new String[] {
+    assertGraphStrings(analyzer, "the usa is wealthy", new String[] {
         "the usa is wealthy",
         "the united states is wealthy",
         "the u s a is wealthy",
@@ -1924,33 +1922,4 @@ public class TestSynonymGraphFilter extends BaseTokenStreamTestCase {
         new int[]{1, 1, 0, 1, 1});
     a.close();
   }
-
-  /**
-   * Helper method to validate all strings that can be generated from a token stream.
-   * Uses {@link TokenStreamToAutomaton} to create an automaton. Asserts the finite strings of the automaton are all
-   * and only the given valid strings.
-   * @param analyzer analyzer containing the SynonymFilter under test.
-   * @param text text to be analyzed.
-   * @param expectedStrings all expected finite strings.
-   */
-  public void assertAllStrings(Analyzer analyzer, String text, String[] expectedStrings) throws IOException {
-    TokenStream tokenStream = analyzer.tokenStream("dummy", text);
-    try {
-      Automaton automaton = new TokenStreamToAutomaton().toAutomaton(tokenStream);
-      Set<IntsRef> finiteStrings = AutomatonTestUtil.getFiniteStringsRecursive(automaton, -1);
-
-      assertEquals("Invalid resulting strings count. Expected " + expectedStrings.length + " was " + finiteStrings.size(),
-          expectedStrings.length, finiteStrings.size());
-
-      Set<String> expectedStringsSet = new HashSet<>(Arrays.asList(expectedStrings));
-
-      BytesRefBuilder scratchBytesRefBuilder = new BytesRefBuilder();
-      for (IntsRef ir: finiteStrings) {
-        String s = Util.toBytesRef(ir, scratchBytesRefBuilder).utf8ToString().replace((char) TokenStreamToAutomaton.POS_SEP, ' ');
-        assertTrue("Unexpected string found: " + s, expectedStringsSet.contains(s));
-      }
-    } finally {
-      tokenStream.close();
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java b/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
index 64bac66..0675abe 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
@@ -39,6 +39,7 @@ import org.apache.lucene.util.automaton.Automaton;
 public class TokenStreamToAutomaton {
 
   private boolean preservePositionIncrements;
+  private boolean finalOffsetGapAsHole;
   private boolean unicodeArcs;
 
   /** Sole constructor. */
@@ -51,6 +52,11 @@ public class TokenStreamToAutomaton {
     this.preservePositionIncrements = enablePositionIncrements;
   }
 
+  /** If true, any final offset gaps will result in adding a position hole. */
+  public void setFinalOffsetGapAsHole(boolean finalOffsetGapAsHole) {
+    this.finalOffsetGapAsHole = finalOffsetGapAsHole;
+  }
+
   /** Whether to make transition labels Unicode code points instead of UTF8 bytes, 
    *  <code>false</code> by default */
   public void setUnicodeArcs(boolean unicodeArcs) {
@@ -118,7 +124,7 @@ public class TokenStreamToAutomaton {
     int maxOffset = 0;
     while (in.incrementToken()) {
       int posInc = posIncAtt.getPositionIncrement();
-      if (!preservePositionIncrements && posInc > 1) {
+      if (preservePositionIncrements == false && posInc > 1) {
         posInc = 1;
       }
       assert pos > -1 || posInc > 0;
@@ -201,10 +207,35 @@ public class TokenStreamToAutomaton {
     }
 
     in.end();
+
     int endState = -1;
-    if (offsetAtt.endOffset() > maxOffset) {
+
+    int endPosInc = posIncAtt.getPositionIncrement();
+
+    if (endPosInc == 0 && finalOffsetGapAsHole && offsetAtt.endOffset() > maxOffset) {
+      endPosInc = 1;
+    }
+    
+    if (endPosInc > 0) {
+      // there were hole(s) after the last token
       endState = builder.createState();
-      builder.setAccept(endState, true);
+
+      // add trailing holes now:
+      int lastState = endState;
+      while (true) {
+        int state1 = builder.createState();
+        builder.addTransition(lastState, state1, HOLE);
+        endPosInc--;
+        if (endPosInc == 0) {
+          builder.setAccept(state1, true);
+          break;
+        }
+        int state2 = builder.createState();
+        builder.addTransition(state1, state2, POS_SEP);
+        lastState = state2;
+      }
+    } else {
+      endState = -1;
     }
 
     pos++;
@@ -219,7 +250,7 @@ public class TokenStreamToAutomaton {
       }
       pos++;
     }
-
+    
     return builder.finish();
   }
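
A minimal sketch (not part of this commit) of how the new flag might be used when
converting a stream to an automaton; the helper method is an illustrative
assumption, and AnalyzingSuggester below enables the same flag:

    import java.io.IOException;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.TokenStreamToAutomaton;
    import org.apache.lucene.util.automaton.Automaton;

    static Automaton toAutomatonWithTrailingHoles(TokenStream ts) throws IOException {
      TokenStreamToAutomaton tsta = new TokenStreamToAutomaton();
      tsta.setPreservePositionIncrements(true);
      tsta.setFinalOffsetGapAsHole(true); // a trailing offset gap now becomes a position hole
      return tsta.toAutomaton(ts);        // ts must be reset and not yet consumed
    }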
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
index cdc5d42..166d6b2 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
@@ -43,7 +43,7 @@ public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribut
     // OffsetAtt
 
     if (startOffset < 0 || endOffset < startOffset) {
-      throw new IllegalArgumentException("startOffset must be non-negative, and endOffset must be >= startOffset, "
+      throw new IllegalArgumentException("startOffset must be non-negative, and endOffset must be >= startOffset; got "
           + "startOffset=" + startOffset + ",endOffset=" + endOffset);
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PackedTokenAttributeImpl.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PackedTokenAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PackedTokenAttributeImpl.java
index c89a374..ad1e232 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PackedTokenAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PackedTokenAttributeImpl.java
@@ -107,7 +107,7 @@ public class PackedTokenAttributeImpl extends CharTermAttributeImpl
   @Override
   public void setOffset(int startOffset, int endOffset) {
     if (startOffset < 0 || endOffset < startOffset) {
-      throw new IllegalArgumentException("startOffset must be non-negative, and endOffset must be >= startOffset, "
+      throw new IllegalArgumentException("startOffset must be non-negative, and endOffset must be >= startOffset; got "
           + "startOffset=" + startOffset + ",endOffset=" + endOffset);
     }
     this.startOffset = startOffset;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
index 4d63d6f..e89fec1 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
@@ -30,8 +30,7 @@ public class PositionIncrementAttributeImpl extends AttributeImpl implements Pos
   @Override
   public void setPositionIncrement(int positionIncrement) {
     if (positionIncrement < 0) {
-      throw new IllegalArgumentException
-        ("Increment must be zero or greater: got " + positionIncrement);
+      throw new IllegalArgumentException("Position increment must be zero or greater; got " + positionIncrement);
     }
     this.positionIncrement = positionIncrement;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java
index 9bfdb49..d019a2b 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java
@@ -30,8 +30,7 @@ public class PositionLengthAttributeImpl extends AttributeImpl implements Positi
   @Override
   public void setPositionLength(int positionLength) {
     if (positionLength < 1) {
-      throw new IllegalArgumentException
-        ("Position length must be 1 or greater: got " + positionLength);
+      throw new IllegalArgumentException("Position length must be 1 or greater; got " + positionLength);
     }
     this.positionLength = positionLength;
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java b/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
index 8899dd1..7e98662 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java
@@ -21,16 +21,22 @@ import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Random;
+import java.util.Set;
 
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.automaton.Automata;
 import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.fst.Util;
 
 import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
 
@@ -565,7 +571,13 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
     assertSameLanguage(join(HOLE_A, SEP_A, s2a("abc")), ts);
   }
 
-  // TODO: testEndsWithHole... but we need posInc to set in TS.end()
+  public void testEndsWithHole() throws Exception {
+    final TokenStream ts = new CannedTokenStream(1, 0,
+                                                 new Token[] {
+                                                   token("abc", 2, 1),
+                                                 });
+    assertSameLanguage(join(HOLE_A, SEP_A, s2a("abc"), SEP_A, HOLE_A), ts);
+  }
 
   public void testSynHangingOverEnd() throws Exception {
     final TokenStream ts = new CannedTokenStream(
@@ -576,14 +588,47 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase {
     assertSameLanguage(Operations.union(s2a("a"), s2a("X")), ts);
   }
 
+  /** Returns all paths */
+  private Set<String> toPathStrings(Automaton a) {
+    BytesRefBuilder scratchBytesRefBuilder = new BytesRefBuilder();
+    Set<String> paths = new HashSet<>();
+    for (IntsRef ir: AutomatonTestUtil.getFiniteStringsRecursive(a, -1)) {
+      paths.add(Util.toBytesRef(ir, scratchBytesRefBuilder).utf8ToString().replace((char) TokenStreamToAutomaton.POS_SEP, ' '));
+    }
+    return paths;
+  }
+
   private void assertSameLanguage(Automaton expected, TokenStream ts) throws IOException {
     assertSameLanguage(expected, new TokenStreamToAutomaton().toAutomaton(ts));
   }
 
   private void assertSameLanguage(Automaton expected, Automaton actual) {
-    assertTrue(Operations.sameLanguage(
-      Operations.determinize(Operations.removeDeadStates(expected), DEFAULT_MAX_DETERMINIZED_STATES),
-      Operations.determinize(Operations.removeDeadStates(actual), DEFAULT_MAX_DETERMINIZED_STATES)));
+    Automaton expectedDet = Operations.determinize(Operations.removeDeadStates(expected), DEFAULT_MAX_DETERMINIZED_STATES);
+    Automaton actualDet = Operations.determinize(Operations.removeDeadStates(actual), DEFAULT_MAX_DETERMINIZED_STATES);
+    if (Operations.sameLanguage(expectedDet, actualDet) == false) {
+      Set<String> expectedPaths = toPathStrings(expectedDet);
+      Set<String> actualPaths = toPathStrings(actualDet);
+      StringBuilder b = new StringBuilder();
+      b.append("expected:\n");
+      for(String path : expectedPaths) {
+        b.append("  ");
+        b.append(path);
+        if (actualPaths.contains(path) == false) {
+          b.append(" [missing!]");
+        }
+        b.append('\n');
+      }
+      b.append("actual:\n");
+      for(String path : actualPaths) {
+        b.append("  ");
+        b.append(path);
+        if (expectedPaths.contains(path) == false) {
+          b.append(" [unexpected!]");
+        }
+        b.append('\n');
+      }
+      fail("accepted language is different:\n\n" + b.toString());
+    }
   }
 
   public void testTokenStreamGraphWithHoles() throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
index 19982a5..9c6a624 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
@@ -332,6 +332,7 @@ public class AnalyzingSuggester extends Lookup implements Accountable {
   TokenStreamToAutomaton getTokenStreamToAutomaton() {
     final TokenStreamToAutomaton tsta = new TokenStreamToAutomaton();
     tsta.setPreservePositionIncrements(preservePositionIncrements);
+    tsta.setFinalOffsetGapAsHole(true);
     return tsta;
   }
   
@@ -865,7 +866,7 @@ public class AnalyzingSuggester extends Lookup implements Accountable {
     // Turn tokenstream into automaton:
     Automaton automaton = null;
     try (TokenStream ts = queryAnalyzer.tokenStream("", key.toString())) {
-        automaton = getTokenStreamToAutomaton().toAutomaton(ts);
+      automaton = getTokenStreamToAutomaton().toAutomaton(ts);
     }
 
     automaton = replaceSep(automaton);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
index 924756e..070eab2 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
@@ -41,11 +41,16 @@ import org.apache.lucene.util.Attribute;
 import org.apache.lucene.util.AttributeFactory;
 import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
+import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Rethrow;
 import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.AutomatonTestUtil;
+import org.apache.lucene.util.fst.Util;
 
 /** 
  * Base class for all Lucene unit tests that use TokenStreams. 
@@ -166,6 +171,8 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
     final Map<Integer,Integer> posToStartOffset = new HashMap<>();
     final Map<Integer,Integer> posToEndOffset = new HashMap<>();
 
+    // TODO: would be nice to be able to assert silly duplicated tokens are not created, but a number of cases do this "legitimately": LUCENE-7622
+
     ts.reset();
     int pos = -1;
     int lastStartOffset = 0;
@@ -182,7 +189,7 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
       checkClearAtt.getAndResetClearCalled(); // reset it, because we called clearAttribute() before
       assertTrue("token "+i+" does not exist", ts.incrementToken());
       assertTrue("clearAttributes() was not called correctly in TokenStream chain", checkClearAtt.getAndResetClearCalled());
-      
+
       assertEquals("term "+i, output[i], termAtt.toString());
       if (startOffsets != null) {
         assertEquals("startOffset " + i + " term=" + termAtt, startOffsets[i], offsetAtt.startOffset());
@@ -261,12 +268,12 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
         }
       }
       if (posLengthAtt != null) {
-        assertTrue("posLength must be >= 1", posLengthAtt.getPositionLength() >= 1);
+        assertTrue("posLength must be >= 1; got: " + posLengthAtt.getPositionLength(), posLengthAtt.getPositionLength() >= 1);
       }
     }
 
     if (ts.incrementToken()) {
-      fail("TokenStream has more tokens than expected (expected count=" + output.length + "); extra token=" + termAtt);
+      fail("TokenStream has more tokens than expected (expected count=" + output.length + "); extra token=" + ts.getAttribute(CharTermAttribute.class));
     }
 
     // repeat our extra safety checks for end()
@@ -977,4 +984,105 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
   public static AttributeFactory newAttributeFactory() {
     return newAttributeFactory(random());
   }
+
+  private static String toString(Set<String> strings) {
+    List<String> stringsList = new ArrayList<>(strings);
+    Collections.sort(stringsList);
+    StringBuilder b = new StringBuilder();
+    for(String s : stringsList) {
+      b.append("  ");
+      b.append(s);
+      b.append('\n');
+    }
+    return b.toString();
+  }
+
+  /**
+   * Enumerates all accepted strings in the token graph created by the analyzer on the provided text, and then
+   * asserts that this set is exactly equal to the expected strings.
+   * Uses {@link TokenStreamToAutomaton} to create an automaton, whose finite strings must be all
+   * and only the given valid strings.
+   * @param analyzer analyzer producing the token graph under test.
+   * @param text text to be analyzed.
+   * @param expectedStrings all expected finite strings.
+   */
+  public static void assertGraphStrings(Analyzer analyzer, String text, String... expectedStrings) throws IOException {
+    checkAnalysisConsistency(random(), analyzer, true, text, true);
+    try (TokenStream tokenStream = analyzer.tokenStream("dummy", text)) {
+      assertGraphStrings(tokenStream, expectedStrings);
+    }
+  }
+
+  /**
+   * Enumerates all accepted strings in the token graph created by the already initialized {@link TokenStream}.
+   */
+  public static void assertGraphStrings(TokenStream tokenStream, String... expectedStrings) throws IOException {
+    Automaton automaton = new TokenStreamToAutomaton().toAutomaton(tokenStream);
+    Set<IntsRef> actualStringPaths = AutomatonTestUtil.getFiniteStringsRecursive(automaton, -1);
+
+    Set<String> expectedStringsSet = new HashSet<>(Arrays.asList(expectedStrings));
+
+    BytesRefBuilder scratchBytesRefBuilder = new BytesRefBuilder();
+    Set<String> actualStrings = new HashSet<>();
+    for (IntsRef ir: actualStringPaths) {
+      actualStrings.add(Util.toBytesRef(ir, scratchBytesRefBuilder).utf8ToString().replace((char) TokenStreamToAutomaton.POS_SEP, ' '));
+    }
+    for (String s : actualStrings) {
+      assertTrue("Analyzer created unexpected string path: " + s + "\nexpected:\n" + toString(expectedStringsSet) + "\nactual:\n" + toString(actualStrings), expectedStringsSet.contains(s));
+    }
+    for (String s : expectedStrings) {
+      assertTrue("Analyzer created unexpected string path: " + s + "\nexpected:\n" + toString(expectedStringsSet) + "\nactual:\n" + toString(actualStrings), actualStrings.contains(s));
+    }
+  }
+
+  /** Returns all paths accepted by the token stream graph produced by analyzing text with the provided analyzer. The tokens'
+   *  {@link CharTermAttribute} values are concatenated, separated by a single space. */
+  public static Set<String> getGraphStrings(Analyzer analyzer, String text) throws IOException {
+    try(TokenStream tokenStream = analyzer.tokenStream("dummy", text)) {
+      return getGraphStrings(tokenStream);
+    }
+  }
+
+  /** Returns all paths accepted by the token stream graph produced by the already initialized {@link TokenStream}. */
+  public static Set<String> getGraphStrings(TokenStream tokenStream) throws IOException {
+    Automaton automaton = new TokenStreamToAutomaton().toAutomaton(tokenStream);
+    Set<IntsRef> actualStringPaths = AutomatonTestUtil.getFiniteStringsRecursive(automaton, -1);
+    BytesRefBuilder scratchBytesRefBuilder = new BytesRefBuilder();
+    Set<String> paths = new HashSet<>();
+    for (IntsRef ir: actualStringPaths) {
+      paths.add(Util.toBytesRef(ir, scratchBytesRefBuilder).utf8ToString().replace((char) TokenStreamToAutomaton.POS_SEP, ' '));
+    }
+    return paths;
+  }
+
+  /** Returns a {@code String} summary of the tokens this analyzer produces on this text */
+  public static String toString(Analyzer analyzer, String text) throws IOException {
+    try(TokenStream ts = analyzer.tokenStream("field", text)) {
+      StringBuilder b = new StringBuilder();
+      CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class);
+      PositionIncrementAttribute posIncAtt = ts.getAttribute(PositionIncrementAttribute.class);
+      PositionLengthAttribute posLengthAtt = ts.getAttribute(PositionLengthAttribute.class);
+      OffsetAttribute offsetAtt = ts.getAttribute(OffsetAttribute.class);
+      assertNotNull(offsetAtt);
+      ts.reset();
+      int pos = -1;
+      while (ts.incrementToken()) {
+        pos += posIncAtt.getPositionIncrement();
+        b.append(termAtt);
+        b.append(" at pos=");
+        b.append(pos);
+        if (posLengthAtt != null) {
+          b.append(" to pos=");
+          b.append(pos + posLengthAtt.getPositionLength());
+        }
+        b.append(" offsets=");
+        b.append(offsetAtt.startOffset());
+        b.append('-');
+        b.append(offsetAtt.endOffset());
+        b.append('\n');
+      }
+      ts.end();
+      return b.toString();
+    }
+  }
 }
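
For illustration, a hypothetical test built on the new assertGraphStrings helper;
the class name, synonym entry, and analyzer wiring are assumptions, not part of
this commit:

    import java.io.IOException;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.BaseTokenStreamTestCase;
    import org.apache.lucene.analysis.MockTokenizer;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
    import org.apache.lucene.analysis.synonym.SynonymMap;
    import org.apache.lucene.util.CharsRef;
    import org.apache.lucene.util.CharsRefBuilder;

    public class TestGraphStringsSketch extends BaseTokenStreamTestCase {

      public void testUsaSynonym() throws IOException {
        SynonymMap.Builder builder = new SynonymMap.Builder(true);
        CharsRefBuilder multiWord = new CharsRefBuilder();
        SynonymMap.Builder.join("united states".split(" "), multiWord);
        builder.add(new CharsRef("usa"), multiWord.get(), true); // keepOrig: "usa" stays in the graph
        SynonymMap map = builder.build();

        Analyzer analyzer = new Analyzer() {
          @Override
          protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
            return new TokenStreamComponents(tokenizer, new SynonymGraphFilter(tokenizer, map, true));
          }
        };

        // Enumerates every path through the token graph and asserts the exact set:
        assertGraphStrings(analyzer, "the usa is wealthy",
            "the usa is wealthy",
            "the united states is wealthy");
        analyzer.close();
      }
    }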


[42/50] [abbrv] lucene-solr:apiv2: LUCENE-7643: Fix leftover.

Posted by no...@apache.org.
LUCENE-7643: Fix leftover.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/f57e0177
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/f57e0177
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/f57e0177

Branch: refs/heads/apiv2
Commit: f57e0177ffd3f367de81bdf7f2ad67ad0f94264a
Parents: 71ca2a8
Author: Adrien Grand <jp...@gmail.com>
Authored: Fri Jan 20 13:47:29 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Fri Jan 20 13:47:29 2017 +0100

----------------------------------------------------------------------
 lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/f57e0177/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
index f1b8551..7c997ca 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
@@ -281,7 +281,7 @@ public abstract class PointRangeQuery extends Query {
 
             @Override
             public Scorer get(boolean randomAccess) throws IOException {
-              if (false && values.getDocCount() == reader.maxDoc()
+              if (values.getDocCount() == reader.maxDoc()
                   && values.getDocCount() == values.size()
                   && cost() > reader.maxDoc() / 2) {
                 // If all docs have exactly one value and the cost is greater


[45/50] [abbrv] lucene-solr:apiv2: SOLR-9996: Ignore the RTG calls for tests where UpdateLog is disabled

Posted by no...@apache.org.
SOLR-9996: Ignore the RTG calls for tests where UpdateLog is disabled


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/864bed2e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/864bed2e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/864bed2e

Branch: refs/heads/apiv2
Commit: 864bed2e49f1b32602e55a4e902519b7afc7d825
Parents: b0db06b
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Sat Jan 21 21:19:11 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Sat Jan 21 21:19:11 2017 +0530

----------------------------------------------------------------------
 .../org/apache/solr/schema/TestPointFields.java     | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/864bed2e/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
index 8fb6926..91a7b49 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
@@ -786,9 +786,11 @@ public class TestPointFields extends SolrTestCaseJ4 {
       assertU(adoc("id", String.valueOf(i), field, values[i]));
     }
     // Check using RTG
-    for (int i = 0; i < values.length; i++) {
-      assertQ(req("qt", "/get", "id", String.valueOf(i)),
-      "//doc/" + type + "[@name='" + field + "'][.='" + values[i] + "']");
+    if (Boolean.getBoolean("enable.update.log")) {
+      for (int i = 0; i < values.length; i++) {
+        assertQ(req("qt", "/get", "id", String.valueOf(i)),
+            "//doc/" + type + "[@name='" + field + "'][.='" + values[i] + "']");
+      }
     }
     assertU(commit());
     String[] expected = new String[values.length + 1];
@@ -799,9 +801,11 @@ public class TestPointFields extends SolrTestCaseJ4 {
     assertQ(req("q", "*:*", "fl", "id, " + field, "rows", String.valueOf(values.length)), expected);
 
     // Check using RTG
-    for (int i = 0; i < values.length; i++) {
-      assertQ(req("qt", "/get", "id", String.valueOf(i)),
-      "//doc/" + type + "[@name='" + field + "'][.='" + values[i] + "']");
+    if (Boolean.getBoolean("enable.update.log")) {
+      for (int i = 0; i < values.length; i++) {
+        assertQ(req("qt", "/get", "id", String.valueOf(i)),
+            "//doc/" + type + "[@name='" + field + "'][.='" + values[i] + "']");
+      }
     }
   }
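
One detail worth noting in the guard above: Boolean.getBoolean reads a JVM system
property, it does not parse its argument. A quick sketch of the semantics:

    System.clearProperty("enable.update.log");
    assert Boolean.getBoolean("enable.update.log") == false; // unset property -> false

    System.setProperty("enable.update.log", "true");
    assert Boolean.getBoolean("enable.update.log");          // only the value "true" yields true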
 


[04/50] [abbrv] lucene-solr:apiv2: Merge the two problem sections in org.eclipse.jdt.core.prefs settings.

Posted by no...@apache.org.
Merge the two problem sections in org.eclipse.jdt.core.prefs settings.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/205f9cc5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/205f9cc5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/205f9cc5

Branch: refs/heads/apiv2
Commit: 205f9cc59ed001e6d262930482b88e723e2ea3f8
Parents: 2301900
Author: Christine Poerschke <cp...@apache.org>
Authored: Mon Jan 16 18:23:12 2017 +0000
Committer: Christine Poerschke <cp...@apache.org>
Committed: Mon Jan 16 18:42:07 2017 +0000

----------------------------------------------------------------------
 dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/205f9cc5/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs
----------------------------------------------------------------------
diff --git a/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs b/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs
index 6f6533a..0f0b112 100644
--- a/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs
+++ b/dev-tools/eclipse/dot.settings/org.eclipse.jdt.core.prefs
@@ -4,6 +4,7 @@ org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
 org.eclipse.jdt.core.compiler.compliance=1.8
 org.eclipse.jdt.core.compiler.doc.comment.support=enabled
 org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.comparingIdentical=error
 org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
 org.eclipse.jdt.core.compiler.problem.invalidJavadoc=error
 org.eclipse.jdt.core.compiler.problem.invalidJavadocTags=enabled
@@ -18,6 +19,9 @@ org.eclipse.jdt.core.compiler.problem.missingJavadocTags=ignore
 org.eclipse.jdt.core.compiler.problem.missingJavadocTagsMethodTypeParameters=disabled
 org.eclipse.jdt.core.compiler.problem.missingJavadocTagsOverriding=disabled
 org.eclipse.jdt.core.compiler.problem.missingJavadocTagsVisibility=public
+org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error
+org.eclipse.jdt.core.compiler.problem.unusedImport=error
+org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error
 org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled
 org.eclipse.jdt.core.compiler.source=1.8
 org.eclipse.jdt.core.compiler.taskCaseSensitive=enabled
@@ -304,7 +308,3 @@ org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=true
 org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true
 org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch=true
 org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested=true
-org.eclipse.jdt.core.compiler.problem.comparingIdentical=error
-org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error
-org.eclipse.jdt.core.compiler.problem.unusedImport=error
-org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error


[48/50] [abbrv] lucene-solr:apiv2: SOLR-10011: Fix exception log message

Posted by no...@apache.org.
SOLR-10011: Fix exception log message


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0f7990b2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0f7990b2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0f7990b2

Branch: refs/heads/apiv2
Commit: 0f7990b2c8590d169add59354cc2678260f94e03
Parents: 285a101
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Sun Jan 22 04:52:01 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Sun Jan 22 04:52:01 2017 +0530

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/schema/NumericFieldType.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0f7990b2/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java b/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java
index 404693d..2d5412f 100644
--- a/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java
@@ -79,7 +79,7 @@ public abstract class NumericFieldType extends PrimitiveFieldType {
               max == null ? null : DateMathParser.parseMath(null, max).getTime(),
               minInclusive, maxInclusive);
       default:
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for point field");
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for numeric field");
     }
   }
   


[41/50] [abbrv] lucene-solr:apiv2: LUCENE-7643: Move IndexOrDocValuesQuery to core.

Posted by no...@apache.org.
LUCENE-7643: Move IndexOrDocValuesQuery to core.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/71ca2a84
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/71ca2a84
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/71ca2a84

Branch: refs/heads/apiv2
Commit: 71ca2a84bad2495eff3b0b15dc445f3f013ea4af
Parents: a2131a9
Author: Adrien Grand <jp...@gmail.com>
Authored: Thu Jan 19 18:12:04 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Fri Jan 20 13:42:31 2017 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   7 +
 .../lucene/document/NumericDocValuesField.java  |  48 +++
 .../lucene/document/SortedDocValuesField.java   |  42 +++
 .../document/SortedNumericDocValuesField.java   |  54 ++++
 .../SortedNumericDocValuesRangeQuery.java       | 144 +++++++++
 .../document/SortedSetDocValuesField.java       |  43 +++
 .../document/SortedSetDocValuesRangeQuery.java  | 187 +++++++++++
 .../lucene/search/IndexOrDocValuesQuery.java    | 166 ++++++++++
 .../apache/lucene/search/PointRangeQuery.java   |   2 +-
 .../lucene/search/TestDocValuesQueries.java     | 238 ++++++++++++++
 .../search/TestIndexOrDocValuesQuery.java       |  89 ++++++
 .../lucene/search/DocValuesRangeQuery.java      | 276 -----------------
 .../lucene/search/IndexOrDocValuesQuery.java    | 116 -------
 .../lucene/search/TestDocValuesRangeQuery.java  | 307 -------------------
 .../search/TestIndexOrDocValuesQuery.java       |  89 ------
 .../apache/solr/schema/ICUCollationField.java   |  10 +-
 .../org/apache/solr/schema/CollationField.java  |   3 +-
 .../java/org/apache/solr/schema/EnumField.java  |  20 +-
 .../java/org/apache/solr/schema/FieldType.java  |  16 +-
 .../java/org/apache/solr/schema/TrieField.java  |  45 ++-
 20 files changed, 1082 insertions(+), 820 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 9d1cbb7..147b0e0 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -74,6 +74,9 @@ API Changes
 * LUCENE-7644: FieldComparatorSource.newComparator() and
   SortField.getComparator() no longer throw IOException (Alan Woodward)
 
+* LUCENE-7643: Replaced doc-values queries in lucene/sandbox with factory
+  methods on the *DocValuesField classes. (Adrien Grand)
+
 New Features
 
 * LUCENE-7623: Add FunctionScoreQuery and FunctionMatchQuery (Alan Woodward,
@@ -96,6 +99,10 @@ Improvements
   should be run, eg. using points or doc values depending on costs of other
   parts of the query. (Adrien Grand)
 
+* LUCENE-7643: IndexOrDocValuesQuery allows to execute range queries using
+  either points or doc values depending on which one is more efficient.
+  (Adrien Grand)
+
 Optimizations
 
 * LUCENE-7641: Optimized point range queries to compute documents that do not

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java
index 5b6dcc8..6d84492 100644
--- a/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java
@@ -17,7 +17,15 @@
 package org.apache.lucene.document;
 
 
+import java.io.IOException;
+
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.Query;
 
 /**
  * <p>
@@ -54,4 +62,44 @@ public class NumericDocValuesField extends Field {
     super(name, TYPE);
     fieldsData = Long.valueOf(value);
   }
+
+  /**
+   * Create a range query that matches all documents whose value is between
+   * {@code lowerValue} and {@code upperValue} included.
+   * <p>
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting {@code lowerValue = Long.MIN_VALUE} or {@code upperValue = Long.MAX_VALUE}. 
+   * <p>
+   * Ranges are inclusive. For exclusive ranges, pass {@code Math.addExact(lowerValue, 1)}
+   * or {@code Math.addExact(upperValue, -1)}.
+   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
+   * which makes them slow if they are not ANDed with a selective query. As a
+   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
+   * alongside a range query that executes on points, such as
+   * {@link LongPoint#newRangeQuery}.
+   */
+  public static Query newRangeQuery(String field, long lowerValue, long upperValue) {
+    return new SortedNumericDocValuesRangeQuery(field, lowerValue, upperValue) {
+      @Override
+      SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException {
+        NumericDocValues values = reader.getNumericDocValues(field);
+        if (values == null) {
+          return null;
+        }
+        return DocValues.singleton(values);
+      }
+    };
+  }
+
+  /** 
+   * Create a query for matching an exact long value.
+   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
+   * which makes them slow if they are not ANDed with a selective query. As a
+   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
+   * alongside a range query that executes on points, such as
+   * {@link LongPoint#newExactQuery}.
+   */
+  public static Query newExactQuery(String field, long value) {
+    return newRangeQuery(field, value, value);
+  }
 }
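
A minimal sketch of the wrapping pattern this javadoc recommends; the "timestamp"
field and bounds are illustrative:

    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.search.IndexOrDocValuesQuery;
    import org.apache.lucene.search.Query;

    static Query timestampRange() {
      // The points variant leads the iteration when the range is selective;
      // the doc-values variant verifies candidates when another clause leads.
      Query onPoints = LongPoint.newRangeQuery("timestamp", 0L, 999L);
      Query onDocValues = NumericDocValuesField.newRangeQuery("timestamp", 0L, 999L);
      return new IndexOrDocValuesQuery(onPoints, onDocValues);
    }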

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java
index bbfb467..feb7725 100644
--- a/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java
@@ -17,7 +17,14 @@
 package org.apache.lucene.document;
 
 
+import java.io.IOException;
+
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 
 /**
@@ -59,4 +66,39 @@ public class SortedDocValuesField extends Field {
     super(name, TYPE);
     fieldsData = bytes;
   }
+
+  /**
+   * Create a range query that matches all documents whose value is between
+   * {@code lowerValue} and {@code upperValue} included.
+   * <p>
+   * You can have half-open ranges by setting {@code lowerValue = null}
+   * or {@code upperValue = null}.
+   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
+   * which makes them slow if they are not ANDed with a selective query. As a
+   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
+   * alongside a range query that executes on points, such as
+   * {@link BinaryPoint#newRangeQuery}.
+   */
+  public static Query newRangeQuery(String field,
+      BytesRef lowerValue, BytesRef upperValue,
+      boolean lowerInclusive, boolean upperInclusive) {
+    return new SortedSetDocValuesRangeQuery(field, lowerValue, upperValue, lowerInclusive, upperInclusive) {
+      @Override
+      SortedSetDocValues getValues(LeafReader reader, String field) throws IOException {
+        return DocValues.singleton(DocValues.getSorted(reader, field));
+      }
+    };
+  }
+
+  /** 
+   * Create a query for matching an exact {@link BytesRef} value.
+   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
+   * which makes them slow if they are not ANDed with a selective query. As a
+   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
+   * alongside a range query that executes on points, such as
+   * {@link BinaryPoint#newExactQuery}.
+   */
+  public static Query newExactQuery(String field, BytesRef value) {
+    return newRangeQuery(field, value, value, true, true);
+  }
 }
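
The binary variant composes the same way; a fragment assuming the imports above
plus BinaryPoint, BytesRef, and StandardCharsets (field and value are illustrative):

    Query dv = SortedDocValuesField.newExactQuery("id", new BytesRef("doc-42"));
    Query idx = BinaryPoint.newExactQuery("id", "doc-42".getBytes(StandardCharsets.UTF_8));
    Query exact = new IndexOrDocValuesQuery(idx, dv);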

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java
index cbba218..6f9a271 100644
--- a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java
@@ -17,7 +17,15 @@
 package org.apache.lucene.document;
 
 
+import java.io.IOException;
+
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.Query;
 
 /**
  * <p>
@@ -63,4 +71,50 @@ public class SortedNumericDocValuesField extends Field {
     super(name, TYPE);
     fieldsData = Long.valueOf(value);
   }
+
+  /**
+   * Create a range query that matches all documents whose value is between
+   * {@code lowerValue} and {@code upperValue} included.
+   * <p>
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting {@code lowerValue = Long.MIN_VALUE} or {@code upperValue = Long.MAX_VALUE}. 
+   * <p>
+   * Ranges are inclusive. For exclusive ranges, pass {@code Math.addExact(lowerValue, 1)}
+   * or {@code Math.addExact(upperValue, -1)}.
+   * <p>This query also works with fields that have indexed
+   * {@link NumericDocValuesField}s.
+   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
+   * which makes them slow if they are not ANDed with a selective query. As a
+   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
+   * alongside a range query that executes on points, such as
+   * {@link LongPoint#newRangeQuery}.
+   */
+  public static Query newRangeQuery(String field, long lowerValue, long upperValue) {
+    return new SortedNumericDocValuesRangeQuery(field, lowerValue, upperValue) {
+      @Override
+      SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException {
+        FieldInfo info = reader.getFieldInfos().fieldInfo(field);
+        if (info == null) {
+          // Queries have some optimizations when one sub scorer returns null rather
+          // than a scorer that does not match any documents
+          return null;
+        }
+        return DocValues.getSortedNumeric(reader, field);
+      }
+    };
+  }
+
+  /** 
+   * Create a query for matching an exact long value.
+   * <p>This query also works with fields that have indexed
+   * {@link NumericDocValuesField}s.
+   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
+   * which makes them slow if they are not ANDed with a selective query. As a
+   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
+   * alongside a range query that executes on points, such as
+   * {@link LongPoint#newExactQuery}.
+   */
+  public static Query newExactQuery(String field, long value) {
+    return newRangeQuery(field, value, value);
+  }
 }
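
The exclusive-bounds advice above, made concrete; the bounds are illustrative, and
Math.addExact fails fast instead of silently wrapping at Long.MIN_VALUE/MAX_VALUE:

    // Matches 11..99 inclusive, i.e. the exclusive range (10, 100):
    Query exclusive = SortedNumericDocValuesField.newRangeQuery("price",
        Math.addExact(10L, 1), Math.addExact(100L, -1));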

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java
new file mode 100644
index 0000000..18805b2
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesRangeQuery.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.document;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.search.ConstantScoreScorer;
+import org.apache.lucene.search.ConstantScoreWeight;
+import org.apache.lucene.search.FieldValueQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TwoPhaseIterator;
+import org.apache.lucene.search.Weight;
+
+abstract class SortedNumericDocValuesRangeQuery extends Query {
+
+  private final String field;
+  private final long lowerValue;
+  private final long upperValue;
+
+  SortedNumericDocValuesRangeQuery(String field, long lowerValue, long upperValue) {
+    this.field = Objects.requireNonNull(field);
+    this.lowerValue = lowerValue;
+    this.upperValue = upperValue;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (sameClassAs(obj) == false) {
+      return false;
+    }
+    SortedNumericDocValuesRangeQuery that = (SortedNumericDocValuesRangeQuery) obj;
+    return Objects.equals(field, that.field)
+        && lowerValue == that.lowerValue
+        && upperValue == that.upperValue;
+  }
+
+  @Override
+  public int hashCode() {
+    int h = classHash();
+    h = 31 * h + field.hashCode();
+    h = 31 * h + Long.hashCode(lowerValue);
+    h = 31 * h + Long.hashCode(upperValue);
+    return h;
+  }
+
+  @Override
+  public String toString(String field) {
+    StringBuilder b = new StringBuilder();
+    if (this.field.equals(field) == false) {
+      b.append(this.field).append(":");
+    }
+    return b
+        .append("[")
+        .append(lowerValue)
+        .append(" TO ")
+        .append(upperValue)
+        .append("]")
+        .toString();
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    if (lowerValue == Long.MIN_VALUE && upperValue == Long.MAX_VALUE) {
+      return new FieldValueQuery(field);
+    }
+    return super.rewrite(reader);
+  }
+
+  abstract SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException;
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+    return new ConstantScoreWeight(this, boost) {
+      @Override
+      public Scorer scorer(LeafReaderContext context) throws IOException {
+        SortedNumericDocValues values = getValues(context.reader(), field);
+        if (values == null) {
+          return null;
+        }
+        final NumericDocValues singleton = DocValues.unwrapSingleton(values);
+        final TwoPhaseIterator iterator;
+        if (singleton != null) {
+          iterator = new TwoPhaseIterator(singleton) {
+            @Override
+            public boolean matches() throws IOException {
+              final long value = singleton.longValue();
+              return value >= lowerValue && value <= upperValue;
+            }
+
+            @Override
+            public float matchCost() {
+              return 2; // 2 comparisons
+            }
+          };
+        } else {
+          iterator = new TwoPhaseIterator(values) {
+            @Override
+            public boolean matches() throws IOException {
+              for (int i = 0, count = values.docValueCount(); i < count; ++i) {
+                final long value = values.nextValue();
+                if (value < lowerValue) {
+                  continue;
+                }
+                // Values are sorted, so the first value that is >= lowerValue is our best candidate
+                return value <= upperValue;
+              }
+              return false; // all values were < lowerValue
+            }
+
+            @Override
+            public float matchCost() {
+              return 2; // 2 comparisons
+            }
+          };
+        }
+        return new ConstantScoreScorer(this, score(), iterator);
+      }
+    };
+  }
+
+}
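
One consequence of the rewrite() above worth spelling out: a fully unbounded range
collapses to an existence check (field name illustrative):

    // rewrite() turns this into new FieldValueQuery("price"), i.e. "price has a value":
    Query any = SortedNumericDocValuesField.newRangeQuery("price", Long.MIN_VALUE, Long.MAX_VALUE);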

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java
index 7a273ac..26b1907 100644
--- a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java
+++ b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java
@@ -17,7 +17,14 @@
 package org.apache.lucene.document;
 
 
+import java.io.IOException;
+
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 
 /**
@@ -60,4 +67,40 @@ public class SortedSetDocValuesField extends Field {
     super(name, TYPE);
     fieldsData = bytes;
   }
+
+  /**
+   * Create a range query that matches all documents whose value is between
+   * {@code lowerValue} and {@code upperValue}.
+   * <p>This query also works with fields that have indexed
+   * {@link SortedDocValuesField}s.
+   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
+   * which makes them slow if they are not ANDed with a selective query. As a
+   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
+   * alongside a range query that executes on points, such as
+   * {@link BinaryPoint#newRangeQuery}.
+   */
+  public static Query newRangeQuery(String field,
+      BytesRef lowerValue, BytesRef upperValue,
+      boolean lowerInclusive, boolean upperInclusive) {
+    return new SortedSetDocValuesRangeQuery(field, lowerValue, upperValue, lowerInclusive, upperInclusive) {
+      @Override
+      SortedSetDocValues getValues(LeafReader reader, String field) throws IOException {
+        return DocValues.getSortedSet(reader, field);
+      }
+    };
+  }
+
+  /** 
+   * Create a query for matching an exact {@link BytesRef} value.
+   * <p>This query also works with fields that have indexed
+   * {@link SortedDocValuesField}s.
+   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
+   * which makes them slow if they are not ANDed with a selective query. As a
+   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
+   * alongside a range query that executes on points, such as
+   * {@link BinaryPoint#newExactQuery}.
+   */
+  public static Query newExactQuery(String field, BytesRef value) {
+    return newRangeQuery(field, value, value, true, true);
+  }
 }
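
As with the numeric variant, these factories are meant to be paired with a points query on the same bytes inside an IndexOrDocValuesQuery. A minimal sketch, assuming a field that indexes each value as both a BinaryPoint and a SortedSetDocValuesField (names are illustrative):

import org.apache.lucene.document.BinaryPoint;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;

public class BinaryDvExactExample {
  /** Exact-match query that runs on points or doc values, whichever is cheaper. */
  public static Query newEfficientExactQuery(String field, byte[] value) {
    Query pointQuery = BinaryPoint.newExactQuery(field, value);
    Query dvQuery = SortedSetDocValuesField.newExactQuery(field, new BytesRef(value));
    return new IndexOrDocValuesQuery(pointQuery, dvQuery);
  }
}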

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java
new file mode 100644
index 0000000..30af45f
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesRangeQuery.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.document;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.search.ConstantScoreScorer;
+import org.apache.lucene.search.ConstantScoreWeight;
+import org.apache.lucene.search.FieldValueQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TwoPhaseIterator;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.BytesRef;
+
+abstract class SortedSetDocValuesRangeQuery extends Query {
+
+  private final String field;
+  private final BytesRef lowerValue;
+  private final BytesRef upperValue;
+  private final boolean lowerInclusive;
+  private final boolean upperInclusive;
+
+  SortedSetDocValuesRangeQuery(String field,
+      BytesRef lowerValue, BytesRef upperValue,
+      boolean lowerInclusive, boolean upperInclusive) {
+    this.field = Objects.requireNonNull(field);
+    this.lowerValue = lowerValue;
+    this.upperValue = upperValue;
+    this.lowerInclusive = lowerInclusive && lowerValue != null;
+    this.upperInclusive = upperInclusive && upperValue != null;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (sameClassAs(obj) == false) {
+      return false;
+    }
+    SortedSetDocValuesRangeQuery that = (SortedSetDocValuesRangeQuery) obj;
+    return Objects.equals(field, that.field)
+        && Objects.equals(lowerValue, that.lowerValue)
+        && Objects.equals(upperValue, that.upperValue)
+        && lowerInclusive == that.lowerInclusive
+        && upperInclusive == that.upperInclusive;
+  }
+
+  @Override
+  public int hashCode() {
+    int h = classHash();
+    h = 31 * h + field.hashCode();
+    h = 31 * h + Objects.hashCode(lowerValue);
+    h = 31 * h + Objects.hashCode(upperValue);
+    h = 31 * h + Boolean.hashCode(lowerInclusive);
+    h = 31 * h + Boolean.hashCode(upperInclusive);
+    return h;
+  }
+
+  @Override
+  public String toString(String field) {
+    StringBuilder b = new StringBuilder();
+    if (this.field.equals(field) == false) {
+      b.append(this.field).append(":");
+    }
+    return b
+        .append(lowerInclusive ? "[" : "{")
+        .append(lowerValue == null ? "*" : lowerValue)
+        .append(" TO ")
+        .append(upperValue == null ? "*" : upperValue)
+        .append(upperInclusive ? "]" : "}")
+        .toString();
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    if (lowerValue == null && upperValue == null) {
+      return new FieldValueQuery(field);
+    }
+    return super.rewrite(reader);
+  }
+
+  abstract SortedSetDocValues getValues(LeafReader reader, String field) throws IOException;
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+    return new ConstantScoreWeight(this, boost) {
+      @Override
+      public Scorer scorer(LeafReaderContext context) throws IOException {
+        SortedSetDocValues values = getValues(context.reader(), field);
+        if (values == null) {
+          return null;
+        }
+
+        final long minOrd;
+        if (lowerValue == null) {
+          minOrd = 0;
+        } else {
+          final long ord = values.lookupTerm(lowerValue);
+          if (ord < 0) {
+            minOrd = -1 - ord;
+          } else if (lowerInclusive) {
+            minOrd = ord;
+          } else {
+            minOrd = ord + 1;
+          }
+        }
+
+        final long maxOrd;
+        if (upperValue == null) {
+          maxOrd = values.getValueCount() - 1;
+        } else {
+          final long ord = values.lookupTerm(upperValue);
+          if (ord < 0) {
+            maxOrd = -2 - ord;
+          } else if (upperInclusive) {
+            maxOrd = ord;
+          } else {
+            maxOrd = ord - 1;
+          }
+        }
+
+        if (minOrd > maxOrd) {
+          return null;
+        }
+
+        final SortedDocValues singleton = DocValues.unwrapSingleton(values);
+        final TwoPhaseIterator iterator;
+        if (singleton != null) {
+          iterator = new TwoPhaseIterator(singleton) {
+            @Override
+            public boolean matches() throws IOException {
+              final long ord = singleton.ordValue();
+              return ord >= minOrd && ord <= maxOrd;
+            }
+
+            @Override
+            public float matchCost() {
+              return 2; // 2 comparisons
+            }
+          };
+        } else {
+          iterator = new TwoPhaseIterator(values) {
+            @Override
+            public boolean matches() throws IOException {
+              for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
+                if (ord < minOrd) {
+                  continue;
+                }
+                // Values are sorted, so the first ord that is >= minOrd is our best candidate
+                return ord <= maxOrd;
+              }
+              return false; // all ords were < minOrd
+            }
+
+            @Override
+            public float matchCost() {
+              return 2; // 2 comparisons
+            }
+          };
+        }
+        return new ConstantScoreScorer(this, score(), iterator);
+      }
+    };
+  }
+
+}
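
The minOrd/maxOrd resolution above relies on SortedSetDocValues.lookupTerm returning -(insertionPoint) - 1 when the term is absent, which is the same miss encoding Arrays.binarySearch uses. A self-contained sketch of that arithmetic, with binarySearch standing in for lookupTerm:

import java.util.Arrays;

public class LookupTermOrdMath {
  public static void main(String[] args) {
    // Term dictionary in ord order; binarySearch uses the same
    // -(insertionPoint) - 1 miss encoding as SortedSetDocValues.lookupTerm.
    String[] terms = {"b", "d", "f"};
    int ord = Arrays.binarySearch(terms, "c"); // miss -> returns -2
    int firstOrdAtOrAbove = -1 - ord; // 1, i.e. "d": the lower-bound case
    int lastOrdAtOrBelow = -2 - ord;  // 0, i.e. "b": the upper-bound case
    System.out.println(firstOrdAtOrAbove + " " + lastOrdAtOrBelow); // 1 0
  }
}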

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java b/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
new file mode 100644
index 0000000..35067d2
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+
+/**
+ * A query that uses either an index structure (points or terms) or doc values
+ * in order to run a query, depending which one is more efficient. This is
+ * typically useful for range queries, whose {@link Weight#scorer} is costly
+ * to create since it usually needs to sort large lists of doc ids. For
+ * instance, for a field that both indexed {@link LongPoint}s and
+ * {@link SortedNumericDocValuesField}s with the same values, an efficient
+ * range query could be created by doing:
+ * <pre class="prettyprint">
+ *   String field;
+ *   long minValue, maxValue;
+ *   Query pointQuery = LongPoint.newRangeQuery(field, minValue, maxValue);
+ *   Query dvQuery = SortedNumericDocValuesField.newRangeQuery(field, minValue, maxValue);
+ *   Query query = new IndexOrDocValuesQuery(pointQuery, dvQuery);
+ * </pre>
+ * The above query will be efficient as it will use points in the case that they
+ * perform better, i.e. when we need a good lead iterator that will be almost
+ * entirely consumed, and doc values otherwise, i.e. when another part of the
+ * query is already leading iteration but we still need the ability to verify
+ * that individual documents match.
+ * <p><b>NOTE</b>: This query currently only works well with point range/exact
+ * queries and their equivalent doc values queries.
+ * @lucene.experimental
+ */
+public final class IndexOrDocValuesQuery extends Query {
+
+  private final Query indexQuery, dvQuery;
+
+  /**
+   * Create an {@link IndexOrDocValuesQuery}. Both provided queries must match
+   * the same documents and give the same scores.
+   * @param indexQuery a query that has a good iterator but whose scorer may be costly to create
+   * @param dvQuery a query whose scorer is cheap to create and that can quickly check whether a given document matches
+   */
+  public IndexOrDocValuesQuery(Query indexQuery, Query dvQuery) {
+    this.indexQuery = indexQuery;
+    this.dvQuery = dvQuery;
+  }
+
+  /** Return the wrapped query that may be costly to initialize but has a good
+   *  iterator. */
+  public Query getIndexQuery() {
+    return indexQuery;
+  }
+
+  /** Return the wrapped query that may be slow at identifying all matching
+   *  documents, but which is cheap to initialize and can efficiently
+   *  verify whether a given document matches. */
+  public Query getRandomAccessQuery() {
+    return dvQuery;
+  }
+
+  @Override
+  public String toString(String field) {
+    return indexQuery.toString(field);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (sameClassAs(obj) == false) {
+      return false;
+    }
+    IndexOrDocValuesQuery that = (IndexOrDocValuesQuery) obj;
+    return indexQuery.equals(that.indexQuery) && dvQuery.equals(that.dvQuery);
+  }
+
+  @Override
+  public int hashCode() {
+    int h = classHash();
+    h = 31 * h + indexQuery.hashCode();
+    h = 31 * h + dvQuery.hashCode();
+    return h;
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    Query indexRewrite = indexQuery.rewrite(reader);
+    Query dvRewrite = dvQuery.rewrite(reader);
+    if (indexQuery != indexRewrite || dvQuery != dvRewrite) {
+      return new IndexOrDocValuesQuery(indexRewrite, dvRewrite);
+    }
+    return this;
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+    final Weight indexWeight = indexQuery.createWeight(searcher, needsScores, boost);
+    final Weight dvWeight = dvQuery.createWeight(searcher, needsScores, boost);
+    return new Weight(this) {
+      @Override
+      public void extractTerms(Set<Term> terms) {
+        indexWeight.extractTerms(terms);
+      }
+
+      @Override
+      public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+        // We need to check a single doc, so the dv query should perform better
+        return dvWeight.explain(context, doc);
+      }
+
+      @Override
+      public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
+        // Bulk scorers need to consume the entire set of docs, so using an
+        // index structure should perform better
+        return indexWeight.bulkScorer(context);
+      }
+
+      @Override
+      public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+        final ScorerSupplier indexScorerSupplier = indexWeight.scorerSupplier(context);
+        final ScorerSupplier dvScorerSupplier = dvWeight.scorerSupplier(context);
+        if (indexScorerSupplier == null || dvScorerSupplier == null) {
+          return null;
+        }
+        return new ScorerSupplier() {
+          @Override
+          public Scorer get(boolean randomAccess) throws IOException {
+            return (randomAccess ? dvScorerSupplier : indexScorerSupplier).get(randomAccess);
+          }
+
+          @Override
+          public long cost() {
+            return Math.min(indexScorerSupplier.cost(), dvScorerSupplier.cost());
+          }
+        };
+      }
+
+      @Override
+      public Scorer scorer(LeafReaderContext context) throws IOException {
+        ScorerSupplier scorerSupplier = scorerSupplier(context);
+        if (scorerSupplier == null) {
+          return null;
+        }
+        return scorerSupplier.get(false);
+      }
+    };
+  }
+
+}
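
To make the trade-off concrete, a sketch of the intended usage pattern, assuming a field indexed with both LongPoint and SortedNumericDocValuesField under the same name: a selective term clause leads iteration, leaving the wrapped range free to execute as a cheap per-document check on doc values. Field names and values are illustrative:

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class IndexOrDocValuesUsage {
  /** A selective MUST clause plus a FILTER range that adapts per segment. */
  public static Query selectiveFilter(String termField, String term,
      String rangeField, long min, long max) {
    Query range = new IndexOrDocValuesQuery(
        LongPoint.newRangeQuery(rangeField, min, max),
        SortedNumericDocValuesField.newRangeQuery(rangeField, min, max));
    return new BooleanQuery.Builder()
        .add(new TermQuery(new Term(termField, term)), Occur.MUST)
        .add(range, Occur.FILTER)
        .build();
  }
}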

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
index 7c997ca..f1b8551 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
@@ -281,7 +281,7 @@ public abstract class PointRangeQuery extends Query {
 
             @Override
             public Scorer get(boolean randomAccess) throws IOException {
-              if (values.getDocCount() == reader.maxDoc()
+              if (false && values.getDocCount() == reader.maxDoc()
                   && values.getDocCount() == values.size()
                   && cost() > reader.maxDoc() / 2) {
                 // If all docs have exactly one value and the cost is greater

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
new file mode 100644
index 0000000..501538f
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestDocValuesQueries extends LuceneTestCase {
+
+  public void testDuelPointRangeSortedNumericRangeQuery() throws IOException {
+    doTestDuelPointRangeNumericRangeQuery(true, 1);
+  }
+
+  public void testDuelPointRangeMultivaluedSortedNumericRangeQuery() throws IOException {
+    doTestDuelPointRangeNumericRangeQuery(true, 3);
+  }
+
+  public void testDuelPointRangeNumericRangeQuery() throws IOException {
+    doTestDuelPointRangeNumericRangeQuery(false, 1);
+  }
+
+  private void doTestDuelPointRangeNumericRangeQuery(boolean sortedNumeric, int maxValuesPerDoc) throws IOException {
+    final int iters = atLeast(10);
+    for (int iter = 0; iter < iters; ++iter) {
+      Directory dir = newDirectory();
+      RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+      final int numDocs = atLeast(100);
+      for (int i = 0; i < numDocs; ++i) {
+        Document doc = new Document();
+        final int numValues = TestUtil.nextInt(random(), 0, maxValuesPerDoc);
+        for (int j = 0; j < numValues; ++j) {
+          final long value = TestUtil.nextLong(random(), -100, 10000);
+          if (sortedNumeric) {
+            doc.add(new SortedNumericDocValuesField("dv", value));
+          } else {
+            doc.add(new NumericDocValuesField("dv", value));
+          }
+          doc.add(new LongPoint("idx", value));
+        }
+        iw.addDocument(doc);
+      }
+      if (random().nextBoolean()) {
+        iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L));
+      }
+      final IndexReader reader = iw.getReader();
+      final IndexSearcher searcher = newSearcher(reader, false);
+      iw.close();
+
+      for (int i = 0; i < 100; ++i) {
+        final long min = random().nextBoolean() ? Long.MIN_VALUE : TestUtil.nextLong(random(), -100, 10000);
+        final long max = random().nextBoolean() ? Long.MAX_VALUE : TestUtil.nextLong(random(), -100, 10000);
+        final Query q1 = LongPoint.newRangeQuery("idx", min, max);
+        final Query q2;
+        if (sortedNumeric) {
+          q2 = SortedNumericDocValuesField.newRangeQuery("dv", min, max);
+        } else {
+          q2 = NumericDocValuesField.newRangeQuery("dv", min, max);
+        }
+        assertSameMatches(searcher, q1, q2, false);
+      }
+
+      reader.close();
+      dir.close();
+    }
+  }
+
+  private void doTestDuelPointRangeSortedRangeQuery(boolean sortedSet, int maxValuesPerDoc) throws IOException {
+    final int iters = atLeast(10);
+    for (int iter = 0; iter < iters; ++iter) {
+      Directory dir = newDirectory();
+      RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+      final int numDocs = atLeast(100);
+      for (int i = 0; i < numDocs; ++i) {
+        Document doc = new Document();
+        final int numValues = TestUtil.nextInt(random(), 0, maxValuesPerDoc);
+        for (int j = 0; j < numValues; ++j) {
+          final long value = TestUtil.nextLong(random(), -100, 10000);
+          byte[] encoded = new byte[Long.BYTES];
+          LongPoint.encodeDimension(value, encoded, 0);
+          if (sortedSet) {
+            doc.add(new SortedSetDocValuesField("dv", new BytesRef(encoded)));
+          } else {
+            doc.add(new SortedDocValuesField("dv", new BytesRef(encoded)));
+          }
+          doc.add(new LongPoint("idx", value));
+        }
+        iw.addDocument(doc);
+      }
+      if (random().nextBoolean()) {
+        iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L));
+      }
+      final IndexReader reader = iw.getReader();
+      final IndexSearcher searcher = newSearcher(reader, false);
+      iw.close();
+
+      for (int i = 0; i < 100; ++i) {
+        long min = random().nextBoolean() ? Long.MIN_VALUE : TestUtil.nextLong(random(), -100, 10000);
+        long max = random().nextBoolean() ? Long.MAX_VALUE : TestUtil.nextLong(random(), -100, 10000);
+        byte[] encodedMin = new byte[Long.BYTES];
+        byte[] encodedMax = new byte[Long.BYTES];
+        LongPoint.encodeDimension(min, encodedMin, 0);
+        LongPoint.encodeDimension(max, encodedMax, 0);
+        boolean includeMin = true;
+        boolean includeMax = true;
+        if (random().nextBoolean()) {
+          includeMin = false;
+          min++;
+        }
+        if (random().nextBoolean()) {
+          includeMax = false;
+          max--;
+        }
+        final Query q1 = LongPoint.newRangeQuery("idx", min, max);
+        final Query q2;
+        if (sortedSet) {
+          q2 = SortedSetDocValuesField.newRangeQuery("dv",
+              min == Long.MIN_VALUE && random().nextBoolean() ? null : new BytesRef(encodedMin),
+              max == Long.MAX_VALUE && random().nextBoolean() ? null : new BytesRef(encodedMax),
+              includeMin, includeMax);
+        } else {
+          q2 = SortedDocValuesField.newRangeQuery("dv",
+              min == Long.MIN_VALUE && random().nextBoolean() ? null : new BytesRef(encodedMin),
+              max == Long.MAX_VALUE && random().nextBoolean() ? null : new BytesRef(encodedMax),
+              includeMin, includeMax);
+        }
+        assertSameMatches(searcher, q1, q2, false);
+      }
+
+      reader.close();
+      dir.close();
+    }
+  }
+
+  public void testDuelPointRangeSortedSetRangeQuery() throws IOException {
+    doTestDuelPointRangeSortedRangeQuery(true, 1);
+  }
+
+  public void testDuelPointRangeMultivaluedSortedSetRangeQuery() throws IOException {
+    doTestDuelPointRangeSortedRangeQuery(true, 3);
+  }
+
+  public void testDuelPointRangeSortedRangeQuery() throws IOException {
+    doTestDuelPointRangeSortedRangeQuery(false, 1);
+  }
+
+  private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException {
+    final int maxDoc = searcher.getIndexReader().maxDoc();
+    final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
+    final TopDocs td2 = searcher.search(q2, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
+    assertEquals(td1.totalHits, td2.totalHits);
+    for (int i = 0; i < td1.scoreDocs.length; ++i) {
+      assertEquals(td1.scoreDocs[i].doc, td2.scoreDocs[i].doc);
+      if (scores) {
+        assertEquals(td1.scoreDocs[i].score, td2.scoreDocs[i].score, 10e-7);
+      }
+    }
+  }
+
+  public void testEquals() {
+    Query q1 = SortedNumericDocValuesField.newRangeQuery("foo", 3, 5);
+    QueryUtils.checkEqual(q1, SortedNumericDocValuesField.newRangeQuery("foo", 3, 5));
+    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("foo", 3, 6));
+    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("foo", 4, 5));
+    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("bar", 3, 5));
+
+    Query q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true);
+    QueryUtils.checkEqual(q2, SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true));
+    QueryUtils.checkUnequal(q2, SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("baz"), new BytesRef("baz"), true, true));
+    QueryUtils.checkUnequal(q2, SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("bar"), true, true));
+    QueryUtils.checkUnequal(q2, SortedSetDocValuesField.newRangeQuery("quux", new BytesRef("bar"), new BytesRef("baz"), true, true));
+  }
+
+  public void testToString() {
+    Query q1 = SortedNumericDocValuesField.newRangeQuery("foo", 3, 5);
+    assertEquals("foo:[3 TO 5]", q1.toString());
+    assertEquals("[3 TO 5]", q1.toString("foo"));
+    assertEquals("foo:[3 TO 5]", q1.toString("bar"));
+
+    Query q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true);
+    assertEquals("foo:[[62 61 72] TO [62 61 7a]]", q2.toString());
+    q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), false, true);
+    assertEquals("foo:{[62 61 72] TO [62 61 7a]]", q2.toString());
+    q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), false, false);
+    assertEquals("foo:{[62 61 72] TO [62 61 7a]}", q2.toString());
+    q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), null, true, true);
+    assertEquals("foo:[[62 61 72] TO *}", q2.toString());
+    q2 = SortedSetDocValuesField.newRangeQuery("foo", null, new BytesRef("baz"), true, true);
+    assertEquals("foo:{* TO [62 61 7a]]", q2.toString());
+    assertEquals("{* TO [62 61 7a]]", q2.toString("foo"));
+    assertEquals("foo:{* TO [62 61 7a]]", q2.toString("bar"));
+  }
+
+  public void testMissingField() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    iw.addDocument(new Document());
+    IndexReader reader = iw.getReader();
+    iw.close();
+    IndexSearcher searcher = newSearcher(reader);
+    for (Query query : Arrays.asList(
+        NumericDocValuesField.newRangeQuery("foo", 2, 4),
+        SortedNumericDocValuesField.newRangeQuery("foo", 2, 4),
+        SortedDocValuesField.newRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()),
+        SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()))) {
+      Weight w = searcher.createNormalizedWeight(query, random().nextBoolean());
+      assertNull(w.scorer(searcher.getIndexReader().leaves().get(0)));
+    }
+    reader.close();
+    dir.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/core/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
new file mode 100644
index 0000000..8b81822
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestIndexOrDocValuesQuery extends LuceneTestCase {
+
+  public void testUseIndexForSelectiveQueries() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()
+        // relies on costs and PointValues.estimateCost so we need the default codec
+        .setCodec(TestUtil.getDefaultCodec()));
+    for (int i = 0; i < 2000; ++i) {
+      Document doc = new Document();
+      if (i == 42) {
+        doc.add(new StringField("f1", "bar", Store.NO));
+        doc.add(new LongPoint("f2", 42L));
+        doc.add(new NumericDocValuesField("f2", 42L));
+      } else if (i == 100) {
+        doc.add(new StringField("f1", "foo", Store.NO));
+        doc.add(new LongPoint("f2", 2L));
+        doc.add(new NumericDocValuesField("f2", 2L));
+      } else {
+        doc.add(new StringField("f1", "bar", Store.NO));
+        doc.add(new LongPoint("f2", 2L));
+        doc.add(new NumericDocValuesField("f2", 2L));
+      }
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    IndexReader reader = DirectoryReader.open(w);
+    IndexSearcher searcher = newSearcher(reader);
+    searcher.setQueryCache(null);
+
+    // The term query is more selective, so the IndexOrDocValuesQuery should use doc values
+    final Query q1 = new BooleanQuery.Builder()
+        .add(new TermQuery(new Term("f1", "foo")), Occur.MUST)
+        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 2), NumericDocValuesField.newRangeQuery("f2", 2L, 2L)), Occur.MUST)
+        .build();
+
+    final Weight w1 = searcher.createNormalizedWeight(q1, random().nextBoolean());
+    final Scorer s1 = w1.scorer(searcher.getIndexReader().leaves().get(0));
+    assertNotNull(s1.twoPhaseIterator()); // means we use doc values
+
+    // The term query is less selective, so the IndexOrDocValuesQuery should use points
+    final Query q2 = new BooleanQuery.Builder()
+        .add(new TermQuery(new Term("f1", "bar")), Occur.MUST)
+        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 42), NumericDocValuesField.newRangeQuery("f2", 42L, 42L)), Occur.MUST)
+        .build();
+
+    final Weight w2 = searcher.createNormalizedWeight(q2, random().nextBoolean());
+    final Scorer s2 = w2.scorer(searcher.getIndexReader().leaves().get(0));
+    assertNull(s2.twoPhaseIterator()); // means we use points
+
+    reader.close();
+    w.close();
+    dir.close();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java
deleted file mode 100644
index 3d4feb9..0000000
--- a/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-import java.io.IOException;
-import java.util.Objects;
-
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.PointValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.util.BytesRef;
-
-/**
- * A range query that works on top of the doc values APIs. Such queries are
- * usually slow since they do not use an inverted index. However, in the
- * dense case where most documents match this query, it <b>might</b> be as
- * fast or faster than a regular {@link PointRangeQuery}.
- *
- * <b>NOTE:</b> This query is typically best used within a
- * {@link IndexOrDocValuesQuery} alongside a query that uses an indexed
- * structure such as {@link PointValues points} or {@link Terms terms},
- * which allows to run the query on doc values when that would be more
- * efficient, and using an index otherwise.
- *
- * @lucene.experimental
- */
-public final class DocValuesRangeQuery extends Query {
-
-  /** Create a new numeric range query on a numeric doc-values field. The field
-   *  must have been indexed with either {@link DocValuesType#NUMERIC} or
-   *  {@link DocValuesType#SORTED_NUMERIC} doc values. */
-  public static Query newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
-    return new DocValuesRangeQuery(field, lowerVal, upperVal, includeLower, includeUpper);
-  }
-
-  /** Create a new range query on a binary doc-values field. The field
-   *  must have been indexed with {@link DocValuesType#SORTED} or
-   *  {@link DocValuesType#SORTED_SET} doc values. */
-  public static Query newBytesRefRange(String field, BytesRef lowerVal, BytesRef upperVal, boolean includeLower, boolean includeUpper) {
-    return new DocValuesRangeQuery(field, deepCopyOf(lowerVal), deepCopyOf(upperVal), includeLower, includeUpper);
-  }
-
-  private static BytesRef deepCopyOf(BytesRef b) {
-    if (b == null) {
-      return null;
-    } else {
-      return BytesRef.deepCopyOf(b);
-    }
-  }
-
-  private final String field;
-  private final Object lowerVal, upperVal;
-  private final boolean includeLower, includeUpper;
-
-  private DocValuesRangeQuery(String field, Object lowerVal, Object upperVal, boolean includeLower, boolean includeUpper) {
-    this.field = Objects.requireNonNull(field);
-    this.lowerVal = lowerVal;
-    this.upperVal = upperVal;
-    this.includeLower = includeLower;
-    this.includeUpper = includeUpper;
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    return sameClassAs(other) &&
-           equalsTo(getClass().cast(other));
-  }
-
-  private boolean equalsTo(DocValuesRangeQuery other) {
-    return field.equals(other.field) && 
-           Objects.equals(lowerVal, other.lowerVal) && 
-           Objects.equals(upperVal, other.upperVal) && 
-           includeLower == other.includeLower && 
-           includeUpper == other.includeUpper;
-  }
-
-  @Override
-  public int hashCode() {
-    return 31 * classHash() + Objects.hash(field, lowerVal, upperVal, includeLower, includeUpper);
-  }
-
-  public String getField() {
-    return field;
-  }
-
-  public Object getLowerVal() {
-    return lowerVal;
-  }
-
-  public Object getUpperVal() {
-    return upperVal;
-  }
-
-  public boolean isIncludeLower() {
-    return includeLower;
-  }
-
-  public boolean isIncludeUpper() {
-    return includeUpper;
-  }
-
-  @Override
-  public String toString(String field) {
-    StringBuilder sb = new StringBuilder();
-    if (this.field.equals(field) == false) {
-      sb.append(this.field).append(':');
-    }
-    sb.append(includeLower ? '[' : '{');
-    sb.append(lowerVal == null ? "*" : lowerVal.toString());
-    sb.append(" TO ");
-    sb.append(upperVal == null ? "*" : upperVal.toString());
-    sb.append(includeUpper ? ']' : '}');
-    return sb.toString();
-  }
-
-  @Override
-  public Query rewrite(IndexReader reader) throws IOException {
-    if (lowerVal == null && upperVal == null) {
-      return new FieldValueQuery(field);
-    }
-    return super.rewrite(reader);
-  }
-
-  @Override
-  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
-    if (lowerVal == null && upperVal == null) {
-      throw new IllegalStateException("Both min and max values must not be null, call rewrite first");
-    }
-
-    return new ConstantScoreWeight(DocValuesRangeQuery.this, boost) {
-
-      @Override
-      public Scorer scorer(LeafReaderContext context) throws IOException {
-        final TwoPhaseIterator iterator = createTwoPhaseIterator(context);
-        if (iterator == null) {
-          return null;
-        }
-        return new ConstantScoreScorer(this, score(), iterator);
-      }
-
-      private TwoPhaseIterator createTwoPhaseIterator(LeafReaderContext context) throws IOException {
-        if (lowerVal instanceof Long || upperVal instanceof Long) {
-
-          final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field);
-
-          final long min;
-          if (lowerVal == null) {
-            min = Long.MIN_VALUE;
-          } else if (includeLower) {
-            min = (long) lowerVal;
-          } else {
-            if ((long) lowerVal == Long.MAX_VALUE) {
-              return null;
-            }
-            min = 1 + (long) lowerVal;
-          }
-
-          final long max;
-          if (upperVal == null) {
-            max = Long.MAX_VALUE;
-          } else if (includeUpper) {
-            max = (long) upperVal;
-          } else {
-            if ((long) upperVal == Long.MIN_VALUE) {
-              return null;
-            }
-            max = -1 + (long) upperVal;
-          }
-
-          if (min > max) {
-            return null;
-          }
-
-          return new TwoPhaseIterator(values) {
-
-            @Override
-            public boolean matches() throws IOException {
-              final int count = values.docValueCount();
-              assert count > 0;
-              for (int i = 0; i < count; ++i) {
-                final long value = values.nextValue();
-                if (value >= min && value <= max) {
-                  return true;
-                }
-              }
-              return false;
-            }
-
-            @Override
-            public float matchCost() {
-              return 2; // 2 comparisons
-            }
-
-          };
-
-        } else if (lowerVal instanceof BytesRef || upperVal instanceof BytesRef) {
-
-          final SortedSetDocValues values = DocValues.getSortedSet(context.reader(), field);
-
-          final long minOrd;
-          if (lowerVal == null) {
-            minOrd = 0;
-          } else {
-            final long ord = values.lookupTerm((BytesRef) lowerVal);
-            if (ord < 0) {
-              minOrd = -1 - ord;
-            } else if (includeLower) {
-              minOrd = ord;
-            } else {
-              minOrd = ord + 1;
-            }
-          }
-
-          final long maxOrd;
-          if (upperVal == null) {
-            maxOrd = values.getValueCount() - 1;
-          } else {
-            final long ord = values.lookupTerm((BytesRef) upperVal);
-            if (ord < 0) {
-              maxOrd = -2 - ord;
-            } else if (includeUpper) {
-              maxOrd = ord;
-            } else {
-              maxOrd = ord - 1;
-            }
-          }
-
-          if (minOrd > maxOrd) {
-            return null;
-          }
-
-          return new TwoPhaseIterator(values) {
-
-            @Override
-            public boolean matches() throws IOException {
-              for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
-                if (ord >= minOrd && ord <= maxOrd) {
-                  return true;
-                }
-              }
-              return false;
-            }
-
-            @Override
-            public float matchCost() {
-              return 2; // 2 comparisons
-            }
-          };
-
-        } else {
-          throw new AssertionError();
-        }
-      }
-    };
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/sandbox/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
deleted file mode 100644
index 0f9e8e3..0000000
--- a/lucene/sandbox/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-import java.io.IOException;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReaderContext;
-
-/**
- * A query that uses either an index (points or terms) or doc values in order
- * to run a range query, depending which one is more efficient.
- */
-public final class IndexOrDocValuesQuery extends Query {
-
-  private final Query indexQuery, dvQuery;
-
-  /**
-   * Constructor that takes both a query that executes on an index structure
-   * like the inverted index or the points tree, and another query that
-   * executes on doc values. Both queries must match the same documents and
-   * attribute constant scores.
-   */
-  public IndexOrDocValuesQuery(Query indexQuery, Query dvQuery) {
-    this.indexQuery = indexQuery;
-    this.dvQuery = dvQuery;
-  }
-
-  @Override
-  public String toString(String field) {
-    return indexQuery.toString(field);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (sameClassAs(obj) == false) {
-      return false;
-    }
-    IndexOrDocValuesQuery that = (IndexOrDocValuesQuery) obj;
-    return indexQuery.equals(that.indexQuery) && dvQuery.equals(that.dvQuery);
-  }
-
-  @Override
-  public int hashCode() {
-    int h = classHash();
-    h = 31 * h + indexQuery.hashCode();
-    h = 31 * h + dvQuery.hashCode();
-    return h;
-  }
-
-  @Override
-  public Query rewrite(IndexReader reader) throws IOException {
-    Query indexRewrite = indexQuery.rewrite(reader);
-    Query dvRewrite = dvQuery.rewrite(reader);
-    if (indexQuery != indexRewrite || dvQuery != dvRewrite) {
-      return new IndexOrDocValuesQuery(indexRewrite, dvRewrite);
-    }
-    return this;
-  }
-
-  @Override
-  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
-    final Weight indexWeight = indexQuery.createWeight(searcher, needsScores, boost);
-    final Weight dvWeight = dvQuery.createWeight(searcher, needsScores, boost);
-    return new ConstantScoreWeight(this, boost) {
-      @Override
-      public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
-        return indexWeight.bulkScorer(context);
-      }
-
-      @Override
-      public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
-        final ScorerSupplier indexScorerSupplier = indexWeight.scorerSupplier(context);
-        final ScorerSupplier dvScorerSupplier = dvWeight.scorerSupplier(context); 
-        if (indexScorerSupplier == null || dvScorerSupplier == null) {
-          return null;
-        }
-        return new ScorerSupplier() {
-          @Override
-          public Scorer get(boolean randomAccess) throws IOException {
-            return (randomAccess ? dvScorerSupplier : indexScorerSupplier).get(randomAccess);
-          }
-
-          @Override
-          public long cost() {
-            return Math.min(indexScorerSupplier.cost(), dvScorerSupplier.cost());
-          }
-        };
-      }
-
-      @Override
-      public Scorer scorer(LeafReaderContext context) throws IOException {
-        ScorerSupplier scorerSupplier = scorerSupplier(context);
-        if (scorerSupplier == null) {
-          return null;
-        }
-        return scorerSupplier.get(false);
-      }
-    };
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/sandbox/src/test/org/apache/lucene/search/TestDocValuesRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/test/org/apache/lucene/search/TestDocValuesRangeQuery.java b/lucene/sandbox/src/test/org/apache/lucene/search/TestDocValuesRangeQuery.java
deleted file mode 100644
index c5ca64f..0000000
--- a/lucene/sandbox/src/test/org/apache/lucene/search/TestDocValuesRangeQuery.java
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-import java.io.IOException;
-
-import org.apache.lucene.document.LongPoint;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.TestUtil;
-
-public class TestDocValuesRangeQuery extends LuceneTestCase {
-
-  public void testDuelNumericRangeQuery() throws IOException {
-    final int iters = atLeast(10);
-      for (int iter = 0; iter < iters; ++iter) {
-      Directory dir = newDirectory();
-      RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-      final int numDocs = atLeast(100);
-      for (int i = 0; i < numDocs; ++i) {
-        Document doc = new Document();
-        final int numValues = random().nextInt(2);
-        for (int j = 0; j < numValues; ++j) {
-          final long value = TestUtil.nextLong(random(), -100, 10000);
-          doc.add(new SortedNumericDocValuesField("dv", value));
-          doc.add(new LongPoint("idx", value));
-        }
-        iw.addDocument(doc);
-      }
-      if (random().nextBoolean()) {
-        iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L));
-      }
-      iw.commit();
-      final IndexReader reader = iw.getReader();
-      final IndexSearcher searcher = newSearcher(reader, false);
-      iw.close();
-
-      for (int i = 0; i < 100; ++i) {
-        final Long min = TestUtil.nextLong(random(), -100, 1000);
-        final Long max = TestUtil.nextLong(random(), -100, 1000);
-        final Query q1 = LongPoint.newRangeQuery("idx", min, max);
-        final Query q2 = DocValuesRangeQuery.newLongRange("dv", min, max, true, true);
-        assertSameMatches(searcher, q1, q2, false);
-      }
-
-      reader.close();
-      dir.close();
-    }
-  }
-
-  private static BytesRef toSortableBytes(Long l) {
-    if (l == null) {
-      return null;
-    } else {
-      byte[] bytes = new byte[Long.BYTES];
-      NumericUtils.longToSortableBytes(l, bytes, 0);
-      return new BytesRef(bytes);
-    }
-  }
-
-  public void testDuelNumericSorted() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    final int numDocs = atLeast(100);
-    for (int i = 0; i < numDocs; ++i) {
-      Document doc = new Document();
-      final int numValues = random().nextInt(3);
-      for (int j = 0; j < numValues; ++j) {
-        final long value = TestUtil.nextLong(random(), -100, 10000);
-        doc.add(new SortedNumericDocValuesField("dv1", value));
-        doc.add(new SortedSetDocValuesField("dv2", toSortableBytes(value)));
-      }
-      iw.addDocument(doc);
-    }
-    if (random().nextBoolean()) {
-      iw.deleteDocuments(DocValuesRangeQuery.newLongRange("dv1", 0L, 10L, true, true));
-    }
-    iw.commit();
-    final IndexReader reader = iw.getReader();
-    final IndexSearcher searcher = newSearcher(reader);
-    iw.close();
-
-    for (int i = 0; i < 100; ++i) {
-      final Long min = random().nextBoolean() ? null : TestUtil.nextLong(random(), -100, 1000);
-      final Long max = random().nextBoolean() ? null : TestUtil.nextLong(random(), -100, 1000);
-      final boolean minInclusive = random().nextBoolean();
-      final boolean maxInclusive = random().nextBoolean();
-      final Query q1 = DocValuesRangeQuery.newLongRange("dv1", min, max, minInclusive, maxInclusive);
-      final Query q2 = DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(min), toSortableBytes(max), minInclusive, maxInclusive);
-      assertSameMatches(searcher, q1, q2, true);
-    }
-
-    reader.close();
-    dir.close();
-  }
-
-  public void testScore() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    final int numDocs = atLeast(100);
-    for (int i = 0; i < numDocs; ++i) {
-      Document doc = new Document();
-      final int numValues = random().nextInt(3);
-      for (int j = 0; j < numValues; ++j) {
-        final long value = TestUtil.nextLong(random(), -100, 10000);
-        doc.add(new SortedNumericDocValuesField("dv1", value));
-        doc.add(new SortedSetDocValuesField("dv2", toSortableBytes(value)));
-      }
-      iw.addDocument(doc);
-    }
-    if (random().nextBoolean()) {
-      iw.deleteDocuments(DocValuesRangeQuery.newLongRange("dv1", 0L, 10L, true, true));
-    }
-    iw.commit();
-    final IndexReader reader = iw.getReader();
-    final IndexSearcher searcher = newSearcher(reader);
-    iw.close();
-
-    for (int i = 0; i < 100; ++i) {
-      final Long min = random().nextBoolean() ? null : TestUtil.nextLong(random(), -100, 1000);
-      final Long max = random().nextBoolean() ? null : TestUtil.nextLong(random(), -100, 1000);
-      final boolean minInclusive = random().nextBoolean();
-      final boolean maxInclusive = random().nextBoolean();
-
-      final float boost = random().nextFloat() * 10;
-
-      final Query q1 = new BoostQuery(DocValuesRangeQuery.newLongRange("dv1", min, max, minInclusive, maxInclusive), boost);
-      final Query csq1 = new BoostQuery(new ConstantScoreQuery(DocValuesRangeQuery.newLongRange("dv1", min, max, minInclusive, maxInclusive)), boost);
-      assertSameMatches(searcher, q1, csq1, true);
-
-      final Query q2 = new BoostQuery(DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(min), toSortableBytes(max), minInclusive, maxInclusive), boost);
-      final Query csq2 = new BoostQuery(new ConstantScoreQuery(DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(min), toSortableBytes(max), minInclusive, maxInclusive)), boost);
-      assertSameMatches(searcher, q2, csq2, true);
-    }
-
-    reader.close();
-    dir.close();
-  }
-
-  public void testApproximation() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    final int numDocs = atLeast(100);
-    for (int i = 0; i < numDocs; ++i) {
-      Document doc = new Document();
-      final int numValues = random().nextInt(3);
-      for (int j = 0; j < numValues; ++j) {
-        final long value = TestUtil.nextLong(random(), -100, 10000);
-        doc.add(new SortedNumericDocValuesField("dv1", value));
-        doc.add(new SortedSetDocValuesField("dv2", toSortableBytes(value)));
-        doc.add(new LongPoint("idx", value));
-        doc.add(new StringField("f", random().nextBoolean() ? "a" : "b", Store.NO));
-      }
-      iw.addDocument(doc);
-    }
-    if (random().nextBoolean()) {
-      iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L));
-    }
-    iw.commit();
-    final IndexReader reader = iw.getReader();
-    final IndexSearcher searcher = newSearcher(reader, false);
-    iw.close();
-
-    for (int i = 0; i < 100; ++i) {
-      final Long min = TestUtil.nextLong(random(), -100, 1000);
-      final Long max = TestUtil.nextLong(random(), -100, 1000);
-
-      BooleanQuery.Builder ref = new BooleanQuery.Builder();
-      ref.add(LongPoint.newRangeQuery("idx", min, max), Occur.FILTER);
-      ref.add(new TermQuery(new Term("f", "a")), Occur.MUST);
-
-      BooleanQuery.Builder bq1 = new BooleanQuery.Builder();
-      bq1.add(DocValuesRangeQuery.newLongRange("dv1", min, max, true, true), Occur.FILTER);
-      bq1.add(new TermQuery(new Term("f", "a")), Occur.MUST);
-
-      assertSameMatches(searcher, ref.build(), bq1.build(), true);
-
-      BooleanQuery.Builder bq2 = new BooleanQuery.Builder();
-      bq2.add(DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(min), toSortableBytes(max), true, true), Occur.FILTER);
-      bq2.add(new TermQuery(new Term("f", "a")), Occur.MUST);
-
-      assertSameMatches(searcher, ref.build(), bq2.build(), true);
-    }
-
-    reader.close();
-    dir.close();
-  }
-
-  private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException {
-    final int maxDoc = searcher.getIndexReader().maxDoc();
-    final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
-    final TopDocs td2 = searcher.search(q2, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
-    assertEquals(td1.totalHits, td2.totalHits);
-    for (int i = 0; i < td1.scoreDocs.length; ++i) {
-      assertEquals(td1.scoreDocs[i].doc, td2.scoreDocs[i].doc);
-      if (scores) {
-        assertEquals(td1.scoreDocs[i].score, td2.scoreDocs[i].score, 10e-7);
-      }
-    }
-  }
-
-  public void testToString() {
-    assertEquals("f:[2 TO 5]", DocValuesRangeQuery.newLongRange("f", 2L, 5L, true, true).toString());
-    assertEquals("f:{2 TO 5]", DocValuesRangeQuery.newLongRange("f", 2L, 5L, false, true).toString());
-    assertEquals("f:{2 TO 5}", DocValuesRangeQuery.newLongRange("f", 2L, 5L, false, false).toString());
-    assertEquals("f:{* TO 5}", DocValuesRangeQuery.newLongRange("f", null, 5L, false, false).toString());
-    assertEquals("f:[2 TO *}", DocValuesRangeQuery.newLongRange("f", 2L, null, true, false).toString());
-
-    BytesRef min = new BytesRef("a");
-    BytesRef max = new BytesRef("b");
-    assertEquals("f:[[61] TO [62]]", DocValuesRangeQuery.newBytesRefRange("f", min, max, true, true).toString());
-    assertEquals("f:{[61] TO [62]]", DocValuesRangeQuery.newBytesRefRange("f", min, max, false, true).toString());
-    assertEquals("f:{[61] TO [62]}", DocValuesRangeQuery.newBytesRefRange("f", min, max, false, false).toString());
-    assertEquals("f:{* TO [62]}", DocValuesRangeQuery.newBytesRefRange("f", null, max, false, false).toString());
-    assertEquals("f:[[61] TO *}", DocValuesRangeQuery.newBytesRefRange("f", min, null, true, false).toString());
-  }
-
-  public void testDocValuesRangeSupportsApproximation() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("dv1", 5L));
-    doc.add(new SortedDocValuesField("dv2", toSortableBytes(42L)));
-    iw.addDocument(doc);
-    iw.commit();
-    final IndexReader reader = iw.getReader();
-    final LeafReaderContext ctx = reader.leaves().get(0);
-    final IndexSearcher searcher = newSearcher(reader);
-    iw.close();
-
-    Query q1 = DocValuesRangeQuery.newLongRange("dv1", 0L, 100L, random().nextBoolean(), random().nextBoolean());
-    Weight w = searcher.createNormalizedWeight(q1, true);
-    Scorer s = w.scorer(ctx);
-    assertNotNull(s.twoPhaseIterator());
-
-    Query q2 = DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(0L), toSortableBytes(100L), random().nextBoolean(), random().nextBoolean());
-    w = searcher.createNormalizedWeight(q2, true);
-    s = w.scorer(ctx);
-    assertNotNull(s.twoPhaseIterator());
-
-    reader.close();
-    dir.close();
-  }
-
-  public void testLongRangeBoundaryValues() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-
-    Document doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", 100l));
-    iw.addDocument(doc);
-
-    doc = new Document();
-    doc.add(new SortedNumericDocValuesField("dv", 200l));
-    iw.addDocument(doc);
-
-    iw.commit();
-
-    final IndexReader reader = iw.getReader();
-    final IndexSearcher searcher = newSearcher(reader, false);
-    iw.close();
-
-    Long min = Long.MIN_VALUE;
-    Long max = Long.MIN_VALUE;
-    Query query = DocValuesRangeQuery.newLongRange("dv", min, max, true, false);
-    TopDocs td = searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
-    assertEquals(0, td.totalHits);
-
-    min = Long.MAX_VALUE;
-    max = Long.MAX_VALUE;
-    query = DocValuesRangeQuery.newLongRange("dv", min, max, false, true);
-    td = searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
-    assertEquals(0, td.totalHits);
-
-    reader.close();
-    dir.close();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java b/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
deleted file mode 100644
index de289e7..0000000
--- a/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-import java.io.IOException;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.LongPoint;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
-
-public class TestIndexOrDocValuesQuery extends LuceneTestCase {
-
-  public void testUseIndexForSelectiveQueries() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()
-        // relies on costs and PointValues.estimateCost so we need the default codec
-        .setCodec(TestUtil.getDefaultCodec()));
-    for (int i = 0; i < 2000; ++i) {
-      Document doc = new Document();
-      if (i == 42) {
-        doc.add(new StringField("f1", "bar", Store.NO));
-        doc.add(new LongPoint("f2", 42L));
-        doc.add(new NumericDocValuesField("f2", 42L));
-      } else if (i == 100) {
-        doc.add(new StringField("f1", "foo", Store.NO));
-        doc.add(new LongPoint("f2", 2L));
-        doc.add(new NumericDocValuesField("f2", 2L));
-      } else {
-        doc.add(new StringField("f1", "bar", Store.NO));
-        doc.add(new LongPoint("f2", 2L));
-        doc.add(new NumericDocValuesField("f2", 2L));
-      }
-      w.addDocument(doc);
-    }
-    w.forceMerge(1);
-    IndexReader reader = DirectoryReader.open(w);
-    IndexSearcher searcher = newSearcher(reader);
-    searcher.setQueryCache(null);
-
-    // The term query is more selective, so the IndexOrDocValuesQuery should use doc values
-    final Query q1 = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term("f1", "foo")), Occur.MUST)
-        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 2), new DocValuesNumbersQuery("f2", 2L)), Occur.MUST)
-        .build();
-
-    final Weight w1 = searcher.createNormalizedWeight(q1, random().nextBoolean());
-    final Scorer s1 = w1.scorer(searcher.getIndexReader().leaves().get(0));
-    assertNotNull(s1.twoPhaseIterator()); // means we use doc values
-
-    // The term query is less selective, so the IndexOrDocValuesQuery should use points
-    final Query q2 = new BooleanQuery.Builder()
-        .add(new TermQuery(new Term("f1", "bar")), Occur.MUST)
-        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 42), new DocValuesNumbersQuery("f2", 42L)), Occur.MUST)
-        .build();
-
-    final Weight w2 = searcher.createNormalizedWeight(q2, random().nextBoolean());
-    final Scorer s2 = w2.scorer(searcher.getIndexReader().leaves().get(0));
-    assertNull(s2.twoPhaseIterator()); // means we use points
-
-    reader.close();
-    w.close();
-    dir.close();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
----------------------------------------------------------------------
diff --git a/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
index 2071163..5152768 100644
--- a/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
+++ b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
@@ -32,7 +32,6 @@ import org.apache.lucene.collation.ICUCollationKeyAnalyzer;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.search.DocValuesRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermRangeQuery;
@@ -272,13 +271,8 @@ public class ICUCollationField extends FieldType {
     BytesRef low = part1 == null ? null : getCollationKey(f, part1);
     BytesRef high = part2 == null ? null : getCollationKey(f, part2);
     if (!field.indexed() && field.hasDocValues()) {
-      if (field.multiValued()) {
-          return DocValuesRangeQuery.newBytesRefRange(
-              field.getName(), low, high, minInclusive, maxInclusive);
-        } else {
-          return DocValuesRangeQuery.newBytesRefRange(
-              field.getName(), low, high, minInclusive, maxInclusive);
-        } 
+      return SortedSetDocValuesField.newRangeQuery(
+          field.getName(), low, high, minInclusive, maxInclusive);
     } else {
       return new TermRangeQuery(field.getName(), low, high, minInclusive, maxInclusive);
     }
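
For reference, the replacement factory mirrors the removed DocValuesRangeQuery.newBytesRefRange
signature. A minimal sketch of calling it directly (the field name and bounds are hypothetical;
for a collation field the bounds would be collation keys):

    import org.apache.lucene.document.SortedSetDocValuesField;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.BytesRef;

    // doc-values-only range query over a multi-valued sorted-set field
    Query q = SortedSetDocValuesField.newRangeQuery(
        "name_sort",                           // hypothetical field name
        new BytesRef("a"), new BytesRef("b"),  // lower / upper bounds
        true, false);                          // min inclusive, max exclusive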

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/solr/core/src/java/org/apache/solr/schema/CollationField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/CollationField.java b/solr/core/src/java/org/apache/solr/schema/CollationField.java
index 998db2a..805e204 100644
--- a/solr/core/src/java/org/apache/solr/schema/CollationField.java
+++ b/solr/core/src/java/org/apache/solr/schema/CollationField.java
@@ -36,7 +36,6 @@ import org.apache.lucene.collation.CollationKeyAnalyzer;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.search.DocValuesRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermRangeQuery;
@@ -242,7 +241,7 @@ public class CollationField extends FieldType {
     BytesRef low = part1 == null ? null : getCollationKey(f, part1);
     BytesRef high = part2 == null ? null : getCollationKey(f, part2);
     if (!field.indexed() && field.hasDocValues()) {
-      return DocValuesRangeQuery.newBytesRefRange(
+      return SortedSetDocValuesField.newRangeQuery(
           field.getName(), low, high, minInclusive, maxInclusive);
     } else {
       return new TermRangeQuery(field.getName(), low, high, minInclusive, maxInclusive);


[26/50] [abbrv] lucene-solr:apiv2: SOLR-9836: Add ability to recover from leader when index corruption is detected on SolrCore creation.

Posted by no...@apache.org.
SOLR-9836: Add ability to recover from leader when index corruption is detected on SolrCore creation.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a89560bb
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a89560bb
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a89560bb

Branch: refs/heads/apiv2
Commit: a89560bb72de57d291db45c52c04b9edf6c91d92
Parents: a37bfa7
Author: markrmiller <ma...@apache.org>
Authored: Wed Jan 18 15:17:06 2017 -0500
Committer: markrmiller <ma...@apache.org>
Committed: Wed Jan 18 19:45:22 2017 -0500

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   6 +
 .../org/apache/solr/core/CoreContainer.java     | 109 +++++++++++++++-
 .../org/apache/solr/core/DirectoryFactory.java  |  27 ++++
 .../src/java/org/apache/solr/core/SolrCore.java | 120 ++++++++++++++----
 .../org/apache/solr/handler/IndexFetcher.java   |  60 +--------
 .../org/apache/solr/handler/RestoreCore.java    |   2 +-
 .../solr/cloud/MissingSegmentRecoveryTest.java  | 123 +++++++++++++++++++
 7 files changed, 364 insertions(+), 83 deletions(-)
----------------------------------------------------------------------
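
The new recovery path is opt-in: it is selected through a JVM system property named after the
CoreInitFailedAction enum introduced below (valid values are "fromleader" and "none"; the default
"none" keeps the old fail-fast behavior). A sketch of enabling it on a SolrCloud node, assuming
the stock bin/solr "-a" pass-through for extra JVM arguments:

    bin/solr start -c -z localhost:2181 -a "-DCoreInitFailedAction=fromleader"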


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a89560bb/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 55ab04d..205c7bc 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -90,6 +90,12 @@ Jetty 9.3.14.v20161028
 Detailed Change List
 ----------------------
 
+New Features
+----------------------
+
+* SOLR-9836: Add ability to recover from leader when index corruption is detected on SolrCore creation.
+  (Mike Drob via Mark Miller)
+
 Bug Fixes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a89560bb/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 3c4ed56..023e7b1 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -20,9 +20,12 @@ import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -39,22 +42,29 @@ import com.google.common.collect.Maps;
 import org.apache.http.auth.AuthSchemeProvider;
 import org.apache.http.client.CredentialsProvider;
 import org.apache.http.config.Lookup;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.store.Directory;
 import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
 import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder;
 import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.AuthSchemeRegistryProvider;
 import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.CredentialsProviderProvider;
 import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
+import org.apache.solr.cloud.CloudDescriptor;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Replica.State;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.IOUtils;
 import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.DirectoryFactory.DirContext;
 import org.apache.solr.core.backup.repository.BackupRepository;
 import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
 import org.apache.solr.handler.RequestHandlerBase;
+import org.apache.solr.handler.SnapShooter;
 import org.apache.solr.handler.admin.CollectionsHandler;
 import org.apache.solr.handler.admin.ConfigSetsHandler;
 import org.apache.solr.handler.admin.CoreAdminHandler;
@@ -166,6 +176,8 @@ public class CoreContainer {
 
   protected MetricsHandler metricsHandler;
 
+  private enum CoreInitFailedAction { fromleader, none }
+
   /**
    * This method instantiates a new instance of {@linkplain BackupRepository}.
    *
@@ -911,7 +923,11 @@ public class CoreContainer {
 
       ConfigSet coreConfig = coreConfigService.getConfig(dcore);
       log.info("Creating SolrCore '{}' using configuration from {}", dcore.getName(), coreConfig.getName());
-      core = new SolrCore(dcore, coreConfig);
+      try {
+        core = new SolrCore(dcore, coreConfig);
+      } catch (SolrException e) {
+        core = processCoreCreateException(e, dcore, coreConfig);
+      }
 
       // always kick off recovery if we are in non-Cloud mode
       if (!isZooKeeperAware() && core.getUpdateHandler().getUpdateLog() != null) {
@@ -923,14 +939,12 @@ public class CoreContainer {
       return core;
     } catch (Exception e) {
       coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
-      log.error("Error creating core [{}]: {}", dcore.getName(), e.getMessage(), e);
       final SolrException solrException = new SolrException(ErrorCode.SERVER_ERROR, "Unable to create core [" + dcore.getName() + "]", e);
       if(core != null && !core.isClosed())
         IOUtils.closeQuietly(core);
       throw solrException;
     } catch (Throwable t) {
       SolrException e = new SolrException(ErrorCode.SERVER_ERROR, "JVM Error creating core [" + dcore.getName() + "]: " + t.getMessage(), t);
-      log.error("Error creating core [{}]: {}", dcore.getName(), t.getMessage(), t);
       coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
       if(core != null && !core.isClosed())
         IOUtils.closeQuietly(core);
@@ -938,7 +952,96 @@ public class CoreContainer {
     } finally {
       MDCLoggingContext.clear();
     }
+  }
+  
+  /**
+   * Take action when we failed to create a SolrCore. If the error is due to a corrupt index, try to recover. The recovery
+   * strategy can be specified via the system property "-DCoreInitFailedAction={fromleader, none}"
+   *
+   * @see CoreInitFailedAction
+   *
+   * @param original
+   *          the problem seen when loading the core the first time.
+   * @param dcore
+   *          core descriptor for the core to create
+   * @param coreConfig
+   *          core config for the core to create
+   * @return a new core instance if recovery was possible
+   * @throws SolrException
+   *           rethrows the original exception if we will not attempt to recover, throws a new SolrException with the
+   *           original exception as a suppressed exception if there is a second problem creating the solr core.
+   */
+  private SolrCore processCoreCreateException(SolrException original, CoreDescriptor dcore, ConfigSet coreConfig) {
+    // Traverse full chain since CIE may not be root exception
+    Throwable cause = original;
+    while ((cause = cause.getCause()) != null) {
+      if (cause instanceof CorruptIndexException) {
+        break;
+      }
+    }
+    
+    // If no CorruptIndexException, nothing we can try here
+    if (cause == null) throw original;
+    
+    CoreInitFailedAction action = CoreInitFailedAction.valueOf(System.getProperty(CoreInitFailedAction.class.getSimpleName(), "none"));
+    log.debug("CorruptIndexException while creating core, will attempt to repair via {}", action);
+    
+    switch (action) {
+      case fromleader: // Recovery from leader on a CorruptedIndexException
+        if (isZooKeeperAware()) {
+          CloudDescriptor desc = dcore.getCloudDescriptor();
+          try {
+            Replica leader = getZkController().getClusterState()
+                .getCollection(desc.getCollectionName())
+                .getSlice(desc.getShardId())
+                .getLeader();
+            if (leader != null && leader.getState() == State.ACTIVE) {
+              log.info("Found active leader, will attempt to create fresh core and recover.");
+              resetIndexDirectory(dcore, coreConfig);
+              return new SolrCore(dcore, coreConfig);
+            }
+          } catch (SolrException se) {
+            se.addSuppressed(original);
+            throw se;
+          }
+        }
+        throw original;
+      case none:
+        throw original;
+      default:
+        log.warn("Failed to create core, and did not recognize specified 'CoreInitFailedAction': [{}]. Valid options are {}.",
+            action, Arrays.asList(CoreInitFailedAction.values()));
+        throw original;
+    }
+  }
+
+  /**
+   * Write a new index directory for a SolrCore, but do so without loading it.
+   */
+  private void resetIndexDirectory(CoreDescriptor dcore, ConfigSet coreConfig) {
+    SolrConfig config = coreConfig.getSolrConfig();
+
+    String registryName = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, dcore.getName());
+    DirectoryFactory df = DirectoryFactory.loadDirectoryFactory(config, this, registryName);
+    String dataDir = SolrCore.findDataDir(df, null, config, dcore);
 
+    String tmpIdxDirName = "index." + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
+    SolrCore.modifyIndexProps(df, dataDir, config, tmpIdxDirName);
+
+    // Free the directory object that we had to create for this
+    Directory dir = null;
+    try {
+      dir = df.get(dataDir, DirContext.META_DATA, config.indexConfig.lockType);
+    } catch (IOException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    } finally {
+      try {
+        df.release(dir);
+        df.doneWithDirectory(dir);
+      } catch (IOException e) {
+        SolrException.log(log, e);
+      }
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a89560bb/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
index ac18d7e..9dd0d8a 100644
--- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
@@ -383,4 +383,31 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
     
     return baseDir;
   }
+
+  /**
+   * Create a new DirectoryFactory instance from the given SolrConfig and tied to the specified core container.
+   */
+  static DirectoryFactory loadDirectoryFactory(SolrConfig config, CoreContainer cc, String registryName) {
+    final PluginInfo info = config.getPluginInfo(DirectoryFactory.class.getName());
+    final DirectoryFactory dirFactory;
+    if (info != null) {
+      log.debug(info.className);
+      dirFactory = config.getResourceLoader().newInstance(info.className, DirectoryFactory.class);
+      // allow DirectoryFactory instances to access the CoreContainer
+      dirFactory.initCoreContainer(cc);
+      dirFactory.init(info.initArgs);
+    } else {
+      log.debug("solr.NRTCachingDirectoryFactory");
+      dirFactory = new NRTCachingDirectoryFactory();
+      dirFactory.initCoreContainer(cc);
+    }
+    if (config.indexConfig.metricsInfo != null && config.indexConfig.metricsInfo.isEnabled()) {
+      final DirectoryFactory factory = new MetricsDirectoryFactory(cc.getMetricManager(),
+          registryName, dirFactory);
+      factory.init(config.indexConfig.metricsInfo.initArgs);
+      return factory;
+    } else {
+      return dirFactory;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a89560bb/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index 697e008..74238e7 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -23,6 +23,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
 import java.lang.invoke.MethodHandles;
 import java.lang.reflect.Constructor;
 import java.net.URL;
@@ -67,6 +69,7 @@ import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.solr.client.solrj.impl.BinaryResponseParser;
 import org.apache.solr.cloud.CloudDescriptor;
@@ -148,6 +151,7 @@ import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.apache.solr.util.NumberUtils;
 import org.apache.solr.util.PropertiesInputStream;
+import org.apache.solr.util.PropertiesOutputStream;
 import org.apache.solr.util.RefCounted;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
 import org.apache.solr.util.plugin.PluginInfoInitialized;
@@ -646,27 +650,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
   }
 
   private DirectoryFactory initDirectoryFactory() {
-    final PluginInfo info = solrConfig.getPluginInfo(DirectoryFactory.class.getName());
-    final DirectoryFactory dirFactory;
-    if (info != null) {
-      log.debug(info.className);
-      dirFactory = getResourceLoader().newInstance(info.className, DirectoryFactory.class);
-      // allow DirectoryFactory instances to access the CoreContainer
-      dirFactory.initCoreContainer(getCoreDescriptor().getCoreContainer());
-      dirFactory.init(info.initArgs);
-    } else {
-      log.debug("solr.NRTCachingDirectoryFactory");
-      dirFactory = new NRTCachingDirectoryFactory();
-      dirFactory.initCoreContainer(getCoreDescriptor().getCoreContainer());
-    }
-    if (solrConfig.indexConfig.metricsInfo != null && solrConfig.indexConfig.metricsInfo.isEnabled()) {
-      final DirectoryFactory factory = new MetricsDirectoryFactory(coreDescriptor.getCoreContainer().getMetricManager(),
-          coreMetricManager.getRegistryName(), dirFactory);
-        factory.init(solrConfig.indexConfig.metricsInfo.initArgs);
-      return factory;
-    } else {
-      return dirFactory;
-    }
+    return DirectoryFactory.loadDirectoryFactory(solrConfig, getCoreDescriptor().getCoreContainer(), coreMetricManager.getRegistryName());
   }
 
   private void initIndexReaderFactory() {
@@ -1145,6 +1129,26 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
   }
 
   private String initDataDir(String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) {
+    return findDataDir(getDirectoryFactory(), dataDir, config, coreDescriptor);
+  }
+
+  /**
+   * Locate the data directory for a given config and core descriptor.
+   *
+   * @param directoryFactory
+   *          The directory factory to use if necessary to calculate an absolute path. Should be the same as what will
+   *          be used to open the data directory later.
+   * @param dataDir
+   *          An optional hint to the data directory location. Will be normalized and used if not null.
+   * @param config
+   *          A solr config to retrieve the default data directory location, if used.
+   * @param coreDescriptor
+   *          descriptor to load the actual data dir from, if not using the default.
+   * @return a normalized data directory name
+   * @throws SolrException
+   *           if the data directory cannot be loaded from the core descriptor
+   */
+  static String findDataDir(DirectoryFactory directoryFactory, String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) {
     if (dataDir == null) {
       if (coreDescriptor.usingDefaultDataDir()) {
         dataDir = config.getDataDir();
@@ -1163,6 +1167,80 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
     return SolrResourceLoader.normalizeDir(dataDir);
   }
 
+
+  public boolean modifyIndexProps(String tmpIdxDirName) {
+    return SolrCore.modifyIndexProps(getDirectoryFactory(), getDataDir(), getSolrConfig(), tmpIdxDirName);
+  }
+  
+  /**
+   * Update the index.properties file with the new index sub directory name
+   */
+  // package private
+  static boolean modifyIndexProps(DirectoryFactory directoryFactory, String dataDir, SolrConfig solrConfig, String tmpIdxDirName) {
+    log.info("Updating index properties... index={}", tmpIdxDirName);
+    Directory dir = null;
+    try {
+      dir = directoryFactory.get(dataDir, DirContext.META_DATA, solrConfig.indexConfig.lockType);
+      String tmpIdxPropName = IndexFetcher.INDEX_PROPERTIES + "." + System.nanoTime();
+      writeNewIndexProps(dir, tmpIdxPropName, tmpIdxDirName);
+      directoryFactory.renameWithOverwrite(dir, tmpIdxPropName, IndexFetcher.INDEX_PROPERTIES);
+      return true;
+    } catch (IOException e1) {
+      throw new RuntimeException(e1);
+    } finally {
+      if (dir != null) {
+        try {
+          directoryFactory.release(dir);
+        } catch (IOException e) {
+          SolrException.log(log, "", e);
+        }
+      }
+    }
+  }
+  
+  /**
+   * Write the index.properties file with the new index sub directory name
+   * @param dir a data directory (containing an index.properties file)
+   * @param tmpFileName the file name to write the new index.properties to
+   * @param tmpIdxDirName new index directory name
+   */
+  private static void writeNewIndexProps(Directory dir, String tmpFileName, String tmpIdxDirName) {
+    if (tmpFileName == null) {
+      tmpFileName = IndexFetcher.INDEX_PROPERTIES;
+    }
+    final Properties p = new Properties();
+    
+    // Read existing properties
+    try {
+      final IndexInput input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);
+      final InputStream is = new PropertiesInputStream(input);
+      try {
+        p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
+      } catch (Exception e) {
+        log.error("Unable to load " + IndexFetcher.INDEX_PROPERTIES, e);
+      } finally {
+        IOUtils.closeQuietly(is);
+      }
+    } catch (IOException e) {
+      // ignore; file does not exist
+    }
+    
+    p.put("index", tmpIdxDirName);
+
+    // Write new properties
+    Writer os = null;
+    try {
+      IndexOutput out = dir.createOutput(tmpFileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
+      os = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
+      p.store(os, IndexFetcher.INDEX_PROPERTIES);
+      dir.sync(Collections.singleton(tmpFileName));
+    } catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to write " + IndexFetcher.INDEX_PROPERTIES, e);
+    } finally {
+      IOUtils.closeQuietly(os);
+    }
+  }
+
   private String initUpdateLogDir(CoreDescriptor coreDescriptor) {
     String updateLogDir = coreDescriptor.getUlogDir();
     if (updateLogDir == null) {
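
The net effect of modifyIndexProps/writeNewIndexProps above is a small pointer file in the data
directory that names the active index subdirectory. A hypothetical index.properties after a reset
(the directory suffix follows SnapShooter.DATE_FMT; the timestamp is made up):

    #index.properties
    #Wed Jan 18 19:45:22 EST 2017
    index=index.20170118194522123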

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a89560bb/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
index 8bdd2b8..968af61 100644
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -21,7 +21,6 @@ import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.lang.invoke.MethodHandles;
@@ -92,7 +91,6 @@ import org.apache.solr.update.UpdateLog;
 import org.apache.solr.update.VersionInfo;
 import org.apache.solr.util.DefaultSolrThreadFactory;
 import org.apache.solr.util.FileUtils;
-import org.apache.solr.util.PropertiesInputStream;
 import org.apache.solr.util.PropertiesOutputStream;
 import org.apache.solr.util.RTimer;
 import org.apache.solr.util.RefCounted;
@@ -460,7 +458,7 @@ public class IndexFetcher {
             reloadCore = true;
             downloadConfFiles(confFilesToDownload, latestGeneration);
             if (isFullCopyNeeded) {
-              successfulInstall = IndexFetcher.modifyIndexProps(solrCore, tmpIdxDirName);
+              successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
               deleteTmpIdxDir = false;
             } else {
               successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
@@ -488,7 +486,7 @@ public class IndexFetcher {
           } else {
             terminateAndWaitFsyncService();
             if (isFullCopyNeeded) {
-              successfulInstall = IndexFetcher.modifyIndexProps(solrCore, tmpIdxDirName);
+              successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
               deleteTmpIdxDir = false;
             } else {
               successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
@@ -1189,60 +1187,6 @@ public class IndexFetcher {
     return new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(d);
   }
 
-  /**
-   * If the index is stale by any chance, load index from a different dir in the data dir.
-   */
-  protected static boolean modifyIndexProps(SolrCore solrCore, String tmpIdxDirName) {
-    LOG.info("New index installed. Updating index properties... index="+tmpIdxDirName);
-    Properties p = new Properties();
-    Directory dir = null;
-    try {
-      dir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), DirContext.META_DATA, solrCore.getSolrConfig().indexConfig.lockType);
-      if (slowFileExists(dir, IndexFetcher.INDEX_PROPERTIES)){
-        final IndexInput input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);
-
-        final InputStream is = new PropertiesInputStream(input);
-        try {
-          p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
-        } catch (Exception e) {
-          LOG.error("Unable to load " + IndexFetcher.INDEX_PROPERTIES, e);
-        } finally {
-          IOUtils.closeQuietly(is);
-        }
-      }
-
-      String tmpFileName = IndexFetcher.INDEX_PROPERTIES + "." + System.nanoTime();
-      final IndexOutput out = dir.createOutput(tmpFileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
-      p.put("index", tmpIdxDirName);
-      Writer os = null;
-      try {
-        os = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
-        p.store(os, tmpFileName);
-        dir.sync(Collections.singleton(tmpFileName));
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Unable to write " + IndexFetcher.INDEX_PROPERTIES, e);
-      } finally {
-        IOUtils.closeQuietly(os);
-      }
-      
-      solrCore.getDirectoryFactory().renameWithOverwrite(dir, tmpFileName, IndexFetcher.INDEX_PROPERTIES);
-      return true;
-
-    } catch (IOException e1) {
-      throw new RuntimeException(e1);
-    } finally {
-      if (dir != null) {
-        try {
-          solrCore.getDirectoryFactory().release(dir);
-        } catch (IOException e) {
-          SolrException.log(LOG, "", e);
-        }
-      }
-    }
-
-  }
-
   private final Map<String, FileInfo> confFileInfoCache = new HashMap<>();
 
   /**

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a89560bb/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
index c00d7bd..e750631 100644
--- a/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
+++ b/solr/core/src/java/org/apache/solr/handler/RestoreCore.java
@@ -101,7 +101,7 @@ public class RestoreCore implements Callable<Boolean> {
         }
       }
       log.debug("Switching directories");
-      IndexFetcher.modifyIndexProps(core, restoreIndexName);
+      core.modifyIndexProps(restoreIndexName);
 
       boolean success;
       try {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a89560bb/solr/core/src/test/org/apache/solr/cloud/MissingSegmentRecoveryTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/MissingSegmentRecoveryTest.java b/solr/core/src/test/org/apache/solr/cloud/MissingSegmentRecoveryTest.java
new file mode 100644
index 0000000..a7bfa20
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/MissingSegmentRecoveryTest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cloud;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.core.SolrCore;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Slow
+public class MissingSegmentRecoveryTest extends SolrCloudTestCase {
+  final String collection = getClass().getSimpleName();
+  
+  Replica leader;
+  Replica replica;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+    useFactory("solr.StandardDirectoryFactory");
+  }
+
+  @Before
+  public void setup() throws SolrServerException, IOException {
+    CollectionAdminRequest.createCollection(collection, "conf", 1, 2)
+        .setMaxShardsPerNode(1)
+        .process(cluster.getSolrClient());
+    waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
+    cluster.getSolrClient().setDefaultCollection(collection);
+
+    List<SolrInputDocument> docs = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      SolrInputDocument doc = new SolrInputDocument();
+      doc.addField("id", i);
+      docs.add(doc);
+    }
+
+    cluster.getSolrClient().add(docs);
+    cluster.getSolrClient().commit();
+    
+    DocCollection state = getCollectionState(collection);
+    leader = state.getLeader("shard1");
+    replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
+  }
+  
+  @After
+  public void teardown() throws Exception {
+    System.clearProperty("CoreInitFailedAction");
+    CollectionAdminRequest.deleteCollection(collection).process(cluster.getSolrClient());
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws Exception {
+    resetFactory();
+  }
+
+  @Test
+  public void testLeaderRecovery() throws Exception {
+    System.setProperty("CoreInitFailedAction", "fromleader");
+
+    // Simulate failure by truncating the segment_* files
+    for (File segment : getSegmentFiles(replica)) {
+      truncate(segment);
+    }
+
+    // Might not need a sledge-hammer to reload the core
+    JettySolrRunner jetty = cluster.getReplicaJetty(replica);
+    jetty.stop();
+    jetty.start();
+
+    waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
+    
+    QueryResponse resp = cluster.getSolrClient().query(collection, new SolrQuery("*:*"));
+    assertEquals(10, resp.getResults().getNumFound());
+  }
+
+  private File[] getSegmentFiles(Replica replica) {
+    try (SolrCore core = cluster.getReplicaJetty(replica).getCoreContainer().getCore(replica.getCoreName())) {
+      File indexDir = new File(core.getDataDir(), "index");
+      return indexDir.listFiles((File dir, String name) -> {
+        return name.startsWith("segments_");
+      });
+    }
+  }
+  
+  private void truncate(File file) throws IOException {
+    Files.write(file.toPath(), new byte[0], StandardOpenOption.TRUNCATE_EXISTING);
+  }
+}


[25/50] [abbrv] lucene-solr:apiv2: SOLR-9980: Expose configVersion in core admin status

Posted by no...@apache.org.
SOLR-9980: Expose configVersion in core admin status


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a37bfa75
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a37bfa75
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a37bfa75

Branch: refs/heads/apiv2
Commit: a37bfa75a0d1da3b8f513528f1b16acd4b3f0f0b
Parents: 85061e3
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Wed Jan 18 15:01:27 2017 -0800
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Wed Jan 18 15:01:27 2017 -0800

----------------------------------------------------------------------
 solr/CHANGES.txt                                                 | 4 ++++
 .../java/org/apache/solr/handler/admin/CoreAdminOperation.java   | 1 +
 2 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37bfa75/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 1f32c24..55ab04d 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -106,6 +106,10 @@ Optimizations
 * SOLR-9941: Clear the deletes lists at UpdateLog before replaying from log. This prevents redundantly pre-applying
   DBQs, during the log replay, to every update in the log as if the DBQs were out of order. (hossman, Ishan Chattopadhyaya)
 
+Other Changes
+----------------------
+* SOLR-9980: Expose configVersion in core admin status (Jessica Cheng Mallet via Tomás Fernández Lóbbe)
+
 ==================  6.4.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a37bfa75/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
index 5836ed3..af3af4d 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminOperation.java
@@ -335,6 +335,7 @@ enum CoreAdminOperation implements CoreAdminOp {
           info.add("uptime", core.getUptimeMs());
           if (cores.isZooKeeperAware()) {
             info.add("lastPublished", core.getCoreDescriptor().getCloudDescriptor().getLastPublished().toString().toLowerCase(Locale.ROOT));
+            info.add("configVersion", core.getSolrConfig().getZnodeVersion());
           }
           if (isIndexInfoNeeded) {
             RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
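
After this change, a core admin STATUS response for a SolrCloud core reports the ZooKeeper znode
version of the core's solrconfig next to lastPublished. A hypothetical, heavily abbreviated
response (core name and values are made up):

    curl 'http://localhost:8983/solr/admin/cores?action=STATUS&core=mycoll_shard1_replica1'

    "status": {
      "mycoll_shard1_replica1": {
        ...
        "uptime": 123456,
        "lastPublished": "active",
        "configVersion": 2,
        ...
      }
    }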


[11/50] [abbrv] lucene-solr:apiv2: LUCENE-7619: add WordDelimiterGraphFilter (replacing WordDelimiterFilter) to produce a correct token stream graph when splitting words

Posted by no...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java
index 4e8eeb8..64923db 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/TokenStreamToDot.java
@@ -93,7 +93,10 @@ public class TokenStreamToDot {
         final int endOffset = offsetAtt.endOffset();
         //System.out.println("start=" + startOffset + " end=" + endOffset + " len=" + inputText.length());
         if (inputText != null) {
-          arcLabel += " / " + inputText.substring(startOffset, endOffset);
+          String fragment = inputText.substring(startOffset, endOffset);
+          if (fragment.equals(termAtt.toString()) == false) {
+            arcLabel += " / " + fragment;
+          }
         } else {
           arcLabel += " / " + startOffset + "-" + endOffset;
         }
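
The effect on the generated dot output: when a token's text already equals the stretch of input it
covers, the redundant fragment suffix is dropped from the arc label. For a hypothetical token
"power" at offsets 0-5, the emitted arc changes roughly from

    0 -> 1 [label="power / power"]

to

    0 -> 1 [label="power"]

while tokens whose text differs from the underlying input (stemmed or concatenated forms, as
WordDelimiterGraphFilter produces) keep the " / fragment" suffix.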


[38/50] [abbrv] lucene-solr:apiv2: SOLR-10001: Fix overseer-roles test bug

Posted by no...@apache.org.
SOLR-10001: Fix overseer-roles test bug


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/eba93909
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/eba93909
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/eba93909

Branch: refs/heads/apiv2
Commit: eba9390965bcf6b2422524a5628a160ce26c1226
Parents: a14d793
Author: Alan Woodward <ro...@apache.org>
Authored: Thu Jan 19 17:16:42 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Thu Jan 19 17:46:03 2017 +0000

----------------------------------------------------------------------
 .../solr/cloud/CollectionsAPISolrJTest.java     |  34 ----
 .../apache/solr/cloud/OverseerRolesTest.java    | 173 +++++++++----------
 2 files changed, 77 insertions(+), 130 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/eba93909/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
index 616b657..3e0d840 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPISolrJTest.java
@@ -21,7 +21,6 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 
 import org.apache.lucene.util.LuceneTestCase;
@@ -255,39 +254,6 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
   }
 
   @Test
-  @SuppressWarnings("unchecked")
-  public void testAddAndRemoveRole() throws InterruptedException, IOException, SolrServerException {
-
-    String node = cluster.getRandomJetty(random()).getNodeName();
-
-    CollectionAdminRequest.addRole(node, "overseer").process(cluster.getSolrClient());
-
-    CollectionAdminResponse response = CollectionAdminRequest.getClusterStatus().process(cluster.getSolrClient());
-
-    NamedList<Object> rsp = response.getResponse();
-    NamedList<Object> cs = (NamedList<Object>) rsp.get("cluster");
-    assertNotNull("Cluster state should not be null", cs);
-    Map<String, Object> roles = (Map<String, Object>) cs.get("roles");
-    assertNotNull("Role information should not be null", roles);
-    List<String> overseer = (List<String>) roles.get("overseer");
-    assertNotNull(overseer);
-    assertEquals(1, overseer.size());
-    assertTrue(overseer.contains(node));
-    
-    // Remove role
-    CollectionAdminRequest.removeRole(node, "overseer").process(cluster.getSolrClient());
-
-    response = CollectionAdminRequest.getClusterStatus().process(cluster.getSolrClient());
-    rsp = response.getResponse();
-    cs = (NamedList<Object>) rsp.get("cluster");
-    assertNotNull("Cluster state should not be null", cs);
-    roles = (Map<String, Object>) cs.get("roles");
-    assertNotNull("Role information should not be null", roles);
-    overseer = (List<String>) roles.get("overseer");
-    assertFalse(overseer.contains(node));
-  }
-
-  @Test
   public void testOverseerStatus() throws IOException, SolrServerException {
     CollectionAdminResponse response = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/eba93909/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
index 762bbeb..3c2ca87 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
@@ -17,28 +17,27 @@
 package org.apache.solr.cloud;
 
 import java.lang.invoke.MethodHandles;
+import java.net.URL;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
 
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.data.Stat;
-import org.junit.Before;
+import org.apache.zookeeper.KeeperException;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.solr.cloud.OverseerCollectionConfigSetProcessor.getLeaderNode;
-import static org.apache.solr.cloud.OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames;
-import static org.hamcrest.CoreMatchers.not;
+import static org.apache.solr.cloud.OverseerTaskProcessor.getSortedElectionNodes;
 
 public class OverseerRolesTest extends SolrCloudTestCase {
 
@@ -51,117 +50,99 @@ public class OverseerRolesTest extends SolrCloudTestCase {
         .configure();
   }
 
-  @Before
-  public void clearAllOverseerRoles() throws Exception {
-    for (String node : OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zkClient())) {
-      CollectionAdminRequest.removeRole(node, "overseer").process(cluster.getSolrClient());
+  private void waitForNewOverseer(int seconds, Predicate<String> state) throws Exception {
+    TimeOut timeout = new TimeOut(seconds, TimeUnit.SECONDS);
+    String current = null;
+    while (timeout.hasTimedOut() == false) {
+      current = OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient());
+      if (state.test(current))
+        return;
+      Thread.sleep(100);
     }
+    fail("Timed out waiting for overseer state change");
   }
 
-  @Test
-  public void testQuitCommand() throws Exception {
-
-    SolrZkClient zk = zkClient();
-    byte[] data = zk.getData("/overseer_elect/leader", null, new Stat(), true);
-    Map m = (Map) Utils.fromJSON(data);
-    String s = (String) m.get("id");
-    String leader = LeaderElector.getNodeName(s);
-    log.info("Current overseer: {}", leader);
-    Overseer.getStateUpdateQueue(zk)
-        .offer(Utils.toJSON(new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower(),
-                                            "id", s)));
-    final TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
-    String newLeader = null;
-    for(;! timeout.hasTimedOut();){
-      newLeader = OverseerCollectionConfigSetProcessor.getLeaderNode(zk);
-      if (newLeader != null && !newLeader.equals(leader))
-        break;
-      Thread.sleep(100);
+  private void waitForNewOverseer(int seconds, String expected) throws Exception {
+    waitForNewOverseer(seconds, s -> Objects.equals(s, expected));
+  }
+
+  private JettySolrRunner getOverseerJetty() throws Exception {
+    String overseer = getLeaderNode(zkClient());
+    URL overseerUrl = new URL("http://" + overseer.substring(0, overseer.indexOf('_')));
+    int hostPort = overseerUrl.getPort();
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
+      if (jetty.getBaseUrl().getPort() == hostPort)
+        return jetty;
     }
-    assertThat("Leader not changed yet", newLeader, not(leader));
+    fail("Couldn't find overseer node " + overseer);
+    return null; // to keep the compiler happy
+  }
 
-    assertTrue("The old leader should have rejoined election",
-        OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zk).contains(leader));
+  private void logOverseerState() throws KeeperException, InterruptedException {
+    log.info("Overseer: {}", getLeaderNode(zkClient()));
+    log.info("Election queue: {}", getSortedElectionNodes(zkClient(), "/overseer_elect/election"));
   }
 
   @Test
   public void testOverseerRole() throws Exception {
 
-    List<String> l = OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zkClient()) ;
+    logOverseerState();
+    List<String> nodes = OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zkClient());
+    String overseer1 = OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient());
+    nodes.remove(overseer1);
 
-    log.info("All nodes {}", l);
-    String currentLeader = OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient());
-    log.info("Current leader {} ", currentLeader);
-    l.remove(currentLeader);
+    Collections.shuffle(nodes, random());
+    String overseer2 = nodes.get(0);
+    log.info("### Setting overseer designate {}", overseer2);
 
-    Collections.shuffle(l, random());
-    String overseerDesignate = l.get(0);
-    log.info("overseerDesignate {}", overseerDesignate);
+    CollectionAdminRequest.addRole(overseer2, "overseer").process(cluster.getSolrClient());
 
-    CollectionAdminRequest.addRole(overseerDesignate, "overseer").process(cluster.getSolrClient());
-
-    TimeOut timeout = new TimeOut(15, TimeUnit.SECONDS);
-
-    boolean leaderchanged = false;
-    for (;!timeout.hasTimedOut();) {
-      if (overseerDesignate.equals(OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient()))) {
-        log.info("overseer designate is the new overseer");
-        leaderchanged =true;
-        break;
-      }
-      Thread.sleep(100);
-    }
-    assertTrue("could not set the new overseer . expected "+
-        overseerDesignate + " current order : " +
-        getSortedOverseerNodeNames(zkClient()) +
-        " ldr :"+ OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient()) ,leaderchanged);
+    waitForNewOverseer(15, overseer2);
 
     //add another node as overseer
-    l.remove(overseerDesignate);
-    Collections.shuffle(l, random());
-
-    String anotherOverseer = l.get(0);
-    log.info("Adding another overseer designate {}", anotherOverseer);
-    CollectionAdminRequest.addRole(anotherOverseer, "overseer").process(cluster.getSolrClient());
-
-    String currentOverseer = getLeaderNode(zkClient());
+    nodes.remove(overseer2);
+    Collections.shuffle(nodes, random());
 
-    log.info("Current Overseer {}", currentOverseer);
+    String overseer3 = nodes.get(0);
+    log.info("### Adding another overseer designate {}", overseer3);
+    CollectionAdminRequest.addRole(overseer3, "overseer").process(cluster.getSolrClient());
 
-    String hostPort = currentOverseer.substring(0, currentOverseer.indexOf('_'));
+    // kill the current overseer, and check that the new designate becomes the new overseer
+    JettySolrRunner leaderJetty = getOverseerJetty();
+    logOverseerState();
 
-    StringBuilder sb = new StringBuilder();
-    log.info("hostPort : {}", hostPort);
-
-    JettySolrRunner leaderJetty = null;
+    ChaosMonkey.stop(leaderJetty);
+    waitForNewOverseer(10, overseer3);
+
+    // add another node as overseer
+    nodes.remove(overseer3);
+    Collections.shuffle(nodes, random());
+    String overseer4 = nodes.get(0);
+    log.info("### Adding last overseer designate {}", overseer4);
+    CollectionAdminRequest.addRole(overseer4, "overseer").process(cluster.getSolrClient());
+    logOverseerState();
+
+    // remove the overseer role from the current overseer
+    CollectionAdminRequest.removeRole(overseer3, "overseer").process(cluster.getSolrClient());
+    waitForNewOverseer(15, overseer4);
+
+    // Add it back again - we now have two delegates, 4 and 3
+    CollectionAdminRequest.addRole(overseer3, "overseer").process(cluster.getSolrClient());
+
+    // explicitly tell the overseer to quit
+    String leaderId = OverseerCollectionConfigSetProcessor.getLeaderId(zkClient());
+    String leader = OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient());
+    log.info("### Sending QUIT to overseer {}", leader);
+    Overseer.getStateUpdateQueue(zkClient())
+        .offer(Utils.toJSON(new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower(),
+            "id", leaderId)));
 
-    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      String s = jetty.getBaseUrl().toString();
-      log.info("jetTy {}",s);
-      sb.append(s).append(" , ");
-      if (s.contains(hostPort)) {
-        leaderJetty = jetty;
-        break;
-      }
-    }
+    waitForNewOverseer(10, s -> Objects.equals(leader, s) == false);
 
-    assertNotNull("Could not find a jetty2 kill",  leaderJetty);
+    logOverseerState();
+    assertTrue("The old leader should have rejoined election",
+        OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zkClient()).contains(leader));
 
-    log.info("leader node {}", leaderJetty.getBaseUrl());
-    log.info("current election Queue",
-        OverseerCollectionConfigSetProcessor.getSortedElectionNodes(zkClient(), "/overseer_elect/election"));
-    ChaosMonkey.stop(leaderJetty);
-    timeout = new TimeOut(10, TimeUnit.SECONDS);
-    leaderchanged = false;
-    for (; !timeout.hasTimedOut(); ) {
-      currentOverseer = getLeaderNode(zkClient());
-      if (anotherOverseer.equals(currentOverseer)) {
-        leaderchanged = true;
-        break;
-      }
-      Thread.sleep(100);
-    }
-    assertTrue("New overseer designate has not become the overseer, expected : " + anotherOverseer + "actual : " + getLeaderNode(zkClient()), leaderchanged);
   }
 
 }
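
For reference, the predicate-based waitForNewOverseer overload used above is not
shown in this hunk; a minimal sketch of such a helper (illustrative only, assuming
java.util.function.Predicate and the TimeOut utility used elsewhere in this test)
could look like:

    private void waitForNewOverseer(int seconds, Predicate<String> state) throws Exception {
      TimeOut timeout = new TimeOut(seconds, TimeUnit.SECONDS);
      String current = null;
      while (!timeout.hasTimedOut()) {
        current = getLeaderNode(zkClient());
        if (state.test(current))
          return;
        Thread.sleep(100); // poll until the expected overseer shows up or we time out
      }
      fail("Timed out waiting for overseer change, current overseer: " + current);
    }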


[07/50] [abbrv] lucene-solr:apiv2: LUCENE-7055: Add ScorerProvider to get an estimation of the cost of scorers before building them.

Posted by no...@apache.org.
LUCENE-7055: Add ScorerProvider to get an estimation of the cost of scorers before building them.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/86233cb9
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/86233cb9
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/86233cb9

Branch: refs/heads/apiv2
Commit: 86233cb95de6f24aa2ae7fd016b7d75d535024c7
Parents: 38af094
Author: Adrien Grand <jp...@gmail.com>
Authored: Mon Jan 16 15:47:53 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Tue Jan 17 08:51:58 2017 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   7 +
 .../codecs/simpletext/SimpleTextBKDReader.java  |  50 +++
 .../org/apache/lucene/codecs/PointsWriter.java  |   5 +
 .../org/apache/lucene/index/CheckIndex.java     |  37 +++
 .../org/apache/lucene/index/PointValues.java    |   7 +
 .../apache/lucene/index/PointValuesWriter.java  |  10 +
 .../apache/lucene/index/SortingLeafReader.java  |   5 +
 .../lucene/search/Boolean2ScorerSupplier.java   | 217 ++++++++++++
 .../org/apache/lucene/search/BooleanWeight.java | 136 ++------
 .../apache/lucene/search/ConjunctionDISI.java   |   2 +-
 .../apache/lucene/search/ConjunctionScorer.java |   3 +-
 .../lucene/search/ConstantScoreQuery.java       |  46 ++-
 .../lucene/search/MinShouldMatchSumScorer.java  |  22 +-
 .../apache/lucene/search/PointRangeQuery.java   | 144 +++++---
 .../apache/lucene/search/ScorerSupplier.java    |  47 +++
 .../java/org/apache/lucene/search/Weight.java   |  25 ++
 .../org/apache/lucene/util/bkd/BKDReader.java   |  59 ++++
 .../search/TestBoolean2ScorerSupplier.java      | 332 +++++++++++++++++++
 .../search/TestBooleanQueryVisitSubscorers.java |   4 +-
 .../apache/lucene/search/TestFilterWeight.java  |   3 +-
 .../apache/lucene/util/TestDocIdSetBuilder.java |   5 +
 .../util/bkd/TestMutablePointsReaderUtils.java  |   5 +
 .../apache/lucene/index/memory/MemoryIndex.java |   5 +
 .../lucene/search/DocValuesRangeQuery.java      |  11 +-
 .../lucene/search/IndexOrDocValuesQuery.java    | 116 +++++++
 .../search/TestIndexOrDocValuesQuery.java       |  89 +++++
 .../codecs/cranky/CrankyPointsFormat.java       |   5 +
 .../lucene/index/AssertingLeafReader.java       |   7 +
 .../apache/lucene/search/AssertingWeight.java   |  42 ++-
 29 files changed, 1248 insertions(+), 198 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 851ed72..59992ea 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -73,6 +73,13 @@ Bug Fixes
 * LUCENE-7630: Fix (Edge)NGramTokenFilter to no longer drop payloads
   and preserve all attributes. (Nathan Gass via Uwe Schindler)
 
+Improvements
+
+* LUCENE-7055: Added Weight#scorerSupplier, which makes it possible to estimate
+  the cost of a Scorer before actually building it, in order to optimize how the
+  query should be run, e.g. using points or doc values depending on the costs of
+  other parts of the query. (Adrien Grand)
+
 ======================= Lucene 6.4.0 =======================
 
 API Changes
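
For illustration of the intent of this entry (not part of the patch; names are
hypothetical and null checks are omitted): a query with both a points-based and a
doc-values-based implementation can compare supplier costs against the cost of the
rest of the query before committing to one execution strategy:

    ScorerSupplier points = pointsWeight.scorerSupplier(context);
    ScorerSupplier docValues = dvWeight.scorerSupplier(context);
    Scorer scorer;
    if (points.cost() <= leadCost) {
      // cheap enough to lead the iteration: build the points scorer eagerly
      scorer = points.get(false);
    } else {
      // too costly to materialize up front: verify candidates via doc values instead
      scorer = docValues.get(true); // randomAccess hint: checked with advance()
    }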

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
index 488547b..b7af45a 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
@@ -286,6 +286,56 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
     }
   }
 
+  @Override
+  public long estimatePointCount(IntersectVisitor visitor) {
+    return estimatePointCount(getIntersectState(visitor), 1, minPackedValue, maxPackedValue);
+  }
+
+  private long estimatePointCount(IntersectState state,
+      int nodeID, byte[] cellMinPacked, byte[] cellMaxPacked) {
+    Relation r = state.visitor.compare(cellMinPacked, cellMaxPacked);
+
+    if (r == Relation.CELL_OUTSIDE_QUERY) {
+      // This cell is fully outside of the query shape: stop recursing
+      return 0L;
+    } else if (nodeID >= leafNodeOffset) {
+      // Assume all points match and there are no dups
+      return maxPointsInLeafNode;
+    } else {
+      
+      // Non-leaf node: recurse on the split left and right nodes
+
+      int address = nodeID * bytesPerIndexEntry;
+      int splitDim;
+      if (numDims == 1) {
+        splitDim = 0;
+      } else {
+        splitDim = splitPackedValues[address++] & 0xff;
+      }
+      
+      assert splitDim < numDims;
+
+      // TODO: can we alloc & reuse this up front?
+
+      byte[] splitPackedValue = new byte[packedBytesLength];
+
+      // Recurse on left sub-tree:
+      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(splitPackedValues, address, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
+      final long leftCost = estimatePointCount(state,
+                2*nodeID,
+                cellMinPacked, splitPackedValue);
+
+      // Recurse on right sub-tree:
+      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(splitPackedValues, address, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
+      final long rightCost = estimatePointCount(state,
+                2*nodeID+1,
+                splitPackedValue, cellMaxPacked);
+      return leftCost + rightCost;
+    }
+  }
+
   /** Copies the split value for this node into the provided byte array */
   public void copySplitValue(int nodeID, byte[] splitPackedValue) {
     int address = nodeID * bytesPerIndexEntry;
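
In other words, this estimate counts every leaf cell that is not fully outside the
query at maxPointsInLeafNode points: if a query crosses 3 of a tree's 8 leaf cells
and maxPointsInLeafNode is 1024, the estimate is 3 * 1024 = 3072, regardless of how
many points in those cells actually match.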

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java
index 38cd440..d9a0b30 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PointsWriter.java
@@ -128,6 +128,11 @@ public abstract class PointsWriter implements Closeable {
               }
 
               @Override
+              public long estimatePointCount(IntersectVisitor visitor) {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
               public byte[] getMinPackedValue() {
                 throw new UnsupportedOperationException();
               }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index 3bb10d3..7611a7f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -42,6 +42,8 @@ import org.apache.lucene.codecs.TermVectorsReader;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
 import org.apache.lucene.index.CheckIndex.Status.DocValuesStatus;
+import org.apache.lucene.index.PointValues.IntersectVisitor;
+import org.apache.lucene.index.PointValues.Relation;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.LeafFieldComparator;
 import org.apache.lucene.search.Sort;
@@ -1810,6 +1812,19 @@ public final class CheckIndex implements Closeable {
             long size = values.size();
             int docCount = values.getDocCount();
 
+            final long crossCost = values.estimatePointCount(new ConstantRelationIntersectVisitor(Relation.CELL_CROSSES_QUERY));
+            if (crossCost < size) {
+              throw new RuntimeException("estimatePointCount should return >= size when all cells match");
+            }
+            final long insideCost = values.estimatePointCount(new ConstantRelationIntersectVisitor(Relation.CELL_INSIDE_QUERY));
+            if (insideCost < size) {
+              throw new RuntimeException("estimatePointCount should return >= size when all cells fully match");
+            }
+            final long outsideCost = values.estimatePointCount(new ConstantRelationIntersectVisitor(Relation.CELL_OUTSIDE_QUERY));
+            if (outsideCost != 0) {
+              throw new RuntimeException("estimatePointCount should return 0 when no cells match");
+            }
+
             VerifyPointsVisitor visitor = new VerifyPointsVisitor(fieldInfo.name, reader.maxDoc(), values);
             values.intersect(visitor);
 
@@ -2002,6 +2017,28 @@ public final class CheckIndex implements Closeable {
     }
   }
 
+  private static class ConstantRelationIntersectVisitor implements IntersectVisitor {
+    private final Relation relation;
+
+    ConstantRelationIntersectVisitor(Relation relation) {
+      this.relation = relation;
+    }
+
+    @Override
+    public void visit(int docID) throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void visit(int docID, byte[] packedValue) throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+      return relation;
+    }
+  }
   
   /**
    * Test stored fields.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/index/PointValues.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PointValues.java b/lucene/core/src/java/org/apache/lucene/index/PointValues.java
index ffac5f7..01f77e4 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PointValues.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PointValues.java
@@ -26,6 +26,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FloatPoint;
 import org.apache.lucene.document.IntPoint;
 import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.bkd.BKDWriter;
 
@@ -220,6 +221,12 @@ public abstract class PointValues {
    *  to test whether each document is deleted, if necessary. */
   public abstract void intersect(IntersectVisitor visitor) throws IOException;
 
+  /** Estimate the number of points that would be visited by {@link #intersect}
+   * with the given {@link IntersectVisitor}. This should run many times faster
+   * than {@link #intersect(IntersectVisitor)}.
+   * @see DocIdSetIterator#cost */
+  public abstract long estimatePointCount(IntersectVisitor visitor);
+
   /** Returns minimum value for each dimension, packed, or null if {@link #size} is <code>0</code> */
   public abstract byte[] getMinPackedValue() throws IOException;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java b/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
index 07cf293..4aaf095 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PointValuesWriter.java
@@ -91,6 +91,11 @@ class PointValuesWriter {
       }
 
       @Override
+      public long estimatePointCount(IntersectVisitor visitor) {
+        throw new UnsupportedOperationException();
+      }
+
+      @Override
       public byte[] getMinPackedValue() {
         throw new UnsupportedOperationException();
       }
@@ -209,6 +214,11 @@ class PointValuesWriter {
     }
 
     @Override
+    public long estimatePointCount(IntersectVisitor visitor) {
+      return in.estimatePointCount(visitor);
+    }
+
+    @Override
     public byte[] getMinPackedValue() throws IOException {
       return in.getMinPackedValue();
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
index a6748b8..b36b284 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -328,6 +328,11 @@ class SortingLeafReader extends FilterLeafReader {
     }
 
     @Override
+    public long estimatePointCount(IntersectVisitor visitor) {
+      return in.estimatePointCount(visitor);
+    }
+
+    @Override
     public byte[] getMinPackedValue() throws IOException {
       return in.getMinPackedValue();
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
new file mode 100644
index 0000000..4540c85
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.stream.Stream;
+
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.util.PriorityQueue;
+
+final class Boolean2ScorerSupplier extends ScorerSupplier {
+
+  private final BooleanWeight weight;
+  private final Map<BooleanClause.Occur, Collection<ScorerSupplier>> subs;
+  private final boolean needsScores;
+  private final int minShouldMatch;
+  private long cost = -1;
+
+  Boolean2ScorerSupplier(BooleanWeight weight,
+      Map<Occur, Collection<ScorerSupplier>> subs,
+      boolean needsScores, int minShouldMatch) {
+    if (minShouldMatch < 0) {
+      throw new IllegalArgumentException("minShouldMatch must be non-negative, but got: " + minShouldMatch);
+    }
+    if (minShouldMatch != 0 && minShouldMatch >= subs.get(Occur.SHOULD).size()) {
+      throw new IllegalArgumentException("minShouldMatch must be strictly less than the number of SHOULD clauses");
+    }
+    if (needsScores == false && minShouldMatch == 0 && subs.get(Occur.SHOULD).size() > 0
+        && subs.get(Occur.MUST).size() + subs.get(Occur.FILTER).size() > 0) {
+      throw new IllegalArgumentException("Cannot pass purely optional clauses if scores are not needed");
+    }
+    if (subs.get(Occur.SHOULD).size() + subs.get(Occur.MUST).size() + subs.get(Occur.FILTER).size() == 0) {
+      throw new IllegalArgumentException("There should be at least one positive clause");
+    }
+    this.weight = weight;
+    this.subs = subs;
+    this.needsScores = needsScores;
+    this.minShouldMatch = minShouldMatch;
+  }
+
+  private long computeCost() {
+    OptionalLong minRequiredCost = Stream.concat(
+        subs.get(Occur.MUST).stream(),
+        subs.get(Occur.FILTER).stream())
+        .mapToLong(ScorerSupplier::cost)
+        .min();
+    if (minRequiredCost.isPresent() && minShouldMatch == 0) {
+      return minRequiredCost.getAsLong();
+    } else {
+      final Collection<ScorerSupplier> optionalScorers = subs.get(Occur.SHOULD);
+      final long shouldCost = MinShouldMatchSumScorer.cost(
+          optionalScorers.stream().mapToLong(ScorerSupplier::cost),
+          optionalScorers.size(), minShouldMatch);
+      return Math.min(minRequiredCost.orElse(Long.MAX_VALUE), shouldCost);
+    }
+  }
+
+  @Override
+  public long cost() {
+    if (cost == -1) {
+      cost = computeCost();
+    }
+    return cost;
+  }
+
+  @Override
+  public Scorer get(boolean randomAccess) throws IOException {
+    // three cases: conjunction, disjunction, or mix
+
+    // pure conjunction
+    if (subs.get(Occur.SHOULD).isEmpty()) {
+      return excl(req(subs.get(Occur.FILTER), subs.get(Occur.MUST), randomAccess), subs.get(Occur.MUST_NOT));
+    }
+
+    // pure disjunction
+    if (subs.get(Occur.FILTER).isEmpty() && subs.get(Occur.MUST).isEmpty()) {
+      return excl(opt(subs.get(Occur.SHOULD), minShouldMatch, needsScores, randomAccess), subs.get(Occur.MUST_NOT));
+    }
+
+    // conjunction-disjunction mix:
+    // we create the required and optional pieces, and then
+    // combine the two: if minNrShouldMatch > 0, then it's a conjunction: because the
+    // optional side must match. otherwise it's required + optional
+
+    if (minShouldMatch > 0) {
+      boolean reqRandomAccess = true;
+      boolean msmRandomAccess = true;
+      if (randomAccess == false) {
+        // We need to figure out whether the MUST/FILTER or the SHOULD clauses would lead the iteration
+        final long reqCost = Stream.concat(
+            subs.get(Occur.MUST).stream(),
+            subs.get(Occur.FILTER).stream())
+            .mapToLong(ScorerSupplier::cost)
+            .min().getAsLong();
+        final long msmCost = MinShouldMatchSumScorer.cost(
+            subs.get(Occur.SHOULD).stream().mapToLong(ScorerSupplier::cost),
+            subs.get(Occur.SHOULD).size(), minShouldMatch);
+        reqRandomAccess = reqCost > msmCost;
+        msmRandomAccess = msmCost > reqCost;
+      }
+      Scorer req = excl(req(subs.get(Occur.FILTER), subs.get(Occur.MUST), reqRandomAccess), subs.get(Occur.MUST_NOT));
+      Scorer opt = opt(subs.get(Occur.SHOULD), minShouldMatch, needsScores, msmRandomAccess);
+      return new ConjunctionScorer(weight, Arrays.asList(req, opt), Arrays.asList(req, opt));
+    } else {
+      assert needsScores;
+      return new ReqOptSumScorer(
+          excl(req(subs.get(Occur.FILTER), subs.get(Occur.MUST), randomAccess), subs.get(Occur.MUST_NOT)),
+          opt(subs.get(Occur.SHOULD), minShouldMatch, needsScores, true));
+    }
+  }
+
+  /** Create a new scorer for the given required clauses. Note that
+   *  {@code requiredScoring} is a subset of {@code required} containing
+   *  required clauses that should participate in scoring. */
+  private Scorer req(Collection<ScorerSupplier> requiredNoScoring, Collection<ScorerSupplier> requiredScoring, boolean randomAccess) throws IOException {
+    if (requiredNoScoring.size() + requiredScoring.size() == 1) {
+      Scorer req = (requiredNoScoring.isEmpty() ? requiredScoring : requiredNoScoring).iterator().next().get(randomAccess);
+
+      if (needsScores == false) {
+        return req;
+      }
+
+      if (requiredScoring.isEmpty()) {
+        // Scores are needed but we only have a filter clause
+        // BooleanWeight expects that calling score() is ok so we need to wrap
+        // to prevent score() from being propagated
+        return new FilterScorer(req) {
+          @Override
+          public float score() throws IOException {
+            return 0f;
+          }
+          @Override
+          public int freq() throws IOException {
+            return 0;
+          }
+        };
+      }
+
+      return req;
+    } else {
+      long minCost = Math.min(
+          requiredNoScoring.stream().mapToLong(ScorerSupplier::cost).min().orElse(Long.MAX_VALUE),
+          requiredScoring.stream().mapToLong(ScorerSupplier::cost).min().orElse(Long.MAX_VALUE));
+      List<Scorer> requiredScorers = new ArrayList<>();
+      List<Scorer> scoringScorers = new ArrayList<>();
+      for (ScorerSupplier s : requiredNoScoring) {
+        requiredScorers.add(s.get(randomAccess || s.cost() > minCost));
+      }
+      for (ScorerSupplier s : requiredScoring) {
+        Scorer scorer = s.get(randomAccess || s.cost() > minCost);
+        requiredScorers.add(scorer);
+        scoringScorers.add(scorer);
+      }
+      return new ConjunctionScorer(weight, requiredScorers, scoringScorers);
+    }
+  }
+
+  private Scorer excl(Scorer main, Collection<ScorerSupplier> prohibited) throws IOException {
+    if (prohibited.isEmpty()) {
+      return main;
+    } else {
+      return new ReqExclScorer(main, opt(prohibited, 1, false, true));
+    }
+  }
+
+  private Scorer opt(Collection<ScorerSupplier> optional, int minShouldMatch,
+      boolean needsScores, boolean randomAccess) throws IOException {
+    if (optional.size() == 1) {
+      return optional.iterator().next().get(randomAccess);
+    } else if (minShouldMatch > 1) {
+      final List<Scorer> optionalScorers = new ArrayList<>();
+      final PriorityQueue<ScorerSupplier> pq = new PriorityQueue<ScorerSupplier>(subs.get(Occur.SHOULD).size() - minShouldMatch + 1) {
+        @Override
+        protected boolean lessThan(ScorerSupplier a, ScorerSupplier b) {
+          return a.cost() > b.cost();
+        }
+      };
+      for (ScorerSupplier scorer : subs.get(Occur.SHOULD)) {
+        ScorerSupplier overflow = pq.insertWithOverflow(scorer);
+        if (overflow != null) {
+          optionalScorers.add(overflow.get(true));
+        }
+      }
+      for (ScorerSupplier scorer : pq) {
+        optionalScorers.add(scorer.get(randomAccess));
+      }
+      return new MinShouldMatchSumScorer(weight, optionalScorers, minShouldMatch);
+    } else {
+      final List<Scorer> optionalScorers = new ArrayList<>();
+      for (ScorerSupplier scorer : optional) {
+        optionalScorers.add(scorer.get(randomAccess));
+      }
+      return new DisjunctionSumScorer(weight, optionalScorers, needsScores);
+    }
+  }
+
+}
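
To make the randomAccess decision above concrete: with a single MUST clause of cost
100 and SHOULD clauses of costs 10, 20 and 30 under minShouldMatch=2, the msm cost
bound is the sum of the 3 - 2 + 1 = 2 cheapest SHOULD costs, i.e. 10 + 20 = 30.
Since reqCost (100) > msmCost (30), the required side is built with
randomAccess=true and the SHOULD side leads the iteration.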

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java b/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
index ce4419f..dc44d53 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
@@ -19,9 +19,11 @@ package org.apache.lucene.search;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.lucene.index.LeafReaderContext;
@@ -265,7 +267,9 @@ final class BooleanWeight extends Weight {
     if (prohibited.isEmpty()) {
       return positiveScorer;
     } else {
-      Scorer prohibitedScorer = opt(prohibited, 1);
+      Scorer prohibitedScorer = prohibited.size() == 1
+          ? prohibited.get(0)
+          : new DisjunctionSumScorer(this, prohibited, false);
       if (prohibitedScorer.twoPhaseIterator() != null) {
         // ReqExclBulkScorer can't deal efficiently with two-phased prohibited clauses
         return null;
@@ -288,50 +292,48 @@ final class BooleanWeight extends Weight {
 
   @Override
   public Scorer scorer(LeafReaderContext context) throws IOException {
-    // initially the user provided value,
-    // but if minNrShouldMatch == optional.size(),
-    // we will optimize and move these to required, making this 0
+    ScorerSupplier scorerSupplier = scorerSupplier(context);
+    if (scorerSupplier == null) {
+      return null;
+    }
+    return scorerSupplier.get(false);
+  }
+
+  @Override
+  public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
     int minShouldMatch = query.getMinimumNumberShouldMatch();
 
-    List<Scorer> required = new ArrayList<>();
-    // clauses that are required AND participate in scoring, subset of 'required'
-    List<Scorer> requiredScoring = new ArrayList<>();
-    List<Scorer> prohibited = new ArrayList<>();
-    List<Scorer> optional = new ArrayList<>();
+    final Map<Occur, Collection<ScorerSupplier>> scorers = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      scorers.put(occur, new ArrayList<>());
+    }
+
     Iterator<BooleanClause> cIter = query.iterator();
     for (Weight w  : weights) {
       BooleanClause c =  cIter.next();
-      Scorer subScorer = w.scorer(context);
+      ScorerSupplier subScorer = w.scorerSupplier(context);
       if (subScorer == null) {
         if (c.isRequired()) {
           return null;
         }
-      } else if (c.isRequired()) {
-        required.add(subScorer);
-        if (c.isScoring()) {
-          requiredScoring.add(subScorer);
-        }
-      } else if (c.isProhibited()) {
-        prohibited.add(subScorer);
       } else {
-        optional.add(subScorer);
+        scorers.get(c.getOccur()).add(subScorer);
       }
     }
-    
+
     // scorer simplifications:
     
-    if (optional.size() == minShouldMatch) {
+    if (scorers.get(Occur.SHOULD).size() == minShouldMatch) {
       // any optional clauses are in fact required
-      required.addAll(optional);
-      requiredScoring.addAll(optional);
-      optional.clear();
+      scorers.get(Occur.MUST).addAll(scorers.get(Occur.SHOULD));
+      scorers.get(Occur.SHOULD).clear();
       minShouldMatch = 0;
     }
     
-    if (required.isEmpty() && optional.isEmpty()) {
+    if (scorers.get(Occur.FILTER).isEmpty() && scorers.get(Occur.MUST).isEmpty() && scorers.get(Occur.SHOULD).isEmpty()) {
       // no required and optional clauses.
       return null;
-    } else if (optional.size() < minShouldMatch) {
+    } else if (scorers.get(Occur.SHOULD).size() < minShouldMatch) {
       // either >1 req scorer, or there are 0 req scorers and at least 1
       // optional scorer. Therefore if there are not enough optional scorers
       // no documents will be matched by the query
@@ -339,87 +341,11 @@ final class BooleanWeight extends Weight {
     }
 
     // we don't need scores, so if we have required clauses, drop optional clauses completely
-    if (!needsScores && minShouldMatch == 0 && required.size() > 0) {
-      optional.clear();
-    }
-    
-    // three cases: conjunction, disjunction, or mix
-    
-    // pure conjunction
-    if (optional.isEmpty()) {
-      return excl(req(required, requiredScoring), prohibited);
+    if (!needsScores && minShouldMatch == 0 && scorers.get(Occur.MUST).size() + scorers.get(Occur.FILTER).size() > 0) {
+      scorers.get(Occur.SHOULD).clear();
     }
-    
-    // pure disjunction
-    if (required.isEmpty()) {
-      return excl(opt(optional, minShouldMatch), prohibited);
-    }
-    
-    // conjunction-disjunction mix:
-    // we create the required and optional pieces, and then
-    // combine the two: if minNrShouldMatch > 0, then it's a conjunction: because the
-    // optional side must match. otherwise it's required + optional
-    
-    Scorer req = excl(req(required, requiredScoring), prohibited);
-    Scorer opt = opt(optional, minShouldMatch);
 
-    if (minShouldMatch > 0) {
-      return new ConjunctionScorer(this, Arrays.asList(req, opt), Arrays.asList(req, opt));
-    } else {
-      return new ReqOptSumScorer(req, opt);          
-    }
+    return new Boolean2ScorerSupplier(this, scorers, needsScores, minShouldMatch);
   }
 
-  /** Create a new scorer for the given required clauses. Note that
-   *  {@code requiredScoring} is a subset of {@code required} containing
-   *  required clauses that should participate in scoring. */
-  private Scorer req(List<Scorer> required, List<Scorer> requiredScoring) {
-    if (required.size() == 1) {
-      Scorer req = required.get(0);
-
-      if (needsScores == false) {
-        return req;
-      }
-
-      if (requiredScoring.isEmpty()) {
-        // Scores are needed but we only have a filter clause
-        // BooleanWeight expects that calling score() is ok so we need to wrap
-        // to prevent score() from being propagated
-        return new FilterScorer(req) {
-          @Override
-          public float score() throws IOException {
-            return 0f;
-          }
-          @Override
-          public int freq() throws IOException {
-            return 0;
-          }
-        };
-      }
-      
-      return req;
-    } else {
-      return new ConjunctionScorer(this, required, requiredScoring);
-    }
-  }
-  
-  private Scorer excl(Scorer main, List<Scorer> prohibited) throws IOException {
-    if (prohibited.isEmpty()) {
-      return main;
-    } else if (prohibited.size() == 1) {
-      return new ReqExclScorer(main, prohibited.get(0));
-    } else {
-      return new ReqExclScorer(main, new DisjunctionSumScorer(this, prohibited, false));
-    }
-  }
-  
-  private Scorer opt(List<Scorer> optional, int minShouldMatch) throws IOException {
-    if (optional.size() == 1) {
-      return optional.get(0);
-    } else if (minShouldMatch > 1) {
-      return new MinShouldMatchSumScorer(this, optional, minShouldMatch);
-    } else {
-      return new DisjunctionSumScorer(this, optional, needsScores);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java
index 43d03b2..780e854 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java
@@ -41,7 +41,7 @@ public final class ConjunctionDISI extends DocIdSetIterator {
    * returned {@link DocIdSetIterator} might leverage two-phase iteration in
    * which case it is possible to retrieve the {@link TwoPhaseIterator} using
    * {@link TwoPhaseIterator#unwrap}. */
-  public static DocIdSetIterator intersectScorers(List<Scorer> scorers) {
+  public static DocIdSetIterator intersectScorers(Collection<Scorer> scorers) {
     if (scorers.size() < 2) {
       throw new IllegalArgumentException("Cannot make a ConjunctionDISI of less than 2 iterators");
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
index 0066952..9cddab8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
@@ -20,7 +20,6 @@ package org.apache.lucene.search;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.List;
 
 /** Scorer for conjunctions, sets of queries, all of which are required. */
 class ConjunctionScorer extends Scorer {
@@ -29,7 +28,7 @@ class ConjunctionScorer extends Scorer {
   final Scorer[] scorers;
 
   /** Create a new {@link ConjunctionScorer}, note that {@code scorers} must be a subset of {@code required}. */
-  ConjunctionScorer(Weight weight, List<Scorer> required, List<Scorer> scorers) {
+  ConjunctionScorer(Weight weight, Collection<Scorer> required, Collection<Scorer> scorers) {
     super(weight);
     assert required.containsAll(scorers);
     this.disi = ConjunctionDISI.intersectScorers(required);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index c5a7d08..dbd05e8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -125,28 +125,48 @@ public final class ConstantScoreQuery extends Query {
         }
 
         @Override
-        public Scorer scorer(LeafReaderContext context) throws IOException {
-          final Scorer innerScorer = innerWeight.scorer(context);
-          if (innerScorer == null) {
+        public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+          ScorerSupplier innerScorerSupplier = innerWeight.scorerSupplier(context);
+          if (innerScorerSupplier == null) {
             return null;
           }
-          final float score = score();
-          return new FilterScorer(innerScorer) {
+          return new ScorerSupplier() {
             @Override
-            public float score() throws IOException {
-              return score;
-            }
-            @Override
-            public int freq() throws IOException {
-              return 1;
+            public Scorer get(boolean randomAccess) throws IOException {
+              final Scorer innerScorer = innerScorerSupplier.get(randomAccess);
+              final float score = score();
+              return new FilterScorer(innerScorer) {
+                @Override
+                public float score() throws IOException {
+                  return score;
+                }
+                @Override
+                public int freq() throws IOException {
+                  return 1;
+                }
+                @Override
+                public Collection<ChildScorer> getChildren() {
+                  return Collections.singleton(new ChildScorer(innerScorer, "constant"));
+                }
+              };
             }
+
             @Override
-            public Collection<ChildScorer> getChildren() {
-              return Collections.singleton(new ChildScorer(innerScorer, "constant"));
+            public long cost() {
+              return innerScorerSupplier.cost();
             }
           };
         }
 
+        @Override
+        public Scorer scorer(LeafReaderContext context) throws IOException {
+          ScorerSupplier scorerSupplier = scorerSupplier(context);
+          if (scorerSupplier == null) {
+            return null;
+          }
+          return scorerSupplier.get(false);
+        }
+
       };
     } else {
       return innerWeight;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
index 032b5fe..c2c419c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
@@ -22,6 +22,8 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.stream.LongStream;
+import java.util.stream.StreamSupport;
 
 import org.apache.lucene.util.PriorityQueue;
 
@@ -47,7 +49,7 @@ import static org.apache.lucene.search.DisiPriorityQueue.rightNode;
  */
 final class MinShouldMatchSumScorer extends Scorer {
 
-  private static long cost(Collection<Scorer> scorers, int minShouldMatch) {
+  static long cost(LongStream costs, int numScorers, int minShouldMatch) {
     // the idea here is the following: a boolean query c1,c2,...cn with minShouldMatch=m
     // could be rewritten to:
     // (c1 AND (c2..cn|msm=m-1)) OR (!c1 AND (c2..cn|msm=m))
@@ -61,20 +63,14 @@ final class MinShouldMatchSumScorer extends Scorer {
 
     // If we recurse infinitely, we find out that the cost of a msm query is the sum of the
     // costs of the num_scorers - minShouldMatch + 1 least costly scorers
-    final PriorityQueue<Scorer> pq = new PriorityQueue<Scorer>(scorers.size() - minShouldMatch + 1) {
+    final PriorityQueue<Long> pq = new PriorityQueue<Long>(numScorers - minShouldMatch + 1) {
       @Override
-      protected boolean lessThan(Scorer a, Scorer b) {
-        return a.iterator().cost() > b.iterator().cost();
+      protected boolean lessThan(Long a, Long b) {
+        return a > b;
       }
     };
-    for (Scorer scorer : scorers) {
-      pq.insertWithOverflow(scorer);
-    }
-    long cost = 0;
-    for (Scorer scorer = pq.pop(); scorer != null; scorer = pq.pop()) {
-      cost += scorer.iterator().cost();
-    }
-    return cost;
+    costs.forEach(pq::insertWithOverflow);
+    return StreamSupport.stream(pq.spliterator(), false).mapToLong(Number::longValue).sum();
   }
 
   final int minShouldMatch;
@@ -124,7 +120,7 @@ final class MinShouldMatchSumScorer extends Scorer {
       children.add(new ChildScorer(scorer, "SHOULD"));
     }
     this.childScorers = Collections.unmodifiableCollection(children);
-    this.cost = cost(scorers, minShouldMatch);
+    this.cost = cost(scorers.stream().map(Scorer::iterator).mapToLong(DocIdSetIterator::cost), scorers.size(), minShouldMatch);
   }
 
   @Override
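
A worked example of this bound (illustrative, using the package-private helper and
assuming java.util.stream.LongStream): with costs {5, 8, 13, 21} and
minShouldMatch=3, the queue keeps the 4 - 3 + 1 = 2 least costly clauses, so the
bound is 5 + 8 = 13:

    long bound = MinShouldMatchSumScorer.cost(LongStream.of(5, 8, 13, 21), 4, 3); // == 13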

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
index 5fd0167..29c6e7f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PointRangeQuery.java
@@ -104,71 +104,67 @@ public abstract class PointRangeQuery extends Query {
 
     return new ConstantScoreWeight(this, boost) {
 
-      private DocIdSet buildMatchingDocIdSet(LeafReader reader, PointValues values) throws IOException {
-        DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc(), values, field);
+      private IntersectVisitor getIntersectVisitor(DocIdSetBuilder result) {
+        return new IntersectVisitor() {
 
-        values.intersect(
-            new IntersectVisitor() {
+          DocIdSetBuilder.BulkAdder adder;
 
-              DocIdSetBuilder.BulkAdder adder;
+          @Override
+          public void grow(int count) {
+            adder = result.grow(count);
+          }
 
-              @Override
-              public void grow(int count) {
-                adder = result.grow(count);
-              }
+          @Override
+          public void visit(int docID) {
+            adder.add(docID);
+          }
 
-              @Override
-              public void visit(int docID) {
-                adder.add(docID);
+          @Override
+          public void visit(int docID, byte[] packedValue) {
+            for(int dim=0;dim<numDims;dim++) {
+              int offset = dim*bytesPerDim;
+              if (StringHelper.compare(bytesPerDim, packedValue, offset, lowerPoint, offset) < 0) {
+                // Doc's value is too low, in this dimension
+                return;
               }
-
-              @Override
-              public void visit(int docID, byte[] packedValue) {
-                for(int dim=0;dim<numDims;dim++) {
-                  int offset = dim*bytesPerDim;
-                  if (StringHelper.compare(bytesPerDim, packedValue, offset, lowerPoint, offset) < 0) {
-                    // Doc's value is too low, in this dimension
-                    return;
-                  }
-                  if (StringHelper.compare(bytesPerDim, packedValue, offset, upperPoint, offset) > 0) {
-                    // Doc's value is too high, in this dimension
-                    return;
-                  }
-                }
-
-                // Doc is in-bounds
-                adder.add(docID);
+              if (StringHelper.compare(bytesPerDim, packedValue, offset, upperPoint, offset) > 0) {
+                // Doc's value is too high, in this dimension
+                return;
               }
+            }
 
-              @Override
-              public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
-
-                boolean crosses = false;
+            // Doc is in-bounds
+            adder.add(docID);
+          }
 
-                for(int dim=0;dim<numDims;dim++) {
-                  int offset = dim*bytesPerDim;
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
 
-                  if (StringHelper.compare(bytesPerDim, minPackedValue, offset, upperPoint, offset) > 0 ||
-                      StringHelper.compare(bytesPerDim, maxPackedValue, offset, lowerPoint, offset) < 0) {
-                    return Relation.CELL_OUTSIDE_QUERY;
-                  }
+            boolean crosses = false;
 
-                  crosses |= StringHelper.compare(bytesPerDim, minPackedValue, offset, lowerPoint, offset) < 0 ||
-                    StringHelper.compare(bytesPerDim, maxPackedValue, offset, upperPoint, offset) > 0;
-                }
+            for(int dim=0;dim<numDims;dim++) {
+              int offset = dim*bytesPerDim;
 
-                if (crosses) {
-                  return Relation.CELL_CROSSES_QUERY;
-                } else {
-                  return Relation.CELL_INSIDE_QUERY;
-                }
+              if (StringHelper.compare(bytesPerDim, minPackedValue, offset, upperPoint, offset) > 0 ||
+                  StringHelper.compare(bytesPerDim, maxPackedValue, offset, lowerPoint, offset) < 0) {
+                return Relation.CELL_OUTSIDE_QUERY;
               }
-            });
-        return result.build();
+
+              crosses |= StringHelper.compare(bytesPerDim, minPackedValue, offset, lowerPoint, offset) < 0 ||
+                  StringHelper.compare(bytesPerDim, maxPackedValue, offset, upperPoint, offset) > 0;
+            }
+
+            if (crosses) {
+              return Relation.CELL_CROSSES_QUERY;
+            } else {
+              return Relation.CELL_INSIDE_QUERY;
+            }
+          }
+        };
       }
 
       @Override
-      public Scorer scorer(LeafReaderContext context) throws IOException {
+      public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
         LeafReader reader = context.reader();
 
         PointValues values = reader.getPointValues(field);
@@ -201,15 +197,55 @@ public abstract class PointRangeQuery extends Query {
           allDocsMatch = false;
         }
 
-        DocIdSetIterator iterator;
+        final Weight weight = this;
         if (allDocsMatch) {
           // all docs have a value and all points are within bounds, so everything matches
-          iterator = DocIdSetIterator.all(reader.maxDoc());
+          return new ScorerSupplier() {
+            @Override
+            public Scorer get(boolean randomAccess) {
+              return new ConstantScoreScorer(weight, score(),
+                  DocIdSetIterator.all(reader.maxDoc()));
+            }
+            
+            @Override
+            public long cost() {
+              return reader.maxDoc();
+            }
+          };
         } else {
-          iterator = buildMatchingDocIdSet(reader, values).iterator();
+          return new ScorerSupplier() {
+
+            final DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc(), values, field);
+            final IntersectVisitor visitor = getIntersectVisitor(result);
+            long cost = -1;
+
+            @Override
+            public Scorer get(boolean randomAccess) throws IOException {
+              values.intersect(visitor);
+              DocIdSetIterator iterator = result.build().iterator();
+              return new ConstantScoreScorer(weight, score(), iterator);
+            }
+            
+            @Override
+            public long cost() {
+              if (cost == -1) {
+                // Computing the cost may be expensive, so only do it if necessary
+                cost = values.estimatePointCount(visitor);
+                assert cost >= 0;
+              }
+              return cost;
+            }
+          };
         }
+      }
 
-        return new ConstantScoreScorer(this, score(), iterator);
+      @Override
+      public Scorer scorer(LeafReaderContext context) throws IOException {
+        ScorerSupplier scorerSupplier = scorerSupplier(context);
+        if (scorerSupplier == null) {
+          return null;
+        }
+        return scorerSupplier.get(false);
       }
     };
   }
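
The point of the supplier here is laziness: cost() only calls estimatePointCount,
which visits the index structure, while get() performs the full intersect and
builds the DocIdSet. A caller sketch (the threshold name is hypothetical):

    ScorerSupplier supplier = weight.scorerSupplier(context);
    if (supplier != null && supplier.cost() <= maxAffordableCost) {
      Scorer scorer = supplier.get(false); // values.intersect() happens only here
    }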

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/ScorerSupplier.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/ScorerSupplier.java b/lucene/core/src/java/org/apache/lucene/search/ScorerSupplier.java
new file mode 100644
index 0000000..3f6906a
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/ScorerSupplier.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+
+/**
+ * A supplier of {@link Scorer}. This makes it possible to get an estimate of the
+ * cost before building the {@link Scorer}.
+ */
+public abstract class ScorerSupplier {
+
+  /**
+   * Get the {@link Scorer}. This may not return {@code null} and must be called
+   * at most once.
+   * @param randomAccess A hint about the expected usage of the {@link Scorer}.
+   * If {@link DocIdSetIterator#advance} or {@link TwoPhaseIterator} will be
+   * used to check whether given doc ids match, then pass {@code true}.
+   * Otherwise if the {@link Scorer} will be mostly used to lead the iteration
+   * using {@link DocIdSetIterator#nextDoc()}, then {@code false} should be
+   * passed. When in doubt, pass {@code false}, which usually has a better
+   * worst case.
+   */
+  public abstract Scorer get(boolean randomAccess) throws IOException;
+
+  /**
+   * Get an estimate of the {@link Scorer} that would be returned by {@link #get}.
+   * This may be a costly operation, so it should only be called if necessary.
+   * @see DocIdSetIterator#cost
+   */
+  public abstract long cost();
+
+}
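
A sketch of how a caller would pick the randomAccess hint (supplier names are
hypothetical):

    // This clause leads the iteration with nextDoc(): ask for the sequential form.
    Scorer lead = cheapSupplier.get(false);
    // This clause only verifies the lead's candidates via advance(): random access is fine.
    Scorer verifier = costlySupplier.get(true);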

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/search/Weight.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/Weight.java b/lucene/core/src/java/org/apache/lucene/search/Weight.java
index 47f553e..eef052d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Weight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Weight.java
@@ -103,6 +103,31 @@ public abstract class Weight {
   public abstract Scorer scorer(LeafReaderContext context) throws IOException;
 
   /**
+   * Optional method.
+   * Get a {@link ScorerSupplier}, which makes it possible to know the cost of the
+   * {@link Scorer} before building it. The default implementation calls {@link #scorer} and
+   * builds a {@link ScorerSupplier} wrapper around it.
+   * @see #scorer
+   */
+  public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+    final Scorer scorer = scorer(context);
+    if (scorer == null) {
+      return null;
+    }
+    return new ScorerSupplier() {
+      @Override
+      public Scorer get(boolean randomAccess) {
+        return scorer;
+      }
+
+      @Override
+      public long cost() {
+        return scorer.iterator().cost();
+      }
+    };
+  }
+
+  /**
    * Optional method, to return a {@link BulkScorer} to
    * score the query and send hits to a {@link Collector}.
    * Only queries that have a different top-level approach

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
index 44744c1..14e1adb 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
@@ -482,10 +482,16 @@ public final class BKDReader extends PointValues implements Accountable {
     }
   }
 
+  @Override
   public void intersect(IntersectVisitor visitor) throws IOException {
     intersect(getIntersectState(visitor), minPackedValue, maxPackedValue);
   }
 
+  @Override
+  public long estimatePointCount(IntersectVisitor visitor) {
+    return estimatePointCount(getIntersectState(visitor), minPackedValue, maxPackedValue);
+  }
+
   /** Fast path: this is called when the query box fully encompasses all cells under this node. */
   private void addAll(IntersectState state) throws IOException {
     //System.out.println("R: addAll nodeID=" + nodeID);
@@ -696,6 +702,59 @@ public final class BKDReader extends PointValues implements Accountable {
     }
   }
 
+  private long estimatePointCount(IntersectState state, byte[] cellMinPacked, byte[] cellMaxPacked) {
+
+    /*
+    System.out.println("\nR: intersect nodeID=" + state.index.getNodeID());
+    for(int dim=0;dim<numDims;dim++) {
+      System.out.println("  dim=" + dim + "\n    cellMin=" + new BytesRef(cellMinPacked, dim*bytesPerDim, bytesPerDim) + "\n    cellMax=" + new BytesRef(cellMaxPacked, dim*bytesPerDim, bytesPerDim));
+    }
+    */
+
+    Relation r = state.visitor.compare(cellMinPacked, cellMaxPacked);
+
+    if (r == Relation.CELL_OUTSIDE_QUERY) {
+      // This cell is fully outside of the query shape: stop recursing
+      return 0L;
+    } else if (state.index.isLeafNode()) {
+      // Assume all points match and there are no dups
+      return maxPointsInLeafNode;
+    } else {
+      
+      // Non-leaf node: recurse on the split left and right nodes
+      int splitDim = state.index.getSplitDim();
+      assert splitDim >= 0: "splitDim=" + splitDim;
+      assert splitDim < numDims;
+
+      byte[] splitPackedValue = state.index.getSplitPackedValue();
+      BytesRef splitDimValue = state.index.getSplitDimValue();
+      assert splitDimValue.length == bytesPerDim;
+      //System.out.println("  splitDimValue=" + splitDimValue + " splitDim=" + splitDim);
+
+      // make sure cellMin <= splitValue <= cellMax:
+      assert StringHelper.compare(bytesPerDim, cellMinPacked, splitDim*bytesPerDim, splitDimValue.bytes, splitDimValue.offset) <= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numDims=" + numDims;
+      assert StringHelper.compare(bytesPerDim, cellMaxPacked, splitDim*bytesPerDim, splitDimValue.bytes, splitDimValue.offset) >= 0: "bytesPerDim=" + bytesPerDim + " splitDim=" + splitDim + " numDims=" + numDims;
+
+      // Recurse on left sub-tree:
+      System.arraycopy(cellMaxPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
+      state.index.pushLeft();
+      final long leftCost = estimatePointCount(state, cellMinPacked, splitPackedValue);
+      state.index.pop();
+
+      // Restore the split dim value since it may have been overwritten while recursing:
+      System.arraycopy(splitPackedValue, splitDim*bytesPerDim, splitDimValue.bytes, splitDimValue.offset, bytesPerDim);
+
+      // Recurse on right sub-tree:
+      System.arraycopy(cellMinPacked, 0, splitPackedValue, 0, packedBytesLength);
+      System.arraycopy(splitDimValue.bytes, splitDimValue.offset, splitPackedValue, splitDim*bytesPerDim, bytesPerDim);
+      state.index.pushRight();
+      final long rightCost = estimatePointCount(state, splitPackedValue, cellMaxPacked);
+      state.index.pop();
+      return leftCost + rightCost;
+    }
+  }
+
   @Override
   public long ramBytesUsed() {
     if (packedIndex != null) {
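
Note: the estimatePointCount added above walks only the in-heap index nodes and never reads the points themselves, so it is cheap but approximate. A minimal sketch of how such an estimate might be consumed; the helper name, field handling and selectivity threshold are assumptions for illustration, not established Lucene usage:

    import java.io.IOException;

    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.PointValues;
    import org.apache.lucene.index.PointValues.IntersectVisitor;

    class PointCountSketch {
      // Hypothetical helper: treat a query as "selective" when the cheap
      // estimate says fewer than half of the segment's points would match.
      static boolean isSelective(LeafReader reader, String field,
                                 IntersectVisitor visitor) throws IOException {
        PointValues points = reader.getPointValues(field);
        if (points == null) {
          return false; // field has no points in this segment
        }
        return points.estimatePointCount(visitor) < points.size() / 2;
      }
    }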

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java
new file mode 100644
index 0000000..7f46a22
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2ScorerSupplier.java
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.Map;
+
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+
+public class TestBoolean2ScorerSupplier extends LuceneTestCase {
+
+  private static class FakeScorer extends Scorer {
+
+    private final DocIdSetIterator it;
+
+    FakeScorer(long cost) {
+      super(null);
+      this.it = DocIdSetIterator.all(Math.toIntExact(cost));
+    }
+
+    @Override
+    public int docID() {
+      return it.docID();
+    }
+
+    @Override
+    public float score() throws IOException {
+      return 1;
+    }
+
+    @Override
+    public int freq() throws IOException {
+      return 1;
+    }
+
+    @Override
+    public DocIdSetIterator iterator() {
+      return it;
+    }
+
+    @Override
+    public String toString() {
+      return "FakeScorer(cost=" + it.cost() + ")";
+    }
+
+  }
+
+  private static class FakeScorerSupplier extends ScorerSupplier {
+
+    private final long cost;
+    private final Boolean randomAccess;
+
+    FakeScorerSupplier(long cost) {
+      this.cost = cost;
+      this.randomAccess = null;
+    }
+
+    FakeScorerSupplier(long cost, boolean randomAccess) {
+      this.cost = cost;
+      this.randomAccess = randomAccess;
+    }
+
+    @Override
+    public Scorer get(boolean randomAccess) throws IOException {
+      if (this.randomAccess != null) {
+        assertEquals(this.toString(), this.randomAccess, randomAccess);
+      }
+      return new FakeScorer(cost);
+    }
+
+    @Override
+    public long cost() {
+      return cost;
+    }
+    
+    @Override
+    public String toString() {
+      return "FakeLazyScorer(cost=" + cost + ",randomAccess=" + randomAccess + ")";
+    }
+
+  }
+
+  public void testConjunctionCost() {
+    Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(42));
+    assertEquals(42, new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).cost());
+
+    subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(12));
+    assertEquals(12, new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).cost());
+
+    subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(20));
+    assertEquals(12, new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).cost());
+  }
+
+  public void testDisjunctionCost() throws IOException {
+    Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42));
+    ScorerSupplier s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0);
+    assertEquals(42, s.cost());
+    assertEquals(42, s.get(random().nextBoolean()).iterator().cost());
+
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12));
+    s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0);
+    assertEquals(42 + 12, s.cost());
+    assertEquals(42 + 12, s.get(random().nextBoolean()).iterator().cost());
+
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20));
+    s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0);
+    assertEquals(42 + 12 + 20, s.cost());
+    assertEquals(42 + 12 + 20, s.get(random().nextBoolean()).iterator().cost());
+  }
+
+  public void testDisjunctionWithMinShouldMatchCost() throws IOException {
+    Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12));
+    ScorerSupplier s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 1);
+    assertEquals(42 + 12, s.cost());
+    assertEquals(42 + 12, s.get(random().nextBoolean()).iterator().cost());
+
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20));
+    s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 1);
+    assertEquals(42 + 12 + 20, s.cost());
+    assertEquals(42 + 12 + 20, s.get(random().nextBoolean()).iterator().cost());
+    s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2);
+    assertEquals(12 + 20, s.cost());
+    assertEquals(12 + 20, s.get(random().nextBoolean()).iterator().cost());
+
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30));
+    s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 1);
+    assertEquals(42 + 12 + 20 + 30, s.cost());
+    assertEquals(42 + 12 + 20 + 30, s.get(random().nextBoolean()).iterator().cost());
+    s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2);
+    assertEquals(12 + 20 + 30, s.cost());
+    assertEquals(12 + 20 + 30, s.get(random().nextBoolean()).iterator().cost());
+    s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 3);
+    assertEquals(12 + 20, s.cost());
+    assertEquals(12 + 20, s.get(random().nextBoolean()).iterator().cost());
+  }
+
+  public void testDuelCost() throws Exception {
+    final int iters = atLeast(1000);
+    for (int iter = 0; iter < iters; ++iter) {
+      Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+      for (Occur occur : Occur.values()) {
+        subs.put(occur, new ArrayList<>());
+      }
+      int numClauses = TestUtil.nextInt(random(), 1, 10);
+      int numShoulds = 0;
+      int numRequired = 0;
+      for (int j = 0; j < numClauses; ++j) {
+        Occur occur = RandomPicks.randomFrom(random(), Occur.values());
+        subs.get(occur).add(new FakeScorerSupplier(random().nextInt(100)));
+        if (occur == Occur.SHOULD) {
+          ++numShoulds;
+        } else if (occur == Occur.FILTER || occur == Occur.MUST) {
+          numRequired++;
+        }
+      }
+      boolean needsScores = random().nextBoolean();
+      if (needsScores == false && numRequired > 0) {
+        numClauses -= numShoulds;
+        numShoulds = 0;
+        subs.get(Occur.SHOULD).clear();
+      }
+      if (numShoulds + numRequired == 0) {
+        // only negative clauses, invalid
+        continue;
+      }
+      int minShouldMatch = numShoulds == 0 ? 0 : TestUtil.nextInt(random(), 0, numShoulds - 1);
+      Boolean2ScorerSupplier supplier = new Boolean2ScorerSupplier(null,
+          subs, needsScores, minShouldMatch);
+      long cost1 = supplier.cost();
+      long cost2 = supplier.get(false).iterator().cost();
+      assertEquals("clauses=" + subs + ", minShouldMatch=" + minShouldMatch, cost1, cost2);
+    }
+  }
+
+  // test the tester...
+  public void testFakeScorerSupplier() {
+    FakeScorerSupplier randomAccessSupplier = new FakeScorerSupplier(random().nextInt(100), true);
+    expectThrows(AssertionError.class, () -> randomAccessSupplier.get(false));
+    FakeScorerSupplier sequentialSupplier = new FakeScorerSupplier(random().nextInt(100), false);
+    expectThrows(AssertionError.class, () -> sequentialSupplier.get(true));
+  }
+
+  public void testConjunctionRandomAccess() throws IOException {
+    Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    // If sequential access is required, only the least costly clause does not use random-access
+    subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(42, true));
+    subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(12, false));
+    new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(false); // triggers assertions as a side-effect
+
+    subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    // If random access is required, then we propagate to sub clauses
+    subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(42, true));
+    subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(12, true));
+    new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(true); // triggers assertions as a side-effect
+  }
+
+  public void testDisjunctionRandomAccess() throws IOException {
+    // disjunctions propagate
+    for (boolean randomAccess : new boolean[] {false, true}) {
+      Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+      for (Occur occur : Occur.values()) {
+        subs.put(occur, new ArrayList<>());
+      }
+      subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, randomAccess));
+      subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, randomAccess));
+      new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(randomAccess); // triggers assertions as a side-effect
+    }
+  }
+
+  public void testDisjunctionWithMinShouldMatchRandomAccess() throws IOException {
+    Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    // Only the most costly clause uses random-access in that case:
+    // most of the time, we will find agreement between the 2 least costly
+    // clauses and only then check whether the 3rd one matches too
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, true));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, false));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, false));
+    new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2).get(false); // triggers assertions as a side-effect
+
+    subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    // When random-access is true, just propagate
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, true));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, true));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, true));
+    new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2).get(true); // triggers assertions as a side-effect
+
+    subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, true));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, false));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, false));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20, false));
+    new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2).get(false); // triggers assertions as a side-effect
+
+    subs = new EnumMap<>(Occur.class);
+    for (Occur occur : Occur.values()) {
+      subs.put(occur, new ArrayList<>());
+    }
+
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, true));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, false));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, true));
+    subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20, false));
+    new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 3).get(false); // triggers assertions as a side-effect
+  }
+
+  public void testProhibitedRandomAccess() throws IOException {
+    for (boolean randomAccess : new boolean[] {false, true}) {
+      Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+      for (Occur occur : Occur.values()) {
+        subs.put(occur, new ArrayList<>());
+      }
+
+      // The MUST_NOT clause always uses random-access
+      subs.get(Occur.MUST).add(new FakeScorerSupplier(42, randomAccess));
+      subs.get(Occur.MUST_NOT).add(new FakeScorerSupplier(TestUtil.nextInt(random(), 1, 100), true));
+      new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(randomAccess); // triggers assertions as a side-effect
+    }
+  }
+
+  public void testMixedRandomAccess() throws IOException {
+    for (boolean randomAccess : new boolean[] {false, true}) {
+      Map<Occur, Collection<ScorerSupplier>> subs = new EnumMap<>(Occur.class);
+      for (Occur occur : Occur.values()) {
+        subs.put(occur, new ArrayList<>());
+      }
+
+      // The SHOULD clause always uses random-access if there is a MUST clause
+      subs.get(Occur.MUST).add(new FakeScorerSupplier(42, randomAccess));
+      subs.get(Occur.SHOULD).add(new FakeScorerSupplier(TestUtil.nextInt(random(), 1, 100), true));
+      new Boolean2ScorerSupplier(null, subs, true, 0).get(randomAccess); // triggers assertions as a side-effect
+    }
+  }
+
+}
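
Note: the minShouldMatch cost expectations above all follow one rule: any hit must be found by at least one of the (numShould - minShouldMatch + 1) least costly clauses, so the (minShouldMatch - 1) most costly clauses drop out of the cost bound. A standalone sketch of that arithmetic (not the actual Boolean2ScorerSupplier code, just the rule the assertions encode):

    import java.util.Arrays;

    class MinShouldMatchCostSketch {
      static long cost(long[] clauseCosts, int minShouldMatch) {
        long[] sorted = clauseCosts.clone();
        Arrays.sort(sorted); // ascending
        // keep everything but the (minShouldMatch - 1) most costly clauses
        int keep = sorted.length - Math.max(0, minShouldMatch - 1);
        long sum = 0;
        for (int i = 0; i < keep; ++i) {
          sum += sorted[i];
        }
        return sum;
      }

      public static void main(String[] args) {
        long[] costs = {42, 12, 20, 30}; // same costs as the test above
        System.out.println(cost(costs, 1)); // 104 = 42 + 12 + 20 + 30
        System.out.println(cost(costs, 2)); // 62  = 12 + 20 + 30
        System.out.println(cost(costs, 3)); // 32  = 12 + 20
      }
    }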

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
index 60ba528..38ddcab 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -206,8 +206,8 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
           "    MUST ConstantScoreScorer\n" +
           "    MUST MinShouldMatchSumScorer\n" +
           "            SHOULD TermScorer body:nutch\n" +
-          "            SHOULD TermScorer body:web\n" +
-          "            SHOULD TermScorer body:crawler",
+          "            SHOULD TermScorer body:crawler\n" +
+          "            SHOULD TermScorer body:web",
           summary);
     }
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/test/org/apache/lucene/search/TestFilterWeight.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFilterWeight.java b/lucene/core/src/test/org/apache/lucene/search/TestFilterWeight.java
index cfa01bf..b58fe1b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFilterWeight.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFilterWeight.java
@@ -18,6 +18,7 @@ package org.apache.lucene.search;
 
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
+import java.util.Arrays;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
@@ -35,7 +36,7 @@ public class TestFilterWeight extends LuceneTestCase {
       final int modifiers = superClassMethod.getModifiers();
       if (Modifier.isFinal(modifiers)) continue;
       if (Modifier.isStatic(modifiers)) continue;
-      if (superClassMethod.getName().equals("bulkScorer")) {
+      if (Arrays.asList("bulkScorer", "scorerSupplier").contains(superClassMethod.getName())) {
         try {
           final Method subClassMethod = subClass.getDeclaredMethod(
               superClassMethod.getName(),

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java b/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
index 625b8c2..f87a73a 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
@@ -312,6 +312,11 @@ public class TestDocIdSetBuilder extends LuceneTestCase {
     }
 
     @Override
+    public long estimatePointCount(IntersectVisitor visitor) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
     public byte[] getMinPackedValue() throws IOException {
       throw new UnsupportedOperationException();
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java b/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java
index 8d2ea3e..62ab2b8 100644
--- a/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java
+++ b/lucene/core/src/test/org/apache/lucene/util/bkd/TestMutablePointsReaderUtils.java
@@ -221,6 +221,11 @@ public class TestMutablePointsReaderUtils extends LuceneTestCase {
     }
 
     @Override
+    public long estimatePointCount(IntersectVisitor visitor) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
     public byte[] getMinPackedValue() throws IOException {
       throw new UnsupportedOperationException();
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
----------------------------------------------------------------------
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index 218d26c..b1adf60 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -1522,6 +1522,11 @@ public class MemoryIndex {
       }
 
       @Override
+      public long estimatePointCount(IntersectVisitor visitor) {
+        return 1L;
+      }
+
+      @Override
       public byte[] getMinPackedValue() throws IOException {
         BytesRef[] values = info.pointValues;
         if (values != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java
index 459ffa4..3d4feb9 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesRangeQuery.java
@@ -23,8 +23,10 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.BytesRef;
 
 /**
@@ -33,10 +35,11 @@ import org.apache.lucene.util.BytesRef;
  * dense case where most documents match this query, it <b>might</b> be as
  * fast or faster than a regular {@link PointRangeQuery}.
  *
- * <p>
- * <b>NOTE</b>: be very careful using this query: it is
- * typically much slower than using {@code TermsQuery},
- * but in certain specialized cases may be faster.
+ * <b>NOTE:</b> This query is typically best used within a
+ * {@link IndexOrDocValuesQuery} alongside a query that uses an indexed
+ * structure such as {@link PointValues points} or {@link Terms terms},
+ * which makes it possible to run the query on doc values when that
+ * would be more efficient, and on the index otherwise.
  *
  * @lucene.experimental
  */
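
Note: a usage sketch of the pairing this javadoc recommends; the field name and bounds are made up, and it assumes "price" is indexed both as a LongPoint and as a numeric doc-values field:

    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.search.DocValuesRangeQuery;
    import org.apache.lucene.search.IndexOrDocValuesQuery;
    import org.apache.lucene.search.Query;

    class PriceRangeSketch {
      static Query newPriceRange(long min, long max) {
        Query indexQuery = LongPoint.newRangeQuery("price", min, max);
        Query dvQuery = DocValuesRangeQuery.newLongRange("price", min, max, true, true);
        // Execution can now pick whichever side is cheaper per segment.
        return new IndexOrDocValuesQuery(indexQuery, dvQuery);
      }
    }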

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/sandbox/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
new file mode 100644
index 0000000..0f9e8e3
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+
+/**
+ * A query that uses either an index (points or terms) or doc values in order
+ * to run a range query, depending on which one is more efficient.
+ */
+public final class IndexOrDocValuesQuery extends Query {
+
+  private final Query indexQuery, dvQuery;
+
+  /**
+   * Constructor that takes both a query that executes on an index structure
+   * like the inverted index or the points tree, and another query that
+   * executes on doc values. Both queries must match the same documents and
+   * attribute constant scores.
+   */
+  public IndexOrDocValuesQuery(Query indexQuery, Query dvQuery) {
+    this.indexQuery = indexQuery;
+    this.dvQuery = dvQuery;
+  }
+
+  @Override
+  public String toString(String field) {
+    return indexQuery.toString(field);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (sameClassAs(obj) == false) {
+      return false;
+    }
+    IndexOrDocValuesQuery that = (IndexOrDocValuesQuery) obj;
+    return indexQuery.equals(that.indexQuery) && dvQuery.equals(that.dvQuery);
+  }
+
+  @Override
+  public int hashCode() {
+    int h = classHash();
+    h = 31 * h + indexQuery.hashCode();
+    h = 31 * h + dvQuery.hashCode();
+    return h;
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    Query indexRewrite = indexQuery.rewrite(reader);
+    Query dvRewrite = dvQuery.rewrite(reader);
+    if (indexQuery != indexRewrite || dvQuery != dvRewrite) {
+      return new IndexOrDocValuesQuery(indexRewrite, dvRewrite);
+    }
+    return this;
+  }
+
+  @Override
+  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+    final Weight indexWeight = indexQuery.createWeight(searcher, needsScores, boost);
+    final Weight dvWeight = dvQuery.createWeight(searcher, needsScores, boost);
+    return new ConstantScoreWeight(this, boost) {
+      @Override
+      public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
+        return indexWeight.bulkScorer(context);
+      }
+
+      @Override
+      public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+        final ScorerSupplier indexScorerSupplier = indexWeight.scorerSupplier(context);
+        final ScorerSupplier dvScorerSupplier = dvWeight.scorerSupplier(context); 
+        if (indexScorerSupplier == null || dvScorerSupplier == null) {
+          return null;
+        }
+        return new ScorerSupplier() {
+          @Override
+          public Scorer get(boolean randomAccess) throws IOException {
+            return (randomAccess ? dvScorerSupplier : indexScorerSupplier).get(randomAccess);
+          }
+
+          @Override
+          public long cost() {
+            return Math.min(indexScorerSupplier.cost(), dvScorerSupplier.cost());
+          }
+        };
+      }
+
+      @Override
+      public Scorer scorer(LeafReaderContext context) throws IOException {
+        ScorerSupplier scorerSupplier = scorerSupplier(context);
+        if (scorerSupplier == null) {
+          return null;
+        }
+        return scorerSupplier.get(false);
+      }
+    };
+  }
+
+}
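
Note: a hedged sketch of how a caller might drive the supplier above; the threshold is invented for illustration and is not the heuristic BooleanWeight applies. The point is the contract: cost() is cheap and side-effect free, and get() is called at most once, with randomAccess=true routing to the doc-values side.

    import java.io.IOException;

    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.ScorerSupplier;
    import org.apache.lucene.search.Weight;

    class SupplierCallerSketch {
      // leadCost would be the cost of the most selective required clause
      // in a surrounding conjunction (hypothetical parameter).
      static Scorer scorerFor(Weight weight, LeafReaderContext ctx,
                              long leadCost) throws IOException {
        ScorerSupplier supplier = weight.scorerSupplier(ctx);
        if (supplier == null) {
          return null; // no matches on this segment
        }
        // Made-up rule: if this clause is much less selective than the
        // lead, use it only to verify candidates, i.e. random access.
        boolean randomAccess = supplier.cost() > 8 * leadCost;
        return supplier.get(randomAccess); // must be called at most once
      }
    }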

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java b/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
new file mode 100644
index 0000000..2a16e5d
--- /dev/null
+++ b/lucene/sandbox/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search;
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestIndexOrDocValuesQuery extends LuceneTestCase {
+
+  public void testUseIndexForSelectiveQueries() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()
+        // relies on costs and PointValues.estimateCost so we need the default codec
+        .setCodec(TestUtil.getDefaultCodec()));
+    for (int i = 0; i < 2000; ++i) {
+      Document doc = new Document();
+      if (i == 42) {
+        doc.add(new StringField("f1", "bar", Store.NO));
+        doc.add(new LongPoint("f2", 42L));
+        doc.add(new NumericDocValuesField("f2", 42L));
+      } else if (i == 100) {
+        doc.add(new StringField("f1", "foo", Store.NO));
+        doc.add(new LongPoint("f2", 2L));
+        doc.add(new NumericDocValuesField("f2", 2L));
+      } else {
+        doc.add(new StringField("f1", "bar", Store.NO));
+        doc.add(new LongPoint("f2", 2L));
+        doc.add(new NumericDocValuesField("f2", 2L));
+      }
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    IndexReader reader = DirectoryReader.open(w);
+    IndexSearcher searcher = newSearcher(reader);
+    searcher.setQueryCache(null);
+
+    // The term query is more selective, so the IndexOrDocValuesQuery should use doc values
+    final Query q1 = new BooleanQuery.Builder()
+        .add(new TermQuery(new Term("f1", "foo")), Occur.MUST)
+        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 2), new DocValuesNumbersQuery("f2", 2L)), Occur.MUST)
+        .build();
+
+    final Weight w1 = searcher.createNormalizedWeight(q1, random().nextBoolean());
+    final Scorer s1 = w1.scorer(reader.leaves().get(0));
+    assertNotNull(s1.twoPhaseIterator()); // means we use doc values
+
+    // The term query is less selective, so the IndexOrDocValuesQuery should use points
+    final Query q2 = new BooleanQuery.Builder()
+        .add(new TermQuery(new Term("f1", "bar")), Occur.MUST)
+        .add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 42), new DocValuesNumbersQuery("f2", 42L)), Occur.MUST)
+        .build();
+
+    final Weight w2 = searcher.createNormalizedWeight(q2, random().nextBoolean());
+    final Scorer s2 = w2.scorer(reader.leaves().get(0));
+    assertNull(s2.twoPhaseIterator()); // means we use points
+
+    reader.close();
+    w.close();
+    dir.close();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java
index ec7d75a..486d81c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPointsFormat.java
@@ -134,6 +134,11 @@ class CrankyPointsFormat extends PointsFormat {
         }
 
         @Override
+        public long estimatePointCount(IntersectVisitor visitor) {
+          return delegate.estimatePointCount(visitor);
+        }
+
+        @Override
         public byte[] getMinPackedValue() throws IOException {
           if (random.nextInt(100) == 0) {
             throw new IOException("Fake IOException");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
index 37c549e..e837359 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
@@ -884,6 +884,13 @@ public class AssertingLeafReader extends FilterLeafReader {
     }
 
     @Override
+    public long estimatePointCount(IntersectVisitor visitor) {
+      long cost = in.estimatePointCount(visitor);
+      assert cost >= 0;
+      return cost;
+    }
+
+    @Override
     public byte[] getMinPackedValue() throws IOException {
       return Objects.requireNonNull(in.getMinPackedValue());
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/86233cb9/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
index 75529df..7b6727d 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
@@ -33,9 +33,45 @@ class AssertingWeight extends FilterWeight {
 
   @Override
   public Scorer scorer(LeafReaderContext context) throws IOException {
-    final Scorer inScorer = in.scorer(context);
-    assert inScorer == null || inScorer.docID() == -1;
-    return AssertingScorer.wrap(new Random(random.nextLong()), inScorer, needsScores);
+    if (random.nextBoolean()) {
+      final Scorer inScorer = in.scorer(context);
+      assert inScorer == null || inScorer.docID() == -1;
+      return AssertingScorer.wrap(new Random(random.nextLong()), inScorer, needsScores);
+    } else {
+      final ScorerSupplier scorerSupplier = scorerSupplier(context);
+      if (scorerSupplier == null) {
+        return null;
+      }
+      if (random.nextBoolean()) {
+        // Evil: make sure computing the cost has no side effects
+        scorerSupplier.cost();
+      }
+      return scorerSupplier.get(false);
+    }
+  }
+
+  @Override
+  public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+    final ScorerSupplier inScorerSupplier = in.scorerSupplier(context);
+    if (inScorerSupplier == null) {
+      return null;
+    }
+    return new ScorerSupplier() {
+      private boolean getCalled = false;
+      @Override
+      public Scorer get(boolean randomAccess) throws IOException {
+        assert getCalled == false;
+        getCalled = true;
+        return AssertingScorer.wrap(new Random(random.nextLong()), inScorerSupplier.get(randomAccess), needsScores);
+      }
+
+      @Override
+      public long cost() {
+        final long cost = inScorerSupplier.cost();
+        assert cost >= 0;
+        return cost;
+      }
+    };
   }
 
   @Override


[46/50] [abbrv] lucene-solr:apiv2: SOLR-9132: Fix HDFS test

Posted by no...@apache.org.
SOLR-9132: Fix HDFS test


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/49fa7b0d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/49fa7b0d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/49fa7b0d

Branch: refs/heads/apiv2
Commit: 49fa7b0dd514fb9d4b7a508d80ae6f9b12cdf6b0
Parents: 864bed2
Author: Alan Woodward <ro...@apache.org>
Authored: Sat Jan 21 21:55:14 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Sat Jan 21 21:55:33 2017 +0000

----------------------------------------------------------------------
 .../src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml  | 2 ++
 .../solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java       | 1 +
 2 files changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/49fa7b0d/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
index 88290da..648e92e 100644
--- a/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
+++ b/solr/core/src/test-files/solr/configsets/cloud-hdfs/conf/solrconfig.xml
@@ -38,6 +38,8 @@
     <updateLog></updateLog>
   </updateHandler>
 
+  <jmx/>
+
   <requestHandler name="/select" class="solr.SearchHandler">
     <lst name="defaults">
       <str name="echoParams">explicit</str>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/49fa7b0d/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
index 55fb6cd..fc938a1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
@@ -42,6 +42,7 @@ public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistribut
 
     ZkConfigManager configManager = new ZkConfigManager(zkClient());
     configManager.uploadConfigDir(configset("cloud-hdfs"), "conf");
+    configManager.uploadConfigDir(configset("cloud-hdfs"), "conf2");
 
     System.setProperty("solr.hdfs.home", HdfsTestUtil.getDataDir(dfsCluster, "data"));
   }


[19/50] [abbrv] lucene-solr:apiv2: LUCENE-7640: Speed up PointValues#estimatePointCount with Relation.CELL_INSIDE_QUERY.

Posted by no...@apache.org.
LUCENE-7640: Speed up PointValues#estimatePointCount with Relation.CELL_INSIDE_QUERY.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/71aa463d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/71aa463d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/71aa463d

Branch: refs/heads/apiv2
Commit: 71aa463d4bbdfc03efb11b52ed2c4ce51d49bfb3
Parents: 3404677
Author: Adrien Grand <jp...@gmail.com>
Authored: Wed Jan 18 15:07:06 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Wed Jan 18 15:07:06 2017 +0100

----------------------------------------------------------------------
 .../org/apache/lucene/util/bkd/BKDReader.java   |  45 ++++-
 .../lucene60/TestLucene60PointsFormat.java      | 192 ++++++++++++++++++-
 .../org/apache/lucene/util/bkd/TestBKD.java     |  90 +++++++++
 3 files changed, 319 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71aa463d/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
index 4089d82..e120435 100644
--- a/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
+++ b/lucene/core/src/java/org/apache/lucene/util/bkd/BKDReader.java
@@ -223,6 +223,41 @@ public final class BKDReader extends PointValues implements Accountable {
     
     /** Only valid after pushLeft or pushRight, not pop! */
     public abstract long getLeafBlockFP();
+
+    /** Return the number of leaves below the current node. */
+    public int getNumLeaves() {
+      int leftMostLeafNode = nodeID;
+      while (leftMostLeafNode < leafNodeOffset) {
+        leftMostLeafNode = leftMostLeafNode * 2;
+      }
+      int rightMostLeafNode = nodeID;
+      while (rightMostLeafNode < leafNodeOffset) {
+        rightMostLeafNode = rightMostLeafNode * 2 + 1;
+      }
+      final int numLeaves;
+      if (rightMostLeafNode >= leftMostLeafNode) {
+        // both are on the same level
+        numLeaves = rightMostLeafNode - leftMostLeafNode + 1;
+      } else {
+        // left is one level deeper than right
+        numLeaves = rightMostLeafNode - leftMostLeafNode + 1 + leafNodeOffset;
+      }
+      assert numLeaves == getNumLeavesSlow(nodeID) : numLeaves + " " + getNumLeavesSlow(nodeID);
+      return numLeaves;
+    }
+
+    // for assertions
+    private int getNumLeavesSlow(int node) {
+      if (node >= 2 * leafNodeOffset) {
+        return 0;
+      } else if (node >= leafNodeOffset) {
+        return 1;
+      } else {
+        final int leftCount = getNumLeavesSlow(node * 2);
+        final int rightCount = getNumLeavesSlow(node * 2 + 1);
+        return leftCount + rightCount;
+      }
+    }
   }
 
   /** Reads the original simple yet heap-heavy index format */
@@ -716,13 +751,11 @@ public final class BKDReader extends PointValues implements Accountable {
     if (r == Relation.CELL_OUTSIDE_QUERY) {
       // This cell is fully outside of the query shape: stop recursing
       return 0L;
+    } else if (r == Relation.CELL_INSIDE_QUERY) {
+      return (long) maxPointsInLeafNode * state.index.getNumLeaves();
     } else if (state.index.isLeafNode()) {
-      if (r == Relation.CELL_INSIDE_QUERY) {
-        return maxPointsInLeafNode;
-      } else {
-        // Assume half the points matched
-        return (maxPointsInLeafNode + 1) / 2;
-      }
+      // Assume half the points matched
+      return (maxPointsInLeafNode + 1) / 2;
     } else {
       
       // Non-leaf node: recurse on the split left and right nodes
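
Note: the leaf-counting shortcut above leans on the heap-style numbering of BKD index nodes (internal nodes 1..leafNodeOffset-1, leaves on the bottom, possibly wrapped, level). A standalone sketch of the same arithmetic with a worked instance, assuming that numbering:

    // With leafNodeOffset = 5 and nodeID = 2:
    //   leftMostLeafNode:  2 -> 4 -> 8  (double until >= 5)
    //   rightMostLeafNode: 2 -> 5       (*2+1 until >= 5)
    // 5 < 8, so the left branch is one level deeper and
    // numLeaves = 5 - 8 + 1 + 5 = 3 (leaves 8, 9 and 5).
    class NumLeavesSketch {
      static int numLeaves(int nodeID, int leafNodeOffset) {
        int left = nodeID;
        while (left < leafNodeOffset) {
          left *= 2;
        }
        int right = nodeID;
        while (right < leafNodeOffset) {
          right = right * 2 + 1;
        }
        return right >= left ? right - left + 1
                             : right - left + 1 + leafNodeOffset;
      }

      public static void main(String[] args) {
        System.out.println(numLeaves(2, 5)); // 3
        System.out.println(numLeaves(1, 5)); // 5 (the root covers all leaves)
      }
    }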

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71aa463d/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
index afa8ec4..3a08bfa 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
@@ -18,29 +18,43 @@ package org.apache.lucene.codecs.lucene60;
 
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FilterCodec;
 import org.apache.lucene.codecs.PointsFormat;
 import org.apache.lucene.codecs.PointsReader;
 import org.apache.lucene.codecs.PointsWriter;
+import org.apache.lucene.document.BinaryPoint;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.BasePointsFormatTestCase;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.PointValues.IntersectVisitor;
+import org.apache.lucene.index.PointValues.Relation;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.bkd.BKDWriter;
 
 /**
  * Tests Lucene60PointsFormat
  */
 public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
   private final Codec codec;
+  private final int maxPointsInLeafNode;
   
   public TestLucene60PointsFormat() {
     // standard issue
     Codec defaultCodec = TestUtil.getDefaultCodec();
     if (random().nextBoolean()) {
       // randomize parameters
-      int maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 500);
+      maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 500);
       double maxMBSortInHeap = 3.0 + (3*random().nextDouble());
       if (VERBOSE) {
         System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
@@ -66,6 +80,7 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
     } else {
       // standard issue
       codec = defaultCodec;
+      maxPointsInLeafNode = BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE;
     }
   }
 
@@ -79,5 +94,178 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
     assumeFalse("TODO: mess with the parameters and test gets angry!", codec instanceof FilterCodec);
     super.testMergeStability();
   }
-  
+
+  public void testEstimatePointCount() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    byte[] pointValue = new byte[3];
+    byte[] uniquePointValue = new byte[3];
+    random().nextBytes(uniquePointValue);
+    final int numDocs = atLeast(10000); // make sure we have several leaves
+    for (int i = 0; i < numDocs; ++i) {
+      Document doc = new Document();
+      if (i == numDocs / 2) {
+        doc.add(new BinaryPoint("f", uniquePointValue));
+      } else {
+        do {
+          random().nextBytes(pointValue);
+        } while (Arrays.equals(pointValue, uniquePointValue));
+        doc.add(new BinaryPoint("f", pointValue));
+      }
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    final IndexReader r = DirectoryReader.open(w);
+    w.close();
+    final LeafReader lr = getOnlyLeafReader(r);
+    PointValues points = lr.getPointValues("f");
+
+    // If all points match, then the point count is numLeaves * maxPointsInLeafNode
+    final int numLeaves = (int) Math.ceil((double) numDocs / maxPointsInLeafNode);
+    assertEquals(numLeaves * maxPointsInLeafNode,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            return Relation.CELL_INSIDE_QUERY;
+          }
+        }));
+
+    // Return 0 if no points match
+    assertEquals(0,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            return Relation.CELL_OUTSIDE_QUERY;
+          }
+        }));
+
+    // If only one point matches, then the point count is (maxPointsInLeafNode + 1) / 2
+    assertEquals((maxPointsInLeafNode + 1) / 2,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            if (StringHelper.compare(3, uniquePointValue, 0, maxPackedValue, 0) > 0 ||
+                StringHelper.compare(3, uniquePointValue, 0, minPackedValue, 0) < 0) {
+              return Relation.CELL_OUTSIDE_QUERY;
+            }
+            return Relation.CELL_CROSSES_QUERY;
+          }
+        }));
+
+    r.close();
+    dir.close();
+  }
+
+  // The tree is always balanced in the N dims case, and leaves are
+  // not all full so things are a bit different
+  public void testEstimatePointCount2Dims() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    byte[][] pointValue = new byte[2][];
+    pointValue[0] = new byte[3];
+    pointValue[1] = new byte[3];
+    byte[][] uniquePointValue = new byte[2][];
+    uniquePointValue[0] = new byte[3];
+    uniquePointValue[1] = new byte[3];
+    random().nextBytes(uniquePointValue[0]);
+    random().nextBytes(uniquePointValue[1]);
+    final int numDocs = atLeast(10000); // make sure we have several leaves
+    for (int i = 0; i < numDocs; ++i) {
+      Document doc = new Document();
+      if (i == numDocs / 2) {
+        doc.add(new BinaryPoint("f", uniquePointValue));
+      } else {
+        do {
+          random().nextBytes(pointValue[0]);
+          random().nextBytes(pointValue[1]);
+        } while (Arrays.equals(pointValue[0], uniquePointValue[0]) || Arrays.equals(pointValue[1], uniquePointValue[1]));
+        doc.add(new BinaryPoint("f", pointValue));
+      }
+      w.addDocument(doc);
+    }
+    w.forceMerge(1);
+    final IndexReader r = DirectoryReader.open(w);
+    w.close();
+    final LeafReader lr = getOnlyLeafReader(r);
+    PointValues points = lr.getPointValues("f");
+
+    // With >1 dims, the tree is balanced
+    int actualMaxPointsInLeafNode = numDocs;
+    while (actualMaxPointsInLeafNode > maxPointsInLeafNode) {
+      actualMaxPointsInLeafNode = (actualMaxPointsInLeafNode + 1) / 2;
+    }
+
+    // If all points match, then the point count is numLeaves * maxPointsInLeafNode
+    final int numLeaves = Integer.highestOneBit((numDocs - 1) / actualMaxPointsInLeafNode) << 1;
+    assertEquals(numLeaves * actualMaxPointsInLeafNode,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            return Relation.CELL_INSIDE_QUERY;
+          }
+        }));
+
+    // Return 0 if no points match
+    assertEquals(0,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            return Relation.CELL_OUTSIDE_QUERY;
+          }
+        }));
+
+    // If only one point matches, then the point count is (actualMaxPointsInLeafNode + 1) / 2
+    assertEquals((actualMaxPointsInLeafNode + 1) / 2,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            for (int dim = 0; dim < 2; ++dim) {
+              if (StringHelper.compare(3, uniquePointValue[dim], 0, maxPackedValue, dim * 3) > 0 ||
+                  StringHelper.compare(3, uniquePointValue[dim], 0, minPackedValue, dim * 3) < 0) {
+                return Relation.CELL_OUTSIDE_QUERY;
+              }
+            }
+            return Relation.CELL_CROSSES_QUERY;
+          }
+        }));
+
+    r.close();
+    dir.close();
+  }
 }
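
Note: the two formulas above are compact; a worked instance of the arithmetic (the numbers are just an example) may help:

    // For, say, numDocs = 10000 and maxPointsInLeafNode = 512:
    //   actualMaxPointsInLeafNode: 10000 -> 5000 -> 2500 -> 1250 -> 625 -> 313
    //   numLeaves = Integer.highestOneBit((10000 - 1) / 313) << 1
    //             = Integer.highestOneBit(31) << 1 = 16 << 1 = 32
    //   and indeed 32 * 313 = 10016 >= 10000.
    class BalancedTreeMath {
      public static void main(String[] args) {
        int numDocs = 10000, maxPointsInLeafNode = 512;
        int actualMax = numDocs;
        while (actualMax > maxPointsInLeafNode) {
          actualMax = (actualMax + 1) / 2; // split until leaves fit
        }
        int numLeaves = Integer.highestOneBit((numDocs - 1) / actualMax) << 1;
        System.out.println(actualMax + " " + numLeaves); // 313 32
      }
    }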

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71aa463d/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
index c5c5c1f..f01f058 100644
--- a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
+++ b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
@@ -1104,4 +1104,94 @@ public class TestBKD extends LuceneTestCase {
     in.close();
     dir.close();
   }
+
+  public void testEstimatePointCount() throws IOException {
+    Directory dir = newDirectory();
+    final int numValues = atLeast(10000); // make sure to have multiple leaves
+    final int maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 500);
+    final int numBytesPerDim = TestUtil.nextInt(random(), 1, 4);
+    final byte[] pointValue = new byte[numBytesPerDim];
+    final byte[] uniquePointValue = new byte[numBytesPerDim];
+    random().nextBytes(uniquePointValue);
+
+    BKDWriter w = new BKDWriter(numValues, dir, "_temp", 1, numBytesPerDim, maxPointsInLeafNode,
+        BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP, numValues, true);
+    for (int i = 0; i < numValues; ++i) {
+      if (i == numValues / 2) {
+        w.add(uniquePointValue, i);
+      } else {
+        do {
+          random().nextBytes(pointValue);
+        } while (Arrays.equals(pointValue, uniquePointValue));
+        w.add(pointValue, i);
+      }
+    }
+    final long indexFP;
+    try (IndexOutput out = dir.createOutput("bkd", IOContext.DEFAULT)) {
+      indexFP = w.finish(out);
+      w.close();
+    }
+    
+    IndexInput pointsIn = dir.openInput("bkd", IOContext.DEFAULT);
+    pointsIn.seek(indexFP);
+    BKDReader points = new BKDReader(pointsIn);
+
+    int actualMaxPointsInLeafNode = numValues;
+    while (actualMaxPointsInLeafNode > maxPointsInLeafNode) {
+      actualMaxPointsInLeafNode = (actualMaxPointsInLeafNode + 1) / 2;
+    }
+
+    // If all points match, then the point count is numLeaves * maxPointsInLeafNode
+    final int numLeaves = Integer.highestOneBit((numValues - 1) / actualMaxPointsInLeafNode) << 1;
+    assertEquals(numLeaves * actualMaxPointsInLeafNode,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            return Relation.CELL_INSIDE_QUERY;
+          }
+        }));
+
+    // Return 0 if no points match
+    assertEquals(0,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            return Relation.CELL_OUTSIDE_QUERY;
+          }
+        }));
+
+    // If only one point matches, then the point count is (actualMaxPointsInLeafNode + 1) / 2
+    assertEquals((actualMaxPointsInLeafNode + 1) / 2,
+        points.estimatePointCount(new IntersectVisitor() {
+          @Override
+          public void visit(int docID, byte[] packedValue) throws IOException {}
+          
+          @Override
+          public void visit(int docID) throws IOException {}
+          
+          @Override
+          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
+            if (StringHelper.compare(numBytesPerDim, uniquePointValue, 0, maxPackedValue, 0) > 0 ||
+                StringHelper.compare(numBytesPerDim, uniquePointValue, 0, minPackedValue, 0) < 0) {
+              return Relation.CELL_OUTSIDE_QUERY;
+            }
+            return Relation.CELL_CROSSES_QUERY;
+          }
+        }));
+
+    pointsIn.close();
+    dir.close();
+  }
 }


[43/50] [abbrv] lucene-solr:apiv2: SOLR-9996: Unstored IntPointField returns Long type

Posted by no...@apache.org.
SOLR-9996: Unstored IntPointField returns Long type


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/69055aa4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/69055aa4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/69055aa4

Branch: refs/heads/apiv2
Commit: 69055aa4a82d144dc04bf10547912ccc4a7011df
Parents: f57e017
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Fri Jan 20 19:08:05 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Fri Jan 20 19:08:05 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                         |  2 ++
 .../src/java/org/apache/solr/schema/IntPointField.java   |  2 +-
 .../src/test/org/apache/solr/schema/TestPointFields.java | 11 +++++++++++
 3 files changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/69055aa4/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c0fe505..748125a 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -74,6 +74,8 @@ Optimizations
 * SOLR-9584: Support Solr being proxied with another endpoint than default /solr, by using relative links
   in AdminUI javascripts (Yun Jie Zhou via janhoy)
 
+* SOLR-9996: Unstored IntPointField returns Long type (Ishan Chattopadhyaya)
+
 Other Changes
 ----------------------
 * SOLR-8396: Add support for PointFields in Solr (Ishan Chattopadhyaya, Tomás Fernández Löbbe)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/69055aa4/solr/core/src/java/org/apache/solr/schema/IntPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/IntPointField.java b/solr/core/src/java/org/apache/solr/schema/IntPointField.java
index a7bab07..2271282 100644
--- a/solr/core/src/java/org/apache/solr/schema/IntPointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/IntPointField.java
@@ -89,7 +89,7 @@ public class IntPointField extends PointField implements IntValueFieldType {
   public Object toObject(IndexableField f) {
     final Number val = f.numericValue();
     if (val != null) {
-      return val;
+      return val.intValue();
     } else {
       throw new AssertionError("Unexpected state. Field: '" + f + "'");
     }
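The narrowing call is the whole fix. When an int point field is unstored, the IndexableField handed to toObject is reconstructed from numeric docValues (that docValues path is an assumption for illustration), and its numericValue() surfaces as a Long; without intValue() the response carried the wrong wrapper type. A minimal sketch of the before/after behavior:

    public class NarrowingSketch {
      public static void main(String[] args) {
        Number val = Long.valueOf(42);          // what numericValue() can hand back
        Object before = val;                    // old toObject(): still a Long
        Object after = val.intValue();          // patched toObject(): autoboxed to Integer
        System.out.println(before.getClass());  // class java.lang.Long
        System.out.println(after.getClass());   // class java.lang.Integer
      }
    }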

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/69055aa4/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
index 12f1504..8fb6926 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
@@ -785,6 +785,11 @@ public class TestPointFields extends SolrTestCaseJ4 {
     for (int i=0; i < values.length; i++) {
       assertU(adoc("id", String.valueOf(i), field, values[i]));
     }
+    // Check using RTG (before commit, served from the update log)
+    for (int i = 0; i < values.length; i++) {
+      assertQ(req("qt", "/get", "id", String.valueOf(i)),
+      "//doc/" + type + "[@name='" + field + "'][.='" + values[i] + "']");
+    }
     assertU(commit());
     String[] expected = new String[values.length + 1];
     expected[0] = "//*[@numFound='" + values.length + "']"; 
@@ -792,6 +797,12 @@ public class TestPointFields extends SolrTestCaseJ4 {
       expected[i] = "//result/doc[" + i + "]/" + type + "[@name='" + field + "'][.='" + values[i-1] + "']";
     }
     assertQ(req("q", "*:*", "fl", "id, " + field, "rows", String.valueOf(values.length)), expected);
+
+    // Check using RTG (after commit)
+    for (int i = 0; i < values.length; i++) {
+      assertQ(req("qt", "/get", "id", String.valueOf(i)),
+      "//doc/" + type + "[@name='" + field + "'][.='" + values[i] + "']");
+    }
   }
 
   private void doTestIntPointFieldRangeQuery(String fieldName, String type, boolean testLong) throws Exception {
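Note that the first of the two new RTG loops runs before assertU(commit()), so the value is served from the update log rather than the index; that is exactly where the Long/Integer mismatch used to show up. A hedged SolrJ equivalent (client URL, collection, and field name are illustrative, not taken from this test):

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.common.SolrDocument;

    public class RtgSketch {
      public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
          // No commit yet: /get finds the doc in the update log.
          SolrDocument doc = client.getById("collection1", "0");
          System.out.println(doc.getFieldValue("number_p_i"));  // expect an Integer after the fix
        }
      }
    }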


[13/50] [abbrv] lucene-solr:apiv2: LUCENE-7619: add WordDelimiterGraphFilter (replacing WordDelimiterFilter) to produce a correct token stream graph when splitting words

Posted by no...@apache.org.
LUCENE-7619: add WordDelimiterGraphFilter (replacing WordDelimiterFilter) to produce a correct token stream graph when splitting words


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/637915b8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/637915b8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/637915b8

Branch: refs/heads/apiv2
Commit: 637915b890d9f0e5cfaa6887609f221029327a25
Parents: 7d7e5d2
Author: Mike McCandless <mi...@apache.org>
Authored: Tue Jan 17 10:38:07 2017 -0500
Committer: Mike McCandless <mi...@apache.org>
Committed: Tue Jan 17 10:38:07 2017 -0500

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   5 +
 .../analysis/core/FlattenGraphFilter.java       | 418 +++++++++
 .../core/FlattenGraphFilterFactory.java         |  44 +
 .../miscellaneous/WordDelimiterFilter.java      |   9 +-
 .../WordDelimiterFilterFactory.java             |   6 +
 .../miscellaneous/WordDelimiterGraphFilter.java | 692 ++++++++++++++
 .../WordDelimiterGraphFilterFactory.java        | 199 ++++
 .../miscellaneous/WordDelimiterIterator.java    |  59 +-
 .../analysis/synonym/FlattenGraphFilter.java    | 417 ---------
 .../synonym/FlattenGraphFilterFactory.java      |  44 -
 .../lucene/analysis/synonym/SynonymFilter.java  |   1 +
 .../analysis/synonym/SynonymFilterFactory.java  |   1 +
 .../analysis/synonym/SynonymGraphFilter.java    |  11 +-
 ...ache.lucene.analysis.util.TokenFilterFactory |   3 +-
 .../analysis/core/TestFlattenGraphFilter.java   | 284 ++++++
 .../miscellaneous/TestWordDelimiterFilter.java  |  69 ++
 .../TestWordDelimiterGraphFilter.java           | 897 +++++++++++++++++++
 .../synonym/TestFlattenGraphFilter.java         | 284 ------
 .../synonym/TestSynonymGraphFilter.java         |  51 +-
 .../lucene/analysis/TokenStreamToAutomaton.java |  39 +-
 .../tokenattributes/OffsetAttributeImpl.java    |   2 +-
 .../PackedTokenAttributeImpl.java               |   2 +-
 .../PositionIncrementAttributeImpl.java         |   3 +-
 .../PositionLengthAttributeImpl.java            |   3 +-
 .../lucene/analysis/TestGraphTokenizers.java    |  53 +-
 .../suggest/analyzing/AnalyzingSuggester.java   |   3 +-
 .../analysis/BaseTokenStreamTestCase.java       | 114 ++-
 .../lucene/analysis/TokenStreamToDot.java       |   5 +-
 28 files changed, 2899 insertions(+), 819 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 2e015a3..4df7a67 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -76,6 +76,11 @@ New Features
 * LUCENE-7623: Add FunctionScoreQuery and FunctionMatchQuery (Alan Woodward,
   Adrien Grand, David Smiley)
 
+* LUCENE-7619: Add WordDelimiterGraphFilter, just like
+  WordDelimiterFilter except it produces correct token graphs so that
+  proximity queries at search time will produce correct results (Mike
+  McCandless)
+
 Bug Fixes
 
 * LUCENE-7630: Fix (Edge)NGramTokenFilter to no longer drop payloads

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/FlattenGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/FlattenGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/FlattenGraphFilter.java
new file mode 100644
index 0000000..01e1f6f
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/FlattenGraphFilter.java
@@ -0,0 +1,418 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis.core;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.RollingBuffer;
+
+/**
+ * Converts an incoming graph token stream, such as one from
+ * {@link SynonymGraphFilter}, into a flat form so that
+ * all nodes form a single linear chain with no side paths.  Every
+ * path through the graph touches every node.  This is necessary
+ * when indexing a graph token stream, because the index does not
+ * save {@link PositionLengthAttribute} and so it cannot
+ * preserve the graph structure.  However, at search time,
+ * query parsers can correctly handle the graph and this token
+ * filter should <b>not</b> be used.
+ *
+ * <p>If the graph was not already flat to start, this
+ * is likely a lossy process, i.e. it will often cause the 
+ * graph to accept token sequences it should not, and to
+ * reject token sequences it should not.
+ *
+ * <p>However, when applying synonyms during indexing, this
+ * is necessary because Lucene already does not index a graph 
+ * and so the indexing process is already lossy
+ * (it ignores the {@link PositionLengthAttribute}).
+ *
+ * @lucene.experimental
+ */
+public final class FlattenGraphFilter extends TokenFilter {
+
+  /** Holds all tokens leaving a given input position. */
+  private final static class InputNode implements RollingBuffer.Resettable {
+    private final List<AttributeSource.State> tokens = new ArrayList<>();
+
+    /** Our input node, or -1 if we haven't been assigned yet */
+    int node = -1;
+
+    /** Maximum to-node of all tokens leaving here; we use this
+     *  to know when we can freeze. */
+    int maxToNode = -1;
+
+    /** Where we currently map to; this changes (can only
+     *  increase as we see more input tokens), until we are finished
+     *  with this position. */
+    int outputNode = -1;
+
+    /** Which token (index into {@link #tokens}) we will next output. */
+    int nextOut;
+
+    @Override
+    public void reset() {
+      tokens.clear();
+      node = -1;
+      outputNode = -1;
+      maxToNode = -1;
+      nextOut = 0;
+    }
+  }
+
+  /** Gathers up merged input positions into a single output position,
+   *  only for the current "frontier" of nodes we've seen but can't yet
+   *  output because they are not frozen. */
+  private final static class OutputNode implements RollingBuffer.Resettable {
+    private final List<Integer> inputNodes = new ArrayList<>();
+
+    /** Node ID for this output, or -1 if we haven't been assigned yet. */
+    int node = -1;
+
+    /** Which input node (index into {@link #inputNodes}) we will next output. */
+    int nextOut;
+    
+    /** Start offset of tokens leaving this node. */
+    int startOffset = -1;
+
+    /** End offset of tokens arriving to this node. */
+    int endOffset = -1;
+
+    @Override
+    public void reset() {
+      inputNodes.clear();
+      node = -1;
+      nextOut = 0;
+      startOffset = -1;
+      endOffset = -1;
+    }
+  }
+
+  private final RollingBuffer<InputNode> inputNodes = new RollingBuffer<InputNode>() {
+    @Override
+    protected InputNode newInstance() {
+      return new InputNode();
+    }
+  };
+
+  private final RollingBuffer<OutputNode> outputNodes = new RollingBuffer<OutputNode>() {
+    @Override
+    protected OutputNode newInstance() {
+      return new OutputNode();
+    }
+  };
+
+  private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+  private final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class);
+  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+
+  /** Which input node the last seen token leaves from */
+  private int inputFrom;
+
+  /** We are currently releasing tokens leaving from this output node */
+  private int outputFrom;
+
+  // for debugging:
+  //private int retOutputFrom;
+
+  private boolean done;
+
+  private int lastOutputFrom;
+
+  private int finalOffset;
+
+  private int finalPosInc;
+
+  private int maxLookaheadUsed;
+
+  private int lastStartOffset;
+
+  public FlattenGraphFilter(TokenStream in) {
+    super(in);
+  }
+
+  private boolean releaseBufferedToken() {
+
+    // We only need the while loop (retry) if we have a hole (an output node that has no tokens leaving):
+    while (outputFrom < outputNodes.getMaxPos()) {
+      OutputNode output = outputNodes.get(outputFrom);
+      if (output.inputNodes.isEmpty()) {
+        // No tokens arrived to this node, which happens for the first node
+        // after a hole:
+        //System.out.println("    skip empty outputFrom=" + outputFrom);
+        outputFrom++;
+        continue;
+      }
+
+      int maxToNode = -1;
+      for(int inputNodeID : output.inputNodes) {
+        InputNode inputNode = inputNodes.get(inputNodeID);
+        assert inputNode.outputNode == outputFrom;
+        maxToNode = Math.max(maxToNode, inputNode.maxToNode);
+      }
+      //System.out.println("  release maxToNode=" + maxToNode + " vs inputFrom=" + inputFrom);
+
+      // TODO: we could shrink the frontier here somewhat if we
+      // always output posLen=1 as part of our "sausagizing":
+      if (maxToNode <= inputFrom || done) {
+        //System.out.println("  output node merged these inputs: " + output.inputNodes);
+        // These tokens are now frozen
+        assert output.nextOut < output.inputNodes.size(): "output.nextOut=" + output.nextOut + " vs output.inputNodes.size()=" + output.inputNodes.size();
+        InputNode inputNode = inputNodes.get(output.inputNodes.get(output.nextOut));
+        if (done && inputNode.tokens.size() == 0 && outputFrom >= outputNodes.getMaxPos()) {
+          return false;
+        }
+        if (inputNode.tokens.size() == 0) {
+          assert inputNode.nextOut == 0;
+          assert output.nextOut == 0;
+          // Hole dest nodes should never be merged since 1) we always
+          // assign them to a new output position, and 2) since they never
+          // have arriving tokens they cannot be pushed:
+          assert output.inputNodes.size() == 1: output.inputNodes.size();
+          outputFrom++;
+          inputNodes.freeBefore(output.inputNodes.get(0));
+          outputNodes.freeBefore(outputFrom);
+          continue;
+        }
+
+        assert inputNode.nextOut < inputNode.tokens.size();
+
+        restoreState(inputNode.tokens.get(inputNode.nextOut));
+
+        // Correct posInc
+        assert outputFrom >= lastOutputFrom;
+        posIncAtt.setPositionIncrement(outputFrom - lastOutputFrom);
+        int toInputNodeID = inputNode.node + posLenAtt.getPositionLength();
+        InputNode toInputNode = inputNodes.get(toInputNodeID);
+
+        // Correct posLen
+        assert toInputNode.outputNode > outputFrom;
+        posLenAtt.setPositionLength(toInputNode.outputNode - outputFrom);
+        lastOutputFrom = outputFrom;
+        inputNode.nextOut++;
+        //System.out.println("  ret " + this);
+
+        OutputNode outputEndNode = outputNodes.get(toInputNode.outputNode);
+
+        // Correct offsets
+
+        // This is a bit messy; we must do this so offsets don't go backwards,
+        // which would otherwise happen if the replacement has more tokens
+        // than the input:
+        int startOffset = Math.max(lastStartOffset, output.startOffset);
+
+        // We must do this in case the incoming tokens have broken offsets:
+        int endOffset = Math.max(startOffset, outputEndNode.endOffset);
+        
+        offsetAtt.setOffset(startOffset, endOffset);
+        lastStartOffset = startOffset;
+
+        if (inputNode.nextOut == inputNode.tokens.size()) {
+          output.nextOut++;
+          if (output.nextOut == output.inputNodes.size()) {
+            outputFrom++;
+            inputNodes.freeBefore(output.inputNodes.get(0));
+            outputNodes.freeBefore(outputFrom);
+          }
+        }
+
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    //System.out.println("    break false");
+    return false;
+  }
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    //System.out.println("\nF.increment inputFrom=" + inputFrom + " outputFrom=" + outputFrom);
+
+    while (true) {
+      if (releaseBufferedToken()) {
+        //retOutputFrom += posIncAtt.getPositionIncrement();
+        //System.out.println("    return buffered: " + termAtt + " " + retOutputFrom + "-" + (retOutputFrom + posLenAtt.getPositionLength()));
+        //printStates();
+        return true;
+      } else if (done) {
+        //System.out.println("    done, return false");
+        return false;
+      }
+
+      if (input.incrementToken()) {
+        // Input node this token leaves from:
+        inputFrom += posIncAtt.getPositionIncrement();
+
+        int startOffset = offsetAtt.startOffset();
+        int endOffset = offsetAtt.endOffset();
+
+        // Input node this token goes to:
+        int inputTo = inputFrom + posLenAtt.getPositionLength();
+        //System.out.println("  input.inc " + termAtt + ": " + inputFrom + "-" + inputTo);
+
+        InputNode src = inputNodes.get(inputFrom);
+        if (src.node == -1) {
+          // This means the "from" node of this token was never seen as a "to" node,
+          // which should only happen if we just crossed a hole.  This is a challenging
+          // case for us because we normally rely on the full dependencies expressed
+          // by the arcs to assign outgoing node IDs.  It would be better if tokens
+          // were never dropped but instead just marked deleted with a new
+          // TermDeletedAttribute (boolean valued) ... but until that future, we have
+          // a hack here to forcefully jump the output node ID:
+          assert src.outputNode == -1;
+          src.node = inputFrom;
+
+          src.outputNode = outputNodes.getMaxPos() + 1;
+          //System.out.println("    hole: force to outputNode=" + src.outputNode);
+          OutputNode outSrc = outputNodes.get(src.outputNode);
+
+          // Not assigned yet:
+          assert outSrc.node == -1;
+          outSrc.node = src.outputNode;
+          outSrc.inputNodes.add(inputFrom);
+          outSrc.startOffset = startOffset;
+        } else {
+          OutputNode outSrc = outputNodes.get(src.outputNode);
+          if (outSrc.startOffset == -1 || startOffset > outSrc.startOffset) {
+            // "shrink wrap" the offsets so the original tokens (with most
+            // restrictive offsets) win:
+            outSrc.startOffset = Math.max(startOffset, outSrc.startOffset);
+          }
+        }
+
+        // Buffer this token:
+        src.tokens.add(captureState());
+        src.maxToNode = Math.max(src.maxToNode, inputTo);
+        maxLookaheadUsed = Math.max(maxLookaheadUsed, inputNodes.getBufferSize());
+
+        InputNode dest = inputNodes.get(inputTo);
+        if (dest.node == -1) {
+          // Common case: first time a token is arriving to this input position:
+          dest.node = inputTo;
+        }
+
+        // Always number output nodes sequentially:
+        int outputEndNode = src.outputNode + 1;
+
+        if (outputEndNode > dest.outputNode) {
+          if (dest.outputNode != -1) {
+            boolean removed = outputNodes.get(dest.outputNode).inputNodes.remove(Integer.valueOf(inputTo));
+            assert removed;
+          }
+          //System.out.println("    increase output node: " + dest.outputNode + " vs " + outputEndNode);
+          outputNodes.get(outputEndNode).inputNodes.add(inputTo);
+          dest.outputNode = outputEndNode;
+
+          // Since all we ever do is merge incoming nodes together, and then renumber
+          // the merged nodes sequentially, we should only ever assign smaller node
+          // numbers:
+          assert outputEndNode <= inputTo: "outputEndNode=" + outputEndNode + " vs inputTo=" + inputTo;
+        }
+
+        OutputNode outDest = outputNodes.get(dest.outputNode);
+        // "shrink wrap" the offsets so the original tokens (with most
+        // restrictive offsets) win:
+        if (outDest.endOffset == -1 || endOffset < outDest.endOffset) {
+          outDest.endOffset = endOffset;
+        }
+
+      } else {
+        //System.out.println("  got false from input");
+        input.end();
+        finalPosInc = posIncAtt.getPositionIncrement();
+        finalOffset = offsetAtt.endOffset();
+        done = true;
+        // Don't return false here: we need to force release any buffered tokens now
+      }
+    }
+  }
+
+  // Only for debugging:
+  /*
+  private void printStates() {
+    System.out.println("states:");
+    for(int i=outputFrom;i<outputNodes.getMaxPos();i++) {
+      OutputNode outputNode = outputNodes.get(i);
+      System.out.println("  output " + i + ": inputs " + outputNode.inputNodes);
+      for(int inputNodeID : outputNode.inputNodes) {
+        InputNode inputNode = inputNodes.get(inputNodeID);
+        assert inputNode.outputNode == i;
+      }
+    }
+  }
+  */
+
+  @Override
+  public void end() throws IOException {
+    if (done == false) {
+      super.end();
+    } else {
+      // NOTE, shady: don't call super.end, because we did already from incrementToken
+    }
+
+    clearAttributes();
+    if (done) {
+      // On exc, done is false, and we will not have set these:
+      posIncAtt.setPositionIncrement(finalPosInc);
+      offsetAtt.setOffset(finalOffset, finalOffset);
+    } else {
+      super.end();
+    }
+  }
+  
+  @Override
+  public void reset() throws IOException {
+    //System.out.println("F: reset");
+    super.reset();
+    inputFrom = -1;
+    inputNodes.reset();
+    InputNode in = inputNodes.get(0);
+    in.node = 0;
+    in.outputNode = 0;
+
+    outputNodes.reset();
+    OutputNode out = outputNodes.get(0);
+    out.node = 0;
+    out.inputNodes.add(0);
+    out.startOffset = 0;
+    outputFrom = 0;
+    //retOutputFrom = -1;
+    lastOutputFrom = -1;
+    done = false;
+    finalPosInc = -1;
+    finalOffset = -1;
+    lastStartOffset = 0;
+    maxLookaheadUsed = 0;
+  }
+
+  /** For testing */
+  public int getMaxLookaheadUsed() {
+    return maxLookaheadUsed;
+  }
+}
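As the javadoc above stresses, this filter belongs in the index-time chain, directly after a graph producer. A minimal direct-construction sketch (the single-word synonym is made up; the rest is standard Lucene API):

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.FlattenGraphFilter;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
    import org.apache.lucene.analysis.synonym.SynonymMap;
    import org.apache.lucene.util.CharsRef;

    public class FlattenSketch {
      static Analyzer indexAnalyzer() throws IOException {
        SynonymMap.Builder builder = new SynonymMap.Builder(true);
        builder.add(new CharsRef("wifi"), new CharsRef("wlan"), true);  // made-up mapping
        SynonymMap synonyms = builder.build();
        return new Analyzer() {
          @Override
          protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer source = new WhitespaceTokenizer();
            // SynonymGraphFilter emits a graph; flatten it so the index sees a linear chain:
            TokenStream graph = new SynonymGraphFilter(source, synonyms, true);
            return new TokenStreamComponents(source, new FlattenGraphFilter(graph));
          }
        };
      }
    }

The search-time analyzer would use SynonymGraphFilter without the flattening step, since query parsers can consume the graph directly.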

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/FlattenGraphFilterFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/FlattenGraphFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/FlattenGraphFilterFactory.java
new file mode 100644
index 0000000..920ab3d
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/FlattenGraphFilterFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis.core;
+
+import java.util.Map;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+
+/** 
+ * Factory for {@link FlattenGraphFilter}. 
+ *
+ * @lucene.experimental
+ */
+public class FlattenGraphFilterFactory extends TokenFilterFactory {
+
+  /** Creates a new FlattenGraphFilterFactory */
+  public FlattenGraphFilterFactory(Map<String,String> args) {
+    super(args);
+    if (!args.isEmpty()) {
+      throw new IllegalArgumentException("Unknown parameters: " + args);
+    }
+  }
+  
+  @Override
+  public TokenStream create(TokenStream input) {
+    return new FlattenGraphFilter(input);
+  }
+}
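For configuration-driven setups the factory is also reachable through Lucene's analysis SPI. A hedged sketch via CustomAnalyzer, assuming the SPI name resolves to "flattenGraph" per the usual FooFilterFactory naming convention, with an illustrative synonyms file:

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.custom.CustomAnalyzer;

    public class FlattenFactorySketch {
      static Analyzer build() throws IOException {
        return CustomAnalyzer.builder()
            .withTokenizer("whitespace")
            .addTokenFilter("synonymGraph", "synonyms", "synonyms.txt")  // file name is illustrative
            .addTokenFilter("flattenGraph")
            .build();
      }
    }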

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
index f80ed8a..aef697c 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.java
@@ -28,6 +28,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.InPlaceMergeSorter;
@@ -80,7 +81,12 @@ import org.apache.lucene.util.InPlaceMergeSorter;
  * the current {@link StandardTokenizer} immediately removes many intra-word
  * delimiters, it is recommended that this filter be used after a tokenizer that
  * does not do this (such as {@link WhitespaceTokenizer}).
+ *
+ * @deprecated Use {@link WordDelimiterGraphFilter} instead: it produces a correct
+ * token graph so that e.g. {@link PhraseQuery} works correctly when it's used in
+ * the search time analyzer.
  */
+@Deprecated
 public final class WordDelimiterFilter extends TokenFilter {
   
   public static final int LOWER = 0x01;
@@ -116,7 +122,7 @@ public final class WordDelimiterFilter extends TokenFilter {
   /**
    * Causes maximum runs of word parts to be catenated:
    * <p>
-   * "wi-fi" =&gt; "wifi"
+   * "500-42" =&gt; "50042"
    */
   public static final int CATENATE_NUMBERS = 8;
 
@@ -494,7 +500,6 @@ public final class WordDelimiterFilter extends TokenFilter {
   private void generatePart(boolean isSingleWord) {
     clearAttributes();
     termAttribute.copyBuffer(savedBuffer, iterator.current, iterator.end - iterator.current);
-
     int startOffset = savedStartOffset + iterator.current;
     int endOffset = savedStartOffset + iterator.end;
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
index 6a15b55..0002d65 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
@@ -31,6 +31,7 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.search.PhraseQuery;
 
 import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.*;
 
@@ -47,7 +48,12 @@ import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.*;
  *             types="wdfftypes.txt" /&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
+ *
+ * @deprecated Use {@link WordDelimiterGraphFilterFactory} instead: it produces a correct
+ * token graph so that e.g. {@link PhraseQuery} works correctly when it's used in
+ * the search time analyzer.
  */
+@Deprecated
 public class WordDelimiterFilterFactory extends TokenFilterFactory implements ResourceLoaderAware {
   public static final String PROTECTED_TOKENS = "protected";
   public static final String TYPES = "types";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
new file mode 100644
index 0000000..ea6f6cd
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilter.java
@@ -0,0 +1,692 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ 
+package org.apache.lucene.analysis.miscellaneous;
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.InPlaceMergeSorter;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/**
+ * Splits words into subwords and performs optional transformations on subword
+ * groups, producing a correct token graph so that e.g. {@link PhraseQuery} can
+ * work correctly when this filter is used in the search-time analyzer.  Unlike
+ * the deprecated {@link WordDelimiterFilter}, this token filter produces a
+ * correct token graph as output.  However, it cannot consume an input token
+ * graph correctly.
+ *
+ * <p>
+ * Words are split into subwords with the following rules:
+ * <ul>
+ * <li>split on intra-word delimiters (by default, all non alpha-numeric
+ * characters): <code>"Wi-Fi"</code> &#8594; <code>"Wi", "Fi"</code></li>
+ * <li>split on case transitions: <code>"PowerShot"</code> &#8594;
+ * <code>"Power", "Shot"</code></li>
+ * <li>split on letter-number transitions: <code>"SD500"</code> &#8594;
+ * <code>"SD", "500"</code></li>
+ * <li>leading and trailing intra-word delimiters on each subword are ignored:
+ * <code>"//hello---there, 'dude'"</code> &#8594;
+ * <code>"hello", "there", "dude"</code></li>
+ * <li>trailing "'s" are removed for each subword: <code>"O'Neil's"</code>
+ * &#8594; <code>"O", "Neil"</code>
+ * <ul>
+ * <li>Note: this step isn't performed in a separate filter because of possible
+ * subword combinations.</li>
+ * </ul>
+ * </li>
+ * </ul>
+ * 
+ * The <b>combinations</b> parameter affects how subwords are combined:
+ * <ul>
+ * <li>combinations="0" causes no subword combinations: <code>"PowerShot"</code>
+ * &#8594; <code>0:"Power", 1:"Shot"</code> (0 and 1 are the token positions)</li>
+ * <li>combinations="1" means that in addition to the subwords, maximum runs of
+ * non-numeric subwords are catenated and produced at the same position of the
+ * last subword in the run:
+ * <ul>
+ * <li><code>"PowerShot"</code> &#8594;
+ * <code>0:"Power", 1:"Shot" 1:"PowerShot"</code></li>
+ * <li><code>"A's+B's&amp;C's"</code> &gt; <code>0:"A", 1:"B", 2:"C", 2:"ABC"</code>
+ * </li>
+ * <li><code>"Super-Duper-XL500-42-AutoCoder!"</code> &#8594;
+ * <code>0:"Super", 1:"Duper", 2:"XL", 2:"SuperDuperXL", 3:"500" 4:"42", 5:"Auto", 6:"Coder", 6:"AutoCoder"</code>
+ * </li>
+ * </ul>
+ * </li>
+ * </ul>
+ * One use for {@link WordDelimiterGraphFilter} is to help match words with different
+ * subword delimiters. For example, if the source text contained "wi-fi" one may
+ * want "wifi" "WiFi" "wi-fi" "wi+fi" queries to all match. One way of doing so
+ * is to specify combinations="1" in the analyzer used for indexing, and
+ * combinations="0" (the default) in the analyzer used for querying. Given that
+ * the current {@link StandardTokenizer} immediately removes many intra-word
+ * delimiters, it is recommended that this filter be used after a tokenizer that
+ * does not do this (such as {@link WhitespaceTokenizer}).
+ */
+
+public final class WordDelimiterGraphFilter extends TokenFilter {
+  
+  /**
+   * Causes parts of words to be generated:
+   * <p>
+   * "PowerShot" =&gt; "Power" "Shot"
+   */
+  public static final int GENERATE_WORD_PARTS = 1;
+
+  /**
+   * Causes number subwords to be generated:
+   * <p>
+   * "500-42" =&gt; "500" "42"
+   */
+  public static final int GENERATE_NUMBER_PARTS = 2;
+
+  /**
+   * Causes maximum runs of word parts to be catenated:
+   * <p>
+   * "wi-fi" =&gt; "wifi"
+   */
+  public static final int CATENATE_WORDS = 4;
+
+  /**
+   * Causes maximum runs of number parts to be catenated:
+   * <p>
+   * "500-42" =&gt; "50042"
+   */
+  public static final int CATENATE_NUMBERS = 8;
+
+  /**
+   * Causes all subword parts to be catenated:
+   * <p>
+   * "wi-fi-4000" =&gt; "wifi4000"
+   */
+  public static final int CATENATE_ALL = 16;
+
+  /**
+   * Causes original words to be preserved and added to the subword list (defaults to false)
+   * <p>
+   * "500-42" =&gt; "500" "42" "500-42"
+   */
+  public static final int PRESERVE_ORIGINAL = 32;
+
+  /**
+   * Causes lowercase -&gt; uppercase transition to start a new subword.
+   */
+  public static final int SPLIT_ON_CASE_CHANGE = 64;
+
+  /**
+   * If not set, causes numeric changes to be ignored (subwords will only be generated
+   * given SUBWORD_DELIM tokens).
+   */
+  public static final int SPLIT_ON_NUMERICS = 128;
+
+  /**
+   * Causes trailing "'s" to be removed for each subword
+   * <p>
+   * "O'Neil's" =&gt; "O", "Neil"
+   */
+  public static final int STEM_ENGLISH_POSSESSIVE = 256;
+  
+  /**
+   * If not null, the set of tokens to protect from being delimited.
+   */
+  final CharArraySet protWords;
+
+  private final int flags;
+
+  // packs start pos, end pos, start part, end part (= slice of the term text) for each buffered part:
+  private int[] bufferedParts = new int[16];
+  private int bufferedLen;
+  private int bufferedPos;
+
+  // holds text for each buffered part, or null if it's a simple slice of the original term
+  private char[][] bufferedTermParts = new char[4][];
+  
+  private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+  private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
+  private final PositionIncrementAttribute posIncAttribute = addAttribute(PositionIncrementAttribute.class);
+  private final PositionLengthAttribute posLenAttribute = addAttribute(PositionLengthAttribute.class);
+  private final TypeAttribute typeAttribute = addAttribute(TypeAttribute.class);
+
+  // used for iterating word delimiter breaks
+  private final WordDelimiterIterator iterator;
+
+  // used for concatenating runs of similar typed subwords (word,number)
+  private final WordDelimiterConcatenation concat = new WordDelimiterConcatenation();
+
+  // number of subwords last output by concat.
+  private int lastConcatCount;
+
+  // used for catenate all
+  private final WordDelimiterConcatenation concatAll = new WordDelimiterConcatenation();
+
+  // used for accumulating position increment gaps so that we preserve incoming holes:
+  private int accumPosInc;
+
+  private char[] savedTermBuffer = new char[16];
+  private int savedTermLength;
+  private int savedStartOffset;
+  private int savedEndOffset;
+  private AttributeSource.State savedState;
+  
+  // if the length implied by the start/end offsets doesn't match the term text,
+  // assume this is a synonym and don't adjust the offsets.
+  private boolean hasIllegalOffsets;
+
+  private int wordPos;
+
+  /**
+   * Creates a new WordDelimiterGraphFilter
+   *
+   * @param in TokenStream to be filtered
+   * @param charTypeTable table containing character types
+   * @param configurationFlags Flags configuring the filter
+   * @param protWords If not null, the set of tokens to protect from being delimited
+   */
+  public WordDelimiterGraphFilter(TokenStream in, byte[] charTypeTable, int configurationFlags, CharArraySet protWords) {
+    super(in);
+    if ((configurationFlags &
+        ~(GENERATE_WORD_PARTS |
+          GENERATE_NUMBER_PARTS |
+          CATENATE_WORDS |
+          CATENATE_NUMBERS |
+          CATENATE_ALL |
+          PRESERVE_ORIGINAL |
+          SPLIT_ON_CASE_CHANGE |
+          SPLIT_ON_NUMERICS |
+          STEM_ENGLISH_POSSESSIVE)) != 0) {
+      throw new IllegalArgumentException("flags contains unrecognized flag: " + configurationFlags);
+    }
+    this.flags = configurationFlags;
+    this.protWords = protWords;
+    this.iterator = new WordDelimiterIterator(
+        charTypeTable, has(SPLIT_ON_CASE_CHANGE), has(SPLIT_ON_NUMERICS), has(STEM_ENGLISH_POSSESSIVE));
+  }
+
+  /**
+   * Creates a new WordDelimiterGraphFilter using {@link WordDelimiterIterator#DEFAULT_WORD_DELIM_TABLE}
+   * as its charTypeTable
+   *
+   * @param in TokenStream to be filtered
+   * @param configurationFlags Flags configuring the filter
+   * @param protWords If not null, the set of tokens to protect from being delimited
+   */
+  public WordDelimiterGraphFilter(TokenStream in, int configurationFlags, CharArraySet protWords) {
+    this(in, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, configurationFlags, protWords);
+  }
+
+  /** Iterates all words parts and concatenations, buffering up the term parts we should return. */
+  private void bufferWordParts() throws IOException {
+
+    saveState();
+
+    // if the length implied by the start/end offsets doesn't match the term's text, set offsets for all our word
+    // parts/concats to the incoming offsets.  this can happen if WDGF is applied to an injected synonym, or to a stemmed form, etc:
+    hasIllegalOffsets = (savedEndOffset - savedStartOffset != savedTermLength);
+
+    bufferedLen = 0;
+    lastConcatCount = 0;
+    wordPos = 0;
+
+    if (iterator.isSingleWord()) {
+      buffer(wordPos, wordPos+1, iterator.current, iterator.end);
+      wordPos++;
+      iterator.next();
+    } else {
+
+      // iterate all words parts, possibly buffering them, building up concatenations and possibly buffering them too:
+      while (iterator.end != WordDelimiterIterator.DONE) {
+        int wordType = iterator.type();
+      
+        // do we already have queued up incompatible concatenations?
+        if (concat.isNotEmpty() && (concat.type & wordType) == 0) {
+          flushConcatenation(concat);
+        }
+
+        // add subwords depending upon options
+        if (shouldConcatenate(wordType)) {
+          concatenate(concat);
+        }
+      
+        // add all subwords (catenateAll)
+        if (has(CATENATE_ALL)) {
+          concatenate(concatAll);
+        }
+      
+        // if we should output the word or number part
+        if (shouldGenerateParts(wordType)) {
+          buffer(wordPos, wordPos+1, iterator.current, iterator.end);
+          wordPos++;
+        }
+        iterator.next();
+      }
+
+      if (concat.isNotEmpty()) {
+        // flush final concatenation
+        flushConcatenation(concat);
+      }
+        
+      if (concatAll.isNotEmpty()) {
+        // only if we haven't output this same combo above, e.g. PowerShot with CATENATE_WORDS:
+        if (concatAll.subwordCount > lastConcatCount) {
+          if (wordPos == concatAll.startPos) {
+            // we are not generating parts, so we must advance wordPos now
+            wordPos++;
+          }
+          concatAll.write();
+        }
+        concatAll.clear();
+      }
+    }
+
+    if (has(PRESERVE_ORIGINAL)) {
+      if (wordPos == 0) {
+        // can happen w/ strange flag combos and inputs :)
+        wordPos++;
+      }
+      // add the original token now so that we can set the correct end position
+      buffer(0, wordPos, 0, savedTermLength);
+    }
+            
+    sorter.sort(0, bufferedLen);
+    wordPos = 0;
+
+    // set back to 0 for iterating from the buffer
+    bufferedPos = 0;
+  }
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    while (true) {
+      if (savedState == null) {
+
+        // process a new input token
+        if (input.incrementToken() == false) {
+          return false;
+        }
+
+        int termLength = termAttribute.length();
+        char[] termBuffer = termAttribute.buffer();
+
+        accumPosInc += posIncAttribute.getPositionIncrement();
+
+        // iterate & cache all word parts up front:
+        iterator.setText(termBuffer, termLength);
+        iterator.next();
+        
+        // word of no delimiters, or protected word: just return it
+        if ((iterator.current == 0 && iterator.end == termLength) ||
+            (protWords != null && protWords.contains(termBuffer, 0, termLength))) {
+          posIncAttribute.setPositionIncrement(accumPosInc);
+          accumPosInc = 0;
+          return true;
+        }
+        
+        // word of only delimiters: swallow this token, creating a hole, and move on to the next token
+        if (iterator.end == WordDelimiterIterator.DONE) {
+          if (has(PRESERVE_ORIGINAL) == false) {
+            continue;
+          } else {
+            return true;
+          }
+        }
+
+        // otherwise, we have delimiters, process & buffer all parts:
+        bufferWordParts();
+      }
+
+      if (bufferedPos < bufferedLen) {
+        clearAttributes();
+        restoreState(savedState);
+
+        char[] termPart = bufferedTermParts[bufferedPos];
+        int startPos = bufferedParts[4*bufferedPos];
+        int endPos = bufferedParts[4*bufferedPos+1];
+        int startPart = bufferedParts[4*bufferedPos+2];
+        int endPart = bufferedParts[4*bufferedPos+3];
+        bufferedPos++;
+
+        if (hasIllegalOffsets) {
+          offsetAttribute.setOffset(savedStartOffset, savedEndOffset);
+        } else {
+          offsetAttribute.setOffset(savedStartOffset + startPart, savedStartOffset + endPart);
+        }
+
+        if (termPart == null) {
+          termAttribute.copyBuffer(savedTermBuffer, startPart, endPart - startPart);
+        } else {
+          termAttribute.copyBuffer(termPart, 0, termPart.length);
+        }
+
+        posIncAttribute.setPositionIncrement(accumPosInc + startPos - wordPos);
+        accumPosInc = 0;
+        posLenAttribute.setPositionLength(endPos - startPos);
+        wordPos = startPos;
+        return true;
+      }
+        
+      // no saved concatenations, on to the next input word
+      savedState = null;
+    }
+  }
+
+  @Override
+  public void reset() throws IOException {
+    super.reset();
+    accumPosInc = 0;
+    savedState = null;
+    concat.clear();
+    concatAll.clear();
+  }
+
+  // ================================================= Helper Methods ================================================
+
+  private class PositionSorter extends InPlaceMergeSorter {
+    @Override
+    protected int compare(int i, int j) {
+      // sort by smaller start position
+      int iPosStart = bufferedParts[4*i];
+      int jPosStart = bufferedParts[4*j];
+      int cmp = Integer.compare(iPosStart, jPosStart);
+      if (cmp != 0) {
+        return cmp;
+      }
+
+      // tie break by longest pos length:
+      int iPosEnd = bufferedParts[4*i+1];
+      int jPosEnd = bufferedParts[4*j+1];
+      return Integer.compare(jPosEnd, iPosEnd);
+    }
+
+    @Override
+    protected void swap(int i, int j) {
+      int iOffset = 4*i;
+      int jOffset = 4*j;
+      for(int x=0;x<4;x++) {
+        int tmp = bufferedParts[iOffset+x];
+        bufferedParts[iOffset+x] = bufferedParts[jOffset+x];
+        bufferedParts[jOffset+x] = tmp;
+      }
+
+      char[] tmp2 = bufferedTermParts[i];
+      bufferedTermParts[i] = bufferedTermParts[j];
+      bufferedTermParts[j] = tmp2;
+    }
+  }
+  
+  final PositionSorter sorter = new PositionSorter();
+
+  /** 
+   * startPos, endPos -> graph start/end position
+   * startPart, endPart -> slice of the original term for this part
+   */
+
+  void buffer(int startPos, int endPos, int startPart, int endPart) {
+    buffer(null, startPos, endPos, startPart, endPart);
+  }
+
+  /** 
+   * a null termPart means it's a simple slice of the original term
+   */
+  void buffer(char[] termPart, int startPos, int endPos, int startPart, int endPart) {
+    /*
+    System.out.println("buffer: pos=" + startPos + "-" + endPos + " part=" + startPart + "-" + endPart);
+    if (termPart != null) {
+      System.out.println("  termIn=" + new String(termPart));
+    } else {
+      System.out.println("  term=" + new String(savedTermBuffer, startPart, endPart-startPart));
+    }
+    */
+    assert endPos > startPos: "startPos=" + startPos + " endPos=" + endPos;
+    assert endPart > startPart || (endPart == 0 && startPart == 0 && savedTermLength == 0): "startPart=" + startPart + " endPart=" + endPart;
+    if ((bufferedLen+1)*4 > bufferedParts.length) {
+      bufferedParts = ArrayUtil.grow(bufferedParts, (bufferedLen+1)*4);
+    }
+    if (bufferedTermParts.length == bufferedLen) {
+      int newSize = ArrayUtil.oversize(bufferedLen+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
+      char[][] newArray = new char[newSize][];
+      System.arraycopy(bufferedTermParts, 0, newArray, 0, bufferedTermParts.length);
+      bufferedTermParts = newArray;
+    }
+    bufferedTermParts[bufferedLen] = termPart;
+    bufferedParts[bufferedLen*4] = startPos;
+    bufferedParts[bufferedLen*4+1] = endPos;
+    bufferedParts[bufferedLen*4+2] = startPart;
+    bufferedParts[bufferedLen*4+3] = endPart;
+    bufferedLen++;
+  }
+  
+  /**
+   * Saves the existing attribute states
+   */
+  private void saveState() {
+    savedTermLength = termAttribute.length();
+    savedStartOffset = offsetAttribute.startOffset();
+    savedEndOffset = offsetAttribute.endOffset();
+    savedState = captureState();
+
+    if (savedTermBuffer.length < savedTermLength) {
+      savedTermBuffer = new char[ArrayUtil.oversize(savedTermLength, Character.BYTES)];
+    }
+
+    System.arraycopy(termAttribute.buffer(), 0, savedTermBuffer, 0, savedTermLength);
+  }
+
+  /**
+   * Flushes the given WordDelimiterConcatenation by either writing its concat and then clearing, or just clearing.
+   *
+   * @param concat WordDelimiterConcatenation that will be flushed
+   */
+  private void flushConcatenation(WordDelimiterConcatenation concat) {
+    if (wordPos == concat.startPos) {
+      // we are not generating parts, so we must advance wordPos now
+      wordPos++;
+    }
+    lastConcatCount = concat.subwordCount;
+    if (concat.subwordCount != 1 || shouldGenerateParts(concat.type) == false) {
+      concat.write();
+    }
+    concat.clear();
+  }
+
+  /**
+   * Determines whether to concatenate a word or number if the current word is the given type
+   *
+   * @param wordType Type of the current word used to determine if it should be concatenated
+   * @return {@code true} if concatenation should occur, {@code false} otherwise
+   */
+  private boolean shouldConcatenate(int wordType) {
+    return (has(CATENATE_WORDS) && WordDelimiterIterator.isAlpha(wordType)) || (has(CATENATE_NUMBERS) && WordDelimiterIterator.isDigit(wordType));
+  }
+
+  /**
+   * Determines whether a word/number part should be generated for a word of the given type
+   *
+   * @param wordType Type of the word used to determine if a word/number part should be generated
+   * @return {@code true} if a word/number part should be generated, {@code false} otherwise
+   */
+  private boolean shouldGenerateParts(int wordType) {
+    return (has(GENERATE_WORD_PARTS) && WordDelimiterIterator.isAlpha(wordType)) || (has(GENERATE_NUMBER_PARTS) && WordDelimiterIterator.isDigit(wordType));
+  }
+
+  /**
+   * Concatenates the saved buffer to the given WordDelimiterConcatenation
+   *
+   * @param concatenation WordDelimiterConcatenation to concatenate the buffer to
+   */
+  private void concatenate(WordDelimiterConcatenation concatenation) {
+    if (concatenation.isEmpty()) {
+      concatenation.type = iterator.type();
+      concatenation.startPart = iterator.current;
+      concatenation.startPos = wordPos;
+    }
+    concatenation.append(savedTermBuffer, iterator.current, iterator.end - iterator.current);
+    concatenation.endPart = iterator.end;
+  }
+
+  /**
+   * Determines whether the given flag is set
+   *
+   * @param flag Flag to see if set
+   * @return {@code true} if flag is set
+   */
+  private boolean has(int flag) {
+    return (flags & flag) != 0;
+  }
+
+  // ================================================= Inner Classes =================================================
+
+  /**
+   * A WDF concatenated 'run'
+   */
+  final class WordDelimiterConcatenation {
+    final StringBuilder buffer = new StringBuilder();
+    int startPart;
+    int endPart;
+    int startPos;
+    int type;
+    int subwordCount;
+
+    /**
+     * Appends the given text of the given length to the concatenation at the given offset
+     *
+     * @param text Text to append
+     * @param offset Offset in the concatenation to add the text
+     * @param length Length of the text to append
+     */
+    void append(char text[], int offset, int length) {
+      buffer.append(text, offset, length);
+      subwordCount++;
+    }
+
+    /**
+     * Writes the concatenation to part buffer
+     */
+    void write() {
+      char[] termPart = new char[buffer.length()];
+      buffer.getChars(0, buffer.length(), termPart, 0);
+      buffer(termPart, startPos, wordPos, startPart, endPart);
+    }
+
+    /**
+     * Determines if the concatenation is empty
+     *
+     * @return {@code true} if the concatenation is empty, {@code false} otherwise
+     */
+    boolean isEmpty() {
+      return buffer.length() == 0;
+    }
+
+    boolean isNotEmpty() {
+      return isEmpty() == false;
+    }
+
+    /**
+     * Clears the concatenation and resets its state
+     */
+    void clear() {
+      buffer.setLength(0);
+      startPart = endPart = type = subwordCount = 0;
+    }
+  }
+
+  /** Returns string representation of configuration flags */
+  public static String flagsToString(int flags) {
+    StringBuilder b = new StringBuilder();
+    if ((flags & GENERATE_WORD_PARTS) != 0) {
+      b.append("GENERATE_WORD_PARTS");
+    }
+    if ((flags & GENERATE_NUMBER_PARTS) != 0) {
+      if (b.length() > 0) {
+        b.append(" | ");
+      }
+      b.append("GENERATE_NUMBER_PARTS");
+    }
+    if ((flags & CATENATE_WORDS) != 0) {
+      if (b.length() > 0) {
+        b.append(" | ");
+      }
+      b.append("CATENATE_WORDS");
+    }
+    if ((flags & CATENATE_NUMBERS) != 0) {
+      if (b.length() > 0) {
+        b.append(" | ");
+      }
+      b.append("CATENATE_NUMBERS");
+    }
+    if ((flags & CATENATE_ALL) != 0) {
+      if (b.length() > 0) {
+        b.append(" | ");
+      }
+      b.append("CATENATE_ALL");
+    }
+    if ((flags & PRESERVE_ORIGINAL) != 0) {
+      if (b.length() > 0) {
+        b.append(" | ");
+      }
+      b.append("PRESERVE_ORIGINAL");
+    }
+    if ((flags & SPLIT_ON_CASE_CHANGE) != 0) {
+      if (b.length() > 0) {
+        b.append(" | ");
+      }
+      b.append("SPLIT_ON_CASE_CHANGE");
+    }
+    if ((flags & SPLIT_ON_NUMERICS) != 0) {
+      if (b.length() > 0) {
+        b.append(" | ");
+      }
+      b.append("SPLIT_ON_NUMERICS");
+    }
+    if ((flags & STEM_ENGLISH_POSSESSIVE) != 0) {
+      if (b.length() > 0) {
+        b.append(" | ");
+      }
+      b.append("STEM_ENGLISH_POSSESSIVE");
+    }
+
+    return b.toString();
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder b = new StringBuilder();
+    b.append("WordDelimiterGraphFilter(flags=");
+    b.append(flagsToString(flags));
+    b.append(')');
+    return b.toString();
+  }
+  
+  // questions:
+  // negative numbers?  -42 indexed as just 42?
+  // dollar sign?  $42
+  // percent sign?  33%
+  // downsides:  if source text is "powershot" then a query of "PowerShot" won't match!
+}
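A quick way to see the graph this filter produces is to push a single token through it and dump the position lengths. A minimal sketch (flag choice and input are illustrative; the constructor and constants are the ones defined above, and a null protWords set is permitted):

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;

    public class WdgfSketch {
      public static void main(String[] args) throws Exception {
        int flags = WordDelimiterGraphFilter.GENERATE_WORD_PARTS
                  | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE
                  | WordDelimiterGraphFilter.CATENATE_WORDS;
        Tokenizer tok = new WhitespaceTokenizer();
        tok.setReader(new StringReader("Wi-Fi"));
        TokenStream ts = new WordDelimiterGraphFilter(tok, flags, null);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        PositionLengthAttribute posLen = ts.addAttribute(PositionLengthAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          // expect "WiFi" spanning both positions (posLen 2), plus "Wi" and "Fi" (posLen 1)
          System.out.println(term + " posLen=" + posLen.getPositionLength());
        }
        ts.end();
        ts.close();
      }
    }

To index that output, a FlattenGraphFilter must follow in the chain, since the index cannot record position lengths.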

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java
new file mode 100644
index 0000000..a06cc75
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.miscellaneous;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.ResourceLoader;
+import org.apache.lucene.analysis.util.ResourceLoaderAware;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.*;
+import static org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator.*;
+
+/**
+ * Factory for {@link WordDelimiterGraphFilter}.
+ * <pre class="prettyprint">
+ * &lt;fieldType name="text_wd" class="solr.TextField" positionIncrementGap="100"&gt;
+ *   &lt;analyzer&gt;
+ *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
+ *     &lt;filter class="solr.WordDelimiterGraphFilterFactory" protected="protectedword.txt"
+ *             preserveOriginal="0" splitOnNumerics="1" splitOnCaseChange="1"
+ *             catenateWords="0" catenateNumbers="0" catenateAll="0"
+ *             generateWordParts="1" generateNumberParts="1" stemEnglishPossessive="1"
+ *             types="wdfftypes.txt" /&gt;
+ *   &lt;/analyzer&gt;
+ * &lt;/fieldType&gt;</pre>
+ */
+public class WordDelimiterGraphFilterFactory extends TokenFilterFactory implements ResourceLoaderAware {
+  public static final String PROTECTED_TOKENS = "protected";
+  public static final String TYPES = "types";
+
+  private final String wordFiles;
+  private final String types;
+  private final int flags;
+  byte[] typeTable = null;
+  private CharArraySet protectedWords = null;
+  
+  /** Creates a new WordDelimiterGraphFilterFactory */
+  public WordDelimiterGraphFilterFactory(Map<String, String> args) {
+    super(args);
+    int flags = 0;
+    if (getInt(args, "generateWordParts", 1) != 0) {
+      flags |= GENERATE_WORD_PARTS;
+    }
+    if (getInt(args, "generateNumberParts", 1) != 0) {
+      flags |= GENERATE_NUMBER_PARTS;
+    }
+    if (getInt(args, "catenateWords", 0) != 0) {
+      flags |= CATENATE_WORDS;
+    }
+    if (getInt(args, "catenateNumbers", 0) != 0) {
+      flags |= CATENATE_NUMBERS;
+    }
+    if (getInt(args, "catenateAll", 0) != 0) {
+      flags |= CATENATE_ALL;
+    }
+    if (getInt(args, "splitOnCaseChange", 1) != 0) {
+      flags |= SPLIT_ON_CASE_CHANGE;
+    }
+    if (getInt(args, "splitOnNumerics", 1) != 0) {
+      flags |= SPLIT_ON_NUMERICS;
+    }
+    if (getInt(args, "preserveOriginal", 0) != 0) {
+      flags |= PRESERVE_ORIGINAL;
+    }
+    if (getInt(args, "stemEnglishPossessive", 1) != 0) {
+      flags |= STEM_ENGLISH_POSSESSIVE;
+    }
+    wordFiles = get(args, PROTECTED_TOKENS);
+    types = get(args, TYPES);
+    this.flags = flags;
+    if (!args.isEmpty()) {
+      throw new IllegalArgumentException("Unknown parameters: " + args);
+    }
+  }
+  
+  @Override
+  public void inform(ResourceLoader loader) throws IOException {
+    if (wordFiles != null) {  
+      protectedWords = getWordSet(loader, wordFiles, false);
+    }
+    if (types != null) {
+      List<String> files = splitFileNames( types );
+      List<String> wlist = new ArrayList<>();
+      for( String file : files ){
+        List<String> lines = getLines(loader, file.trim());
+        wlist.addAll( lines );
+      }
+      typeTable = parseTypes(wlist);
+    }
+  }
+
+  @Override
+  public TokenFilter create(TokenStream input) {
+    return new WordDelimiterGraphFilter(input, typeTable == null ? WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE : typeTable,
+                                        flags, protectedWords);
+  }
+  
+  // source => type
+  private static Pattern typePattern = Pattern.compile( "(.*)\\s*=>\\s*(.*)\\s*$" );
+  
+  // parses a list of MappingCharFilter style rules into a custom byte[] type table
+  private byte[] parseTypes(List<String> rules) {
+    SortedMap<Character,Byte> typeMap = new TreeMap<>();
+    for( String rule : rules ){
+      Matcher m = typePattern.matcher(rule);
+      if( !m.find() )
+        throw new IllegalArgumentException("Invalid Mapping Rule : [" + rule + "]");
+      String lhs = parseString(m.group(1).trim());
+      Byte rhs = parseType(m.group(2).trim());
+      if (lhs.length() != 1)
+        throw new IllegalArgumentException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
+      if (rhs == null)
+        throw new IllegalArgumentException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
+      typeMap.put(lhs.charAt(0), rhs);
+    }
+    
+    // ensure the table is always at least as big as DEFAULT_WORD_DELIM_TABLE for performance
+    byte types[] = new byte[Math.max(typeMap.lastKey()+1, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE.length)];
+    for (int i = 0; i < types.length; i++)
+      types[i] = WordDelimiterIterator.getType(i);
+    for (Map.Entry<Character,Byte> mapping : typeMap.entrySet())
+      types[mapping.getKey()] = mapping.getValue();
+    return types;
+  }
+  
+  private Byte parseType(String s) {
+    if (s.equals("LOWER"))
+      return LOWER;
+    else if (s.equals("UPPER"))
+      return UPPER;
+    else if (s.equals("ALPHA"))
+      return ALPHA;
+    else if (s.equals("DIGIT"))
+      return DIGIT;
+    else if (s.equals("ALPHANUM"))
+      return ALPHANUM;
+    else if (s.equals("SUBWORD_DELIM"))
+      return SUBWORD_DELIM;
+    else
+      return null;
+  }
+  
+  char[] out = new char[256];
+  
+  private String parseString(String s){
+    int readPos = 0;
+    int len = s.length();
+    int writePos = 0;
+    while( readPos < len ){
+      char c = s.charAt( readPos++ );
+      if( c == '\\' ){
+        if( readPos >= len )
+          throw new IllegalArgumentException("Invalid escaped char in [" + s + "]");
+        c = s.charAt( readPos++ );
+        switch( c ) {
+          case '\\' : c = '\\'; break;
+          case 'n' : c = '\n'; break;
+          case 't' : c = '\t'; break;
+          case 'r' : c = '\r'; break;
+          case 'b' : c = '\b'; break;
+          case 'f' : c = '\f'; break;
+          case 'u' :
+            if( readPos + 3 >= len )
+              throw new IllegalArgumentException("Invalid escaped char in [" + s + "]");
+            c = (char)Integer.parseInt( s.substring( readPos, readPos + 4 ), 16 );
+            readPos += 4;
+            break;
+        }
+      }
+      out[writePos++] = c;
+    }
+    return new String( out, 0, writePos );
+  }
+}

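The "types" attribute consumed above points at MappingCharFilter-style rules, one "char => TYPE" mapping per line: parseString() resolves \\, \n, \t, \r, \b, \f and \uXXXX escapes on the left-hand side, and parseType() accepts LOWER, UPPER, ALPHA, DIGIT, ALPHANUM or SUBWORD_DELIM on the right. A hypothetical wdfftypes.txt (file name and mappings are illustrative, not part of this commit):

  $ => DIGIT
  % => DIGIT
  \u002C => DIGIT
  \\ => SUBWORD_DELIM

With rules like these, "$5,000" keeps the currency sign and the comma inside the numeric token instead of treating them as delimiters.
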
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java
index 0367dab..86b983d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterIterator.java
@@ -16,15 +16,21 @@
  */
 package org.apache.lucene.analysis.miscellaneous;
 
-
-import static org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter.*;
-
 /**
- * A BreakIterator-like API for iterating over subwords in text, according to WordDelimiterFilter rules.
+ * A BreakIterator-like API for iterating over subwords in text, according to WordDelimiterGraphFilter rules.
  * @lucene.internal
  */
 public final class WordDelimiterIterator {
 
+  static final int LOWER = 0x01;
+  static final int UPPER = 0x02;
+  static final int DIGIT = 0x04;
+  static final int SUBWORD_DELIM = 0x08;
+
+  // combinations: for testing, not for setting bits
+  public static final int ALPHA = 0x03;
+  public static final int ALPHANUM = 0x07;
+
   /** Indicates the end of iteration */
   public static final int DONE = -1;
   
@@ -97,7 +103,7 @@ public final class WordDelimiterIterator {
    * Create a new WordDelimiterIterator operating with the supplied rules.
    * 
    * @param charTypeTable table containing character types
-   * @param splitOnCaseChange if true, causes "PowerShot" to be two tokens; ("Power-Shot" remains two parts regards)
+   * @param splitOnCaseChange if true, causes "PowerShot" to be two tokens; ("Power-Shot" remains two parts regardless)
    * @param splitOnNumerics if true, causes "j2se" to be three tokens; "j" "2" "se"
    * @param stemEnglishPossessive if true, causes trailing "'s" to be removed for each subword: "O'Neil's" =&gt; "O", "Neil"
    */
@@ -323,4 +329,45 @@ public final class WordDelimiterIterator {
       default: return SUBWORD_DELIM;
     }
   }
-}
\ No newline at end of file
+
+  /**
+   * Checks if the given word type includes {@link #ALPHA}
+   *
+   * @param type Word type to check
+   * @return {@code true} if the type contains ALPHA, {@code false} otherwise
+   */
+  static boolean isAlpha(int type) {
+    return (type & ALPHA) != 0;
+  }
+
+  /**
+   * Checks if the given word type includes {@link #DIGIT}
+   *
+   * @param type Word type to check
+   * @return {@code true} if the type contains DIGIT, {@code false} otherwise
+   */
+  static boolean isDigit(int type) {
+    return (type & DIGIT) != 0;
+  }
+
+  /**
+   * Checks if the given word type includes {@link #SUBWORD_DELIM}
+   *
+   * @param type Word type to check
+   * @return {@code true} if the type contains SUBWORD_DELIM, {@code false} otherwise
+   */
+  static boolean isSubwordDelim(int type) {
+    return (type & SUBWORD_DELIM) != 0;
+  }
+
+  /**
+   * Checks if the given word type includes {@link #UPPER}
+   *
+   * @param type Word type to check
+   * @return {@code true} if the type contains UPPER, {@code false} otherwise
+   */
+  static boolean isUpper(int type) {
+    return (type & UPPER) != 0;
+  }
+
+}

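The constants added here form a small bitmask: ALPHA (0x03) is LOWER | UPPER, and ALPHANUM (0x07) adds DIGIT, so each is*() helper tests membership with a single AND. A quick illustration (the helpers and the single-bit constants are package-private, so this assumes same-package test code):

  assert WordDelimiterIterator.isAlpha(WordDelimiterIterator.UPPER);          // 0x02 & 0x03 != 0
  assert WordDelimiterIterator.isDigit(WordDelimiterIterator.ALPHANUM);       // 0x07 contains DIGIT (0x04)
  assert !WordDelimiterIterator.isAlpha(WordDelimiterIterator.SUBWORD_DELIM); // 0x08 & 0x03 == 0
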
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/FlattenGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/FlattenGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/FlattenGraphFilter.java
deleted file mode 100644
index c1fa1f7..0000000
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/FlattenGraphFilter.java
+++ /dev/null
@@ -1,417 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.lucene.analysis.synonym;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.lucene.analysis.TokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.RollingBuffer;
-
-/**
- * Converts an incoming graph token stream, such as one from
- * {@link SynonymGraphFilter}, into a flat form so that
- * all nodes form a single linear chain with no side paths.  Every
- * path through the graph touches every node.  This is necessary
- * when indexing a graph token stream, because the index does not
- * save {@link PositionLengthAttribute} and so it cannot
- * preserve the graph structure.  However, at search time,
- * query parsers can correctly handle the graph and this token
- * filter should <b>not</b> be used.
- *
- * <p>If the graph was not already flat to start, this
- * is likely a lossy process, i.e. it will often cause the 
- * graph to accept token sequences it should not, and to
- * reject token sequences it should not.
- *
- * <p>However, when applying synonyms during indexing, this
- * is necessary because Lucene already does not index a graph 
- * and so the indexing process is already lossy
- * (it ignores the {@link PositionLengthAttribute}).
- *
- * @lucene.experimental
- */
-public final class FlattenGraphFilter extends TokenFilter {
-
-  /** Holds all tokens leaving a given input position. */
-  private final static class InputNode implements RollingBuffer.Resettable {
-    private final List<AttributeSource.State> tokens = new ArrayList<>();
-
-    /** Our input node, or -1 if we haven't been assigned yet */
-    int node = -1;
-
-    /** Maximum to input node for all tokens leaving here; we use this
-     *  to know when we can freeze. */
-    int maxToNode = -1;
-
-    /** Where we currently map to; this changes (can only
-     *  increase as we see more input tokens), until we are finished
-     *  with this position. */
-    int outputNode = -1;
-
-    /** Which token (index into {@link #tokens}) we will next output. */
-    int nextOut;
-
-    @Override
-    public void reset() {
-      tokens.clear();
-      node = -1;
-      outputNode = -1;
-      maxToNode = -1;
-      nextOut = 0;
-    }
-  }
-
-  /** Gathers up merged input positions into a single output position,
-   *  only for the current "frontier" of nodes we've seen but can't yet
-   *  output because they are not frozen. */
-  private final static class OutputNode implements RollingBuffer.Resettable {
-    private final List<Integer> inputNodes = new ArrayList<>();
-
-    /** Node ID for this output, or -1 if we haven't been assigned yet. */
-    int node = -1;
-
-    /** Which input node (index into {@link #inputNodes}) we will next output. */
-    int nextOut;
-    
-    /** Start offset of tokens leaving this node. */
-    int startOffset = -1;
-
-    /** End offset of tokens arriving to this node. */
-    int endOffset = -1;
-
-    @Override
-    public void reset() {
-      inputNodes.clear();
-      node = -1;
-      nextOut = 0;
-      startOffset = -1;
-      endOffset = -1;
-    }
-  }
-
-  private final RollingBuffer<InputNode> inputNodes = new RollingBuffer<InputNode>() {
-    @Override
-    protected InputNode newInstance() {
-      return new InputNode();
-    }
-  };
-
-  private final RollingBuffer<OutputNode> outputNodes = new RollingBuffer<OutputNode>() {
-    @Override
-    protected OutputNode newInstance() {
-      return new OutputNode();
-    }
-  };
-
-  private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
-  private final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class);
-  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
-
-  /** Which input node the last seen token leaves from */
-  private int inputFrom;
-
-  /** We are currently releasing tokens leaving from this output node */
-  private int outputFrom;
-
-  // for debugging:
-  //private int retOutputFrom;
-
-  private boolean done;
-
-  private int lastOutputFrom;
-
-  private int finalOffset;
-
-  private int finalPosInc;
-
-  private int maxLookaheadUsed;
-
-  private int lastStartOffset;
-
-  public FlattenGraphFilter(TokenStream in) {
-    super(in);
-  }
-
-  private boolean releaseBufferedToken() {
-
-    // We only need the while loop (retry) if we have a hole (an output node that has no tokens leaving):
-    while (outputFrom < outputNodes.getMaxPos()) {
-      OutputNode output = outputNodes.get(outputFrom);
-      if (output.inputNodes.isEmpty()) {
-        // No tokens arrived to this node, which happens for the first node
-        // after a hole:
-        //System.out.println("    skip empty outputFrom=" + outputFrom);
-        outputFrom++;
-        continue;
-      }
-
-      int maxToNode = -1;
-      for(int inputNodeID : output.inputNodes) {
-        InputNode inputNode = inputNodes.get(inputNodeID);
-        assert inputNode.outputNode == outputFrom;
-        maxToNode = Math.max(maxToNode, inputNode.maxToNode);
-      }
-      //System.out.println("  release maxToNode=" + maxToNode + " vs inputFrom=" + inputFrom);
-
-      // TODO: we could shrink the frontier here somewhat if we
-      // always output posLen=1 as part of our "sausagizing":
-      if (maxToNode <= inputFrom || done) {
-        //System.out.println("  output node merged these inputs: " + output.inputNodes);
-        // These tokens are now frozen
-        assert output.nextOut < output.inputNodes.size(): "output.nextOut=" + output.nextOut + " vs output.inputNodes.size()=" + output.inputNodes.size();
-        InputNode inputNode = inputNodes.get(output.inputNodes.get(output.nextOut));
-        if (done && inputNode.tokens.size() == 0 && outputFrom >= outputNodes.getMaxPos()) {
-          return false;
-        }
-        if (inputNode.tokens.size() == 0) {
-          assert inputNode.nextOut == 0;
-          assert output.nextOut == 0;
-          // Hole dest nodes should never be merged since 1) we always
-          // assign them to a new output position, and 2) since they never
-          // have arriving tokens they cannot be pushed:
-          assert output.inputNodes.size() == 1: output.inputNodes.size();
-          outputFrom++;
-          inputNodes.freeBefore(output.inputNodes.get(0));
-          outputNodes.freeBefore(outputFrom);
-          continue;
-        }
-
-        assert inputNode.nextOut < inputNode.tokens.size();
-
-        restoreState(inputNode.tokens.get(inputNode.nextOut));
-
-        // Correct posInc
-        assert outputFrom >= lastOutputFrom;
-        posIncAtt.setPositionIncrement(outputFrom - lastOutputFrom);
-        int toInputNodeID = inputNode.node + posLenAtt.getPositionLength();
-        InputNode toInputNode = inputNodes.get(toInputNodeID);
-
-        // Correct posLen
-        assert toInputNode.outputNode > outputFrom;
-        posLenAtt.setPositionLength(toInputNode.outputNode - outputFrom);
-        lastOutputFrom = outputFrom;
-        inputNode.nextOut++;
-        //System.out.println("  ret " + this);
-
-        OutputNode outputEndNode = outputNodes.get(toInputNode.outputNode);
-
-        // Correct offsets
-
-        // This is a bit messy; we must do this so offsets don't go backwards,
-        // which would otherwise happen if the replacement has more tokens
-        // than the input:
-        int startOffset = Math.max(lastStartOffset, output.startOffset);
-
-        // We must do this in case the incoming tokens have broken offsets:
-        int endOffset = Math.max(startOffset, outputEndNode.endOffset);
-        
-        offsetAtt.setOffset(startOffset, endOffset);
-        lastStartOffset = startOffset;
-
-        if (inputNode.nextOut == inputNode.tokens.size()) {
-          output.nextOut++;
-          if (output.nextOut == output.inputNodes.size()) {
-            outputFrom++;
-            inputNodes.freeBefore(output.inputNodes.get(0));
-            outputNodes.freeBefore(outputFrom);
-          }
-        }
-
-        return true;
-      } else {
-        return false;
-      }
-    }
-
-    //System.out.println("    break false");
-    return false;
-  }
-
-  @Override
-  public boolean incrementToken() throws IOException {
-    //System.out.println("\nF.increment inputFrom=" + inputFrom + " outputFrom=" + outputFrom);
-
-    while (true) {
-      if (releaseBufferedToken()) {
-        //retOutputFrom += posIncAtt.getPositionIncrement();
-        //System.out.println("    return buffered: " + termAtt + " " + retOutputFrom + "-" + (retOutputFrom + posLenAtt.getPositionLength()));
-        //printStates();
-        return true;
-      } else if (done) {
-        //System.out.println("    done, return false");
-        return false;
-      }
-
-      if (input.incrementToken()) {
-        // Input node this token leaves from:
-        inputFrom += posIncAtt.getPositionIncrement();
-
-        int startOffset = offsetAtt.startOffset();
-        int endOffset = offsetAtt.endOffset();
-
-        // Input node this token goes to:
-        int inputTo = inputFrom + posLenAtt.getPositionLength();
-        //System.out.println("  input.inc " + termAtt + ": " + inputFrom + "-" + inputTo);
-
-        InputNode src = inputNodes.get(inputFrom);
-        if (src.node == -1) {
-          // This means the "from" node of this token was never seen as a "to" node,
-          // which should only happen if we just crossed a hole.  This is a challenging
-          // case for us because we normally rely on the full dependencies expressed
-          // by the arcs to assign outgoing node IDs.  It would be better if tokens
-          // were never dropped but instead just marked deleted with a new
-          // TermDeletedAttribute (boolean valued) ... but until that future, we have
-          // a hack here to forcefully jump the output node ID:
-          assert src.outputNode == -1;
-          src.node = inputFrom;
-
-          src.outputNode = outputNodes.getMaxPos() + 1;
-          //System.out.println("    hole: force to outputNode=" + src.outputNode);
-          OutputNode outSrc = outputNodes.get(src.outputNode);
-
-          // Not assigned yet:
-          assert outSrc.node == -1;
-          outSrc.node = src.outputNode;
-          outSrc.inputNodes.add(inputFrom);
-          outSrc.startOffset = startOffset;
-        } else {
-          OutputNode outSrc = outputNodes.get(src.outputNode);
-          if (outSrc.startOffset == -1 || startOffset > outSrc.startOffset) {
-            // "shrink wrap" the offsets so the original tokens (with most
-            // restrictive offsets) win:
-            outSrc.startOffset = Math.max(startOffset, outSrc.startOffset);
-          }
-        }
-
-        // Buffer this token:
-        src.tokens.add(captureState());
-        src.maxToNode = Math.max(src.maxToNode, inputTo);
-        maxLookaheadUsed = Math.max(maxLookaheadUsed, inputNodes.getBufferSize());
-
-        InputNode dest = inputNodes.get(inputTo);
-        if (dest.node == -1) {
-          // Common case: first time a token is arriving to this input position:
-          dest.node = inputTo;
-        }
-
-        // Always number output nodes sequentially:
-        int outputEndNode = src.outputNode + 1;
-
-        if (outputEndNode > dest.outputNode) {
-          if (dest.outputNode != -1) {
-            boolean removed = outputNodes.get(dest.outputNode).inputNodes.remove(Integer.valueOf(inputTo));
-            assert removed;
-          }
-          //System.out.println("    increase output node: " + dest.outputNode + " vs " + outputEndNode);
-          outputNodes.get(outputEndNode).inputNodes.add(inputTo);
-          dest.outputNode = outputEndNode;
-
-          // Since all we ever do is merge incoming nodes together, and then renumber
-          // the merged nodes sequentially, we should only ever assign smaller node
-          // numbers:
-          assert outputEndNode <= inputTo: "outputEndNode=" + outputEndNode + " vs inputTo=" + inputTo;
-        }
-
-        OutputNode outDest = outputNodes.get(dest.outputNode);
-        // "shrink wrap" the offsets so the original tokens (with most
-        // restrictive offsets) win:
-        if (outDest.endOffset == -1 || endOffset < outDest.endOffset) {
-          outDest.endOffset = endOffset;
-        }
-
-      } else {
-        //System.out.println("  got false from input");
-        input.end();
-        finalPosInc = posIncAtt.getPositionIncrement();
-        finalOffset = offsetAtt.endOffset();
-        done = true;
-        // Don't return false here: we need to force release any buffered tokens now
-      }
-    }
-  }
-
-  // Only for debugging:
-  /*
-  private void printStates() {
-    System.out.println("states:");
-    for(int i=outputFrom;i<outputNodes.getMaxPos();i++) {
-      OutputNode outputNode = outputNodes.get(i);
-      System.out.println("  output " + i + ": inputs " + outputNode.inputNodes);
-      for(int inputNodeID : outputNode.inputNodes) {
-        InputNode inputNode = inputNodes.get(inputNodeID);
-        assert inputNode.outputNode == i;
-      }
-    }
-  }
-  */
-
-  @Override
-  public void end() throws IOException {
-    if (done == false) {
-      super.end();
-    } else {
-      // NOTE, shady: don't call super.end, because we did already from incrementToken
-    }
-
-    clearAttributes();
-    if (done) {
-      // On exc, done is false, and we will not have set these:
-      posIncAtt.setPositionIncrement(finalPosInc);
-      offsetAtt.setOffset(finalOffset, finalOffset);
-    } else {
-      super.end();
-    }
-  }
-  
-  @Override
-  public void reset() throws IOException {
-    //System.out.println("F: reset");
-    super.reset();
-    inputFrom = -1;
-    inputNodes.reset();
-    InputNode in = inputNodes.get(0);
-    in.node = 0;
-    in.outputNode = 0;
-
-    outputNodes.reset();
-    OutputNode out = outputNodes.get(0);
-    out.node = 0;
-    out.inputNodes.add(0);
-    out.startOffset = 0;
-    outputFrom = 0;
-    //retOutputFrom = -1;
-    lastOutputFrom = -1;
-    done = false;
-    finalPosInc = -1;
-    finalOffset = -1;
-    lastStartOffset = 0;
-    maxLookaheadUsed = 0;
-  }
-
-  // for testing
-  int getMaxLookaheadUsed() {
-    return maxLookaheadUsed;
-  }
-}

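This deletion is a package move rather than a removal: the import changes below show that FlattenGraphFilter now lives in org.apache.lucene.analysis.core. Its contract is unchanged, so an index-time chain still flattens the synonym graph before indexing while the query-time analyzer leaves the graph intact. A minimal sketch, assuming a prebuilt SynonymMap named synMap:

  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.analysis.Tokenizer;
  import org.apache.lucene.analysis.core.FlattenGraphFilter;
  import org.apache.lucene.analysis.core.WhitespaceTokenizer;
  import org.apache.lucene.analysis.synonym.SynonymGraphFilter;

  Analyzer indexAnalyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tok = new WhitespaceTokenizer();
      TokenStream ts = new SynonymGraphFilter(tok, synMap, true); // true = ignore case
      ts = new FlattenGraphFilter(ts); // index time only; never at query time
      return new TokenStreamComponents(tok, ts);
    }
  };
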
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/FlattenGraphFilterFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/FlattenGraphFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/FlattenGraphFilterFactory.java
deleted file mode 100644
index a6cba97..0000000
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/FlattenGraphFilterFactory.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.lucene.analysis.synonym;
-
-import java.util.Map;
-
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-
-/** 
- * Factory for {@link FlattenGraphFilter}. 
- *
- * @lucene.experimental
- */
-public class FlattenGraphFilterFactory extends TokenFilterFactory {
-
-  /** Creates a new FlattenGraphFilterFactory */
-  public FlattenGraphFilterFactory(Map<String,String> args) {
-    super(args);
-    if (!args.isEmpty()) {
-      throw new IllegalArgumentException("Unknown parameters: " + args);
-    }
-  }
-  
-  @Override
-  public TokenStream create(TokenStream input) {
-    return new FlattenGraphFilter(input);
-  }
-}

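The factory moves along with the filter (see the SPI registration change at the end of this commit), so schema usage is unaffected. An illustrative fieldType in the style of the factory javadocs above, flattening only on the index-time side:

  <fieldType name="text_syn" class="solr.TextField" positionIncrementGap="100">
    <analyzer type="index">
      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
      <filter class="solr.FlattenGraphFilterFactory"/>
    </analyzer>
    <analyzer type="query">
      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
    </analyzer>
  </fieldType>
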
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
index 29f6e1c..ec2676f 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
@@ -21,6 +21,7 @@ import java.util.Arrays;
 
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.FlattenGraphFilter;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
index df10e9b..87ddc08 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
@@ -33,6 +33,7 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.FlattenGraphFilterFactory;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java
index 3d50e08..788db0a 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilter.java
@@ -17,8 +17,14 @@
 
 package org.apache.lucene.analysis.synonym;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.FlattenGraphFilter;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@@ -31,11 +37,6 @@ import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.RollingBuffer;
 import org.apache.lucene.util.fst.FST;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.List;
-
 // TODO: maybe we should resolve token -> wordID then run
 // FST on wordIDs, for better perf?
  

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/637915b8/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory b/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory
index 5f8894c..4e33006 100644
--- a/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory
+++ b/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory
@@ -78,6 +78,7 @@ org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilterFactory
 org.apache.lucene.analysis.miscellaneous.TrimFilterFactory
 org.apache.lucene.analysis.miscellaneous.TruncateTokenFilterFactory
 org.apache.lucene.analysis.miscellaneous.WordDelimiterFilterFactory
+org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilterFactory
 org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilterFactory
 org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilterFactory
 org.apache.lucene.analysis.ngram.EdgeNGramFilterFactory
@@ -103,6 +104,6 @@ org.apache.lucene.analysis.standard.StandardFilterFactory
 org.apache.lucene.analysis.sv.SwedishLightStemFilterFactory
 org.apache.lucene.analysis.synonym.SynonymFilterFactory
 org.apache.lucene.analysis.synonym.SynonymGraphFilterFactory
-org.apache.lucene.analysis.synonym.FlattenGraphFilterFactory
+org.apache.lucene.analysis.core.FlattenGraphFilterFactory
 org.apache.lucene.analysis.tr.TurkishLowerCaseFilterFactory
 org.apache.lucene.analysis.util.ElisionFilterFactory

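With the service entries above in place, both factories resolve through the analysis SPI; the lookup name is derived from the class name minus its FilterFactory suffix and is matched case-insensitively. A small sketch (the args map contents are illustrative):

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.lucene.analysis.util.TokenFilterFactory;

  Map<String,String> args = new HashMap<>();
  args.put("catenateWords", "1");
  TokenFilterFactory wdgf = TokenFilterFactory.forName("wordDelimiterGraph", args);
  TokenFilterFactory flatten = TokenFilterFactory.forName("flattenGraph", new HashMap<>());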

[49/50] [abbrv] lucene-solr:apiv2: LUCENE-7651: Fix Javadocs build for Java 8u121 by injecting "Google Code Prettify" without adding Javascript to Javadocs's -bottom parameter. Also update Prettify to latest version to fix Google Chrome issue.

Posted by no...@apache.org.
LUCENE-7651: Fix Javadocs build for Java 8u121 by injecting "Google Code Prettify" without adding Javascript to Javadocs's -bottom parameter. Also update Prettify to latest version to fix Google Chrome issue.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ee5a3601
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ee5a3601
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ee5a3601

Branch: refs/heads/apiv2
Commit: ee5a36011220bd2a7a8e45de27d5321cc7610bff
Parents: 0f7990b
Author: Uwe Schindler <us...@apache.org>
Authored: Sun Jan 22 18:29:01 2017 +0100
Committer: Uwe Schindler <us...@apache.org>
Committed: Sun Jan 22 18:29:01 2017 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                       |  7 +++
 lucene/common-build.xml                  | 24 +++----
 lucene/tools/prettify/inject-javadocs.js | 27 ++++++++
 lucene/tools/prettify/lang-apollo.js     | 18 ------
 lucene/tools/prettify/lang-css.js        | 18 ------
 lucene/tools/prettify/lang-hs.js         | 18 ------
 lucene/tools/prettify/lang-lisp.js       | 19 ------
 lucene/tools/prettify/lang-lua.js        | 18 ------
 lucene/tools/prettify/lang-ml.js         | 18 ------
 lucene/tools/prettify/lang-proto.js      | 17 -----
 lucene/tools/prettify/lang-sql.js        | 18 ------
 lucene/tools/prettify/lang-vb.js         | 18 ------
 lucene/tools/prettify/lang-wiki.js       | 18 ------
 lucene/tools/prettify/prettify.css       | 30 ++++-----
 lucene/tools/prettify/prettify.js        | 90 ++++++++++++++-------------
 15 files changed, 102 insertions(+), 256 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 147b0e0..4e90526 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -109,6 +109,13 @@ Optimizations
   match the range on single-valued fields when more than half the documents in
   the index would match. (Adrien Grand)
 
+Build
+
+* LUCENE-7651: Fix Javadocs build for Java 8u121 by injecting "Google Code
+  Prettify" without adding Javascript to Javadocs's -bottom parameter.
+  Also update Prettify to latest version to fix Google Chrome issue.
+  (Uwe Schindler)
+
 ======================= Lucene 6.4.0 =======================
 
 API Changes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/common-build.xml
----------------------------------------------------------------------
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 48cf457..3b4c342 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -2078,7 +2078,7 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
       </condition>
       <antcall target="download-java8-javadoc-packagelist"/>
       <delete file="@{destdir}/stylesheet.css" failonerror="false"/>
-      <copy todir="@{destdir}" file="${prettify.dir}/prettify.js" overwrite="false" />
+      <delete file="@{destdir}/script.js" failonerror="false"/>
       <record name="@{destdir}/log_javadoc.txt" action="start" append="no"/>
       <javadoc
           overview="@{overview}"
@@ -2107,20 +2107,6 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
         <link offline="true" href="${javadoc.link}" packagelistLoc="${javadoc.packagelist.dir}/java8"/>
         <bottom><![CDATA[
           <i>Copyright &copy; ${year} Apache Software Foundation.  All Rights Reserved.</i>
-          <script src='{@docRoot}/prettify.js' type='text/javascript'></script>
-          <script type='text/javascript'>
-            (function(){
-              var oldonload = window.onload;
-              if (typeof oldonload != 'function') {
-                window.onload = prettyPrint;
-              } else {
-                window.onload = function() {
-                  oldonload();
-                  prettyPrint();
-                }
-              }
-            })();
-          </script>
         ]]></bottom>
         
         <sources />
@@ -2131,10 +2117,14 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
       </javadoc>
       <record name="@{destdir}/log_javadoc.txt" action="stop"/>
       
-      <!-- append prettify.css -->
-      <concat destfile="@{destdir}/stylesheet.css" append="true">
+      <!-- append prettify to scripts and css -->
+      <concat destfile="@{destdir}/stylesheet.css" append="true" fixlastline="true" encoding="UTF-8">
         <filelist dir="${prettify.dir}" files="prettify.css"/>
       </concat>
+      <concat destfile="@{destdir}/script.js" append="true" fixlastline="true" encoding="UTF-8">
+        <filelist dir="${prettify.dir}" files="prettify.js inject-javadocs.js"/>
+      </concat>
+      <fixcrlf srcdir="@{destdir}" includes="stylesheet.css script.js" eol="lf" fixlast="true" encoding="UTF-8" />
 
       <delete>
         <fileset file="@{destdir}/log_javadoc.txt">

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/inject-javadocs.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/inject-javadocs.js b/lucene/tools/prettify/inject-javadocs.js
new file mode 100644
index 0000000..77f6a4b
--- /dev/null
+++ b/lucene/tools/prettify/inject-javadocs.js
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+!function(){
+  var oldonload = window.onload;
+  if (typeof oldonload != 'function') {
+    window.onload = prettyPrint;
+  } else {
+    window.onload = function() {
+      oldonload();
+      prettyPrint();
+    }
+  }
+}();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-apollo.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-apollo.js b/lucene/tools/prettify/lang-apollo.js
deleted file mode 100644
index a9e4597..0000000
--- a/lucene/tools/prettify/lang-apollo.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.createSimpleLexer([["com",/^#[^\n\r]*/,null,"#"],["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \ufffd\xa0"],["str",/^"(?:[^"\\]|\\[\S\s])*(?:"|$)/,null,'"']],[["kwd",/^(?:ADS|AD|AUG|BZF|BZMF|CAE|CAF|CA|CCS|COM|CS|DAS|DCA|DCOM|DCS|DDOUBL|DIM|DOUBLE|DTCB|DTCF|DV|DXCH|EDRUPT|EXTEND|INCR|INDEX|NDX|INHINT|LXCH|MASK|MSK|MP|MSU|NOOP|OVSK|QXCH|RAND|READ|RELINT|RESUME|RETURN|ROR|RXOR|SQUARE|SU|TCR|TCAA|OVSK|TCF|TC|TS|WAND|WOR|WRITE|XCH|XLQ|XXALQ|ZL|ZQ|ADD|ADZ|SUB|SUZ|MPY|MPR|MPZ|DVP|COM|ABS|CLA|CLZ|LDQ|STO|STQ|ALS|LLS|LRS|TRA|TSQ|TMI|TOV|AXT|TIX|DLY|INP|OUT)\s/,
-null],["typ",/^(?:-?GENADR|=MINUS|2BCADR|VN|BOF|MM|-?2CADR|-?[1-6]DNADR|ADRES|BBCON|[ES]?BANK=?|BLOCK|BNKSUM|E?CADR|COUNT\*?|2?DEC\*?|-?DNCHAN|-?DNPTR|EQUALS|ERASE|MEMORY|2?OCT|REMADR|SETLOC|SUBRO|ORG|BSS|BES|SYN|EQU|DEFINE|END)\s/,null],["lit",/^'(?:-*(?:\w|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?)?/],["pln",/^-*(?:[!-z]|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?/],["pun",/^[^\w\t\n\r "'-);\\\xa0]+/]]),["apollo","agc","aea"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-css.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-css.js b/lucene/tools/prettify/lang-css.js
deleted file mode 100644
index e937457..0000000
--- a/lucene/tools/prettify/lang-css.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\f\r ]+/,null," \t\r\n"]],[["str",/^"(?:[^\n\f\r"\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*"/,null],["str",/^'(?:[^\n\f\r'\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*'/,null],["lang-css-str",/^url\(([^"')]*)\)/i],["kwd",/^(?:url|rgb|!important|@import|@page|@media|@charset|inherit)(?=[^\w-]|$)/i,null],["lang-css-kw",/^(-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*)\s*:/i],["com",/^\/\*[^*]*\*+(?:[^*/][^*]*\*+)*\//],["com",
-/^(?:<\!--|--\>)/],["lit",/^(?:\d+|\d*\.\d+)(?:%|[a-z]+)?/i],["lit",/^#[\da-f]{3,6}/i],["pln",/^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i],["pun",/^[^\s\w"']+/]]),["css"]);PR.registerLangHandler(PR.createSimpleLexer([],[["kwd",/^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i]]),["css-kw"]);PR.registerLangHandler(PR.createSimpleLexer([],[["str",/^[^"')]+/]]),["css-str"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-hs.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-hs.js b/lucene/tools/prettify/lang-hs.js
deleted file mode 100644
index 0858e5c..0000000
--- a/lucene/tools/prettify/lang-hs.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t-\r ]+/,null,"\t\n\r "],["str",/^"(?:[^\n\f\r"\\]|\\[\S\s])*(?:"|$)/,null,'"'],["str",/^'(?:[^\n\f\r'\\]|\\[^&])'?/,null,"'"],["lit",/^(?:0o[0-7]+|0x[\da-f]+|\d+(?:\.\d+)?(?:e[+-]?\d+)?)/i,null,"0123456789"]],[["com",/^(?:--+[^\n\f\r]*|{-(?:[^-]|-+[^}-])*-})/],["kwd",/^(?:case|class|data|default|deriving|do|else|if|import|in|infix|infixl|infixr|instance|let|module|newtype|of|then|type|where|_)(?=[^\d'A-Za-z]|$)/,
-null],["pln",/^(?:[A-Z][\w']*\.)*[A-Za-z][\w']*/],["pun",/^[^\d\t-\r "'A-Za-z]+/]]),["hs"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-lisp.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-lisp.js b/lucene/tools/prettify/lang-lisp.js
deleted file mode 100644
index dc7fa01..0000000
--- a/lucene/tools/prettify/lang-lisp.js
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var a=null;
-PR.registerLangHandler(PR.createSimpleLexer([["opn",/^\(+/,a,"("],["clo",/^\)+/,a,")"],["com",/^;[^\n\r]*/,a,";"],["pln",/^[\t\n\r \xa0]+/,a,"\t\n\r \xa0"],["str",/^"(?:[^"\\]|\\[\S\s])*(?:"|$)/,a,'"']],[["kwd",/^(?:block|c[ad]+r|catch|con[ds]|def(?:ine|un)|do|eq|eql|equal|equalp|eval-when|flet|format|go|if|labels|lambda|let|load-time-value|locally|macrolet|multiple-value-call|nil|progn|progv|quote|require|return-from|setq|symbol-macrolet|t|tagbody|the|throw|unwind)\b/,a],
-["lit",/^[+-]?(?:[#0]x[\da-f]+|\d+\/\d+|(?:\.\d+|\d+(?:\.\d*)?)(?:[de][+-]?\d+)?)/i],["lit",/^'(?:-*(?:\w|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?)?/],["pln",/^-*(?:[_a-z]|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?/i],["pun",/^[^\w\t\n\r "'-);\\\xa0]+/]]),["cl","el","lisp","scm"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-lua.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-lua.js b/lucene/tools/prettify/lang-lua.js
deleted file mode 100644
index f02011e..0000000
--- a/lucene/tools/prettify/lang-lua.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \ufffd\xa0"],["str",/^(?:"(?:[^"\\]|\\[\S\s])*(?:"|$)|'(?:[^'\\]|\\[\S\s])*(?:'|$))/,null,"\"'"]],[["com",/^--(?:\[(=*)\[[\S\s]*?(?:]\1]|$)|[^\n\r]*)/],["str",/^\[(=*)\[[\S\s]*?(?:]\1]|$)/],["kwd",/^(?:and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b/,null],["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],
-["pln",/^[_a-z]\w*/i],["pun",/^[^\w\t\n\r \xa0][^\w\t\n\r "'+=\xa0-]*/]]),["lua"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-ml.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-ml.js b/lucene/tools/prettify/lang-ml.js
deleted file mode 100644
index 6d17e8b..0000000
--- a/lucene/tools/prettify/lang-ml.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \ufffd\xa0"],["com",/^#(?:if[\t\n\r \xa0]+(?:[$_a-z][\w']*|``[^\t\n\r`]*(?:``|$))|else|endif|light)/i,null,"#"],["str",/^(?:"(?:[^"\\]|\\[\S\s])*(?:"|$)|'(?:[^'\\]|\\[\S\s])(?:'|$))/,null,"\"'"]],[["com",/^(?:\/\/[^\n\r]*|\(\*[\S\s]*?\*\))/],["kwd",/^(?:abstract|and|as|assert|begin|class|default|delegate|do|done|downcast|downto|elif|else|end|exception|extern|false|finally|for|fun|function|if|in|inherit|inline|interface|internal|lazy|let|match|member|module|mutable|namespace|new|null|of|open|or|override|private|public|rec|return|static|struct|then|to|true|try|type|upcast|use|val|void|when|while|with|yield|asr|land|lor|lsl|lsr|lxor|mod|sig|atomic|break|checked|component|const|constraint|constructor|continue|eager|event|external|fixed|functor|global|include|method|mixin|object|parallel|process|protected|pure|sealed|trait|virtual|volatile)\b/],
-["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],["pln",/^(?:[_a-z][\w']*[!#?]?|``[^\t\n\r`]*(?:``|$))/i],["pun",/^[^\w\t\n\r "'\xa0]+/]]),["fs","ml"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-proto.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-proto.js b/lucene/tools/prettify/lang-proto.js
deleted file mode 100644
index 741a438..0000000
--- a/lucene/tools/prettify/lang-proto.js
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.sourceDecorator({keywords:"bytes,default,double,enum,extend,extensions,false,group,import,max,message,option,optional,package,repeated,required,returns,rpc,service,syntax,to,true",types:/^(bool|(double|s?fixed|[su]?int)(32|64)|float|string)\b/,cStyleComments:!0}),["proto"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-sql.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-sql.js b/lucene/tools/prettify/lang-sql.js
deleted file mode 100644
index 09d6558..0000000
--- a/lucene/tools/prettify/lang-sql.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \ufffd\xa0"],["str",/^(?:"(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*')/,null,"\"'"]],[["com",/^(?:--[^\n\r]*|\/\*[\S\s]*?(?:\*\/|$))/],["kwd",/^(?:add|all|alter|and|any|as|asc|authorization|backup|begin|between|break|browse|bulk|by|cascade|case|check|checkpoint|close|clustered|coalesce|collate|column|commit|compute|constraint|contains|containstable|continue|convert|create|cross|current|current_date|current_time|current_timestamp|current_user|cursor|database|dbcc|deallocate|declare|default|delete|deny|desc|disk|distinct|distributed|double|drop|dummy|dump|else|end|errlvl|escape|except|exec|execute|exists|exit|fetch|file|fillfactor|for|foreign|freetext|freetexttable|from|full|function|goto|grant|group|having|holdlock|identity|identitycol|identity_insert|if|in|index|inner|insert|intersect|into|is|join|key|kill|left|like|lineno|load|match|merge|national|nocheck|nonclustered|not|null|nullif|of|off|offsets|on|open|opendatasource|openquery|openrowset|openxml|option|or|order|outer|over|percent|plan|precision|primary|print|proc|procedure|public|raiserror|read|readtext|reconfigure|references|replication|restore|restrict|return|revoke|right|rollback|rowcount|rowguidcol|rule|save|schema|select|session_user|set|setuser|shutdown|some|statistics|system_user|table|textsize|then|to|top|tran|transaction|trigger|truncate|tsequal|union|unique|update|updatetext|use|user|using|values|varying|view|waitfor|when|where|while|with|writetext)(?=[^\w-]|$)/i,
-null],["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],["pln",/^[_a-z][\w-]*/i],["pun",/^[^\w\t\n\r "'\xa0][^\w\t\n\r "'+\xa0-]*/]]),["sql"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-vb.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-vb.js b/lucene/tools/prettify/lang-vb.js
deleted file mode 100644
index dad809e..0000000
--- a/lucene/tools/prettify/lang-vb.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0\u2028\u2029]+/,null,"\t\n\r \ufffd\xa0\u2028\u2029"],["str",/^(?:["\u201c\u201d](?:[^"\u201c\u201d]|["\u201c\u201d]{2})(?:["\u201c\u201d]c|$)|["\u201c\u201d](?:[^"\u201c\u201d]|["\u201c\u201d]{2})*(?:["\u201c\u201d]|$))/i,null,'"\u201c\u201d'],["com",/^['\u2018\u2019].*/,null,"'\u2018\u2019"]],[["kwd",/^(?:addhandler|addressof|alias|and|andalso|ansi|as|assembly|auto|boolean|byref|byte|byval|call|case|catch|cbool|cbyte|cchar|cdate|cdbl|cdec|char|cint|class|clng|cobj|const|cshort|csng|cstr|ctype|date|decimal|declare|default|delegate|dim|directcast|do|double|each|else|elseif|end|endif|enum|erase|error|event|exit|finally|for|friend|function|get|gettype|gosub|goto|handles|if|implements|imports|in|inherits|integer|interface|is|let|lib|like|long|loop|me|mod|module|mustinherit|mustoverride|mybase|myclass|namespace|new|next|not|notinheritable|notoverridable|object|on|option|optional|or|orelse|overloads|overridable|overrides|paramarray|preserve|private|property|protected|public|raiseevent|readonly|redim|removehandler|resume|return|select|set|shadows|shared|short|single|static|step|stop|string|structure|sub|synclock|then|throw|to|try|typeof|unicode|until|variant|wend|when|while|with|withevents|writeonly|xor|endif|gosub|let|variant|wend)\b/i,
-null],["com",/^rem.*/i],["lit",/^(?:true\b|false\b|nothing\b|\d+(?:e[+-]?\d+[dfr]?|[dfilrs])?|(?:&h[\da-f]+|&o[0-7]+)[ils]?|\d*\.\d+(?:e[+-]?\d+)?[dfr]?|#\s+(?:\d+[/-]\d+[/-]\d+(?:\s+\d+:\d+(?::\d+)?(\s*(?:am|pm))?)?|\d+:\d+(?::\d+)?(\s*(?:am|pm))?)\s+#)/i],["pln",/^(?:(?:[a-z]|_\w)\w*|\[(?:[a-z]|_\w)\w*])/i],["pun",/^[^\w\t\n\r "'[\]\xa0\u2018\u2019\u201c\u201d\u2028\u2029]+/],["pun",/^(?:\[|])/]]),["vb","vbs"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/lang-wiki.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/lang-wiki.js b/lucene/tools/prettify/lang-wiki.js
deleted file mode 100644
index d43b74f..0000000
--- a/lucene/tools/prettify/lang-wiki.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\d\t a-gi-z\xa0]+/,null,"\t \ufffd\xa0abcdefgijklmnopqrstuvwxyz0123456789"],["pun",/^[*=[\]^~]+/,null,"=*~^[]"]],[["lang-wiki.meta",/(?:^^|\r\n?|\n)(#[a-z]+)\b/],["lit",/^[A-Z][a-z][\da-z]+[A-Z][a-z][^\W_]+\b/],["lang-",/^{{{([\S\s]+?)}}}/],["lang-",/^`([^\n\r`]+)`/],["str",/^https?:\/\/[^\s#/?]*(?:\/[^\s#?]*)?(?:\?[^\s#]*)?(?:#\S*)?/i],["pln",/^(?:\r\n|[\S\s])[^\n\r#*=A-[^`h{~]*/]]),["wiki"]);
-PR.registerLangHandler(PR.createSimpleLexer([["kwd",/^#[a-z]+/i,null,"#"]],[]),["wiki.meta"]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/prettify.css
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/prettify.css b/lucene/tools/prettify/prettify.css
index 98f5851..33f11bb 100644
--- a/lucene/tools/prettify/prettify.css
+++ b/lucene/tools/prettify/prettify.css
@@ -1,17 +1,17 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+
+ Copyright (C) 2006 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
 .pln{color:#000}@media screen{.str{color:#080}.kwd{color:#008}.com{color:#800}.typ{color:#606}.lit{color:#066}.pun,.opn,.clo{color:#660}.tag{color:#008}.atn{color:#606}.atv{color:#080}.dec,.var{color:#606}.fun{color:red}}@media print,projection{.str{color:#060}.kwd{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{color:#404;font-weight:bold}.lit{color:#044}.pun,.opn,.clo{color:#440}.tag{color:#006;font-weight:bold}.atn{color:#404}.atv{color:#060}}pre.prettyprint{padding:2px;border:1px solid #888}ol.linenums{margin-top:0;margin-bottom:0}li.L0,li.L1,li.L2,li.L3,li.L5,li.L6,li.L7,li.L8{list-style-type:none}li.L1,li.L3,li.L5,li.L7,li.L9{background:#eee}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ee5a3601/lucene/tools/prettify/prettify.js
----------------------------------------------------------------------
diff --git a/lucene/tools/prettify/prettify.js b/lucene/tools/prettify/prettify.js
index dc35c65..3b74b5b 100644
--- a/lucene/tools/prettify/prettify.js
+++ b/lucene/tools/prettify/prettify.js
@@ -1,44 +1,46 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var q=null;window.PR_SHOULD_USE_CONTINUATION=!0;
-(function(){function L(a){function m(a){var f=a.charCodeAt(0);if(f!==92)return f;var b=a.charAt(1);return(f=r[b])?f:"0"<=b&&b<="7"?parseInt(a.substring(1),8):b==="u"||b==="x"?parseInt(a.substring(2),16):a.charCodeAt(1)}function e(a){if(a<32)return(a<16?"\\x0":"\\x")+a.toString(16);a=String.fromCharCode(a);if(a==="\\"||a==="-"||a==="["||a==="]")a="\\"+a;return a}function h(a){for(var f=a.substring(1,a.length-1).match(/\\u[\dA-Fa-f]{4}|\\x[\dA-Fa-f]{2}|\\[0-3][0-7]{0,2}|\\[0-7]{1,2}|\\[\S\s]|[^\\]/g),a=
-[],b=[],o=f[0]==="^",c=o?1:0,i=f.length;c<i;++c){var j=f[c];if(/\\[bdsw]/i.test(j))a.push(j);else{var j=m(j),d;c+2<i&&"-"===f[c+1]?(d=m(f[c+2]),c+=2):d=j;b.push([j,d]);d<65||j>122||(d<65||j>90||b.push([Math.max(65,j)|32,Math.min(d,90)|32]),d<97||j>122||b.push([Math.max(97,j)&-33,Math.min(d,122)&-33]))}}b.sort(function(a,f){return a[0]-f[0]||f[1]-a[1]});f=[];j=[NaN,NaN];for(c=0;c<b.length;++c)i=b[c],i[0]<=j[1]+1?j[1]=Math.max(j[1],i[1]):f.push(j=i);b=["["];o&&b.push("^");b.push.apply(b,a);for(c=0;c<
-f.length;++c)i=f[c],b.push(e(i[0])),i[1]>i[0]&&(i[1]+1>i[0]&&b.push("-"),b.push(e(i[1])));b.push("]");return b.join("")}function y(a){for(var f=a.source.match(/\[(?:[^\\\]]|\\[\S\s])*]|\\u[\dA-Fa-f]{4}|\\x[\dA-Fa-f]{2}|\\\d+|\\[^\dux]|\(\?[!:=]|[()^]|[^()[\\^]+/g),b=f.length,d=[],c=0,i=0;c<b;++c){var j=f[c];j==="("?++i:"\\"===j.charAt(0)&&(j=+j.substring(1))&&j<=i&&(d[j]=-1)}for(c=1;c<d.length;++c)-1===d[c]&&(d[c]=++t);for(i=c=0;c<b;++c)j=f[c],j==="("?(++i,d[i]===void 0&&(f[c]="(?:")):"\\"===j.charAt(0)&&
-(j=+j.substring(1))&&j<=i&&(f[c]="\\"+d[i]);for(i=c=0;c<b;++c)"^"===f[c]&&"^"!==f[c+1]&&(f[c]="");if(a.ignoreCase&&s)for(c=0;c<b;++c)j=f[c],a=j.charAt(0),j.length>=2&&a==="["?f[c]=h(j):a!=="\\"&&(f[c]=j.replace(/[A-Za-z]/g,function(a){a=a.charCodeAt(0);return"["+String.fromCharCode(a&-33,a|32)+"]"}));return f.join("")}for(var t=0,s=!1,l=!1,p=0,d=a.length;p<d;++p){var g=a[p];if(g.ignoreCase)l=!0;else if(/[a-z]/i.test(g.source.replace(/\\u[\da-f]{4}|\\x[\da-f]{2}|\\[^UXux]/gi,""))){s=!0;l=!1;break}}for(var r=
-{b:8,t:9,n:10,v:11,f:12,r:13},n=[],p=0,d=a.length;p<d;++p){g=a[p];if(g.global||g.multiline)throw Error(""+g);n.push("(?:"+y(g)+")")}return RegExp(n.join("|"),l?"gi":"g")}function M(a){function m(a){switch(a.nodeType){case 1:if(e.test(a.className))break;for(var g=a.firstChild;g;g=g.nextSibling)m(g);g=a.nodeName;if("BR"===g||"LI"===g)h[s]="\n",t[s<<1]=y++,t[s++<<1|1]=a;break;case 3:case 4:g=a.nodeValue,g.length&&(g=p?g.replace(/\r\n?/g,"\n"):g.replace(/[\t\n\r ]+/g," "),h[s]=g,t[s<<1]=y,y+=g.length,
-t[s++<<1|1]=a)}}var e=/(?:^|\s)nocode(?:\s|$)/,h=[],y=0,t=[],s=0,l;a.currentStyle?l=a.currentStyle.whiteSpace:window.getComputedStyle&&(l=document.defaultView.getComputedStyle(a,q).getPropertyValue("white-space"));var p=l&&"pre"===l.substring(0,3);m(a);return{a:h.join("").replace(/\n$/,""),c:t}}function B(a,m,e,h){m&&(a={a:m,d:a},e(a),h.push.apply(h,a.e))}function x(a,m){function e(a){for(var l=a.d,p=[l,"pln"],d=0,g=a.a.match(y)||[],r={},n=0,z=g.length;n<z;++n){var f=g[n],b=r[f],o=void 0,c;if(typeof b===
-"string")c=!1;else{var i=h[f.charAt(0)];if(i)o=f.match(i[1]),b=i[0];else{for(c=0;c<t;++c)if(i=m[c],o=f.match(i[1])){b=i[0];break}o||(b="pln")}if((c=b.length>=5&&"lang-"===b.substring(0,5))&&!(o&&typeof o[1]==="string"))c=!1,b="src";c||(r[f]=b)}i=d;d+=f.length;if(c){c=o[1];var j=f.indexOf(c),k=j+c.length;o[2]&&(k=f.length-o[2].length,j=k-c.length);b=b.substring(5);B(l+i,f.substring(0,j),e,p);B(l+i+j,c,C(b,c),p);B(l+i+k,f.substring(k),e,p)}else p.push(l+i,b)}a.e=p}var h={},y;(function(){for(var e=a.concat(m),
-l=[],p={},d=0,g=e.length;d<g;++d){var r=e[d],n=r[3];if(n)for(var k=n.length;--k>=0;)h[n.charAt(k)]=r;r=r[1];n=""+r;p.hasOwnProperty(n)||(l.push(r),p[n]=q)}l.push(/[\S\s]/);y=L(l)})();var t=m.length;return e}function u(a){var m=[],e=[];a.tripleQuotedStrings?m.push(["str",/^(?:'''(?:[^'\\]|\\[\S\s]|''?(?=[^']))*(?:'''|$)|"""(?:[^"\\]|\\[\S\s]|""?(?=[^"]))*(?:"""|$)|'(?:[^'\\]|\\[\S\s])*(?:'|$)|"(?:[^"\\]|\\[\S\s])*(?:"|$))/,q,"'\""]):a.multiLineStrings?m.push(["str",/^(?:'(?:[^'\\]|\\[\S\s])*(?:'|$)|"(?:[^"\\]|\\[\S\s])*(?:"|$)|`(?:[^\\`]|\\[\S\s])*(?:`|$))/,
-q,"'\"`"]):m.push(["str",/^(?:'(?:[^\n\r'\\]|\\.)*(?:'|$)|"(?:[^\n\r"\\]|\\.)*(?:"|$))/,q,"\"'"]);a.verbatimStrings&&e.push(["str",/^@"(?:[^"]|"")*(?:"|$)/,q]);var h=a.hashComments;h&&(a.cStyleComments?(h>1?m.push(["com",/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,q,"#"]):m.push(["com",/^#(?:(?:define|elif|else|endif|error|ifdef|include|ifndef|line|pragma|undef|warning)\b|[^\n\r]*)/,q,"#"]),e.push(["str",/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h|[a-z]\w*)>/,q])):m.push(["com",/^#[^\n\r]*/,
-q,"#"]));a.cStyleComments&&(e.push(["com",/^\/\/[^\n\r]*/,q]),e.push(["com",/^\/\*[\S\s]*?(?:\*\/|$)/,q]));a.regexLiterals&&e.push(["lang-regex",/^(?:^^\.?|[!+-]|!=|!==|#|%|%=|&|&&|&&=|&=|\(|\*|\*=|\+=|,|-=|->|\/|\/=|:|::|;|<|<<|<<=|<=|=|==|===|>|>=|>>|>>=|>>>|>>>=|[?@[^]|\^=|\^\^|\^\^=|{|\||\|=|\|\||\|\|=|~|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\s*(\/(?=[^*/])(?:[^/[\\]|\\[\S\s]|\[(?:[^\\\]]|\\[\S\s])*(?:]|$))+\/)/]);(h=a.types)&&e.push(["typ",h]);a=(""+a.keywords).replace(/^ | $/g,
-"");a.length&&e.push(["kwd",RegExp("^(?:"+a.replace(/[\s,]+/g,"|")+")\\b"),q]);m.push(["pln",/^\s+/,q," \r\n\t\xa0"]);e.push(["lit",/^@[$_a-z][\w$@]*/i,q],["typ",/^(?:[@_]?[A-Z]+[a-z][\w$@]*|\w+_t\b)/,q],["pln",/^[$_a-z][\w$@]*/i,q],["lit",/^(?:0x[\da-f]+|(?:\d(?:_\d+)*\d*(?:\.\d*)?|\.\d\+)(?:e[+-]?\d+)?)[a-z]*/i,q,"0123456789"],["pln",/^\\[\S\s]?/,q],["pun",/^.[^\s\w"-$'./@\\`]*/,q]);return x(m,e)}function D(a,m){function e(a){switch(a.nodeType){case 1:if(k.test(a.className))break;if("BR"===a.nodeName)h(a),
-a.parentNode&&a.parentNode.removeChild(a);else for(a=a.firstChild;a;a=a.nextSibling)e(a);break;case 3:case 4:if(p){var b=a.nodeValue,d=b.match(t);if(d){var c=b.substring(0,d.index);a.nodeValue=c;(b=b.substring(d.index+d[0].length))&&a.parentNode.insertBefore(s.createTextNode(b),a.nextSibling);h(a);c||a.parentNode.removeChild(a)}}}}function h(a){function b(a,d){var e=d?a.cloneNode(!1):a,f=a.parentNode;if(f){var f=b(f,1),g=a.nextSibling;f.appendChild(e);for(var h=g;h;h=g)g=h.nextSibling,f.appendChild(h)}return e}
-for(;!a.nextSibling;)if(a=a.parentNode,!a)return;for(var a=b(a.nextSibling,0),e;(e=a.parentNode)&&e.nodeType===1;)a=e;d.push(a)}var k=/(?:^|\s)nocode(?:\s|$)/,t=/\r\n?|\n/,s=a.ownerDocument,l;a.currentStyle?l=a.currentStyle.whiteSpace:window.getComputedStyle&&(l=s.defaultView.getComputedStyle(a,q).getPropertyValue("white-space"));var p=l&&"pre"===l.substring(0,3);for(l=s.createElement("LI");a.firstChild;)l.appendChild(a.firstChild);for(var d=[l],g=0;g<d.length;++g)e(d[g]);m===(m|0)&&d[0].setAttribute("value",
-m);var r=s.createElement("OL");r.className="linenums";for(var n=Math.max(0,m-1|0)||0,g=0,z=d.length;g<z;++g)l=d[g],l.className="L"+(g+n)%10,l.firstChild||l.appendChild(s.createTextNode("\xa0")),r.appendChild(l);a.appendChild(r)}function k(a,m){for(var e=m.length;--e>=0;){var h=m[e];A.hasOwnProperty(h)?window.console&&console.warn("cannot override language handler %s",h):A[h]=a}}function C(a,m){if(!a||!A.hasOwnProperty(a))a=/^\s*</.test(m)?"default-markup":"default-code";return A[a]}function E(a){var m=
-a.g;try{var e=M(a.h),h=e.a;a.a=h;a.c=e.c;a.d=0;C(m,h)(a);var k=/\bMSIE\b/.test(navigator.userAgent),m=/\n/g,t=a.a,s=t.length,e=0,l=a.c,p=l.length,h=0,d=a.e,g=d.length,a=0;d[g]=s;var r,n;for(n=r=0;n<g;)d[n]!==d[n+2]?(d[r++]=d[n++],d[r++]=d[n++]):n+=2;g=r;for(n=r=0;n<g;){for(var z=d[n],f=d[n+1],b=n+2;b+2<=g&&d[b+1]===f;)b+=2;d[r++]=z;d[r++]=f;n=b}for(d.length=r;h<p;){var o=l[h+2]||s,c=d[a+2]||s,b=Math.min(o,c),i=l[h+1],j;if(i.nodeType!==1&&(j=t.substring(e,b))){k&&(j=j.replace(m,"\r"));i.nodeValue=
-j;var u=i.ownerDocument,v=u.createElement("SPAN");v.className=d[a+1];var x=i.parentNode;x.replaceChild(v,i);v.appendChild(i);e<o&&(l[h+1]=i=u.createTextNode(t.substring(b,o)),x.insertBefore(i,v.nextSibling))}e=b;e>=o&&(h+=2);e>=c&&(a+=2)}}catch(w){"console"in window&&console.log(w&&w.stack?w.stack:w)}}var v=["break,continue,do,else,for,if,return,while"],w=[[v,"auto,case,char,const,default,double,enum,extern,float,goto,int,long,register,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"],
-"catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"],F=[w,"alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,dynamic_cast,explicit,export,friend,inline,late_check,mutable,namespace,nullptr,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"],G=[w,"abstract,boolean,byte,extends,final,finally,implements,import,instanceof,null,native,package,strictfp,super,synchronized,throws,transient"],
-H=[G,"as,base,by,checked,decimal,delegate,descending,dynamic,event,fixed,foreach,from,group,implicit,in,interface,internal,into,is,lock,object,out,override,orderby,params,partial,readonly,ref,sbyte,sealed,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,var"],w=[w,"debugger,eval,export,function,get,null,set,undefined,var,with,Infinity,NaN"],I=[v,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"],
-J=[v,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"],v=[v,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"],K=/^(DIR|FILE|vector|(de|priority_)?queue|list|stack|(const_)?iterator|(multi)?(set|map)|bitset|u?(int|float)\d*)/,N=/\S/,O=u({keywords:[F,H,w,"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END"+
-I,J,v],hashComments:!0,cStyleComments:!0,multiLineStrings:!0,regexLiterals:!0}),A={};k(O,["default-code"]);k(x([],[["pln",/^[^<?]+/],["dec",/^<!\w[^>]*(?:>|$)/],["com",/^<\!--[\S\s]*?(?:--\>|$)/],["lang-",/^<\?([\S\s]+?)(?:\?>|$)/],["lang-",/^<%([\S\s]+?)(?:%>|$)/],["pun",/^(?:<[%?]|[%?]>)/],["lang-",/^<xmp\b[^>]*>([\S\s]+?)<\/xmp\b[^>]*>/i],["lang-js",/^<script\b[^>]*>([\S\s]*?)(<\/script\b[^>]*>)/i],["lang-css",/^<style\b[^>]*>([\S\s]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),
-["default-markup","htm","html","mxml","xhtml","xml","xsl"]);k(x([["pln",/^\s+/,q," \t\r\n"],["atv",/^(?:"[^"]*"?|'[^']*'?)/,q,"\"'"]],[["tag",/^^<\/?[a-z](?:[\w-.:]*\w)?|\/?>$/i],["atn",/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^\s"'>]*(?:[^\s"'/>]|\/(?=\s)))/],["pun",/^[/<->]+/],["lang-js",/^on\w+\s*=\s*"([^"]+)"/i],["lang-js",/^on\w+\s*=\s*'([^']+)'/i],["lang-js",/^on\w+\s*=\s*([^\s"'>]+)/i],["lang-css",/^style\s*=\s*"([^"]+)"/i],["lang-css",/^style\s*=\s*'([^']+)'/i],["lang-css",
-/^style\s*=\s*([^\s"'>]+)/i]]),["in.tag"]);k(x([],[["atv",/^[\S\s]+/]]),["uq.val"]);k(u({keywords:F,hashComments:!0,cStyleComments:!0,types:K}),["c","cc","cpp","cxx","cyc","m"]);k(u({keywords:"null,true,false"}),["json"]);k(u({keywords:H,hashComments:!0,cStyleComments:!0,verbatimStrings:!0,types:K}),["cs"]);k(u({keywords:G,cStyleComments:!0}),["java"]);k(u({keywords:v,hashComments:!0,multiLineStrings:!0}),["bsh","csh","sh"]);k(u({keywords:I,hashComments:!0,multiLineStrings:!0,tripleQuotedStrings:!0}),
-["cv","py"]);k(u({keywords:"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["perl","pl","pm"]);k(u({keywords:J,hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["rb"]);k(u({keywords:w,cStyleComments:!0,regexLiterals:!0}),["js"]);k(u({keywords:"all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,true,try,unless,until,when,while,yes",
-hashComments:3,cStyleComments:!0,multilineStrings:!0,tripleQuotedStrings:!0,regexLiterals:!0}),["coffee"]);k(x([],[["str",/^[\S\s]+/]]),["regex"]);window.prettyPrintOne=function(a,m,e){var h=document.createElement("PRE");h.innerHTML=a;e&&D(h,e);E({g:m,i:e,h:h});return h.innerHTML};window.prettyPrint=function(a){function m(){for(var e=window.PR_SHOULD_USE_CONTINUATION?l.now()+250:Infinity;p<h.length&&l.now()<e;p++){var n=h[p],k=n.className;if(k.indexOf("prettyprint")>=0){var k=k.match(g),f,b;if(b=
-!k){b=n;for(var o=void 0,c=b.firstChild;c;c=c.nextSibling)var i=c.nodeType,o=i===1?o?b:c:i===3?N.test(c.nodeValue)?b:o:o;b=(f=o===b?void 0:o)&&"CODE"===f.tagName}b&&(k=f.className.match(g));k&&(k=k[1]);b=!1;for(o=n.parentNode;o;o=o.parentNode)if((o.tagName==="pre"||o.tagName==="code"||o.tagName==="xmp")&&o.className&&o.className.indexOf("prettyprint")>=0){b=!0;break}b||((b=(b=n.className.match(/\blinenums\b(?::(\d+))?/))?b[1]&&b[1].length?+b[1]:!0:!1)&&D(n,b),d={g:k,h:n,i:b},E(d))}}p<h.length?setTimeout(m,
-250):a&&a()}for(var e=[document.getElementsByTagName("pre"),document.getElementsByTagName("code"),document.getElementsByTagName("xmp")],h=[],k=0;k<e.length;++k)for(var t=0,s=e[k].length;t<s;++t)h.push(e[k][t]);var e=q,l=Date;l.now||(l={now:function(){return+new Date}});var p=0,d,g=/\blang(?:uage)?-([\w.]+)(?!\S)/;m()};window.PR={createSimpleLexer:x,registerLangHandler:k,sourceDecorator:u,PR_ATTRIB_NAME:"atn",PR_ATTRIB_VALUE:"atv",PR_COMMENT:"com",PR_DECLARATION:"dec",PR_KEYWORD:"kwd",PR_LITERAL:"lit",
-PR_NOCODE:"nocode",PR_PLAIN:"pln",PR_PUNCTUATION:"pun",PR_SOURCE:"src",PR_STRING:"str",PR_TAG:"tag",PR_TYPE:"typ"}})();
+!function(){/*
+
+ Copyright (C) 2006 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+window.PR_SHOULD_USE_CONTINUATION=!0;
+(function(){function T(a){function d(e){var b=e.charCodeAt(0);if(92!==b)return b;var a=e.charAt(1);return(b=w[a])?b:"0"<=a&&"7">=a?parseInt(e.substring(1),8):"u"===a||"x"===a?parseInt(e.substring(2),16):e.charCodeAt(1)}function f(e){if(32>e)return(16>e?"\\x0":"\\x")+e.toString(16);e=String.fromCharCode(e);return"\\"===e||"-"===e||"]"===e||"^"===e?"\\"+e:e}function b(e){var b=e.substring(1,e.length-1).match(/\\u[0-9A-Fa-f]{4}|\\x[0-9A-Fa-f]{2}|\\[0-3][0-7]{0,2}|\\[0-7]{1,2}|\\[\s\S]|-|[^-\\]/g);e=
+[];var a="^"===b[0],c=["["];a&&c.push("^");for(var a=a?1:0,g=b.length;a<g;++a){var h=b[a];if(/\\[bdsw]/i.test(h))c.push(h);else{var h=d(h),k;a+2<g&&"-"===b[a+1]?(k=d(b[a+2]),a+=2):k=h;e.push([h,k]);65>k||122<h||(65>k||90<h||e.push([Math.max(65,h)|32,Math.min(k,90)|32]),97>k||122<h||e.push([Math.max(97,h)&-33,Math.min(k,122)&-33]))}}e.sort(function(e,a){return e[0]-a[0]||a[1]-e[1]});b=[];g=[];for(a=0;a<e.length;++a)h=e[a],h[0]<=g[1]+1?g[1]=Math.max(g[1],h[1]):b.push(g=h);for(a=0;a<b.length;++a)h=b[a],
+c.push(f(h[0])),h[1]>h[0]&&(h[1]+1>h[0]&&c.push("-"),c.push(f(h[1])));c.push("]");return c.join("")}function v(e){for(var a=e.source.match(/(?:\[(?:[^\x5C\x5D]|\\[\s\S])*\]|\\u[A-Fa-f0-9]{4}|\\x[A-Fa-f0-9]{2}|\\[0-9]+|\\[^ux0-9]|\(\?[:!=]|[\(\)\^]|[^\x5B\x5C\(\)\^]+)/g),c=a.length,d=[],g=0,h=0;g<c;++g){var k=a[g];"("===k?++h:"\\"===k.charAt(0)&&(k=+k.substring(1))&&(k<=h?d[k]=-1:a[g]=f(k))}for(g=1;g<d.length;++g)-1===d[g]&&(d[g]=++A);for(h=g=0;g<c;++g)k=a[g],"("===k?(++h,d[h]||(a[g]="(?:")):"\\"===
+k.charAt(0)&&(k=+k.substring(1))&&k<=h&&(a[g]="\\"+d[k]);for(g=0;g<c;++g)"^"===a[g]&&"^"!==a[g+1]&&(a[g]="");if(e.ignoreCase&&n)for(g=0;g<c;++g)k=a[g],e=k.charAt(0),2<=k.length&&"["===e?a[g]=b(k):"\\"!==e&&(a[g]=k.replace(/[a-zA-Z]/g,function(a){a=a.charCodeAt(0);return"["+String.fromCharCode(a&-33,a|32)+"]"}));return a.join("")}for(var A=0,n=!1,l=!1,m=0,c=a.length;m<c;++m){var p=a[m];if(p.ignoreCase)l=!0;else if(/[a-z]/i.test(p.source.replace(/\\u[0-9a-f]{4}|\\x[0-9a-f]{2}|\\[^ux]/gi,""))){n=!0;
+l=!1;break}}for(var w={b:8,t:9,n:10,v:11,f:12,r:13},r=[],m=0,c=a.length;m<c;++m){p=a[m];if(p.global||p.multiline)throw Error(""+p);r.push("(?:"+v(p)+")")}return new RegExp(r.join("|"),l?"gi":"g")}function U(a,d){function f(a){var c=a.nodeType;if(1==c){if(!b.test(a.className)){for(c=a.firstChild;c;c=c.nextSibling)f(c);c=a.nodeName.toLowerCase();if("br"===c||"li"===c)v[l]="\n",n[l<<1]=A++,n[l++<<1|1]=a}}else if(3==c||4==c)c=a.nodeValue,c.length&&(c=d?c.replace(/\r\n?/g,"\n"):c.replace(/[ \t\r\n]+/g,
+" "),v[l]=c,n[l<<1]=A,A+=c.length,n[l++<<1|1]=a)}var b=/(?:^|\s)nocode(?:\s|$)/,v=[],A=0,n=[],l=0;f(a);return{a:v.join("").replace(/\n$/,""),c:n}}function J(a,d,f,b,v){f&&(a={h:a,l:1,j:null,m:null,a:f,c:null,i:d,g:null},b(a),v.push.apply(v,a.g))}function V(a){for(var d=void 0,f=a.firstChild;f;f=f.nextSibling)var b=f.nodeType,d=1===b?d?a:f:3===b?W.test(f.nodeValue)?a:d:d;return d===a?void 0:d}function G(a,d){function f(a){for(var l=a.i,m=a.h,c=[l,"pln"],p=0,w=a.a.match(v)||[],r={},e=0,t=w.length;e<
+t;++e){var z=w[e],q=r[z],g=void 0,h;if("string"===typeof q)h=!1;else{var k=b[z.charAt(0)];if(k)g=z.match(k[1]),q=k[0];else{for(h=0;h<A;++h)if(k=d[h],g=z.match(k[1])){q=k[0];break}g||(q="pln")}!(h=5<=q.length&&"lang-"===q.substring(0,5))||g&&"string"===typeof g[1]||(h=!1,q="src");h||(r[z]=q)}k=p;p+=z.length;if(h){h=g[1];var B=z.indexOf(h),D=B+h.length;g[2]&&(D=z.length-g[2].length,B=D-h.length);q=q.substring(5);J(m,l+k,z.substring(0,B),f,c);J(m,l+k+B,h,K(q,h),c);J(m,l+k+D,z.substring(D),f,c)}else c.push(l+
+k,q)}a.g=c}var b={},v;(function(){for(var f=a.concat(d),l=[],m={},c=0,p=f.length;c<p;++c){var w=f[c],r=w[3];if(r)for(var e=r.length;0<=--e;)b[r.charAt(e)]=w;w=w[1];r=""+w;m.hasOwnProperty(r)||(l.push(w),m[r]=null)}l.push(/[\0-\uffff]/);v=T(l)})();var A=d.length;return f}function y(a){var d=[],f=[];a.tripleQuotedStrings?d.push(["str",/^(?:\'\'\'(?:[^\'\\]|\\[\s\S]|\'{1,2}(?=[^\']))*(?:\'\'\'|$)|\"\"\"(?:[^\"\\]|\\[\s\S]|\"{1,2}(?=[^\"]))*(?:\"\"\"|$)|\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$))/,
+null,"'\""]):a.multiLineStrings?d.push(["str",/^(?:\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$)|\`(?:[^\\\`]|\\[\s\S])*(?:\`|$))/,null,"'\"`"]):d.push(["str",/^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$)|\"(?:[^\\\"\r\n]|\\.)*(?:\"|$))/,null,"\"'"]);a.verbatimStrings&&f.push(["str",/^@\"(?:[^\"]|\"\")*(?:\"|$)/,null]);var b=a.hashComments;b&&(a.cStyleComments?(1<b?d.push(["com",/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,null,"#"]):d.push(["com",/^#(?:(?:define|e(?:l|nd)if|else|error|ifn?def|include|line|pragma|undef|warning)\b|[^\r\n]*)/,
+null,"#"]),f.push(["str",/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h(?:h|pp|\+\+)?|[a-z]\w*)>/,null])):d.push(["com",/^#[^\r\n]*/,null,"#"]));a.cStyleComments&&(f.push(["com",/^\/\/[^\r\n]*/,null]),f.push(["com",/^\/\*[\s\S]*?(?:\*\/|$)/,null]));if(b=a.regexLiterals){var v=(b=1<b?"":"\n\r")?".":"[\\S\\s]";f.push(["lang-regex",RegExp("^(?:^^\\.?|[+-]|[!=]=?=?|\\#|%=?|&&?=?|\\(|\\*=?|[+\\-]=|->|\\/=?|::?|<<?=?|>>?>?=?|,|;|\\?|@|\\[|~|{|\\^\\^?=?|\\|\\|?=?|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\\s*("+
+("/(?=[^/*"+b+"])(?:[^/\\x5B\\x5C"+b+"]|\\x5C"+v+"|\\x5B(?:[^\\x5C\\x5D"+b+"]|\\x5C"+v+")*(?:\\x5D|$))+/")+")")])}(b=a.types)&&f.push(["typ",b]);b=(""+a.keywords).replace(/^ | $/g,"");b.length&&f.push(["kwd",new RegExp("^(?:"+b.replace(/[\s,]+/g,"|")+")\\b"),null]);d.push(["pln",/^\s+/,null," \r\n\t\u00a0"]);b="^.[^\\s\\w.$@'\"`/\\\\]*";a.regexLiterals&&(b+="(?!s*/)");f.push(["lit",/^@[a-z_$][a-z_$@0-9]*/i,null],["typ",/^(?:[@_]?[A-Z]+[a-z][A-Za-z_$@0-9]*|\w+_t\b)/,null],["pln",/^[a-z_$][a-z_$@0-9]*/i,
+null],["lit",/^(?:0x[a-f0-9]+|(?:\d(?:_\d+)*\d*(?:\.\d*)?|\.\d\+)(?:e[+\-]?\d+)?)[a-z]*/i,null,"0123456789"],["pln",/^\\[\s\S]?/,null],["pun",new RegExp(b),null]);return G(d,f)}function L(a,d,f){function b(a){var c=a.nodeType;if(1==c&&!A.test(a.className))if("br"===a.nodeName)v(a),a.parentNode&&a.parentNode.removeChild(a);else for(a=a.firstChild;a;a=a.nextSibling)b(a);else if((3==c||4==c)&&f){var d=a.nodeValue,q=d.match(n);q&&(c=d.substring(0,q.index),a.nodeValue=c,(d=d.substring(q.index+q[0].length))&&
+a.parentNode.insertBefore(l.createTextNode(d),a.nextSibling),v(a),c||a.parentNode.removeChild(a))}}function v(a){function b(a,c){var d=c?a.cloneNode(!1):a,k=a.parentNode;if(k){var k=b(k,1),e=a.nextSibling;k.appendChild(d);for(var f=e;f;f=e)e=f.nextSibling,k.appendChild(f)}return d}for(;!a.nextSibling;)if(a=a.parentNode,!a)return;a=b(a.nextSibling,0);for(var d;(d=a.parentNode)&&1===d.nodeType;)a=d;c.push(a)}for(var A=/(?:^|\s)nocode(?:\s|$)/,n=/\r\n?|\n/,l=a.ownerDocument,m=l.createElement("li");a.firstChild;)m.appendChild(a.firstChild);
+for(var c=[m],p=0;p<c.length;++p)b(c[p]);d===(d|0)&&c[0].setAttribute("value",d);var w=l.createElement("ol");w.className="linenums";d=Math.max(0,d-1|0)||0;for(var p=0,r=c.length;p<r;++p)m=c[p],m.className="L"+(p+d)%10,m.firstChild||m.appendChild(l.createTextNode("\u00a0")),w.appendChild(m);a.appendChild(w)}function t(a,d){for(var f=d.length;0<=--f;){var b=d[f];I.hasOwnProperty(b)?E.console&&console.warn("cannot override language handler %s",b):I[b]=a}}function K(a,d){a&&I.hasOwnProperty(a)||(a=/^\s*</.test(d)?
+"default-markup":"default-code");return I[a]}function M(a){var d=a.j;try{var f=U(a.h,a.l),b=f.a;a.a=b;a.c=f.c;a.i=0;K(d,b)(a);var v=/\bMSIE\s(\d+)/.exec(navigator.userAgent),v=v&&8>=+v[1],d=/\n/g,A=a.a,n=A.length,f=0,l=a.c,m=l.length,b=0,c=a.g,p=c.length,w=0;c[p]=n;var r,e;for(e=r=0;e<p;)c[e]!==c[e+2]?(c[r++]=c[e++],c[r++]=c[e++]):e+=2;p=r;for(e=r=0;e<p;){for(var t=c[e],z=c[e+1],q=e+2;q+2<=p&&c[q+1]===z;)q+=2;c[r++]=t;c[r++]=z;e=q}c.length=r;var g=a.h;a="";g&&(a=g.style.display,g.style.display="none");
+try{for(;b<m;){var h=l[b+2]||n,k=c[w+2]||n,q=Math.min(h,k),B=l[b+1],D;if(1!==B.nodeType&&(D=A.substring(f,q))){v&&(D=D.replace(d,"\r"));B.nodeValue=D;var N=B.ownerDocument,u=N.createElement("span");u.className=c[w+1];var y=B.parentNode;y.replaceChild(u,B);u.appendChild(B);f<h&&(l[b+1]=B=N.createTextNode(A.substring(q,h)),y.insertBefore(B,u.nextSibling))}f=q;f>=h&&(b+=2);f>=k&&(w+=2)}}finally{g&&(g.style.display=a)}}catch(x){E.console&&console.log(x&&x.stack||x)}}var E=window,C=["break,continue,do,else,for,if,return,while"],
+F=[[C,"auto,case,char,const,default,double,enum,extern,float,goto,inline,int,long,register,restrict,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"],"catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"],H=[F,"alignas,alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,delegate,dynamic_cast,explicit,export,friend,generic,late_check,mutable,namespace,noexcept,noreturn,nullptr,property,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"],
+O=[F,"abstract,assert,boolean,byte,extends,finally,final,implements,import,instanceof,interface,null,native,package,strictfp,super,synchronized,throws,transient"],P=[F,"abstract,add,alias,as,ascending,async,await,base,bool,by,byte,checked,decimal,delegate,descending,dynamic,event,finally,fixed,foreach,from,get,global,group,implicit,in,interface,internal,into,is,join,let,lock,null,object,out,override,orderby,params,partial,readonly,ref,remove,sbyte,sealed,select,set,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,value,var,virtual,where,yield"],
+F=[F,"abstract,async,await,constructor,debugger,enum,eval,export,function,get,implements,instanceof,interface,let,null,set,undefined,var,with,yield,Infinity,NaN"],Q=[C,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"],R=[C,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"],C=[C,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"],
+S=/^(DIR|FILE|array|vector|(de|priority_)?queue|(forward_)?list|stack|(const_)?(reverse_)?iterator|(unordered_)?(multi)?(set|map)|bitset|u?(int|float)\d*)\b/,W=/\S/,X=y({keywords:[H,P,O,F,"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",Q,R,C],hashComments:!0,cStyleComments:!0,multiLineStrings:!0,regexLiterals:!0}),I={};t(X,["default-code"]);t(G([],[["pln",/^[^<?]+/],["dec",
+/^<!\w[^>]*(?:>|$)/],["com",/^<\!--[\s\S]*?(?:-\->|$)/],["lang-",/^<\?([\s\S]+?)(?:\?>|$)/],["lang-",/^<%([\s\S]+?)(?:%>|$)/],["pun",/^(?:<[%?]|[%?]>)/],["lang-",/^<xmp\b[^>]*>([\s\S]+?)<\/xmp\b[^>]*>/i],["lang-js",/^<script\b[^>]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-css",/^<style\b[^>]*>([\s\S]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),"default-markup htm html mxml xhtml xml xsl".split(" "));t(G([["pln",/^[\s]+/,null," \t\r\n"],["atv",/^(?:\"[^\"]*\"?|\'[^\']*\'?)/,null,
+"\"'"]],[["tag",/^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],["atn",/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],["pun",/^[=<>\/]+/],["lang-js",/^on\w+\s*=\s*\"([^\"]+)\"/i],["lang-js",/^on\w+\s*=\s*\'([^\']+)\'/i],["lang-js",/^on\w+\s*=\s*([^\"\'>\s]+)/i],["lang-css",/^style\s*=\s*\"([^\"]+)\"/i],["lang-css",/^style\s*=\s*\'([^\']+)\'/i],["lang-css",/^style\s*=\s*([^\"\'>\s]+)/i]]),["in.tag"]);t(G([],[["atv",/^[\s\S]+/]]),["uq.val"]);t(y({keywords:H,
+hashComments:!0,cStyleComments:!0,types:S}),"c cc cpp cxx cyc m".split(" "));t(y({keywords:"null,true,false"}),["json"]);t(y({keywords:P,hashComments:!0,cStyleComments:!0,verbatimStrings:!0,types:S}),["cs"]);t(y({keywords:O,cStyleComments:!0}),["java"]);t(y({keywords:C,hashComments:!0,multiLineStrings:!0}),["bash","bsh","csh","sh"]);t(y({keywords:Q,hashComments:!0,multiLineStrings:!0,tripleQuotedStrings:!0}),["cv","py","python"]);t(y({keywords:"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",
+hashComments:!0,multiLineStrings:!0,regexLiterals:2}),["perl","pl","pm"]);t(y({keywords:R,hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["rb","ruby"]);t(y({keywords:F,cStyleComments:!0,regexLiterals:!0}),["javascript","js","ts","typescript"]);t(y({keywords:"all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,throw,true,try,unless,until,when,while,yes",hashComments:3,cStyleComments:!0,multilineStrings:!0,tripleQuotedStrings:!0,
+regexLiterals:!0}),["coffee"]);t(G([],[["str",/^[\s\S]+/]]),["regex"]);var Y=E.PR={createSimpleLexer:G,registerLangHandler:t,sourceDecorator:y,PR_ATTRIB_NAME:"atn",PR_ATTRIB_VALUE:"atv",PR_COMMENT:"com",PR_DECLARATION:"dec",PR_KEYWORD:"kwd",PR_LITERAL:"lit",PR_NOCODE:"nocode",PR_PLAIN:"pln",PR_PUNCTUATION:"pun",PR_SOURCE:"src",PR_STRING:"str",PR_TAG:"tag",PR_TYPE:"typ",prettyPrintOne:E.prettyPrintOne=function(a,d,f){f=f||!1;d=d||null;var b=document.createElement("div");b.innerHTML="<pre>"+a+"</pre>";
+b=b.firstChild;f&&L(b,f,!0);M({j:d,m:f,h:b,l:1,a:null,i:null,c:null,g:null});return b.innerHTML},prettyPrint:E.prettyPrint=function(a,d){function f(){for(var b=E.PR_SHOULD_USE_CONTINUATION?c.now()+250:Infinity;p<t.length&&c.now()<b;p++){for(var d=t[p],l=g,m=d;m=m.previousSibling;){var n=m.nodeType,u=(7===n||8===n)&&m.nodeValue;if(u?!/^\??prettify\b/.test(u):3!==n||/\S/.test(m.nodeValue))break;if(u){l={};u.replace(/\b(\w+)=([\w:.%+-]+)/g,function(a,b,c){l[b]=c});break}}m=d.className;if((l!==g||r.test(m))&&
+!e.test(m)){n=!1;for(u=d.parentNode;u;u=u.parentNode)if(q.test(u.tagName)&&u.className&&r.test(u.className)){n=!0;break}if(!n){d.className+=" prettyprinted";n=l.lang;if(!n){var n=m.match(w),C;!n&&(C=V(d))&&z.test(C.tagName)&&(n=C.className.match(w));n&&(n=n[1])}if(y.test(d.tagName))u=1;else var u=d.currentStyle,x=v.defaultView,u=(u=u?u.whiteSpace:x&&x.getComputedStyle?x.getComputedStyle(d,null).getPropertyValue("white-space"):0)&&"pre"===u.substring(0,3);x=l.linenums;(x="true"===x||+x)||(x=(x=m.match(/\blinenums\b(?::(\d+))?/))?
+x[1]&&x[1].length?+x[1]:!0:!1);x&&L(d,x,u);M({j:n,h:d,m:x,l:u,a:null,i:null,c:null,g:null})}}}p<t.length?E.setTimeout(f,250):"function"===typeof a&&a()}for(var b=d||document.body,v=b.ownerDocument||document,b=[b.getElementsByTagName("pre"),b.getElementsByTagName("code"),b.getElementsByTagName("xmp")],t=[],n=0;n<b.length;++n)for(var l=0,m=b[n].length;l<m;++l)t.push(b[n][l]);var b=null,c=Date;c.now||(c={now:function(){return+new Date}});var p=0,w=/\blang(?:uage)?-([\w.]+)(?!\S)/,r=/\bprettyprint\b/,
+e=/\bprettyprinted\b/,y=/pre|xmp/i,z=/^code$/i,q=/^(?:pre|code|xmp)$/i,g={};f()}},H=E.define;"function"===typeof H&&H.amd&&H("google-code-prettify",[],function(){return Y})})();}()


[29/50] [abbrv] lucene-solr:apiv2: SOLR-8396: Add support for PointFields in Solr

Posted by no...@apache.org.
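
The diffs below introduce Point-based numeric field types (solr.IntPointField,
solr.LongPointField, solr.FloatPointField, solr.DoublePointField) into the test
schemas and replace hard-coded Trie types with a system-property lookup, so the
same schema can exercise either implementation. A minimal sketch of the pattern,
assuming only the property names shown in the diffs (the field name here is
illustrative, not from the commit):

    <!-- Declared once per schema: a Point type and its Trie counterpart -->
    <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
    <fieldType name="int"  class="solr.TrieIntField" precisionStep="0"/>

    <!-- ${solr.tests.intClass:pint} resolves the solr.tests.intClass system
         property, falling back to the Point type "pint" when it is unset -->
    <field name="example_i" type="${solr.tests.intClass:pint}" indexed="true" stored="true"/>

Running tests with -Dsolr.tests.intClass=int would then select the Trie
implementation; with the property unset, the new IntPointField is used.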
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test-files/solr/collection1/conf/schema-point.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-point.xml b/solr/core/src/test-files/solr/collection1/conf/schema-point.xml
new file mode 100644
index 0000000..ca37ff5
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-point.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<schema name="example" version="1.6">
+  <types>
+    <fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="true"/>
+
+    <fieldType name="pint" class="solr.IntPointField"/>
+    <fieldType name="plong" class="solr.LongPointField"/>
+    <fieldType name="pdouble" class="solr.DoublePointField"/>
+    <fieldType name="pfloat" class="solr.FloatPointField"/>
+    
+    <fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+    <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+    <fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+    <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+    <fieldType name="date" class="solr.TrieDateField" sortMissingLast="true" omitNorms="true"/>
+ </types>
+
+
+ <fields>
+   <field name="id" type="string"/>
+   <field name="text" type="string"/>
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false" />
+   <field name="signatureField" type="string" indexed="true" stored="false"/>
+   <dynamicField name="*_s"  type="string"  indexed="true"  stored="true"/>
+   <dynamicField name="*_sS" type="string"  indexed="false" stored="true"/>
+   <dynamicField name="*_i"  type="int"    indexed="true"  stored="true"/>
+   <dynamicField name="*_l"  type="long"   indexed="true"  stored="true"/>
+   <dynamicField name="*_f"  type="float"  indexed="true"  stored="true"/>
+   <dynamicField name="*_d"  type="double" indexed="true"  stored="true"/>
+   <dynamicField name="*_dt"  type="date" indexed="true"  stored="true"/>
+   
+   <dynamicField name="*_p_i"  type="pint"    indexed="true"  stored="true"/>
+   <dynamicField name="*_p_i_dv"  type="pint"    indexed="true"  stored="true" docValues="true"/>
+   <dynamicField name="*_p_i_mv"  type="pint"    indexed="true"  stored="true" multiValued="true"/>
+   <dynamicField name="*_p_i_mv_dv"  type="pint"    indexed="true"  stored="true" docValues="true" multiValued="true"/>
+   <dynamicField name="*_p_i_ni_dv"  type="pint"    indexed="false"  stored="true" docValues="true"/>
+   <dynamicField name="*_p_i_ni_mv_dv"  type="pint"    indexed="false"  stored="true" docValues="true" multiValued="true"/>
+   
+   <dynamicField name="*_p_l"  type="plong"    indexed="true"  stored="true"/>
+   <dynamicField name="*_p_l_dv"  type="plong"    indexed="true"  stored="true" docValues="true"/>
+   <dynamicField name="*_p_l_mv"  type="plong"    indexed="true"  stored="true" multiValued="true"/>
+   <dynamicField name="*_p_l_mv_dv"  type="plong"    indexed="true"  stored="true" docValues="true" multiValued="true"/>
+   <dynamicField name="*_p_l_ni_dv"  type="plong"    indexed="false"  stored="true" docValues="true"/>
+   <dynamicField name="*_p_l_ni_mv_dv"  type="plong"    indexed="false"  stored="true" docValues="true" multiValued="true"/>
+   
+   <dynamicField name="*_p_d"  type="pdouble"    indexed="true"  stored="true"/>
+   <dynamicField name="*_p_d_dv"  type="pdouble"    indexed="true"  stored="true" docValues="true"/>
+   <dynamicField name="*_p_d_mv"  type="pdouble"    indexed="true"  stored="true" multiValued="true"/>
+   <dynamicField name="*_p_d_mv_dv"  type="pdouble"    indexed="true"  stored="true" docValues="true" multiValued="true"/>
+   <dynamicField name="*_p_d_ni_dv"  type="pdouble"    indexed="false"  stored="true" docValues="true"/>
+   <dynamicField name="*_p_d_ni_mv_dv"  type="pdouble"    indexed="false"  stored="true" docValues="true" multiValued="true"/>
+   
+   <dynamicField name="*_p_f"  type="pfloat"    indexed="true"  stored="true"/>
+   <dynamicField name="*_p_f_dv"  type="pfloat"    indexed="true"  stored="true" docValues="true"/>
+   <dynamicField name="*_p_f_mv"  type="pfloat"    indexed="true"  stored="true" multiValued="true"/>
+   <dynamicField name="*_p_f_mv_dv"  type="pfloat"    indexed="true"  stored="true" docValues="true" multiValued="true"/>
+   <dynamicField name="*_p_f_ni_dv"  type="pfloat"    indexed="false"  stored="true" docValues="true"/>
+   <dynamicField name="*_p_f_ni_mv_dv"  type="pfloat"    indexed="false"  stored="true" docValues="true" multiValued="true"/>
+   
+   <!-- return DV fields as  -->
+   <dynamicField name="*_p_i_dv_ns"  type="pint"    indexed="true"  stored="false" docValues="true" useDocValuesAsStored="true"/>
+   <dynamicField name="*_p_l_dv_ns"  type="plong"    indexed="true"  stored="false" docValues="true" useDocValuesAsStored="true"/>
+   <dynamicField name="*_p_d_dv_ns"  type="pdouble"    indexed="true"  stored="false" docValues="true" useDocValuesAsStored="true"/>
+   <dynamicField name="*_p_f_dv_ns"  type="pfloat"    indexed="true"  stored="false" docValues="true" useDocValuesAsStored="true"/>
+
+ </fields>
+
+ <uniqueKey>id</uniqueKey>
+
+
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test-files/solr/collection1/conf/schema-sorts.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-sorts.xml b/solr/core/src/test-files/solr/collection1/conf/schema-sorts.xml
index c918017..f68841c 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-sorts.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-sorts.xml
@@ -45,30 +45,30 @@ NOTE: Tests expect every field in this schema to be sortable.
   <field name="int" type="int"/>
   <field name="int_last" type="int_last"/>
   <field name="int_first" type="int_first"/>
-  <field name="int_dv" type="int_dv"/>
-  <field name="int_dv_last" type="int_dv_last"/>
-  <field name="int_dv_first" type="int_dv_first"/>
+  <field name="int_dv" type="${solr.tests.intClass:pint}_dv"/>
+  <field name="int_dv_last" type="${solr.tests.intClass:pint}_dv_last"/>
+  <field name="int_dv_first" type="${solr.tests.intClass:pint}_dv_first"/>
 
   <field name="long" type="long"/>
   <field name="long_last" type="long_last"/>
   <field name="long_first" type="long_first"/>
-  <field name="long_dv" type="long_dv"/>
-  <field name="long_dv_last" type="long_dv_last"/>
-  <field name="long_dv_first" type="long_dv_first"/>
+  <field name="long_dv" type="${solr.tests.longClass:plong}_dv"/>
+  <field name="long_dv_last" type="${solr.tests.longClass:plong}_dv_last"/>
+  <field name="long_dv_first" type="${solr.tests.longClass:plong}_dv_first"/>
 
   <field name="float" type="float"/>
   <field name="float_last" type="float_last"/>
   <field name="float_first" type="float_first"/>
-  <field name="float_dv" type="float_dv"/>
-  <field name="float_dv_last" type="float_dv_last"/>
-  <field name="float_dv_first" type="float_dv_first"/>
+  <field name="float_dv" type="${solr.tests.floatClass:pfloat}_dv"/>
+  <field name="float_dv_last" type="${solr.tests.floatClass:pfloat}_dv_last"/>
+  <field name="float_dv_first" type="${solr.tests.floatClass:pfloat}_dv_first"/>
 
   <field name="double" type="double"/>
   <field name="double_last" type="double_last"/>
   <field name="double_first" type="double_first"/>
-  <field name="double_dv" type="double_dv"/>
-  <field name="double_dv_last" type="double_dv_last"/>
-  <field name="double_dv_first" type="double_dv_first"/>
+  <field name="double_dv" type="${solr.tests.doubleClass:pdouble}_dv"/>
+  <field name="double_dv_last" type="${solr.tests.doubleClass:pdouble}_dv_last"/>
+  <field name="double_dv_first" type="${solr.tests.doubleClass:pdouble}_dv_first"/>
 
   <field name="date" type="date"/>
   <field name="date_last" type="date_last"/>
@@ -220,6 +220,11 @@ NOTE: Tests expect every field in this schema to be sortable.
              sortMissingLast="true"/>
   <fieldType name="int_dv_first" class="solr.TrieIntField" stored="true" indexed="false" docValues="true"
              sortMissingFirst="true"/>
+  <fieldType name="pint_dv" class="solr.IntPointField" stored="true" indexed="false" docValues="true"/>
+  <fieldType name="pint_dv_last" class="solr.IntPointField" stored="true" indexed="false" docValues="true"
+             sortMissingLast="true"/>
+  <fieldType name="pint_dv_first" class="solr.IntPointField" stored="true" indexed="false" docValues="true"
+             sortMissingFirst="true"/>
 
   <fieldType name="long" class="solr.TrieLongField" stored="true" indexed="true"/>
   <fieldType name="long_last" class="solr.TrieLongField" stored="true" indexed="true" sortMissingLast="true"/>
@@ -229,6 +234,11 @@ NOTE: Tests expect every field in this schema to be sortable.
              sortMissingLast="true"/>
   <fieldType name="long_dv_first" class="solr.TrieLongField" stored="true" indexed="false" docValues="true"
              sortMissingFirst="true"/>
+  <fieldType name="plong_dv" class="solr.LongPointField" stored="true" indexed="false" docValues="true"/>
+  <fieldType name="plong_dv_last" class="solr.LongPointField" stored="true" indexed="false" docValues="true"
+             sortMissingLast="true"/>
+  <fieldType name="plong_dv_first" class="solr.LongPointField" stored="true" indexed="false" docValues="true"
+             sortMissingFirst="true"/>
 
   <fieldType name="float" class="solr.TrieFloatField" stored="true" indexed="true"/>
   <fieldType name="float_last" class="solr.TrieFloatField" stored="true" indexed="true" sortMissingLast="true"/>
@@ -238,6 +248,11 @@ NOTE: Tests expect every field in this schema to be sortable.
              sortMissingLast="true"/>
   <fieldType name="float_dv_first" class="solr.TrieFloatField" stored="true" indexed="false" docValues="true"
              sortMissingFirst="true"/>
+  <fieldType name="pfloat_dv" class="solr.FloatPointField" stored="true" indexed="false" docValues="true"/>
+  <fieldType name="pfloat_dv_last" class="solr.FloatPointField" stored="true" indexed="false" docValues="true"
+             sortMissingLast="true"/>
+  <fieldType name="pfloat_dv_first" class="solr.FloatPointField" stored="true" indexed="false" docValues="true"
+             sortMissingFirst="true"/>
 
   <fieldType name="double" class="solr.TrieDoubleField" stored="true" indexed="true"/>
   <fieldType name="double_last" class="solr.TrieDoubleField" stored="true" indexed="true" sortMissingLast="true"/>
@@ -247,6 +262,11 @@ NOTE: Tests expect every field in this schema to be sortable.
              sortMissingLast="true"/>
   <fieldType name="double_dv_first" class="solr.TrieDoubleField" stored="true" indexed="false" docValues="true"
              sortMissingFirst="true"/>
+  <fieldType name="pdouble_dv" class="solr.DoublePointField" stored="true" indexed="false" docValues="true"/>
+  <fieldType name="pdouble_dv_last" class="solr.DoublePointField" stored="true" indexed="false" docValues="true"
+             sortMissingLast="true"/>
+  <fieldType name="pdouble_dv_first" class="solr.DoublePointField" stored="true" indexed="false" docValues="true"
+             sortMissingFirst="true"/>
 
   <fieldType name="date" class="solr.TrieDateField" stored="true" indexed="true"/>
   <fieldType name="date_last" class="solr.TrieDateField" stored="true" indexed="true" sortMissingLast="true"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test-files/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema.xml b/solr/core/src/test-files/solr/collection1/conf/schema.xml
index 35de166..be1b6f5 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema.xml
@@ -43,6 +43,12 @@
   <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
   <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
   <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
+  
+  <!-- Point Fields -->
+  <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
+  <fieldType name="plong" class="solr.LongPointField" docValues="true"/>
+  <fieldType name="pdouble" class="solr.DoublePointField" docValues="true"/>
+  <fieldType name="pfloat" class="solr.FloatPointField" docValues="true"/>
 
   <!-- Field type demonstrating an Analyzer failure -->
   <fieldType name="failtype1" class="solr.TextField">
@@ -550,7 +556,7 @@
   <field name="dateRange" type="dateRange" multiValued="true"/>
 
   <field name="cat" type="string" indexed="true" stored="true" multiValued="true"/>
-  <field name="price" type="float" indexed="true" stored="true" multiValued="false"/>
+  <field name="price" type="${solr.tests.floatClass:pfloat}" indexed="true" stored="true" multiValued="false"/>
   <field name="inStock" type="boolean" indexed="true" stored="true"/>
 
   <field name="subword" type="subword" indexed="true" stored="true"/>
@@ -599,7 +605,7 @@
   -->
   <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
   <dynamicField name="*_i1" type="int" indexed="true" stored="true" multiValued="false" sortMissingLast="true"/>
-  <dynamicField name="*_idv" type="int" indexed="true" stored="true" docValues="true" multiValued="false"/>
+  <dynamicField name="*_idv" type="${solr.tests.intClass:pint}" indexed="true" stored="true" docValues="true" multiValued="false"/>
 
 
   <dynamicField name="*_s" type="string" indexed="true" stored="true"/>
@@ -610,9 +616,9 @@
   <dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
   <dynamicField name="*_b1" type="boolean" indexed="true" stored="true" multiValued="false"/>
   <dynamicField name="*_f" type="float" indexed="true" stored="true"/>
-  <dynamicField name="*_f1" type="float" indexed="true" stored="true" multiValued="false"/>
+  <dynamicField name="*_f1" type="${solr.tests.floatClass:pfloat}" indexed="true" stored="true" multiValued="false"/>
   <dynamicField name="*_d" type="double" indexed="true" stored="true"/>
-  <dynamicField name="*_d1" type="double" indexed="true" stored="true" multiValued="false"/>
+  <dynamicField name="*_d1" type="${solr.tests.doubleClass:pdouble}" indexed="true" stored="true" multiValued="false"/>
   <dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
   <dynamicField name="*_dt1" type="date" indexed="true" stored="true" multiValued="false"/>
 
@@ -662,7 +668,7 @@
   <dynamicField name="*_mfacet" type="string" indexed="true" stored="false" multiValued="true"/>
 
   <!-- Type used to index the lat and lon components for the "location" FieldType -->
-  <dynamicField name="*_coordinate" type="tdouble" indexed="true" stored="false" omitNorms="true"/>
+  <dynamicField name="*_coordinate" type="${solr.tests.doubleClass:pdouble}" indexed="true" stored="false" omitNorms="true"/>
 
   <dynamicField name="*_path" type="path" indexed="true" stored="true" omitNorms="true" multiValued="true"/>
   <dynamicField name="*_ancestor" type="ancestor_path" indexed="true" stored="true" omitNorms="true"
@@ -676,12 +682,12 @@
   <dynamicField name="*_f_dv" type="float" indexed="true" stored="true" docValues="true"/>
   <dynamicField name="*_d_dv" type="double" indexed="true" stored="true" docValues="true"/>
   <dynamicField name="*_dt_dv" type="date" indexed="true" stored="true" docValues="true"/>
-  <dynamicField name="*_f1_dv" type="float" indexed="true" stored="true" docValues="true" multiValued="false"/>
+  <dynamicField name="*_f1_dv" type="${solr.tests.floatClass:pfloat}" indexed="true" stored="true" docValues="true" multiValued="false"/>
 
   <!--  Non-stored, DocValues=true -->
-  <dynamicField name="*_i_dvo" multiValued="false" type="int" docValues="true" indexed="true" stored="false"
+  <dynamicField name="*_i_dvo" multiValued="false" type="${solr.tests.intClass:pint}" docValues="true" indexed="true" stored="false"
                 useDocValuesAsStored="true"/>
-  <dynamicField name="*_d_dvo" multiValued="false" type="double" docValues="true" indexed="true" stored="false"
+  <dynamicField name="*_d_dvo" multiValued="false" type="${solr.tests.doubleClass:pdouble}" docValues="true" indexed="true" stored="false"
                 useDocValuesAsStored="true"/>
   <dynamicField name="*_s_dvo" multiValued="false" type="string" docValues="true" indexed="true" stored="false"
                 useDocValuesAsStored="true"/>
@@ -691,8 +697,8 @@
                 useDocValuesAsStored="true"/>
 
   <!--  Non-stored, DocValues=true, useDocValuesAsStored=false -->
-  <field name="single_i_dvn" multiValued="false" type="int" indexed="true" stored="true"/>
-  <field name="single_d_dvn" multiValued="false" type="double" indexed="true" stored="true"/>
+  <field name="single_i_dvn" multiValued="false" type="${solr.tests.intClass:pint}" indexed="true" stored="true"/>
+  <field name="single_d_dvn" multiValued="false" type="${solr.tests.doubleClass:pdouble}" indexed="true" stored="true"/>
   <field name="single_s_dvn" multiValued="false" type="string" indexed="true" stored="true"/>
   <field name="copy_single_i_dvn" multiValued="false" type="int" docValues="true" indexed="true" stored="false"
          useDocValuesAsStored="false"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test-files/solr/collection1/conf/schema11.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema11.xml b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
index e5b9233..370f321 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema11.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
@@ -77,7 +77,12 @@
   <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
   <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
   <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
-
+  
+  <!-- Point Fields -->
+  <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
+  <fieldType name="plong" class="solr.LongPointField" docValues="true"/>
+  <fieldType name="pdouble" class="solr.DoublePointField" docValues="true"/>
+  <fieldType name="pfloat" class="solr.FloatPointField" docValues="true"/>
 
     <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
          is a more restricted form of the canonical representation of dateTime
@@ -340,15 +345,15 @@ valued. -->
    <dynamicField name="*_s_dv"  type="string"  indexed="true"  stored="true" docValues="true"/>
    <dynamicField name="*_ss"    type="string"  indexed="true"  stored="true" multiValued="true"/>
    <dynamicField name="*_sS"   type="string"  indexed="false" stored="true"/>
-   <dynamicField name="*_i"    type="int"    indexed="true"  stored="true"/>
+   <dynamicField name="*_i"    type="${solr.tests.intClass:pint}"    indexed="true"  stored="true"/>
    <dynamicField name="*_ii"   type="int"    indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_l"    type="long"   indexed="true"  stored="true"/>
-   <dynamicField name="*_f"    type="float"  indexed="true"  stored="true"/>
-   <dynamicField name="*_d"    type="double" indexed="true"  stored="true"/>
+   <dynamicField name="*_l"    type="${solr.tests.longClass:plong}"   indexed="true"  stored="true"/>
+   <dynamicField name="*_f"    type="${solr.tests.floatClass:pfloat}"  indexed="true"  stored="true"/>
+   <dynamicField name="*_d"    type="${solr.tests.doubleClass:pdouble}" indexed="true"  stored="true"/>
 
    <dynamicField name="*_ti"      type="tint"    indexed="true"  stored="true"/>
-   <dynamicField name="*_ti_dv"   type="int"    indexed="true"  stored="true" docValues="true"/>
-   <dynamicField name="*_ti_ni_dv"   type="int"    indexed="true"  stored="true" docValues="true"/>
+   <dynamicField name="*_ti_dv"   type="tint"    indexed="true"  stored="true" docValues="true"/>
+   <dynamicField name="*_ti_ni_dv"   type="tint"    indexed="true"  stored="true" docValues="true"/>
    <dynamicField name="*_tl"      type="tlong"   indexed="true"  stored="true"/>
    <dynamicField name="*_tl_dv"    type="tlong"   indexed="true"  stored="true" docValues="true"/>
    <dynamicField name="*_tl_ni_dv"   type="tlong"   indexed="false"  stored="true" docValues="true"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test-files/solr/collection1/conf/schema12.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema12.xml b/solr/core/src/test-files/solr/collection1/conf/schema12.xml
index 0de219a..206cd9e 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema12.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema12.xml
@@ -42,6 +42,12 @@
   <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
   <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
   <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
+  
+  <!-- Point Fields -->
+  <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
+  <fieldType name="plong" class="solr.LongPointField" docValues="true"/>
+  <fieldType name="pdouble" class="solr.DoublePointField" docValues="true"/>
+  <fieldType name="pfloat" class="solr.FloatPointField" docValues="true"/>
 
   <!-- Field type demonstrating an Analyzer failure -->
   <fieldType name="failtype1" class="solr.TextField">
@@ -432,7 +438,7 @@
   <field name="text" type="text" indexed="true" stored="false"/>
   <field name="subject" type="text" indexed="true" stored="true"/>
   <field name="title" type="nametext" indexed="true" stored="true"/>
-  <field name="weight" type="float" indexed="true" stored="true"/>
+  <field name="weight" type="${solr.tests.floatClass:pfloat}" indexed="true" stored="true"/>
   <field name="bday" type="date" indexed="true" stored="true"/>
 
   <field name="text_np" type="text_np" indexed="true" stored="false"/>
@@ -550,7 +556,8 @@
 
   <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
   <dynamicField name="*_is" type="int" indexed="true" stored="true" multiValued="true"/>
-  <dynamicField name="*_idv" type="int" indexed="true" stored="true" docValues="true" multiValued="false"/>
+  <dynamicField name="*_i_dv" type="${solr.tests.intClass:pint}" indexed="true" stored="true" docValues="true" multiValued="false"/>
+  <dynamicField name="*_is_dv" type="${solr.tests.intClass:pint}" indexed="true" stored="true" docValues="true" multiValued="false"/>
   <dynamicField name="*_s1" type="string" indexed="true" stored="true" multiValued="false"/>
   <!-- :TODO: why are these identical?!?!?! -->
   <dynamicField name="*_s" type="string" indexed="true" stored="true" multiValued="true"/>
@@ -559,9 +566,11 @@
   <dynamicField name="*_t" type="text" indexed="true" stored="true"/>
   <dynamicField name="*_tt" type="text" indexed="true" stored="true"/>
   <dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
-  <dynamicField name="*_f" type="float" indexed="true" stored="true"/>
+  <dynamicField name="*_f" type="${solr.tests.floatClass:pfloat}" indexed="true" stored="true"/>
   <dynamicField name="*_d" type="double" indexed="true" stored="true"/>
   <dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
+  
+  <dynamicField name="*_pi" type="pint" indexed="true" stored="true" docValues="false" multiValued="false"/>
 
   <!-- some trie-coded dynamic fields for faster range queries -->
   <dynamicField name="*_ti" type="tint" indexed="true" stored="true"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml b/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
index 280fd34..c6491eb 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
@@ -202,7 +202,7 @@
   <!-- docvalues and stored are exclusive -->
   <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
   <dynamicField name="*_is" type="int" indexed="true" stored="true" multiValued="true"/>
-  <dynamicField name="*_id" type="int" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_id" type="${solr.tests.intClass:pint}" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_ids" type="int" indexed="true" stored="false" multiValued="true" docValues="true"/>
   <dynamicField name="*_s" type="string" indexed="true" stored="true"/>
   <dynamicField name="*_s1" type="string" indexed="true" stored="true"/>
@@ -215,11 +215,11 @@
   <dynamicField name="*_lds" type="long" indexed="true" stored="false" multiValued="true" docValues="true"/>
   <dynamicField name="*_f" type="float" indexed="true" stored="true"/>
   <dynamicField name="*_fs" type="float" indexed="true" stored="true" multiValued="true"/>
-  <dynamicField name="*_fd" type="float" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_fd" type="${solr.tests.floatClass:pfloat}" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_fds" type="float" indexed="true" stored="false" multiValued="true" docValues="true"/>
   <dynamicField name="*_d" type="double" indexed="true" stored="true"/>
   <dynamicField name="*_ds" type="double" indexed="true" stored="true" multiValued="true"/>
-  <dynamicField name="*_dd" type="double" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_dd" type="${solr.tests.doubleClass:pdouble}" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_dds" type="double" indexed="true" stored="false" multiValued="true" docValues="true"/>
   <dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
   <dynamicField name="*_dts" type="date" indexed="true" stored="true" multiValued="true"/>
@@ -227,15 +227,15 @@
   <dynamicField name="*_dtds" type="date" indexed="true" stored="false" multiValued="true" docValues="true"/>
 
   <!-- docvalues and stored (S suffix) -->
-  <dynamicField name="*_idS" type="int" indexed="true" stored="true" docValues="true"/>
+  <dynamicField name="*_idS" type="${solr.tests.intClass:pint}" indexed="true" stored="true" docValues="true"/>
   <dynamicField name="*_idsS" type="int" indexed="true" stored="true" multiValued="true" docValues="true"/>
   <dynamicField name="*_sdS" type="string" indexed="true" stored="true" docValues="true"/>
   <dynamicField name="*_sdsS" type="string" indexed="true" stored="true" multiValued="true" docValues="true"/>
-  <dynamicField name="*_ldS" type="long" indexed="true" stored="true" docValues="true"/>
+  <dynamicField name="*_ldS" type="${solr.tests.longClass:plong}" indexed="true" stored="true" docValues="true"/>
   <dynamicField name="*_ldsS" type="long" indexed="true" stored="true" multiValued="true" docValues="true"/>
-  <dynamicField name="*_fdS" type="float" indexed="true" stored="true" docValues="true"/>
+  <dynamicField name="*_fdS" type="${solr.tests.floatClass:pfloat}" indexed="true" stored="true" docValues="true"/>
   <dynamicField name="*_fdsS" type="float" indexed="true" stored="true" multiValued="true" docValues="true"/>
-  <dynamicField name="*_ddS" type="double" indexed="true" stored="true" docValues="true"/>
+  <dynamicField name="*_ddS" type="${solr.tests.doubleClass:pdouble}" indexed="true" stored="true" docValues="true"/>
   <dynamicField name="*_ddsS" type="double" indexed="true" stored="true" multiValued="true" docValues="true"/>
   <dynamicField name="*_dtdS" type="date" indexed="true" stored="true" docValues="true"/>
   <dynamicField name="*_dtdsS" type="date" indexed="true" stored="true" multiValued="true" docValues="true"/>
@@ -394,6 +394,13 @@
   <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
   <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
   <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
+  
+  <!-- Point Fields -->
+  <fieldType name="pint" class="solr.IntPointField"/>
+  <fieldType name="plong" class="solr.LongPointField"/>
+  <fieldType name="pdouble" class="solr.DoublePointField"/>
+  <fieldType name="pfloat" class="solr.FloatPointField"/>
+  
 
   <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
        is a more restricted form of the canonical representation of dateTime

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java b/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java
index ad62fcc..d0c4f36 100644
--- a/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java
+++ b/solr/core/src/test/org/apache/solr/TestDistributedGrouping.java
@@ -16,19 +16,20 @@
  */
 package org.apache.solr;
 
+import java.io.IOException;
+import java.util.List;
+
 import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.SolrDocumentList;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.List;
-
 /**
  * TODO? perhaps use:
  *  http://docs.codehaus.org/display/JETTY/ServletTester
@@ -37,6 +38,7 @@ import java.util.List;
  * @since solr 4.0
  */
 @Slow
+@SuppressPointFields
 public class TestDistributedGrouping extends BaseDistributedSearchTestCase {
 
   String t1="a_t";

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/TestJoin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/TestJoin.java b/solr/core/src/test/org/apache/solr/TestJoin.java
index 419275c..f4b2cf5 100644
--- a/solr/core/src/test/org/apache/solr/TestJoin.java
+++ b/solr/core/src/test/org/apache/solr/TestJoin.java
@@ -16,6 +16,7 @@
  */
 package org.apache.solr;
 
+import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.noggit.JSONUtil;
 import org.noggit.ObjectBuilder;
@@ -36,6 +37,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+@SuppressPointFields
 public class TestJoin extends SolrTestCaseJ4 {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -151,7 +153,7 @@ public class TestJoin extends SolrTestCaseJ4 {
     // increase test effectiveness by avoiding 0 resultsets much of the time.
     String[][] compat = new String[][] {
         {"small_s","small2_s","small2_ss","small3_ss"},
-        {"small_i","small2_i","small2_is","small3_is"}
+        {"small_i","small2_i","small2_is","small3_is", "small_i_dv", "small_is_dv"}
     };
 
 
@@ -169,6 +171,8 @@ public class TestJoin extends SolrTestCaseJ4 {
       types.add(new FldType("small2_i",ZERO_ONE, new IRange(0,5+indexSize/3)));
       types.add(new FldType("small2_is",ZERO_TWO, new IRange(0,5+indexSize/3)));
       types.add(new FldType("small3_is",new IRange(0,25), new IRange(0,100)));
+      types.add(new FldType("small_i_dv",ZERO_ONE, new IRange(0,5+indexSize/3)));
+      types.add(new FldType("small_is_dv",ZERO_ONE, new IRange(0,5+indexSize/3)));
 
       clearIndex();
       Map<Comparable, Doc> model = indexDocs(types, null, indexSize);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java
index 90c2394..d088924 100644
--- a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java
+++ b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java
@@ -62,7 +62,9 @@ public class TestRandomDVFaceting extends SolrTestCaseJ4 {
     types = new ArrayList<>();
     types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4)));
     types.add(new FldType("score_f",ONE_ONE, new FVal(1,100)));
+    types.add(new FldType("score_d",ONE_ONE, new FVal(1,100)));
     types.add(new FldType("foo_i",ZERO_ONE, new IRange(0,indexSize)));
+    types.add(new FldType("foo_l",ZERO_ONE, new IRange(0,indexSize)));
     types.add(new FldType("small_s",ZERO_ONE, new SVal('a',(char)('c'+indexSize/3),1,1)));
     types.add(new FldType("small2_s",ZERO_ONE, new SVal('a',(char)('c'+indexSize/3),1,1)));
     types.add(new FldType("small2_ss",ZERO_TWO, new SVal('a',(char)('c'+indexSize/3),1,1)));
@@ -231,6 +233,12 @@ public class TestRandomDVFaceting extends SolrTestCaseJ4 {
 
         responses.add(strResponse);
       }
+      // If there is a PointField option for this test, also test it
+      if (h.getCore().getLatestSchema().getFieldOrNull(facet_field + "_p") != null) {
+        params.set("facet.field", "{!key="+facet_field+"}"+facet_field+"_p");
+        String strResponse = h.query(req(params));
+        responses.add(strResponse);
+      }
 
       /**
       String strResponse = h.query(req(params));

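The {!key=...} local param in the hunk above renames the facet bucket in the response, so
faceting on the point-typed twin field (facet_field + "_p") produces output keyed by the
original field name and the response strings stay directly comparable. A short sketch of the
resulting request (field name illustrative):

    // facet.field={!key=foo_i}foo_i_p -- the counts come from the point field
    // "foo_i_p" but the response section is keyed "foo_i".
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("q", "*:*");
    params.set("facet", "true");
    params.set("facet.field", "{!key=foo_i}foo_i_p");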
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
index 2ffefdc..c3a3e02 100644
--- a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
+++ b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
@@ -184,7 +184,6 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
     SolrQueryRequest req = req();
     try {
       Random rand = random();
-      boolean validate = validateResponses;
       ModifiableSolrParams params = params("facet","true", "wt","json", "indent","true", "omitHeader","true");
       params.add("q","*:*");  // TODO: select subsets
       params.add("rows","0");
@@ -244,8 +243,9 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
 
       List<String> methods = multiValued ? multiValuedMethods : singleValuedMethods;
       List<String> responses = new ArrayList<>(methods.size());
+      
       for (String method : methods) {
-        for (boolean exists : new boolean [] {false, true}) {
+        for (boolean exists : new boolean[]{false, true}) {
           // params.add("facet.field", "{!key="+method+"}" + ftype.fname);
           // TODO: allow method to be passed on local params?
           if (method!=null) {
@@ -253,7 +253,6 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
           } else {
             params.remove("facet.method");
           }
-          
           params.set("facet.exists", ""+exists);
           if (!exists && rand.nextBoolean()) {
             params.remove("facet.exists");
@@ -275,6 +274,13 @@ public class TestRandomFaceting extends SolrTestCaseJ4 {
                   "facet.exists", req(params), ErrorCode.BAD_REQUEST);
               continue;
             }
+            if (exists && sf.getType().isPointField()) {
+              // PointFields don't yet support "enum" method or the "facet.exists" parameter
+              assertQEx("Expecting failure, since ", 
+                  "facet.exists=true is requested, but facet.method=enum can't be used with " + sf.getName(), 
+                  req(params), ErrorCode.BAD_REQUEST);
+              continue;
+            }
           }
           String strResponse = h.query(req(params));
           responses.add(strResponse);

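The new branch above encodes a current limitation: point fields have no terms dictionary, so
facet.method=enum -- which facet.exists=true relies on -- cannot serve them, and the request
is rejected up front. A sketch of a request that trips the check (the foo_pi field name is
illustrative, using the pint dynamic field added to schema12.xml in this patch):

    // Expected to fail with 400 BAD_REQUEST for a point-typed field:
    // facet.exists=true implies the enum method, which needs indexed terms.
    assertQEx("facet.exists must be rejected for point fields",
        "facet.exists=true is requested, but facet.method=enum can't be used with foo_pi",
        req("q", "*:*", "facet", "true", "facet.field", "foo_pi", "facet.exists", "true"),
        ErrorCode.BAD_REQUEST);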
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/cloud/TestCloudPivotFacet.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudPivotFacet.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudPivotFacet.java
index 460c501..b13cb78 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudPivotFacet.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudPivotFacet.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import org.apache.commons.lang.StringUtils;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
+import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.response.FieldStatsInfo;
 import org.apache.solr.client.solrj.response.PivotField;
@@ -78,6 +79,7 @@ import static org.apache.solr.common.params.FacetParams.FACET_DISTRIB_MCO;
  *
  */
 @SuppressSSL // Too Slow
+@SuppressPointFields
 public class TestCloudPivotFacet extends AbstractFullDistribZkTestBase {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java
index a7fb774..7062b43 100644
--- a/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java
@@ -40,7 +40,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class XsltUpdateRequestHandlerTest extends SolrTestCaseJ4 {
-
+  
   @BeforeClass
   public static void beforeTests() throws Exception {
     initCore("solrconfig.xml","schema.xml");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java
index 66c378d..92b4943 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java
@@ -16,19 +16,19 @@
  */
 package org.apache.solr.handler.admin;
 
+import java.util.Arrays;
+import java.util.EnumSet;
+
 import org.apache.solr.common.luke.FieldFlag;
 import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.schema.CustomAnalyzerStrField; // jdoc
+import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.util.AbstractSolrTestCase;
 import org.apache.solr.util.TestHarness;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.util.Arrays;
-import java.util.EnumSet;
-
 /**
  * :TODO: currently only tests some of the utilities in the LukeRequestHandler
  */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java b/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
index f7fc13a..7baa5a9 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
@@ -16,15 +16,19 @@
  */
 package org.apache.solr.handler.component;
 
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.search.CollapsingQParserPlugin;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.util.*;
-
+@SuppressPointFields
 public class TestExpandComponent extends SolrTestCaseJ4 {
 
   @BeforeClass

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/request/TestFacetMethods.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/request/TestFacetMethods.java b/solr/core/src/test/org/apache/solr/request/TestFacetMethods.java
index 1da41f5..ea7c001 100644
--- a/solr/core/src/test/org/apache/solr/request/TestFacetMethods.java
+++ b/solr/core/src/test/org/apache/solr/request/TestFacetMethods.java
@@ -17,7 +17,9 @@
 
 package org.apache.solr.request;
 
+import org.apache.solr.request.SimpleFacets.FacetMethod;
 import org.apache.solr.schema.BoolField;
+import org.apache.solr.schema.IntPointField;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.StrField;
 import org.apache.solr.schema.TrieIntField;
@@ -203,5 +205,15 @@ public class TestFacetMethods {
     assertEquals(SimpleFacets.FacetMethod.ENUM, SimpleFacets.selectFacetMethod(field, null, 1));
 
   }
+  
+  @Test
+  public void testPointFields() {
+    // Methods other than FCS are not currently supported for PointFields
+    SchemaField field = new SchemaField("foo", new IntPointField());
+    assertEquals(SimpleFacets.FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, null, 0));
+    assertEquals(SimpleFacets.FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, FacetMethod.ENUM, 0));
+    assertEquals(SimpleFacets.FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, FacetMethod.FC, 0));
+    assertEquals(SimpleFacets.FacetMethod.FCS, SimpleFacets.selectFacetMethod(field, FacetMethod.FCS, 0));
+  }
 
 }


[40/50] [abbrv] lucene-solr:apiv2: LUCENE-7643: Move IndexOrDocValuesQuery to core.

Posted by no...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/solr/core/src/java/org/apache/solr/schema/EnumField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/EnumField.java b/solr/core/src/java/org/apache/solr/schema/EnumField.java
index 967070c..5723206 100644
--- a/solr/core/src/java/org/apache/solr/schema/EnumField.java
+++ b/solr/core/src/java/org/apache/solr/schema/EnumField.java
@@ -43,7 +43,6 @@ import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.EnumFieldSource;
 import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.DocValuesRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.util.BytesRef;
@@ -253,10 +252,21 @@ public class EnumField extends PrimitiveFieldType {
     Query query = null;
     final boolean matchOnly = field.hasDocValues() && !field.indexed();
     if (matchOnly) {
-      query = new ConstantScoreQuery(DocValuesRangeQuery.newLongRange(field.getName(),
-              min == null ? null : minValue.longValue(),
-              max == null ? null : maxValue.longValue(),
-              minInclusive, maxInclusive));
+      long lowerValue = Long.MIN_VALUE;
+      long upperValue = Long.MAX_VALUE;
+      if (minValue != null) {
+        lowerValue = minValue.longValue();
+        if (minInclusive == false) {
+          ++lowerValue;
+        }
+      }
+      if (maxValue != null) {
+        upperValue = maxValue.longValue();
+        if (maxInclusive == false) {
+          --upperValue;
+        }
+      }
+      query = new ConstantScoreQuery(NumericDocValuesField.newRangeQuery(field.getName(), lowerValue, upperValue));
     } else {
       query = LegacyNumericRangeQuery.newIntRange(field.getName(), DEFAULT_PRECISION_STEP,
           min == null ? null : minValue,

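The replacement above translates the nullable, possibly-exclusive (min, max) pair into the
closed [lowerValue, upperValue] interval that NumericDocValuesField.newRangeQuery expects,
nudging a bound by one ordinal when it is exclusive. A worked sketch ("severity" is an
illustrative enum field, not from this patch):

    // Range {2 TO 5] -> minInclusive=false, maxInclusive=true
    long lowerValue = 2L + 1;  // exclusive lower bound becomes 3
    long upperValue = 5L;      // inclusive upper bound is kept as-is
    Query q = new ConstantScoreQuery(
        NumericDocValuesField.newRangeQuery("severity", lowerValue, upperValue));

Unlike the TrieField helper later in this digest, there is no overflow guard here; enum
ordinals are small ints, so the ++/-- on the long bounds cannot wrap in practice.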
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/solr/core/src/java/org/apache/solr/schema/FieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index 3922edc..54f882f 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -36,13 +36,13 @@ import org.apache.lucene.analysis.util.CharFilterFactory;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.DocValuesRangeQuery;
 import org.apache.lucene.search.DocValuesRewriteMethod;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
@@ -720,17 +720,17 @@ public abstract class FieldType extends FieldProperties {
    */
   public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
     // TODO: change these all to use readableToIndexed/bytes instead (e.g. for unicode collation)
+    final BytesRef minValue = part1 == null ? null : new BytesRef(toInternal(part1));
+    final BytesRef maxValue = part2 == null ? null : new BytesRef(toInternal(part2));
     if (field.hasDocValues() && !field.indexed()) {
-      return DocValuesRangeQuery.newBytesRefRange(
-          field.getName(),
-          part1 == null ? null : new BytesRef(toInternal(part1)),
-          part2 == null ? null : new BytesRef(toInternal(part2)),
-          minInclusive, maxInclusive);
+      return SortedSetDocValuesField.newRangeQuery(
+            field.getName(),
+            minValue, maxValue,
+            minInclusive, maxInclusive);
     } else {
       SolrRangeQuery rangeQuery = new SolrRangeQuery(
             field.getName(),
-            part1 == null ? null : new BytesRef(toInternal(part1)),
-            part2 == null ? null : new BytesRef(toInternal(part2)),
+            minValue, maxValue,
             minInclusive, maxInclusive);
       return rangeQuery;
     }

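For docValues-only fields (hasDocValues() && !indexed()), the range query is now built with
SortedSetDocValuesField.newRangeQuery instead of the removed DocValuesRangeQuery; null bounds
stay open-ended. A self-contained sketch ("category" is an illustrative field name):

    // Matches docs whose "category" doc value is in [alpha, omega); the query
    // walks doc values at search time rather than the (absent) terms index.
    BytesRef lower = new BytesRef("alpha");
    BytesRef upper = new BytesRef("omega");
    Query q = SortedSetDocValuesField.newRangeQuery("category", lower, upper, true, false);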
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/71ca2a84/solr/core/src/java/org/apache/solr/schema/TrieField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieField.java b/solr/core/src/java/org/apache/solr/schema/TrieField.java
index 0e8324c..57dbeff 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieField.java
@@ -43,7 +43,7 @@ import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
-import org.apache.lucene.search.DocValuesRangeQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedSetSelector;
@@ -376,9 +376,9 @@ public class TrieField extends PrimitiveFieldType {
     switch (type) {
       case INTEGER:
         if (matchOnly) {
-          query = DocValuesRangeQuery.newLongRange(field.getName(),
-                min == null ? null : (long) Integer.parseInt(min),
-                max == null ? null : (long) Integer.parseInt(max),
+          query = numericDocValuesRangeQuery(field.getName(),
+                min == null ? null : Integer.parseInt(min),
+                max == null ? null : Integer.parseInt(max),
                 minInclusive, maxInclusive);
         } else {
           query = LegacyNumericRangeQuery.newIntRange(field.getName(), ps,
@@ -399,7 +399,7 @@ public class TrieField extends PrimitiveFieldType {
         break;
       case LONG:
         if (matchOnly) {
-          query = DocValuesRangeQuery.newLongRange(field.getName(),
+          query = numericDocValuesRangeQuery(field.getName(),
                 min == null ? null : Long.parseLong(min),
                 max == null ? null : Long.parseLong(max),
                 minInclusive, maxInclusive);
@@ -422,7 +422,7 @@ public class TrieField extends PrimitiveFieldType {
         break;
       case DATE:
         if (matchOnly) {
-          query = DocValuesRangeQuery.newLongRange(field.getName(),
+          query = numericDocValuesRangeQuery(field.getName(),
                 min == null ? null : DateMathParser.parseMath(null, min).getTime(),
                 max == null ? null : DateMathParser.parseMath(null, max).getTime(),
                 minInclusive, maxInclusive);
@@ -440,6 +440,35 @@ public class TrieField extends PrimitiveFieldType {
     return query;
   }
 
+  private static Query numericDocValuesRangeQuery(
+      String field,
+      Number lowerValue, Number upperValue,
+      boolean lowerInclusive, boolean upperInclusive) {
+
+    long actualLowerValue = Long.MIN_VALUE;
+    if (lowerValue != null) {
+      actualLowerValue = lowerValue.longValue();
+      if (lowerInclusive == false) {
+        if (actualLowerValue == Long.MAX_VALUE) {
+          return new MatchNoDocsQuery();
+        }
+        ++actualLowerValue;
+      }
+    }
+
+    long actualUpperValue = Long.MAX_VALUE;
+    if (upperValue != null) {
+      actualUpperValue = upperValue.longValue();
+      if (upperInclusive == false) {
+        if (actualUpperValue == Long.MIN_VALUE) {
+          return new MatchNoDocsQuery();
+        }
+        --actualUpperValue;
+      }
+    }
+    return NumericDocValuesField.newRangeQuery(field, actualLowerValue, actualUpperValue);
+  }
+
   private static long FLOAT_NEGATIVE_INFINITY_BITS = (long)Float.floatToIntBits(Float.NEGATIVE_INFINITY);
   private static long DOUBLE_NEGATIVE_INFINITY_BITS = Double.doubleToLongBits(Double.NEGATIVE_INFINITY);
   private static long FLOAT_POSITIVE_INFINITY_BITS = (long)Float.floatToIntBits(Float.POSITIVE_INFINITY);
@@ -476,10 +505,10 @@ public class TrieField extends PrimitiveFieldType {
     } else { // If both max and min are negative (or -0d), then issue range query with max and min reversed
       if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) &&
           (maxVal != null && (maxVal.doubleValue() < 0d || maxBits == minusZeroBits))) {
-        query = DocValuesRangeQuery.newLongRange
+        query = numericDocValuesRangeQuery
             (fieldName, maxBits, (min == null ? negativeInfinityBits : minBits), maxInclusive, minInclusive);
       } else { // If both max and min are positive, then issue range query
-        query = DocValuesRangeQuery.newLongRange
+        query = numericDocValuesRangeQuery
             (fieldName, minBits, (max == null ? positiveInfinityBits : maxBits), minInclusive, maxInclusive);
       }
     }

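The MatchNoDocsQuery guards in the helper above cover the one case where tightening an
exclusive bound would overflow. A worked sketch (the helper is private to TrieField, so the
call below is illustrative only):

    // An exclusive lower bound of Long.MAX_VALUE denotes the empty range --
    // nothing is strictly greater -- and ++ would wrap to Long.MIN_VALUE and
    // silently match everything; the guard returns MatchNoDocsQuery instead.
    Query q = numericDocValuesRangeQuery("f", Long.MAX_VALUE, null, false, true);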

[33/50] [abbrv] lucene-solr:apiv2: SOLR-9984: Remove GenericHadoopAuthPlugin (HadoopAuthPlugin remains)

Posted by no...@apache.org.
SOLR-9984: Remove GenericHadoopAuthPlugin (HadoopAuthPlugin remains)


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/bb35732e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/bb35732e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/bb35732e

Branch: refs/heads/apiv2
Commit: bb35732eef90fc0ba7862d2c123c7e16356d2a0b
Parents: 1a05d6f
Author: Ishan Chattopadhyaya <ic...@gmail.com>
Authored: Thu Jan 19 10:02:13 2017 +0530
Committer: Ishan Chattopadhyaya <ic...@gmail.com>
Committed: Thu Jan 19 10:02:13 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  8 -----
 .../solr/security/GenericHadoopAuthPlugin.java  | 31 --------------------
 2 files changed, 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bb35732e/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 62b8818..aab5116 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -94,12 +94,6 @@ Jetty 9.3.14.v20161028
 Detailed Change List
 ----------------------
 
-Upgrade Notes
-----------------------
-
-* SOLR-9984: GenericHadoopAuthPlugin is deprecated in favor of HadoopAuthPlugin. Simply changing the
-  name of the class in the security configurations should suffice while upgrading.
-
 New Features
 ----------------------
 
@@ -128,8 +122,6 @@ Other Changes
 ----------------------
 * SOLR-9980: Expose configVersion in core admin status (Jessica Cheng Mallet via Tomás Fernández Löbbe)
 
-* SOLR-9984: Deprecate GenericHadoopAuthPlugin in favor of HadoopAuthPlugin (Ishan Chattopadhyaya)
-
 ==================  6.4.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bb35732e/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java b/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java
deleted file mode 100644
index 3d63fd6..0000000
--- a/solr/core/src/java/org/apache/solr/security/GenericHadoopAuthPlugin.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.security;
-
-import org.apache.solr.core.CoreContainer;
-
-/**
- *  * @deprecated Use {@link HadoopAuthPlugin}. For backcompat against Solr 6.4.
- */
-@Deprecated
-public class GenericHadoopAuthPlugin extends HadoopAuthPlugin {
-
-  public GenericHadoopAuthPlugin(CoreContainer coreContainer) {
-    super(coreContainer);
-  }
-
-}
\ No newline at end of file


[05/50] [abbrv] lucene-solr:apiv2: SOLR-9941: Moving changelog entry from 7.0.0 to 6.5.0

Posted by no...@apache.org.
SOLR-9941: Moving changelog entry from 7.0.0 to 6.5.0


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/38af094d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/38af094d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/38af094d

Branch: refs/heads/apiv2
Commit: 38af094d175daebe4093782cc06e964cfc2dd14b
Parents: 205f9cc
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Tue Jan 17 03:12:07 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Tue Jan 17 03:12:07 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/38af094d/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 4874067..5b96c20 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -74,9 +74,6 @@ Optimizations
 * SOLR-9584: Support Solr being proxied with another endpoint than default /solr, by using relative links
   in AdminUI javascripts (Yun Jie Zhou via janhoy)
 
-* SOLR-9941: Clear the deletes lists at UpdateLog before replaying from log. This prevents redundantly pre-applying
-  DBQs, during the log replay, to every update in the log as if the DBQs were out of order. (hossman, Ishan Chattopadhyaya)
-
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
@@ -90,9 +87,14 @@ Apache UIMA 2.3.1
 Apache ZooKeeper 3.4.6
 Jetty 9.3.14.v20161028
 
+Detailed Change List
+----------------------
 
-(No Changes)
+Optimizations
+----------------------
 
+* SOLR-9941: Clear the deletes lists at UpdateLog before replaying from log. This prevents redundantly pre-applying
+  DBQs, during the log replay, to every update in the log as if the DBQs were out of order. (hossman, Ishan Chattopadhyaya)
 
 ==================  6.4.0 ==================
 


[10/50] [abbrv] lucene-solr:apiv2: SOLR-9786: additional test related to TermInSetQuery now requiring all terms in the same field

Posted by no...@apache.org.
SOLR-9786: additional test related to TermInSetQuery now requiring all terms in the same field


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7d7e5d22
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7d7e5d22
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7d7e5d22

Branch: refs/heads/apiv2
Commit: 7d7e5d2246d69843f259b9815332a24dc621d9e7
Parents: 1acd2ee
Author: yonik <yo...@apache.org>
Authored: Tue Jan 17 10:20:02 2017 -0500
Committer: yonik <yo...@apache.org>
Committed: Tue Jan 17 10:20:02 2017 -0500

----------------------------------------------------------------------
 .../org/apache/solr/search/TestSolrQueryParser.java  | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7d7e5d22/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
index 76b441b..20c1907 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
@@ -259,6 +259,21 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
     }
     assertEquals(26, ((TermInSetQuery)qq).getTermData().size());
 
+    // test terms queries of two different fields (LUCENE-7637 changed to require all terms be in the same field)
+    StringBuilder sb = new StringBuilder();
+    for (int i=0; i<17; i++) {
+      char letter = (char)('a'+i);
+      sb.append("foo_s:" + letter + " bar_s:" + letter + " ");
+    }
+    qParser = QParser.getParser(sb.toString(), req);
+    qParser.setIsFilter(true); // this may change in the future
+    q = qParser.getQuery();
+    assertEquals(2, ((BooleanQuery)q).clauses().size());
+    for (BooleanClause clause : ((BooleanQuery)q).clauses()) {
+      qq = clause.getQuery();
+      assertEquals(17, ((TermInSetQuery)qq).getTermData().size());
+    }
+
     req.close();
   }
 

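Since LUCENE-7637, a TermInSetQuery carries terms for exactly one field, so mixed-field terms
filters now rewrite to one TermInSetQuery per field under a BooleanQuery, which is the shape
the new test asserts. A sketch of that shape (assuming TermInSetQuery's varargs constructor):

    // Two fields -> two TermInSetQuery clauses, OR'ed together.
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(new TermInSetQuery("foo_s", new BytesRef("a"), new BytesRef("b")),
           BooleanClause.Occur.SHOULD);
    bq.add(new TermInSetQuery("bar_s", new BytesRef("a"), new BytesRef("b")),
           BooleanClause.Occur.SHOULD);
    Query q = bq.build();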

[23/50] [abbrv] lucene-solr:apiv2: LUCENE-7644: FieldComparatorSource.newComparator() doesn't need to throw IOException

Posted by no...@apache.org.
LUCENE-7644: FieldComparatorSource.newComparator() doesn't need to throw IOException

This allows us to also remove the throws clause on SortField.getComparator(),
TopDocs.merge() and various Collector constructors

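With the throws clause gone, custom sort factories no longer need to propagate or swallow
IOException. A minimal sketch of a source written against the new signature (class name
hypothetical):

    // newComparator() no longer declares IOException after LUCENE-7644.
    public class LongFieldComparatorSource extends FieldComparatorSource {
      @Override
      public FieldComparator<?> newComparator(String fieldname, int numHits,
                                              int sortPos, boolean reversed) {
        return new FieldComparator.LongComparator(numHits, fieldname, 0L);
      }
    }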

Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8c2ef3bc
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8c2ef3bc
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8c2ef3bc

Branch: refs/heads/apiv2
Commit: 8c2ef3bc7fbebe8105c2646c81489aa9393ad402
Parents: 68d246d
Author: Alan Woodward <ro...@apache.org>
Authored: Wed Jan 18 15:16:06 2017 +0000
Committer: Alan Woodward <ro...@apache.org>
Committed: Wed Jan 18 19:17:19 2017 +0000

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   3 +
 .../lucene/search/DoubleValuesSource.java       |   2 +-
 .../lucene/search/FieldComparatorSource.java    |   8 +-
 .../lucene/search/FieldValueHitQueue.java       |  11 +-
 .../apache/lucene/search/LongValuesSource.java  |   2 +-
 .../org/apache/lucene/search/SortField.java     |   2 +-
 .../lucene/search/SortedNumericSortField.java   |   2 +-
 .../lucene/search/SortedSetSortField.java       |   2 +-
 .../java/org/apache/lucene/search/TopDocs.java  |  14 +-
 .../apache/lucene/search/TopFieldCollector.java |   4 +-
 .../lucene/search/TestElevationComparator.java  |  22 ++-
 .../search/grouping/BlockGroupingCollector.java |   2 +-
 .../grouping/FirstPassGroupingCollector.java    |   3 +-
 .../lucene/search/grouping/SearchGroup.java     |   8 +-
 .../lucene/search/grouping/TopGroups.java       |   5 +-
 .../search/join/ToParentBlockJoinCollector.java |   2 +-
 .../search/join/ToParentBlockJoinSortField.java |   2 +-
 .../lucene/queries/function/ValueSource.java    |   2 +-
 .../lucene/document/LatLonPointSortField.java   |   6 +-
 .../spatial3d/Geo3DPointOutsideSortField.java   |   5 +-
 .../lucene/spatial3d/Geo3DPointSortField.java   |   5 +-
 .../component/QueryElevationComponent.java      |  60 ++++---
 .../component/ShardFieldSortedHitQueue.java     |   8 +-
 .../solr/search/CollapsingQParserPlugin.java    |   2 +-
 .../SearchGroupShardResponseProcessor.java      | 160 +++++++++----------
 .../TopGroupsShardResponseProcessor.java        | 114 +++++++------
 .../apache/solr/schema/SortableBinaryField.java |   3 +-
 27 files changed, 214 insertions(+), 245 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index cee0335..9d1cbb7 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -71,6 +71,9 @@ API Changes
 * LUCENE-7637: TermInSetQuery requires that all terms come from the same field.
   (Adrien Grand)
 
+* LUCENE-7644: FieldComparatorSource.newComparator() and
+  SortField.getComparator() no longer throw IOException (Alan Woodward)
+
 New Features
 
 * LUCENE-7623: Add FunctionScoreQuery and FunctionMatchQuery (Alan Woodward,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java b/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
index af24e1a..c22a3c3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
@@ -343,7 +343,7 @@ public abstract class DoubleValuesSource {
 
     @Override
     public FieldComparator<Double> newComparator(String fieldname, int numHits,
-                                               int sortPos, boolean reversed) throws IOException {
+                                               int sortPos, boolean reversed) {
       return new FieldComparator.DoubleComparator(numHits, fieldname, 0.0){
 
         LeafReaderContext ctx;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java b/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java
index 295ec9c..e7db0ba 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java
@@ -17,8 +17,6 @@
 package org.apache.lucene.search;
 
 
-import java.io.IOException;
-
 /**
  * Provides a {@link FieldComparator} for custom field sorting.
  *
@@ -33,9 +31,7 @@ public abstract class FieldComparatorSource {
    * @param fieldname
    *          Name of the field to create comparator for.
    * @return FieldComparator.
-   * @throws IOException
-   *           If an error occurs reading the index.
    */
-  public abstract FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed)
-      throws IOException;
+  public abstract FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed);
+
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
index c53774c..bd1967b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
@@ -58,8 +58,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
     private final int oneReverseMul;
     private final FieldComparator<?> oneComparator;
     
-    public OneComparatorFieldValueHitQueue(SortField[] fields, int size)
-        throws IOException {
+    public OneComparatorFieldValueHitQueue(SortField[] fields, int size) {
       super(fields, size);
 
       assert fields.length == 1;
@@ -96,8 +95,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
    */
   private static final class MultiComparatorsFieldValueHitQueue<T extends FieldValueHitQueue.Entry> extends FieldValueHitQueue<T> {
 
-    public MultiComparatorsFieldValueHitQueue(SortField[] fields, int size)
-        throws IOException {
+    public MultiComparatorsFieldValueHitQueue(SortField[] fields, int size) {
       super(fields, size);
     }
   
@@ -123,7 +121,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
   }
   
   // prevent instantiation and extension.
-  private FieldValueHitQueue(SortField[] fields, int size) throws IOException {
+  private FieldValueHitQueue(SortField[] fields, int size) {
     super(size);
     // When we get here, fields.length is guaranteed to be > 0, therefore no
     // need to check it again.
@@ -154,9 +152,8 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
    *          priority first); cannot be <code>null</code> or empty
    * @param size
    *          The number of hits to retain. Must be greater than zero.
-   * @throws IOException if there is a low-level IO error
    */
-  public static <T extends FieldValueHitQueue.Entry> FieldValueHitQueue<T> create(SortField[] fields, int size) throws IOException {
+  public static <T extends FieldValueHitQueue.Entry> FieldValueHitQueue<T> create(SortField[] fields, int size) {
 
     if (fields.length == 0) {
       throw new IllegalArgumentException("Sort must contain at least one field");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java b/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java
index 524822c..9a23cab 100644
--- a/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java
+++ b/lucene/core/src/java/org/apache/lucene/search/LongValuesSource.java
@@ -172,7 +172,7 @@ public abstract class LongValuesSource {
 
     @Override
     public FieldComparator<Long> newComparator(String fieldname, int numHits,
-                                                 int sortPos, boolean reversed) throws IOException {
+                                                 int sortPos, boolean reversed) {
       return new FieldComparator.LongComparator(numHits, fieldname, 0L){
 
         LeafReaderContext ctx;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/SortField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortField.java b/lucene/core/src/java/org/apache/lucene/search/SortField.java
index 412a50a..2cfae46 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortField.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortField.java
@@ -335,7 +335,7 @@ public class SortField {
    *   optimize themselves when they are the primary sort.
    * @return {@link FieldComparator} to use when sorting
    */
-  public FieldComparator<?> getComparator(final int numHits, final int sortPos) throws IOException {
+  public FieldComparator<?> getComparator(final int numHits, final int sortPos) {
 
     switch (type) {
     case SCORE:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java b/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java
index 5b1492d..fff000b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortedNumericSortField.java
@@ -136,7 +136,7 @@ public class SortedNumericSortField extends SortField {
   }
   
   @Override
-  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
+  public FieldComparator<?> getComparator(int numHits, int sortPos) {
     switch(type) {
       case INT:
         return new FieldComparator.IntComparator(numHits, getField(), (Integer) missingValue) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java b/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java
index da2546f..b095c6e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortedSetSortField.java
@@ -118,7 +118,7 @@ public class SortedSetSortField extends SortField {
   }
   
   @Override
-  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
+  public FieldComparator<?> getComparator(int numHits, int sortPos) {
     return new FieldComparator.TermOrdValComparator(numHits, getField(), missingValue == STRING_LAST) {
       @Override
       protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) throws IOException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/TopDocs.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/TopDocs.java b/lucene/core/src/java/org/apache/lucene/search/TopDocs.java
index 69fa3c6..c1f825e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TopDocs.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TopDocs.java
@@ -19,8 +19,6 @@ package org.apache.lucene.search;
 
 import org.apache.lucene.util.PriorityQueue;
 
-import java.io.IOException;
-
 /** Represents hits returned by {@link
  * IndexSearcher#search(Query,int)}. */
 public class TopDocs {
@@ -123,7 +121,7 @@ public class TopDocs {
     final FieldComparator<?>[] comparators;
     final int[] reverseMul;
 
-    public MergeSortQueue(Sort sort, TopDocs[] shardHits) throws IOException {
+    public MergeSortQueue(Sort sort, TopDocs[] shardHits) {
       super(shardHits.length);
       this.shardHits = new ScoreDoc[shardHits.length][];
       for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {
@@ -196,7 +194,7 @@ public class TopDocs {
    *  the provided TopDocs, sorting by score. Each {@link TopDocs}
    *  instance must be sorted.
    *  @lucene.experimental */
-  public static TopDocs merge(int topN, TopDocs[] shardHits) throws IOException {
+  public static TopDocs merge(int topN, TopDocs[] shardHits) {
     return merge(0, topN, shardHits);
   }
 
@@ -205,7 +203,7 @@ public class TopDocs {
    * {@code start} top docs. This is typically useful for pagination.
    * @lucene.experimental
    */
-  public static TopDocs merge(int start, int topN, TopDocs[] shardHits) throws IOException {
+  public static TopDocs merge(int start, int topN, TopDocs[] shardHits) {
     return mergeAux(null, start, topN, shardHits);
   }
 
@@ -216,7 +214,7 @@ public class TopDocs {
    *  filled (ie, <code>fillFields=true</code> must be
    *  passed to {@link TopFieldCollector#create}).
    * @lucene.experimental */
-  public static TopFieldDocs merge(Sort sort, int topN, TopFieldDocs[] shardHits) throws IOException {
+  public static TopFieldDocs merge(Sort sort, int topN, TopFieldDocs[] shardHits) {
     return merge(sort, 0, topN, shardHits);
   }
 
@@ -225,7 +223,7 @@ public class TopDocs {
    * {@code start} top docs. This is typically useful for pagination.
    * @lucene.experimental
    */
-  public static TopFieldDocs merge(Sort sort, int start, int topN, TopFieldDocs[] shardHits) throws IOException {
+  public static TopFieldDocs merge(Sort sort, int start, int topN, TopFieldDocs[] shardHits) {
     if (sort == null) {
       throw new IllegalArgumentException("sort must be non-null when merging field-docs");
     }
@@ -234,7 +232,7 @@ public class TopDocs {
 
   /** Auxiliary method used by the {@link #merge} impls. A sort value of null
    *  is used to indicate that docs should be sorted by score. */
-  private static TopDocs mergeAux(Sort sort, int start, int size, TopDocs[] shardHits) throws IOException {
+  private static TopDocs mergeAux(Sort sort, int start, int size, TopDocs[] shardHits) {
     final PriorityQueue<ShardRef> queue;
     if (sort == null) {
       queue = new ScoreMergeSortQueue(shardHits);
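
The same change ripples into the static TopDocs.merge entry points, so shard-merging call sites need no exception handling. A small sketch, with an arbitrary topN:

    import org.apache.lucene.search.TopDocs;

    final class ShardMergeExample {
      // Merge the top 10 hits across shards by score; before this change
      // the caller had to catch an IOException that was never thrown.
      static TopDocs topTen(TopDocs[] shardHits) {
        return TopDocs.merge(10, shardHits);
      }
    }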

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java b/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
index c7274d5..3433906 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TopFieldCollector.java
@@ -475,11 +475,9 @@ public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
    *          <code>trackDocScores</code> to true as well.
    * @return a {@link TopFieldCollector} instance which will sort the results by
    *         the sort criteria.
-   * @throws IOException if there is a low-level I/O error
    */
   public static TopFieldCollector create(Sort sort, int numHits, FieldDoc after,
-      boolean fillFields, boolean trackDocScores, boolean trackMaxScore)
-      throws IOException {
+      boolean fillFields, boolean trackDocScores, boolean trackMaxScore) {
 
     if (sort.fields.length == 0) {
       throw new IllegalArgumentException("Sort must contain at least one field");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
index 9ca2302..fb01e1d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
@@ -17,20 +17,26 @@
 package org.apache.lucene.search;
 
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.index.*;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.FieldValueHitQueue.Entry;
 import org.apache.lucene.search.similarities.ClassicSimilarity;
-import org.apache.lucene.store.*;
-import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
+import org.apache.lucene.util.LuceneTestCase;
 
 public class TestElevationComparator extends LuceneTestCase {
 
@@ -144,7 +150,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
   }
 
   @Override
-  public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
+  public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) {
    return new FieldComparator<Integer>() {
 
      private final int[] values = new int[numHits];

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
index 8d1781e..c965042 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
@@ -216,7 +216,7 @@ public class BlockGroupingCollector extends SimpleCollector {
    *  @param lastDocPerGroup a {@link Weight} that marks the
    *    last document in each group.
    */
-  public BlockGroupingCollector(Sort groupSort, int topNGroups, boolean needsScores, Weight lastDocPerGroup) throws IOException {
+  public BlockGroupingCollector(Sort groupSort, int topNGroups, boolean needsScores, Weight lastDocPerGroup) {
 
     if (topNGroups < 1) {
       throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java
index ef47f96..02bb1a2 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java
@@ -67,10 +67,9 @@ abstract public class FirstPassGroupingCollector<T> extends SimpleCollector {
    *    ie, if you want to groupSort by relevance use
    *    Sort.RELEVANCE.
    *  @param topNGroups How many top groups to keep.
-   *  @throws IOException If I/O related errors occur
    */
   @SuppressWarnings({"unchecked", "rawtypes"})
-  public FirstPassGroupingCollector(Sort groupSort, int topNGroups) throws IOException {
+  public FirstPassGroupingCollector(Sort groupSort, int topNGroups) {
     if (topNGroups < 1) {
       throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")");
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
index 95a507c..58e1f74 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.search.grouping;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -167,7 +166,7 @@ public class SearchGroup<T> {
     public final int[] reversed;
 
     @SuppressWarnings({"unchecked", "rawtypes"})
-    public GroupComparator(Sort groupSort) throws IOException {
+    public GroupComparator(Sort groupSort) {
       final SortField[] sortFields = groupSort.getSort();
       comparators = new FieldComparator[sortFields.length];
       reversed = new int[sortFields.length];
@@ -208,7 +207,7 @@ public class SearchGroup<T> {
     private final NavigableSet<MergedGroup<T>> queue;
     private final Map<T,MergedGroup<T>> groupsSeen;
 
-    public GroupMerger(Sort groupSort) throws IOException {
+    public GroupMerger(Sort groupSort) {
       groupComp = new GroupComparator<>(groupSort);
       queue = new TreeSet<>(groupComp);
       groupsSeen = new HashMap<>();
@@ -340,8 +339,7 @@ public class SearchGroup<T> {
    *
    * <p>NOTE: this returns null if the topGroups is empty.
    */
-  public static <T> Collection<SearchGroup<T>> merge(List<Collection<SearchGroup<T>>> topGroups, int offset, int topN, Sort groupSort)
-    throws IOException {
+  public static <T> Collection<SearchGroup<T>> merge(List<Collection<SearchGroup<T>>> topGroups, int offset, int topN, Sort groupSort) {
     if (topGroups.isEmpty()) {
       return null;
     } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
index 803482b..36ab8d9 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
@@ -16,8 +16,6 @@
  */
 package org.apache.lucene.search.grouping;
 
-import java.io.IOException;
-
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
@@ -97,8 +95,7 @@ public class TopGroups<T> {
    * <b>NOTE</b>: the topDocs in each GroupDocs is actually
    * an instance of TopDocsAndShards
    */
-  public static <T> TopGroups<T> merge(TopGroups<T>[] shardGroups, Sort groupSort, Sort docSort, int docOffset, int docTopN, ScoreMergeMode scoreMergeMode)
-    throws IOException {
+  public static <T> TopGroups<T> merge(TopGroups<T>[] shardGroups, Sort groupSort, Sort docSort, int docOffset, int docTopN, ScoreMergeMode scoreMergeMode) {
 
     //System.out.println("TopGroups.merge");
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
index 70e1549..f81b943 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
@@ -116,7 +116,7 @@ public class ToParentBlockJoinCollector implements Collector {
    *  not be null.  If you pass true trackScores, all
    *  ToParentBlockQuery instances must not use
    *  ScoreMode.None. */
-  public ToParentBlockJoinCollector(Sort sort, int numParentHits, boolean trackScores, boolean trackMaxScore) throws IOException {
+  public ToParentBlockJoinCollector(Sort sort, int numParentHits, boolean trackScores, boolean trackMaxScore) {
     // TODO: allow null sort to be specialized to relevance
     // only collector
     this.sort = sort;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinSortField.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinSortField.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinSortField.java
index 1b82c0c..c757086 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinSortField.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinSortField.java
@@ -87,7 +87,7 @@ public class ToParentBlockJoinSortField extends SortField {
   }
 
   @Override
-  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
+  public FieldComparator<?> getComparator(int numHits, int sortPos) {
     switch (getType()) {
       case STRING:
         return getStringComparator(numHits);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
index 5bf6324..6bf2926 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
@@ -229,7 +229,7 @@ public abstract class ValueSource {
 
     @Override
     public FieldComparator<Double> newComparator(String fieldname, int numHits,
-                                         int sortPos, boolean reversed) throws IOException {
+                                         int sortPos, boolean reversed) {
       return new ValueSourceComparator(context, numHits);
     }
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/sandbox/src/java/org/apache/lucene/document/LatLonPointSortField.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/document/LatLonPointSortField.java b/lucene/sandbox/src/java/org/apache/lucene/document/LatLonPointSortField.java
index c886438..10e72cc 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/document/LatLonPointSortField.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/document/LatLonPointSortField.java
@@ -16,11 +16,9 @@
  */
 package org.apache.lucene.document;
 
-import java.io.IOException;
-
+import org.apache.lucene.geo.GeoUtils;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.SortField;
-import org.apache.lucene.geo.GeoUtils;
 
 /**
  * Sorts by distance from an origin location.
@@ -42,7 +40,7 @@ final class LatLonPointSortField extends SortField {
   }
   
   @Override
-  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
+  public FieldComparator<?> getComparator(int numHits, int sortPos) {
     return new LatLonPointDistanceComparator(getField(), latitude, longitude, numHits);
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java
index b48984c..3f37b22 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointOutsideSortField.java
@@ -16,11 +16,8 @@
  */
 package org.apache.lucene.spatial3d;
 
-import java.io.IOException;
-
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.SortField;
-
 import org.apache.lucene.spatial3d.geom.GeoOutsideDistance;
 
 /**
@@ -42,7 +39,7 @@ final class Geo3DPointOutsideSortField extends SortField {
   }
   
   @Override
-  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
+  public FieldComparator<?> getComparator(int numHits, int sortPos) {
     return new Geo3DPointOutsideDistanceComparator(getField(), distanceShape, numHits);
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java
index 4d6b417..bf1de77 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/Geo3DPointSortField.java
@@ -16,11 +16,8 @@
  */
 package org.apache.lucene.spatial3d;
 
-import java.io.IOException;
-
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.SortField;
-
 import org.apache.lucene.spatial3d.geom.GeoDistanceShape;
 
 /**
@@ -42,7 +39,7 @@ final class Geo3DPointSortField extends SortField {
   }
   
   @Override
-  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
+  public FieldComparator<?> getComparator(int numHits, int sortPos) {
     return new Geo3DPointDistanceComparator(getField(), distanceShape, numHits);
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
index 25157cf..8482d65 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@@ -16,14 +16,35 @@
  */
 package org.apache.solr.handler.component;
 
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpressionException;
+import javax.xml.xpath.XPathFactory;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.invoke.MethodHandles;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.WeakHashMap;
+
+import com.carrotsearch.hppc.IntIntHashMap;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -46,22 +67,22 @@ import org.apache.solr.cloud.ZkController;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.QueryElevationParams;
 import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.StrUtils;
-import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.search.QueryParsing;
-import org.apache.solr.search.grouping.GroupingSpecification;
-import org.apache.solr.util.DOMUtil;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.core.Config;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.transform.ElevatedMarkerFactory;
 import org.apache.solr.response.transform.ExcludedMarkerFactory;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.QueryParsing;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.search.SortSpec;
+import org.apache.solr.search.grouping.GroupingSpecification;
+import org.apache.solr.util.DOMUtil;
 import org.apache.solr.util.RefCounted;
 import org.apache.solr.util.VersionedFile;
 import org.apache.solr.util.plugin.SolrCoreAware;
@@ -71,29 +92,6 @@ import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
 import org.xml.sax.InputSource;
 
-import com.carrotsearch.hppc.IntIntHashMap;
-
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-import javax.xml.xpath.XPathFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.invoke.MethodHandles;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.WeakHashMap;
-
 /**
  * A component to elevate some documents to the top of the result set.
  *
@@ -628,7 +626,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
   }
 
   @Override
-  public FieldComparator<Integer> newComparator(String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
+  public FieldComparator<Integer> newComparator(String fieldname, final int numHits, int sortPos, boolean reversed) {
     return new SimpleFieldComparator<Integer>() {
       private final int[] values = new int[numHits];
       private int bottomVal;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java b/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java
index 81aaf66..ef0e624 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ShardFieldSortedHitQueue.java
@@ -149,13 +149,7 @@ public class ShardFieldSortedHitQueue extends PriorityQueue<ShardDoc> {
   }
 
   Comparator<ShardDoc> comparatorFieldComparator(SortField sortField) {
-    final FieldComparator fieldComparator;
-    try {
-      fieldComparator = sortField.getComparator(0, 0);
-    } catch (IOException e) {
-      throw new RuntimeException("Unable to get FieldComparator for sortField " + sortField);
-    }
-
+    final FieldComparator fieldComparator = sortField.getComparator(0, 0);
     return new ShardComparator(sortField) {
       // Since the PriorityQueue keeps the biggest elements by default,
       // we need to reverse the field compare ordering so that the

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
index 44aade5..65d470e 100644
--- a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java
@@ -2652,7 +2652,7 @@ public class CollapsingQParserPlugin extends QParserPlugin {
     * Constructs an instance based on the (raw, un-rewritten) SortFields to be used, 
      * and an initial number of expected groups (will grow as needed).
      */
-    public SortFieldsCompare(SortField[] sorts, int initNumGroups) throws IOException {
+    public SortFieldsCompare(SortField[] sorts, int initNumGroups) {
       this.sorts = sorts;
       numClauses = sorts.length;
       fieldComparators = new FieldComparator[numClauses];

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java
index 0acd6f9..1645b1e 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java
@@ -16,11 +16,20 @@
  */
 package org.apache.solr.search.grouping.distributed.responseprocessor;
 
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
@@ -33,11 +42,6 @@ import org.apache.solr.search.grouping.distributed.ShardResponseProcessor;
 import org.apache.solr.search.grouping.distributed.command.SearchGroupsFieldCommandResult;
 import org.apache.solr.search.grouping.distributed.shardresultserializer.SearchGroupsResultTransformer;
 
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.*;
-
 /**
  * Concrete implementation for merging {@link SearchGroup} instances from shard responses.
  */
@@ -65,94 +69,90 @@ public class SearchGroupShardResponseProcessor implements ShardResponseProcessor
     }
 
     SearchGroupsResultTransformer serializer = new SearchGroupsResultTransformer(rb.req.getSearcher());
-    try {
-      int maxElapsedTime = 0;
-      int hitCountDuringFirstPhase = 0;
+    int maxElapsedTime = 0;
+    int hitCountDuringFirstPhase = 0;
 
-      NamedList<Object> shardInfo = null;
-      if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
-        shardInfo = new SimpleOrderedMap<>(shardRequest.responses.size());
-        rb.rsp.getValues().add(ShardParams.SHARDS_INFO + ".firstPhase", shardInfo);
-      }
+    NamedList<Object> shardInfo = null;
+    if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
+      shardInfo = new SimpleOrderedMap<>(shardRequest.responses.size());
+      rb.rsp.getValues().add(ShardParams.SHARDS_INFO + ".firstPhase", shardInfo);
+    }
 
-      for (ShardResponse srsp : shardRequest.responses) {
-        if (shardInfo != null) {
-          SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>(4);
+    for (ShardResponse srsp : shardRequest.responses) {
+      if (shardInfo != null) {
+        SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>(4);
 
-          if (srsp.getException() != null) {
-            Throwable t = srsp.getException();
-            if (t instanceof SolrServerException) {
-              t = ((SolrServerException) t).getCause();
-            }
-            nl.add("error", t.toString());
-            StringWriter trace = new StringWriter();
-            t.printStackTrace(new PrintWriter(trace));
-            nl.add("trace", trace.toString());
-          } else {
-            nl.add("numFound", (Integer) srsp.getSolrResponse().getResponse().get("totalHitCount"));
-          }
-          if (srsp.getSolrResponse() != null) {
-            nl.add("time", srsp.getSolrResponse().getElapsedTime());
-          }
-          if (srsp.getShardAddress() != null) {
-            nl.add("shardAddress", srsp.getShardAddress());
+        if (srsp.getException() != null) {
+          Throwable t = srsp.getException();
+          if (t instanceof SolrServerException) {
+            t = ((SolrServerException) t).getCause();
           }
-          shardInfo.add(srsp.getShard(), nl);
+          nl.add("error", t.toString());
+          StringWriter trace = new StringWriter();
+          t.printStackTrace(new PrintWriter(trace));
+          nl.add("trace", trace.toString());
+        } else {
+          nl.add("numFound", (Integer) srsp.getSolrResponse().getResponse().get("totalHitCount"));
         }
-        if (rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false) && srsp.getException() != null) {
-          if(rb.rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY) == null) {
-            rb.rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
-          }
-          continue; // continue if there was an error and we're tolerant.  
+        if (srsp.getSolrResponse() != null) {
+          nl.add("time", srsp.getSolrResponse().getElapsedTime());
+        }
+        if (srsp.getShardAddress() != null) {
+          nl.add("shardAddress", srsp.getShardAddress());
         }
-        maxElapsedTime = (int) Math.max(maxElapsedTime, srsp.getSolrResponse().getElapsedTime());
-        @SuppressWarnings("unchecked")
-        NamedList<NamedList> firstPhaseResult = (NamedList<NamedList>) srsp.getSolrResponse().getResponse().get("firstPhase");
-        final Map<String, SearchGroupsFieldCommandResult> result = serializer.transformToNative(firstPhaseResult, groupSort, sortWithinGroup, srsp.getShard());
-        for (String field : commandSearchGroups.keySet()) {
-          final SearchGroupsFieldCommandResult firstPhaseCommandResult = result.get(field);
+        shardInfo.add(srsp.getShard(), nl);
+      }
+      if (rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false) && srsp.getException() != null) {
+        if(rb.rsp.getResponseHeader().get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY) == null) {
+          rb.rsp.getResponseHeader().add(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
+        }
+        continue; // continue if there was an error and we're tolerant.
+      }
+      maxElapsedTime = (int) Math.max(maxElapsedTime, srsp.getSolrResponse().getElapsedTime());
+      @SuppressWarnings("unchecked")
+      NamedList<NamedList> firstPhaseResult = (NamedList<NamedList>) srsp.getSolrResponse().getResponse().get("firstPhase");
+      final Map<String, SearchGroupsFieldCommandResult> result = serializer.transformToNative(firstPhaseResult, groupSort, sortWithinGroup, srsp.getShard());
+      for (String field : commandSearchGroups.keySet()) {
+        final SearchGroupsFieldCommandResult firstPhaseCommandResult = result.get(field);
 
-          final Integer groupCount = firstPhaseCommandResult.getGroupCount();
-          if (groupCount != null) {
-            Integer existingGroupCount = rb.mergedGroupCounts.get(field);
-            // Assuming groups don't cross shard boundary...
-            rb.mergedGroupCounts.put(field, existingGroupCount != null ? existingGroupCount + groupCount : groupCount);
-          }
+        final Integer groupCount = firstPhaseCommandResult.getGroupCount();
+        if (groupCount != null) {
+          Integer existingGroupCount = rb.mergedGroupCounts.get(field);
+          // Assuming groups don't cross shard boundary...
+          rb.mergedGroupCounts.put(field, existingGroupCount != null ? existingGroupCount + groupCount : groupCount);
+        }
 
-          final Collection<SearchGroup<BytesRef>> searchGroups = firstPhaseCommandResult.getSearchGroups();
-          if (searchGroups == null) {
-            continue;
-          }
+        final Collection<SearchGroup<BytesRef>> searchGroups = firstPhaseCommandResult.getSearchGroups();
+        if (searchGroups == null) {
+          continue;
+        }
 
-          commandSearchGroups.get(field).add(searchGroups);
-          for (SearchGroup<BytesRef> searchGroup : searchGroups) {
-            Map<SearchGroup<BytesRef>, java.util.Set<String>> map = tempSearchGroupToShards.get(field);
-            Set<String> shards = map.get(searchGroup);
-            if (shards == null) {
-              shards = new HashSet<>();
-              map.put(searchGroup, shards);
-            }
-            shards.add(srsp.getShard());
+        commandSearchGroups.get(field).add(searchGroups);
+        for (SearchGroup<BytesRef> searchGroup : searchGroups) {
+          Map<SearchGroup<BytesRef>, Set<String>> map = tempSearchGroupToShards.get(field);
+          Set<String> shards = map.get(searchGroup);
+          if (shards == null) {
+            shards = new HashSet<>();
+            map.put(searchGroup, shards);
           }
+          shards.add(srsp.getShard());
         }
-        hitCountDuringFirstPhase += (Integer) srsp.getSolrResponse().getResponse().get("totalHitCount");
       }
-      rb.totalHitCount = hitCountDuringFirstPhase;
-      rb.firstPhaseElapsedTime = maxElapsedTime;
-      for (String groupField : commandSearchGroups.keySet()) {
-        List<Collection<SearchGroup<BytesRef>>> topGroups = commandSearchGroups.get(groupField);
-        Collection<SearchGroup<BytesRef>> mergedTopGroups = SearchGroup.merge(topGroups, ss.getOffset(), ss.getCount(), groupSort);
-        if (mergedTopGroups == null) {
-          continue;
-        }
+      hitCountDuringFirstPhase += (Integer) srsp.getSolrResponse().getResponse().get("totalHitCount");
+    }
+    rb.totalHitCount = hitCountDuringFirstPhase;
+    rb.firstPhaseElapsedTime = maxElapsedTime;
+    for (String groupField : commandSearchGroups.keySet()) {
+      List<Collection<SearchGroup<BytesRef>>> topGroups = commandSearchGroups.get(groupField);
+      Collection<SearchGroup<BytesRef>> mergedTopGroups = SearchGroup.merge(topGroups, ss.getOffset(), ss.getCount(), groupSort);
+      if (mergedTopGroups == null) {
+        continue;
+      }
 
-        rb.mergedSearchGroups.put(groupField, mergedTopGroups);
-        for (SearchGroup<BytesRef> mergedTopGroup : mergedTopGroups) {
-          rb.searchGroupToShards.get(groupField).put(mergedTopGroup, tempSearchGroupToShards.get(groupField).get(mergedTopGroup));
-        }
+      rb.mergedSearchGroups.put(groupField, mergedTopGroups);
+      for (SearchGroup<BytesRef> mergedTopGroup : mergedTopGroups) {
+        rb.searchGroupToShards.get(groupField).put(mergedTopGroup, tempSearchGroupToShards.get(groupField).get(mergedTopGroup));
       }
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java
index 7e38e5d..2ac83c6 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java
@@ -16,6 +16,13 @@
  */
 package org.apache.solr.search.grouping.distributed.responseprocessor;
 
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
@@ -24,7 +31,6 @@ import org.apache.lucene.search.grouping.GroupDocs;
 import org.apache.lucene.search.grouping.TopGroups;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
@@ -38,14 +44,6 @@ import org.apache.solr.search.grouping.distributed.ShardResponseProcessor;
 import org.apache.solr.search.grouping.distributed.command.QueryCommandResult;
 import org.apache.solr.search.grouping.distributed.shardresultserializer.TopGroupsResultTransformer;
 
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 /**
  * Concrete implementation for merging {@link TopGroups} instances from shard responses.
  */
@@ -152,68 +150,64 @@ public class TopGroupsShardResponseProcessor implements ShardResponseProcessor {
         individualShardInfo.add("maxScore", maxScore);
       }
     }
-    try {
-      for (String groupField : commandTopGroups.keySet()) {
-        List<TopGroups<BytesRef>> topGroups = commandTopGroups.get(groupField);
-        if (topGroups.isEmpty()) {
-          continue;
-        }
+    for (String groupField : commandTopGroups.keySet()) {
+      List<TopGroups<BytesRef>> topGroups = commandTopGroups.get(groupField);
+      if (topGroups.isEmpty()) {
+        continue;
+      }
 
-        TopGroups<BytesRef>[] topGroupsArr = new TopGroups[topGroups.size()];
-        int docsPerGroup = docsPerGroupDefault;
-        if (docsPerGroup < 0) {
-          docsPerGroup = 0;
-          for (TopGroups subTopGroups : topGroups) {
-            docsPerGroup += subTopGroups.totalGroupedHitCount;
-          }
+      TopGroups<BytesRef>[] topGroupsArr = new TopGroups[topGroups.size()];
+      int docsPerGroup = docsPerGroupDefault;
+      if (docsPerGroup < 0) {
+        docsPerGroup = 0;
+        for (TopGroups subTopGroups : topGroups) {
+          docsPerGroup += subTopGroups.totalGroupedHitCount;
         }
-        rb.mergedTopGroups.put(groupField, TopGroups.merge(topGroups.toArray(topGroupsArr), groupSort, sortWithinGroup, groupOffsetDefault, docsPerGroup, TopGroups.ScoreMergeMode.None));
       }
+      rb.mergedTopGroups.put(groupField, TopGroups.merge(topGroups.toArray(topGroupsArr), groupSort, sortWithinGroup, groupOffsetDefault, docsPerGroup, TopGroups.ScoreMergeMode.None));
+    }
 
-      for (String query : commandTopDocs.keySet()) {
-        List<QueryCommandResult> queryCommandResults = commandTopDocs.get(query);
-        List<TopDocs> topDocs = new ArrayList<>(queryCommandResults.size());
-        int mergedMatches = 0;
-        for (QueryCommandResult queryCommandResult : queryCommandResults) {
-          topDocs.add(queryCommandResult.getTopDocs());
-          mergedMatches += queryCommandResult.getMatches();
-        }
+    for (String query : commandTopDocs.keySet()) {
+      List<QueryCommandResult> queryCommandResults = commandTopDocs.get(query);
+      List<TopDocs> topDocs = new ArrayList<>(queryCommandResults.size());
+      int mergedMatches = 0;
+      for (QueryCommandResult queryCommandResult : queryCommandResults) {
+        topDocs.add(queryCommandResult.getTopDocs());
+        mergedMatches += queryCommandResult.getMatches();
+      }
 
-        int topN = rb.getGroupingSpec().getOffset() + rb.getGroupingSpec().getLimit();
-        final TopDocs mergedTopDocs;
-        if (sortWithinGroup.equals(Sort.RELEVANCE)) {
-          mergedTopDocs = TopDocs.merge(topN, topDocs.toArray(new TopDocs[topDocs.size()]));
-        } else {
-          mergedTopDocs = TopDocs.merge(sortWithinGroup, topN, topDocs.toArray(new TopFieldDocs[topDocs.size()]));
-        }
-        rb.mergedQueryCommandResults.put(query, new QueryCommandResult(mergedTopDocs, mergedMatches));
+      int topN = rb.getGroupingSpec().getOffset() + rb.getGroupingSpec().getLimit();
+      final TopDocs mergedTopDocs;
+      if (sortWithinGroup.equals(Sort.RELEVANCE)) {
+        mergedTopDocs = TopDocs.merge(topN, topDocs.toArray(new TopDocs[topDocs.size()]));
+      } else {
+        mergedTopDocs = TopDocs.merge(sortWithinGroup, topN, topDocs.toArray(new TopFieldDocs[topDocs.size()]));
       }
+      rb.mergedQueryCommandResults.put(query, new QueryCommandResult(mergedTopDocs, mergedMatches));
+    }
 
-      Map<Object, ShardDoc> resultIds = new HashMap<>();
-      int i = 0;
-      for (TopGroups<BytesRef> topGroups : rb.mergedTopGroups.values()) {
-        for (GroupDocs<BytesRef> group : topGroups.groups) {
-          for (ScoreDoc scoreDoc : group.scoreDocs) {
-            ShardDoc solrDoc = (ShardDoc) scoreDoc;
-            // Include the first if there are duplicate IDs
-            if ( ! resultIds.containsKey(solrDoc.id)) {
-              solrDoc.positionInResponse = i++;
-              resultIds.put(solrDoc.id, solrDoc);
-            }
+    Map<Object, ShardDoc> resultIds = new HashMap<>();
+    int i = 0;
+    for (TopGroups<BytesRef> topGroups : rb.mergedTopGroups.values()) {
+      for (GroupDocs<BytesRef> group : topGroups.groups) {
+        for (ScoreDoc scoreDoc : group.scoreDocs) {
+          ShardDoc solrDoc = (ShardDoc) scoreDoc;
+          // Include the first if there are duplicate IDs
+          if ( ! resultIds.containsKey(solrDoc.id)) {
+            solrDoc.positionInResponse = i++;
+            resultIds.put(solrDoc.id, solrDoc);
           }
         }
       }
-      for (QueryCommandResult queryCommandResult : rb.mergedQueryCommandResults.values()) {
-        for (ScoreDoc scoreDoc : queryCommandResult.getTopDocs().scoreDocs) {
-          ShardDoc solrDoc = (ShardDoc) scoreDoc;
-          solrDoc.positionInResponse = i++;
-          resultIds.put(solrDoc.id, solrDoc);
-        }
+    }
+    for (QueryCommandResult queryCommandResult : rb.mergedQueryCommandResults.values()) {
+      for (ScoreDoc scoreDoc : queryCommandResult.getTopDocs().scoreDocs) {
+        ShardDoc solrDoc = (ShardDoc) scoreDoc;
+        solrDoc.positionInResponse = i++;
+        resultIds.put(solrDoc.id, solrDoc);
       }
-
-      rb.resultIds = resultIds;
-    } catch (IOException e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
     }
+
+    rb.resultIds = resultIds;
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8c2ef3bc/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java b/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java
index 5bd565b..1ebf4cc 100644
--- a/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java
+++ b/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java
@@ -16,7 +16,6 @@
  */
 package org.apache.solr.schema;
 
-import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -71,7 +70,7 @@ public class SortableBinaryField extends BinaryField {
       super(field, new FieldComparatorSource() {
         @Override
         public FieldComparator.TermOrdValComparator newComparator
-            (final String fieldname, final int numHits, final int sortPos, final boolean reversed) throws IOException {
+            (final String fieldname, final int numHits, final int sortPos, final boolean reversed) {
           return new FieldComparator.TermOrdValComparator(numHits, fieldname);
         }}, reverse);
     }


[30/50] [abbrv] lucene-solr:apiv2: SOLR-8396: Add support for PointFields in Solr

Posted by no...@apache.org.
SOLR-8396: Add support for PointFields in Solr
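
Point fields are backed by Lucene's points index (a BKD tree) rather than by indexed terms, so there is no term dictionary to consult; the per-file changes below (skipping docFreq in LukeRequestHandler, BytesRef facet keys, the docValues requirement for sorting, query-based ID lookup) all follow from that. A minimal schema sketch using the field classes this commit adds; the field and type names are illustrative, and docValues="true" matches the sorting requirement enforced in QueryComponent below:

    <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
    <field name="price" type="pint" indexed="true" stored="true"/>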


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/57934ba4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/57934ba4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/57934ba4

Branch: refs/heads/apiv2
Commit: 57934ba4480d71218c7f60d0417dbae9d26188d0
Parents: a89560b
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Wed Jan 18 17:27:21 2017 -0800
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Wed Jan 18 17:27:21 2017 -0800

----------------------------------------------------------------------
 lucene/common-build.xml                         |    1 +
 solr/CHANGES.txt                                |    4 +
 .../solr/handler/admin/LukeRequestHandler.java  |    7 +-
 .../solr/handler/component/FacetComponent.java  |   12 +-
 .../solr/handler/component/QueryComponent.java  |   25 +-
 .../handler/component/RangeFacetProcessor.java  |    3 +-
 .../handler/component/RangeFacetRequest.java    |   31 +-
 .../solr/handler/component/StatsComponent.java  |    6 +
 .../handler/component/StatsValuesFactory.java   |    2 +-
 .../solr/index/SlowCompositeReaderWrapper.java  |    3 -
 .../org/apache/solr/request/IntervalFacets.java |    4 +
 .../org/apache/solr/request/SimpleFacets.java   |   37 +-
 .../org/apache/solr/response/DocsStreamer.java  |    8 +
 .../apache/solr/schema/DoublePointField.java    |  187 +++
 .../java/org/apache/solr/schema/FieldType.java  |    9 +-
 .../org/apache/solr/schema/FloatPointField.java |  187 +++
 .../org/apache/solr/schema/IntPointField.java   |  186 +++
 .../org/apache/solr/schema/LongPointField.java  |  186 +++
 .../java/org/apache/solr/schema/PointField.java |  233 +++
 .../org/apache/solr/schema/SchemaField.java     |   10 +
 .../apache/solr/search/SolrIndexSearcher.java   |   44 +-
 .../apache/solr/search/TermQParserPlugin.java   |   10 +-
 .../apache/solr/search/TermsQParserPlugin.java  |   10 +
 .../apache/solr/search/facet/FacetRange.java    |   28 +-
 .../DocumentExpressionDictionaryFactory.java    |   12 +-
 .../conf/schema-distrib-interval-faceting.xml   |   14 +-
 .../conf/schema-docValuesFaceting.xml           |   12 +
 .../solr/collection1/conf/schema-point.xml      |   88 ++
 .../solr/collection1/conf/schema-sorts.xml      |   44 +-
 .../test-files/solr/collection1/conf/schema.xml |   26 +-
 .../solr/collection1/conf/schema11.xml          |   19 +-
 .../solr/collection1/conf/schema12.xml          |   15 +-
 .../solr/collection1/conf/schema_latest.xml     |   21 +-
 .../apache/solr/TestDistributedGrouping.java    |   10 +-
 .../core/src/test/org/apache/solr/TestJoin.java |    6 +-
 .../org/apache/solr/TestRandomDVFaceting.java   |    8 +
 .../org/apache/solr/TestRandomFaceting.java     |   12 +-
 .../apache/solr/cloud/TestCloudPivotFacet.java  |    2 +
 .../handler/XsltUpdateRequestHandlerTest.java   |    2 +-
 .../handler/admin/LukeRequestHandlerTest.java   |    8 +-
 .../handler/component/TestExpandComponent.java  |    8 +-
 .../apache/solr/request/TestFacetMethods.java   |   12 +
 .../org/apache/solr/schema/TestPointFields.java | 1472 ++++++++++++++++++
 .../solr/search/TestCollapseQParserPlugin.java  |    4 +-
 .../solr/search/TestMaxScoreQueryParser.java    |    2 +-
 .../search/TestRandomCollapseQParserPlugin.java |    2 +
 .../apache/solr/search/TestSolrQueryParser.java |    8 +
 .../solr/search/facet/TestJsonFacets.java       |    2 +
 .../java/org/apache/solr/SolrTestCaseJ4.java    |   44 +-
 49 files changed, 2979 insertions(+), 107 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/lucene/common-build.xml
----------------------------------------------------------------------
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 87d2e0a..48cf457 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -1073,6 +1073,7 @@
                 <propertyref prefix="tests.leaveTemporary" />
                 <propertyref prefix="tests.leavetemporary" />
                 <propertyref prefix="solr.test.leavetmpdir" />
+                <propertyref prefix="solr.tests.preferPointFields"/>
             </syspropertyset>
 
             <!-- Pass randomized settings to the forked JVM. -->

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 205c7bc..82c3d2b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -74,6 +74,10 @@ Optimizations
 * SOLR-9584: Support Solr being proxied with another endpoint than default /solr, by using relative links
   in AdminUI javascripts (Yun Jie Zhou via janhoy)
 
+Other Changes
+----------------------
+* SOLR-8396: Add support for PointFields in Solr (Ishan Chattopadhyaya, Tomás Fernández Lóbbe)
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
index 50f46ef..7f08684 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
@@ -289,8 +289,6 @@ public class LukeRequestHandler extends RequestHandlerBase
       f.add( "schema", getFieldFlags( sfield ) );
       f.add( "flags", getFieldFlags( field ) );
 
-      Term t = new Term(field.name(), ftype!=null ? ftype.storedToIndexed(field) : field.stringValue());
-
       f.add( "value", (ftype==null)?null:ftype.toExternal( field ) );
 
       // TODO: this really should be "stored"
@@ -301,7 +299,10 @@ public class LukeRequestHandler extends RequestHandlerBase
         f.add( "binary", Base64.byteArrayToBase64(bytes.bytes, bytes.offset, bytes.length));
       }
       f.add( "boost", field.boost() );
-      f.add( "docFreq", t.text()==null ? 0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields
+      if (!ftype.isPointField()) {
+        Term t = new Term(field.name(), ftype!=null ? ftype.storedToIndexed(field) : field.stringValue());
+        f.add( "docFreq", t.text()==null ? 0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields
+      }// TODO: Calculate docFreq for point fields
 
       // If we have a term vector, return that
       if( field.fieldType().storeTermVectors() ) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
index 1cc05ab..bcff0c2 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java
@@ -33,6 +33,7 @@ import java.util.Map.Entry;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -47,6 +48,7 @@ import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.request.SimpleFacets;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.PointField;
 import org.apache.solr.search.QueryParsing;
 import org.apache.solr.search.SyntaxError;
 import org.apache.solr.search.facet.FacetDebugInfo;
@@ -1477,7 +1479,13 @@ public class FacetComponent extends SearchComponent {
           if (sfc == null) {
             sfc = new ShardFacetCount();
             sfc.name = name;
-            sfc.indexed = ftype == null ? sfc.name : ftype.toInternal(sfc.name);
+            if (ftype == null) {
+              sfc.indexed = null;
+            } else if (ftype.isPointField()) {
+              sfc.indexed = ((PointField)ftype).toInternalByteRef(sfc.name);
+            } else {
+              sfc.indexed = new BytesRef(ftype.toInternal(sfc.name));
+            }
             sfc.termNum = termNum++;
             counts.put(name, sfc);
           }
@@ -1553,7 +1561,7 @@ public class FacetComponent extends SearchComponent {
   public static class ShardFacetCount {
     public String name;
     // the indexed form of the name... used for comparisons
-    public String indexed; 
+    public BytesRef indexed; 
     public long count;
     public int termNum; // term number starting at 0 (used in bit arrays)
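
Because a point value has no canonical indexed String, the distributed facet counts now key on the BytesRef form. The branch added above, restated as a standalone sketch:

    import org.apache.lucene.util.BytesRef;
    import org.apache.solr.schema.FieldType;
    import org.apache.solr.schema.PointField;

    final class IndexedForm {
      // Comparable indexed form of a facet value: point fields use the
      // new toInternalByteRef, other types still go through toInternal
      // and wrap the resulting String.
      static BytesRef of(FieldType ftype, String name) {
        if (ftype == null) {
          return null;
        } else if (ftype.isPointField()) {
          return ((PointField) ftype).toInternalByteRef(name);
        }
        return new BytesRef(ftype.toInternal(name));
      }
    }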
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
index 88ff731..c357202 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -185,6 +185,11 @@ public class QueryComponent extends SearchComponent
       }
 
       rb.setSortSpec( parser.getSortSpec(true) );
+      for (SchemaField sf:rb.getSortSpec().getSchemaFields()) {
+        if (sf != null && sf.getType().isPointField() && !sf.hasDocValues()) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"Can't sort on a point field without docValues");
+        }
+      }
       rb.setQparser(parser);
 
       final String cursorStr = rb.req.getParams().get(CursorMarkParams.CURSOR_MARK_PARAM);
@@ -335,11 +340,21 @@ public class QueryComponent extends SearchComponent
       List<String> idArr = StrUtils.splitSmart(ids, ",", true);
       int[] luceneIds = new int[idArr.size()];
       int docs = 0;
-      for (int i=0; i<idArr.size(); i++) {
-        int id = searcher.getFirstMatch(
-                new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
-        if (id >= 0)
-          luceneIds[docs++] = id;
+      if (idField.getType().isPointField()) {
+        for (int i=0; i<idArr.size(); i++) {
+          int id = searcher.search(
+              idField.getType().getFieldQuery(null, idField, idArr.get(i)), 1).scoreDocs[0].doc;
+          if (id >= 0) {
+            luceneIds[docs++] = id;
+          }
+        }
+      } else {
+        for (int i=0; i<idArr.size(); i++) {
+          int id = searcher.getFirstMatch(
+                  new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
+          if (id >= 0)
+            luceneIds[docs++] = id;
+        }
       }
 
       DocListAndSet res = new DocListAndSet();
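
Two consequences of the missing term dictionary surface in this file: sorting on a point field is rejected unless the field has docValues, and get-by-id can no longer use getFirstMatch on an indexed term, so it runs a small query instead. The guard, extracted as a sketch:

    import org.apache.solr.common.SolrException;
    import org.apache.solr.schema.SchemaField;

    final class PointSortGuard {
      // Same check as the hunk above: without docValues there is no
      // per-document value for the comparator to order by.
      static void requireSortable(SchemaField sf) {
        if (sf != null && sf.getType().isPointField() && !sf.hasDocValues()) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "Can't sort on a point field without docValues");
        }
      }
    }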

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java b/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
index 731d224..f8ab7b7 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/RangeFacetProcessor.java
@@ -55,7 +55,6 @@ public class RangeFacetProcessor extends SimpleFacets {
    *
    * @see org.apache.solr.common.params.FacetParams#FACET_RANGE
    */
-  @SuppressWarnings("unchecked")
   public NamedList<Object> getFacetRangeCounts() throws IOException, SyntaxError {
     final NamedList<Object> resOuter = new SimpleOrderedMap<>();
 
@@ -92,7 +91,7 @@ public class RangeFacetProcessor extends SimpleFacets {
     final FieldType ft = sf.getType();
 
     if (method.equals(FacetRangeMethod.DV)) {
-      assert ft instanceof TrieField;
+      assert ft instanceof TrieField || ft.isPointField();
       resOuter.add(key, getFacetRangeCountsDocValues(rangeFacetRequest));
     } else {
       resOuter.add(key, getFacetRangeCounts(rangeFacetRequest));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java b/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java
index 8c0c381..f129e73 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/RangeFacetRequest.java
@@ -34,6 +34,7 @@ import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.schema.DateRangeField;
 import org.apache.solr.schema.FieldType;
 import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.schema.PointField;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.TrieDateField;
 import org.apache.solr.schema.TrieField;
@@ -91,6 +92,11 @@ public class RangeFacetRequest extends FacetComponent.FacetBase {
           DateRangeField.class + "'. Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
       method = FacetParams.FacetRangeMethod.FILTER;
     }
+    if (method.equals(FacetParams.FacetRangeMethod.DV) && !schemaField.hasDocValues() && schemaField.getType().isPointField()) {
+      log.warn("Range facet method '" + FacetParams.FacetRangeMethod.DV + "' is not supported on PointFields without docValues. " +
+          "Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
+      method = FacetParams.FacetRangeMethod.FILTER;
+    }
 
     this.start = required.getFieldParam(facetOn, FacetParams.FACET_RANGE_START);
     this.end = required.getFieldParam(facetOn, FacetParams.FACET_RANGE_END);
@@ -159,10 +165,33 @@ public class RangeFacetRequest extends FacetComponent.FacetBase {
         default:
           throw new SolrException
               (SolrException.ErrorCode.BAD_REQUEST,
-                  "Unable to range facet on tried field of unexpected type:" + this.facetOn);
+                  "Unable to range facet on Trie field of unexpected type:" + this.facetOn);
       }
     } else if (ft instanceof DateRangeField) {
       calc = new DateRangeEndpointCalculator(this, null);
+    } else if (ft.isPointField()) {
+      final PointField pointField = (PointField) ft;
+      switch (pointField.getType()) {
+        case FLOAT:
+          calc = new FloatRangeEndpointCalculator(this);
+          break;
+        case DOUBLE:
+          calc = new DoubleRangeEndpointCalculator(this);
+          break;
+        case INTEGER:
+          calc = new IntegerRangeEndpointCalculator(this);
+          break;
+        case LONG:
+          calc = new LongRangeEndpointCalculator(this);
+          break;
+        case DATE:
+          calc = new DateRangeEndpointCalculator(this, null);
+          break;
+        default:
+          throw new SolrException
+              (SolrException.ErrorCode.BAD_REQUEST,
+                  "Unable to range facet on Point field of unexpected type:" + this.facetOn);
+      }
     } else {
       throw new SolrException
           (SolrException.ErrorCode.BAD_REQUEST,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java
index 68284c7..6a6e9be 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java
@@ -45,6 +45,12 @@ public class StatsComponent extends SearchComponent {
       rb.setNeedDocSet( true );
       rb.doStats = true;
       rb._statsInfo = new StatsInfo(rb);
+      for (StatsField statsField : rb._statsInfo.getStatsFields()) {
+        if (statsField.getSchemaField() != null && statsField.getSchemaField().getType().isPointField() && !statsField.getSchemaField().hasDocValues()) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
+              "Can't calculate stats on a PointField without docValues");
+        }
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java b/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java
index 8a35ee0..7605f73 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java
@@ -65,7 +65,7 @@ public class StatsValuesFactory {
     
     if (TrieDateField.class.isInstance(fieldType)) {
       return new DateStatsValues(statsField);
-    } else if (TrieField.class.isInstance(fieldType)) {
+    } else if (TrieField.class.isInstance(fieldType) || PointField.class.isInstance(fieldType)) {
       return new NumericStatsValues(statsField);
     } else if (StrField.class.isInstance(fieldType)) {
       return new StringStatsValues(statsField);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/index/SlowCompositeReaderWrapper.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/index/SlowCompositeReaderWrapper.java b/solr/core/src/java/org/apache/solr/index/SlowCompositeReaderWrapper.java
index 5031faf..12f5bd1 100644
--- a/solr/core/src/java/org/apache/solr/index/SlowCompositeReaderWrapper.java
+++ b/solr/core/src/java/org/apache/solr/index/SlowCompositeReaderWrapper.java
@@ -65,9 +65,6 @@ public final class SlowCompositeReaderWrapper extends LeafReader {
   SlowCompositeReaderWrapper(CompositeReader reader, boolean merging) throws IOException {
     super();
     in = reader;
-    if (getFieldInfos().hasPointValues()) {
-      throw new IllegalArgumentException("cannot wrap points");
-    }
     fields = MultiFields.getFields(in);
     in.registerParentReader(this);
     this.merging = merging;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/request/IntervalFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/IntervalFacets.java b/solr/core/src/java/org/apache/solr/request/IntervalFacets.java
index 14bf700..88e39fc 100644
--- a/solr/core/src/java/org/apache/solr/request/IntervalFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/IntervalFacets.java
@@ -42,6 +42,7 @@ import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.request.IntervalFacets.FacetInterval;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.PointField;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.TrieDateField;
 import org.apache.solr.search.DocIterator;
@@ -625,6 +626,9 @@ public class IntervalFacets implements Iterable<FacetInterval> {
       if ("*".equals(value)) {
         return null;
       }
+      if (schemaField.getType().isPointField()) {
+        return ((PointField)schemaField.getType()).toInternalByteRef(value);
+      }
       return new BytesRef(schemaField.getType().toInternal(value));
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
index 641b1f3..0d9cb29 100644
--- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
@@ -411,6 +411,10 @@ public class SimpleFacets {
     
     NamedList<Integer> counts;
     SchemaField sf = searcher.getSchema().getField(field);
+    if (sf.getType().isPointField() && !sf.hasDocValues()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
+          "Can't facet on a PointField without docValues");
+    }
     FieldType ft = sf.getType();
 
     // determine what type of faceting method to use
@@ -579,6 +583,10 @@ public class SimpleFacets {
    static FacetMethod selectFacetMethod(SchemaField field, FacetMethod method, Integer mincount) {
 
      FieldType type = field.getType();
+     if (type.isPointField()) {
+       // Only FCS is supported for PointFields for now
+       return FacetMethod.FCS;
+     }
 
      /*The user did not specify any preference*/
      if (method == null) {
@@ -810,12 +818,20 @@ public class SimpleFacets {
    * @param terms a list of term values (in the specified field) to compute the counts for 
    */
   protected NamedList<Integer> getListedTermCounts(String field, final ParsedParams parsed, List<String> terms) throws IOException {
-    FieldType ft = searcher.getSchema().getFieldType(field);
+    SchemaField sf = searcher.getSchema().getField(field);
+    FieldType ft = sf.getType();
     NamedList<Integer> res = new NamedList<>();
-    for (String term : terms) {
-      String internal = ft.toInternal(term);
-      int count = searcher.numDocs(new TermQuery(new Term(field, internal)), parsed.docs);
-      res.add(term, count);
+    if (ft.isPointField()) {
+      for (String term : terms) {
+        int count = searcher.numDocs(ft.getFieldQuery(null, sf, term), parsed.docs);
+        res.add(term, count);
+      }
+    } else {
+      for (String term : terms) {
+        String internal = ft.toInternal(term);
+        int count = searcher.numDocs(new TermQuery(new Term(field, internal)), parsed.docs);
+        res.add(term, count);
+      }
     }
     return res;    
   }
@@ -848,7 +864,7 @@ public class SimpleFacets {
   public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing, 
                                       String sort, String prefix, String contains, boolean ignoreCase, boolean intersectsCheck)
     throws IOException {
-
+    
     /* :TODO: potential optimization...
     * cache the Terms with the highest docFreq and try them first
     * don't enum if we get our max from them
@@ -864,10 +880,12 @@ public class SimpleFacets {
       fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
     }
 
-
     IndexSchema schema = searcher.getSchema();
-    LeafReader r = searcher.getSlowAtomicReader();
     FieldType ft = schema.getFieldType(field);
+    assert !ft.isPointField(): "Point Fields don't support enum method";
+    
+    LeafReader r = searcher.getSlowAtomicReader();
+    
 
     boolean sortByCount = sort.equals("count") || sort.equals("true");
     final int maxsize = limit>=0 ? offset+limit : Integer.MAX_VALUE-1;
@@ -1082,6 +1100,9 @@ public class SimpleFacets {
       if (parsed.params.getBool(GroupParams.GROUP_FACET, false)) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Interval Faceting can't be used with " + GroupParams.GROUP_FACET);
       }
+      if (schemaField.getType().isPointField() && !schemaField.hasDocValues()) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't use interval faceting on a PointField without docValues");
+      }
       
       SimpleOrderedMap<Integer> fieldResults = new SimpleOrderedMap<Integer>();
       res.add(parsed.key, fieldResults);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/response/DocsStreamer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/response/DocsStreamer.java b/solr/core/src/java/org/apache/solr/response/DocsStreamer.java
index bee699c..ef0b0c7 100644
--- a/solr/core/src/java/org/apache/solr/response/DocsStreamer.java
+++ b/solr/core/src/java/org/apache/solr/response/DocsStreamer.java
@@ -31,8 +31,12 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.response.transform.DocTransformer;
 import org.apache.solr.schema.BinaryField;
 import org.apache.solr.schema.BoolField;
+import org.apache.solr.schema.DoublePointField;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.FloatPointField;
 import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.schema.IntPointField;
+import org.apache.solr.schema.LongPointField;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.StrField;
 import org.apache.solr.schema.TextField;
@@ -213,6 +217,10 @@ public class DocsStreamer implements Iterator<SolrDocument> {
     KNOWN_TYPES.add(TrieDoubleField.class);
     KNOWN_TYPES.add(TrieDateField.class);
     KNOWN_TYPES.add(BinaryField.class);
+    KNOWN_TYPES.add(IntPointField.class);
+    KNOWN_TYPES.add(LongPointField.class);
+    KNOWN_TYPES.add(DoublePointField.class);
+    KNOWN_TYPES.add(FloatPointField.class);
     // We do not add UUIDField because UUID object is not a supported type in JavaBinCodec
     // and if we write UUIDField.toObject, we wouldn't know how to handle it in the client side
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/DoublePointField.java b/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
new file mode 100644
index 0000000..c393dfe
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collection;
+
+import org.apache.lucene.document.DoublePoint;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.legacy.LegacyNumericType;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.solr.search.QParser;
+import org.apache.solr.uninverting.UninvertingReader.Type;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * {@code PointField} implementation for {@code Double} values.
+ * @see PointField
+ * @see DoublePoint
+ */
+public class DoublePointField extends PointField implements DoubleValueFieldType {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @Override
+  public Object toNativeType(Object val) {
+    if (val == null) return null;
+    if (val instanceof Number) return ((Number) val).doubleValue();
+    if (val instanceof String) return Double.parseDouble((String) val);
+    return super.toNativeType(val);
+  }
+
+  @Override
+  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+      boolean maxInclusive) {
+    double actualMin, actualMax;
+    if (min == null) {
+      actualMin = Double.NEGATIVE_INFINITY;
+    } else {
+      actualMin = Double.parseDouble(min);
+      if (!minInclusive) {
+        actualMin = DoublePoint.nextUp(actualMin);
+      }
+    }
+    if (max == null) {
+      actualMax = Double.POSITIVE_INFINITY;
+    } else {
+      actualMax = Double.parseDouble(max);
+      if (!maxInclusive) {
+        actualMax = DoublePoint.nextDown(actualMax);
+      }
+    }
+    return DoublePoint.newRangeQuery(field.getName(), actualMin, actualMax);
+  }
+
+  @Override
+  public Object toObject(SchemaField sf, BytesRef term) {
+    return DoublePoint.decodeDimension(term.bytes, term.offset);
+  }
+  
+  @Override
+  public Object toObject(IndexableField f) {
+    final Number val = f.numericValue();
+    if (val != null) {
+      if (f.fieldType().stored() == false && f.fieldType().docValuesType() == DocValuesType.NUMERIC) {
+        return Double.longBitsToDouble(val.longValue());
+      } else {
+        return val;
+      }
+    } else {
+      throw new AssertionError("Unexpected state. Field: '" + f + "'");
+    }
+  }
+
+  @Override
+  protected Query getExactQuery(SchemaField field, String externalVal) {
+    return DoublePoint.newExactQuery(field.getName(), Double.parseDouble(externalVal));
+  }
+
+  @Override
+  public Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVal) {
+    assert externalVal.size() > 0;
+    double[] values = new double[externalVal.size()];
+    int i = 0;
+    for (String val:externalVal) {
+      values[i] = Double.parseDouble(val);
+      i++;
+    }
+    return DoublePoint.newSetQuery(field.getName(), values);
+  }
+
+  @Override
+  protected String indexedToReadable(BytesRef indexedForm) {
+    return Double.toString(DoublePoint.decodeDimension(indexedForm.bytes, indexedForm.offset));
+  }
+
+  @Override
+  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
+    result.grow(Double.BYTES);
+    result.setLength(Double.BYTES);
+    DoublePoint.encodeDimension(Double.parseDouble(val.toString()), result.bytes(), 0);
+  }
+
+  @Override
+  public SortField getSortField(SchemaField field, boolean top) {
+    field.checkSortability();
+
+    Object missingValue = null;
+    boolean sortMissingLast = field.sortMissingLast();
+    boolean sortMissingFirst = field.sortMissingFirst();
+
+    if (sortMissingLast) {
+      missingValue = top ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+    } else if (sortMissingFirst) {
+      missingValue = top ? Double.POSITIVE_INFINITY : Double.NEGATIVE_INFINITY;
+    }
+    SortField sf = new SortField(field.getName(), SortField.Type.DOUBLE, top);
+    sf.setMissingValue(missingValue);
+    return sf;
+  }
+
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
+//      return Type.SORTED_DOUBLE;
+    } else {
+      return Type.DOUBLE_POINT;
+    }
+  }
+
+  @Override
+  public ValueSource getValueSource(SchemaField field, QParser qparser) {
+    field.checkFieldCacheSource();
+    return new DoubleFieldSource(field.getName());
+  }
+
+  @Override
+  public LegacyNumericType getNumericType() {
+    // TODO: refactor this to not use LegacyNumericType
+    return LegacyNumericType.DOUBLE;
+  }
+
+  @Override
+  public IndexableField createField(SchemaField field, Object value, float boost) {
+    if (!isFieldUsed(field)) return null;
+
+    if (boost != 1.0 && log.isTraceEnabled()) {
+      log.trace("Can't use document/field boost for PointField. Field: " + field.getName() + ", boost: " + boost);
+    }
+    double doubleValue = (value instanceof Number) ? ((Number) value).doubleValue() : Double.parseDouble(value.toString());
+    return new DoublePoint(field.getName(), doubleValue);
+  }
+
+  @Override
+  protected StoredField getStoredField(SchemaField sf, Object value) {
+    return new StoredField(sf.getName(), (Double) this.toNativeType(value));
+  }
+
+  @Override
+  public PointTypes getType() {
+    return PointTypes.DOUBLE;
+  }
+}

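A note on getRangeQuery above: missing bounds map to the infinities and exclusive bounds move to the adjacent representable double via DoublePoint.nextUp/nextDown. A rough sketch, assuming an illustrative field price_d:

    import org.apache.lucene.document.DoublePoint;
    import org.apache.lucene.search.Query;

    class DoubleRangeSketch {
      public static void main(String[] args) {
        // {* TO 100.0} : open lower bound, exclusive upper bound
        Query q = DoublePoint.newRangeQuery("price_d",
            Double.NEGATIVE_INFINITY, DoublePoint.nextDown(100.0));
        System.out.println(q);
      }
    }
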
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/schema/FieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index a5c898a..3922edc 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -126,6 +126,10 @@ public abstract class FieldType extends FieldProperties {
   public boolean isPolyField(){
     return false;
   }
+  
+  public boolean isPointField() {
+    return false;
+  }
 
   /**
    * Returns true if the fields' docValues should be used for obtaining stored value
@@ -395,7 +399,10 @@ public abstract class FieldType extends FieldProperties {
     return toInternal(val);
   }
 
-  /** Given the readable value, return the term value that will match it. */
+  /** Given the readable value, return the term value that will match it.
+   * This method will modify the size and length of the {@code result} 
+   * parameter and write from offset 0
+   */
   public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
     final String internal = readableToIndexed(val.toString());
     result.copyChars(internal);

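The clarified contract matters for the PointField overrides later in this commit: they size the builder to the encoded width and always write at offset 0. A sketch of the round trip for the int case:

    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.BytesRefBuilder;

    class ReadableToIndexedSketch {
      public static void main(String[] args) {
        BytesRefBuilder result = new BytesRefBuilder();
        result.grow(Integer.BYTES);      // ensure capacity for the 4-byte encoding
        result.setLength(Integer.BYTES); // force the length to exactly 4 bytes
        IntPoint.encodeDimension(42, result.bytes(), 0);

        BytesRef indexedForm = result.get();
        System.out.println(IntPoint.decodeDimension(indexedForm.bytes, indexedForm.offset)); // 42
      }
    }
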
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/FloatPointField.java b/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
new file mode 100644
index 0000000..766c6e9
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collection;
+
+import org.apache.lucene.document.FloatPoint;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.legacy.LegacyNumericType;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.solr.search.QParser;
+import org.apache.solr.uninverting.UninvertingReader.Type;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * {@code PointField} implementation for {@code Float} values.
+ * @see PointField
+ * @see FloatPoint
+ */
+public class FloatPointField extends PointField implements FloatValueFieldType {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @Override
+  public Object toNativeType(Object val) {
+    if (val == null) return null;
+    if (val instanceof Number) return ((Number) val).floatValue();
+    if (val instanceof String) return Float.parseFloat((String) val);
+    return super.toNativeType(val);
+  }
+
+  @Override
+  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+      boolean maxInclusive) {
+    float actualMin, actualMax;
+    if (min == null) {
+      actualMin = Float.NEGATIVE_INFINITY;
+    } else {
+      actualMin = Float.parseFloat(min);
+      if (!minInclusive) {
+        actualMin = FloatPoint.nextUp(actualMin);
+      }
+    }
+    if (max == null) {
+      actualMax = Float.POSITIVE_INFINITY;
+    } else {
+      actualMax = Float.parseFloat(max);
+      if (!maxInclusive) {
+        actualMax = FloatPoint.nextDown(actualMax);
+      }
+    }
+    return FloatPoint.newRangeQuery(field.getName(), actualMin, actualMax);
+  }
+
+  @Override
+  public Object toObject(SchemaField sf, BytesRef term) {
+    return FloatPoint.decodeDimension(term.bytes, term.offset);
+  }
+  
+  @Override
+  public Object toObject(IndexableField f) {
+    final Number val = f.numericValue();
+    if (val != null) {
+      if (f.fieldType().stored() == false && f.fieldType().docValuesType() == DocValuesType.NUMERIC) {
+        return Float.intBitsToFloat(val.intValue());
+      } else {
+        return val;
+      }
+    } else {
+      throw new AssertionError("Unexpected state. Field: '" + f + "'");
+    }
+  }
+
+  @Override
+  protected Query getExactQuery(SchemaField field, String externalVal) {
+    return FloatPoint.newExactQuery(field.getName(), Float.parseFloat(externalVal));
+  }
+
+  @Override
+  public Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVal) {
+    assert externalVal.size() > 0;
+    float[] values = new float[externalVal.size()];
+    int i = 0;
+    for (String val:externalVal) {
+      values[i] = Float.parseFloat(val);
+      i++;
+    }
+    return FloatPoint.newSetQuery(field.getName(), values);
+  }
+
+  @Override
+  protected String indexedToReadable(BytesRef indexedForm) {
+    return Float.toString(FloatPoint.decodeDimension(indexedForm.bytes, indexedForm.offset));
+  }
+
+  @Override
+  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
+    result.grow(Float.BYTES);
+    result.setLength(Float.BYTES);
+    FloatPoint.encodeDimension(Float.parseFloat(val.toString()), result.bytes(), 0);
+  }
+
+  @Override
+  public SortField getSortField(SchemaField field, boolean top) {
+    field.checkSortability();
+
+    Object missingValue = null;
+    boolean sortMissingLast = field.sortMissingLast();
+    boolean sortMissingFirst = field.sortMissingFirst();
+
+    if (sortMissingLast) {
+      missingValue = top ? Float.NEGATIVE_INFINITY : Float.POSITIVE_INFINITY;
+    } else if (sortMissingFirst) {
+      missingValue = top ? Float.POSITIVE_INFINITY : Float.NEGATIVE_INFINITY;
+    }
+    SortField sf = new SortField(field.getName(), SortField.Type.FLOAT, top);
+    sf.setMissingValue(missingValue);
+    return sf;
+  }
+
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
+//      return Type.SORTED_FLOAT;
+    } else {
+      return Type.FLOAT_POINT;
+    }
+  }
+
+  @Override
+  public ValueSource getValueSource(SchemaField field, QParser qparser) {
+    field.checkFieldCacheSource();
+    return new FloatFieldSource(field.getName());
+  }
+
+  @Override
+  public LegacyNumericType getNumericType() {
+    // TODO: refactor this to not use LegacyNumericType
+    return LegacyNumericType.FLOAT;
+  }
+
+  @Override
+  public IndexableField createField(SchemaField field, Object value, float boost) {
+    if (!isFieldUsed(field)) return null;
+
+    if (boost != 1.0 && log.isTraceEnabled()) {
+      log.trace("Can't use document/field boost for PointField. Field: " + field.getName() + ", boost: " + boost);
+    }
+    float floatValue = (value instanceof Number) ? ((Number) value).floatValue() : Float.parseFloat(value.toString());
+    return new FloatPoint(field.getName(), floatValue);
+  }
+
+  @Override
+  protected StoredField getStoredField(SchemaField sf, Object value) {
+    return new StoredField(sf.getName(), (Float) this.toNativeType(value));
+  }
+  
+  @Override
+  public PointTypes getType() {
+    return PointTypes.FLOAT;
+  }
+}

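Worth noting in getSortField above: with FieldCache unavailable, the sort runs off NumericDocValues, and sortMissingFirst/sortMissingLast become infinity sentinels. A sketch, assuming an illustrative field rating_f:

    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;

    class FloatSortSketch {
      public static void main(String[] args) {
        boolean top = true; // descending sort
        SortField sf = new SortField("rating_f", SortField.Type.FLOAT, top);
        // sortMissingLast: docs without a value sort after all real values
        sf.setMissingValue(top ? Float.NEGATIVE_INFINITY : Float.POSITIVE_INFINITY);
        Sort sort = new Sort(sf);
        System.out.println(sort);
      }
    }
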
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/schema/IntPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/IntPointField.java b/solr/core/src/java/org/apache/solr/schema/IntPointField.java
new file mode 100644
index 0000000..a7bab07
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/IntPointField.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collection;
+
+import org.apache.lucene.document.IntPoint;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.legacy.LegacyNumericType;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.IntFieldSource;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.solr.search.QParser;
+import org.apache.solr.uninverting.UninvertingReader.Type;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * {@code PointField} implementation for {@code Integer} values.
+ * @see PointField
+ * @see IntPoint
+ */
+public class IntPointField extends PointField implements IntValueFieldType {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @Override
+  public Object toNativeType(Object val) {
+    if (val == null) return null;
+    if (val instanceof Number) return ((Number) val).intValue();
+    try {
+      if (val instanceof String) return Integer.parseInt((String) val);
+    } catch (NumberFormatException e) {
+      Float v = Float.parseFloat((String) val);
+      return v.intValue();
+    }
+    return super.toNativeType(val);
+  }
+
+  @Override
+  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+      boolean maxInclusive) {
+    int actualMin, actualMax;
+    if (min == null) {
+      actualMin = Integer.MIN_VALUE;
+    } else {
+      actualMin = Integer.parseInt(min);
+      if (!minInclusive) {
+        actualMin++;
+      }
+    }
+    if (max == null) {
+      actualMax = Integer.MAX_VALUE;
+    } else {
+      actualMax = Integer.parseInt(max);
+      if (!maxInclusive) {
+        actualMax--;
+      }
+    }
+    return IntPoint.newRangeQuery(field.getName(), actualMin, actualMax);
+  }
+
+  @Override
+  public Object toObject(SchemaField sf, BytesRef term) {
+    return IntPoint.decodeDimension(term.bytes, term.offset);
+  }
+  
+  @Override
+  public Object toObject(IndexableField f) {
+    final Number val = f.numericValue();
+    if (val != null) {
+      return val;
+    } else {
+      throw new AssertionError("Unexpected state. Field: '" + f + "'");
+    }
+  }
+
+  @Override
+  protected Query getExactQuery(SchemaField field, String externalVal) {
+    return IntPoint.newExactQuery(field.getName(), Integer.parseInt(externalVal));
+  }
+  
+  @Override
+  public Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVal) {
+    assert externalVal.size() > 0;
+    int[] values = new int[externalVal.size()];
+    int i = 0;
+    for (String val:externalVal) {
+      values[i] = Integer.parseInt(val);
+      i++;
+    }
+    return IntPoint.newSetQuery(field.getName(), values);
+  }
+
+  @Override
+  protected String indexedToReadable(BytesRef indexedForm) {
+    return Integer.toString(IntPoint.decodeDimension(indexedForm.bytes, indexedForm.offset));
+  }
+
+  @Override
+  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
+    result.grow(Integer.BYTES);
+    result.setLength(Integer.BYTES);
+    IntPoint.encodeDimension(Integer.parseInt(val.toString()), result.bytes(), 0);
+  }
+
+  @Override
+  public SortField getSortField(SchemaField field, boolean top) {
+    field.checkSortability();
+
+    Object missingValue = null;
+    boolean sortMissingLast = field.sortMissingLast();
+    boolean sortMissingFirst = field.sortMissingFirst();
+
+    if (sortMissingLast) {
+      missingValue = top ? Integer.MIN_VALUE : Integer.MAX_VALUE;
+    } else if (sortMissingFirst) {
+      missingValue = top ? Integer.MAX_VALUE : Integer.MIN_VALUE;
+    }
+    SortField sf = new SortField(field.getName(), SortField.Type.INT, top);
+    sf.setMissingValue(missingValue);
+    return sf;
+  }
+
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
+//      return Type.SORTED_INTEGER;
+    } else {
+      return Type.INTEGER_POINT;
+    }
+  }
+
+  @Override
+  public ValueSource getValueSource(SchemaField field, QParser qparser) {
+    field.checkFieldCacheSource();
+    return new IntFieldSource(field.getName());
+  }
+
+  @Override
+  public LegacyNumericType getNumericType() {
+    return LegacyNumericType.INT;
+  }
+
+  @Override
+  public IndexableField createField(SchemaField field, Object value, float boost) {
+    if (!isFieldUsed(field)) return null;
+
+    if (boost != 1.0 && log.isTraceEnabled()) {
+      log.trace("Can't use document/field boost for PointField. Field: " + field.getName() + ", boost: " + boost);
+    }
+    int intValue = (value instanceof Number) ? ((Number) value).intValue() : Integer.parseInt(value.toString());
+    return new IntPoint(field.getName(), intValue);
+  }
+
+  @Override
+  protected StoredField getStoredField(SchemaField sf, Object value) {
+    return new StoredField(sf.getName(), (Integer) this.toNativeType(value));
+  }
+
+  @Override
+  public PointTypes getType() {
+    return PointTypes.INTEGER;
+  }
+}

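For the fixed-width integer types, exclusive bounds are handled by shifting the endpoint by one, since every in-range int has an adjacent value; open bounds degenerate to the full int range. A sketch with an illustrative field age_i:

    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.search.Query;

    class IntRangeSketch {
      public static void main(String[] args) {
        // {18 TO 65} : both bounds exclusive, so shift each endpoint by one
        Query q = IntPoint.newRangeQuery("age_i", 18 + 1, 65 - 1);
        // [* TO *] : open on both ends, the whole int range
        Query all = IntPoint.newRangeQuery("age_i", Integer.MIN_VALUE, Integer.MAX_VALUE);
        System.out.println(q + " | " + all);
      }
    }
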
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/schema/LongPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/LongPointField.java b/solr/core/src/java/org/apache/solr/schema/LongPointField.java
new file mode 100644
index 0000000..f3fca3c
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/LongPointField.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Collection;
+
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.legacy.LegacyNumericType;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.LongFieldSource;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.solr.search.QParser;
+import org.apache.solr.uninverting.UninvertingReader.Type;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * {@code PointField} implementation for {@code Long} values.
+ * @see PointField
+ * @see LongPoint
+ */
+public class LongPointField extends PointField implements LongValueFieldType {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @Override
+  public Object toNativeType(Object val) {
+    if (val == null) return null;
+    if (val instanceof Number) return ((Number) val).longValue();
+    try {
+      if (val instanceof String) return Long.parseLong((String) val);
+    } catch (NumberFormatException e) {
+      Double v = Double.parseDouble((String) val);
+      return v.longValue();
+    }
+    return super.toNativeType(val);
+  }
+
+  @Override
+  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+      boolean maxInclusive) {
+    long actualMin, actualMax;
+    if (min == null) {
+      actualMin = Long.MIN_VALUE;
+    } else {
+      actualMin = Long.parseLong(min);
+      if (!minInclusive) {
+        actualMin++;
+      }
+    }
+    if (max == null) {
+      actualMax = Long.MAX_VALUE;
+    } else {
+      actualMax = Long.parseLong(max);
+      if (!maxInclusive) {
+        actualMax--;
+      }
+    }
+    return LongPoint.newRangeQuery(field.getName(), actualMin, actualMax);
+  }
+
+  @Override
+  public Object toObject(SchemaField sf, BytesRef term) {
+    return LongPoint.decodeDimension(term.bytes, term.offset);
+  }
+  
+  @Override
+  public Object toObject(IndexableField f) {
+    final Number val = f.numericValue();
+    if (val != null) {
+      return val;
+    } else {
+      throw new AssertionError("Unexpected state. Field: '" + f + "'");
+    }
+  }
+
+  @Override
+  protected Query getExactQuery(SchemaField field, String externalVal) {
+    return LongPoint.newExactQuery(field.getName(), Long.parseLong(externalVal));
+  }
+  
+  @Override
+  public Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVal) {
+    assert externalVal.size() > 0;
+    long[] values = new long[externalVal.size()];
+    int i = 0;
+    for (String val:externalVal) {
+      values[i] = Long.parseLong(val);
+      i++;
+    }
+    return LongPoint.newSetQuery(field.getName(), values);
+  }
+
+  @Override
+  protected String indexedToReadable(BytesRef indexedForm) {
+    return Long.toString(LongPoint.decodeDimension(indexedForm.bytes, indexedForm.offset));
+  }
+
+  @Override
+  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
+    result.grow(Long.BYTES);
+    result.setLength(Long.BYTES);
+    LongPoint.encodeDimension(Long.parseLong(val.toString()), result.bytes(), 0);
+  }
+
+  @Override
+  public SortField getSortField(SchemaField field, boolean top) {
+    field.checkSortability();
+
+    Object missingValue = null;
+    boolean sortMissingLast = field.sortMissingLast();
+    boolean sortMissingFirst = field.sortMissingFirst();
+
+    if (sortMissingLast) {
+      missingValue = top ? Long.MIN_VALUE : Long.MAX_VALUE;
+    } else if (sortMissingFirst) {
+      missingValue = top ? Long.MAX_VALUE : Long.MIN_VALUE;
+    }
+    SortField sf = new SortField(field.getName(), SortField.Type.LONG, top);
+    sf.setMissingValue(missingValue);
+    return sf;
+  }
+
+  @Override
+  public Type getUninversionType(SchemaField sf) {
+    if (sf.multiValued()) {
+      throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
+//      return Type.SORTED_LONG;
+    } else {
+      return Type.LONG_POINT;
+    }
+  }
+
+  @Override
+  public ValueSource getValueSource(SchemaField field, QParser qparser) {
+    field.checkFieldCacheSource();
+    return new LongFieldSource(field.getName());
+  }
+
+  @Override
+  public LegacyNumericType getNumericType() {
+    return LegacyNumericType.LONG;
+  }
+
+  @Override
+  public IndexableField createField(SchemaField field, Object value, float boost) {
+    if (!isFieldUsed(field)) return null;
+
+    if (boost != 1.0 && log.isTraceEnabled()) {
+      log.trace("Can't use document/field boost for PointField. Field: " + field.getName() + ", boost: " + boost);
+    }
+    long longValue = (value instanceof Number) ? ((Number) value).longValue() : Long.parseLong(value.toString());
+    return new LongPoint(field.getName(), longValue);
+  }
+
+  @Override
+  protected StoredField getStoredField(SchemaField sf, Object value) {
+    return new StoredField(sf.getName(), (Long) this.toNativeType(value));
+  }
+
+  @Override
+  public PointTypes getType() {
+    return PointTypes.LONG;
+  }
+}

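toNativeType above is deliberately lenient: a non-integral string falls back to a double parse and truncates. A small standalone sketch of that fallback:

    class LongNativeTypeSketch {
      public static void main(String[] args) {
        System.out.println(toNativeLong("42"));  // 42
        System.out.println(toNativeLong("3.7")); // 3, truncated via the double fallback
      }

      static long toNativeLong(String val) {
        try {
          return Long.parseLong(val);
        } catch (NumberFormatException e) {
          return (long) Double.parseDouble(val);
        }
      }
    }
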
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/schema/PointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/PointField.java b/solr/core/src/java/org/apache/solr/schema/PointField.java
new file mode 100644
index 0000000..a2dd8a8
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/PointField.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.schema;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SortedSetSelector;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.CharsRefBuilder;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.response.TextResponseWriter;
+import org.apache.solr.search.QParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Provides field types to support Lucene's {@link
+ * org.apache.lucene.document.IntPoint}, {@link org.apache.lucene.document.LongPoint}, {@link org.apache.lucene.document.FloatPoint} and
+ * {@link org.apache.lucene.document.DoublePoint}.
+ * See {@link org.apache.lucene.search.PointRangeQuery} for more details.
+ * It supports integer, float, long and double types. See subclasses for details.
+ * <br>
+ * {@code DocValues} are supported for single-value cases ({@code NumericDocValues}).
+ * {@code FieldCache} is not supported for {@code PointField}s, so sorting, faceting, etc. on these fields require the use of {@code docValues="true"} in the schema.
+ */
+public abstract class PointField extends PrimitiveFieldType {
+  
+  public enum PointTypes {
+    INTEGER,
+    LONG,
+    FLOAT,
+    DOUBLE,
+    DATE
+  }
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @Override
+  public boolean isPointField() {
+    return true;
+  }
+  
+  @Override
+  public final ValueSource getSingleValueSource(MultiValueSelector choice, SchemaField field, QParser parser) {
+    // trivial base case
+    if (!field.multiValued()) {
+      // single value matches any selector
+      return getValueSource(field, parser);
+    }
+
+    // Point fields don't support UninvertingReader. See SOLR-9202
+    if (!field.hasDocValues()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                              "docValues='true' is required to select '" + choice.toString() +
+                              "' value from multivalued field ("+ field.getName() +") at query time");
+    }
+    
+    // multivalued Point fields all use SortedSetDocValues, so we give a clean error if that's
+    // not supported by the specified choice, else we delegate to a helper
+    SortedSetSelector.Type selectorType = choice.getSortedSetSelectorType();
+    if (null == selectorType) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+                              choice.toString() + " is not a supported option for picking a single value"
+                              + " from the multivalued field: " + field.getName() +
+                              " (type: " + this.getTypeName() + ")");
+    }
+    
+    return getSingleValueSource(selectorType, field);
+  }
+
+  /**
+   * Helper method that will only be called for multivalued Point fields that have doc values.
+   * Default impl throws an error indicating that selecting a single value from this multivalued 
+   * field is not supported for this field type
+   *
+   * @param choice the selector Type to use, will never be null
+   * @param field the field to use, guaranteed to be multivalued.
+   * @see #getSingleValueSource(MultiValueSelector,SchemaField,QParser) 
+   */
+  protected ValueSource getSingleValueSource(SortedSetSelector.Type choice, SchemaField field) {
+    throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
+  }
+
+  @Override
+  public boolean isTokenized() {
+    return false;
+  }
+
+  @Override
+  public boolean multiValuedFieldCache() {
+    return false;
+  }
+
+  /**
+   * @return the type of this field
+   */
+  public abstract PointTypes getType();
+  
+  @Override
+  public abstract Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVals);
+
+  @Override
+  public Query getFieldQuery(QParser parser, SchemaField field, String externalVal) {
+    if (!field.indexed() && field.hasDocValues()) {
+      // currently implemented as singleton range
+      return getRangeQuery(parser, field, externalVal, externalVal, true, true);
+    } else {
+      return getExactQuery(field, externalVal);
+    }
+  }
+
+  protected abstract Query getExactQuery(SchemaField field, String externalVal);
+
+  @Override
+  public String storedToReadable(IndexableField f) {
+    return toExternal(f);
+  }
+
+  @Override
+  public String toInternal(String val) {
+    throw new UnsupportedOperationException("Can't generate internal string in PointField. use PointField.toInternalByteRef");
+  }
+  
+  public BytesRef toInternalByteRef(String val) {
+    final BytesRefBuilder bytes = new BytesRefBuilder();
+    readableToIndexed(val, bytes);
+    return bytes.get();
+  }
+  
+  @Override
+  public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
+    writer.writeVal(name, toObject(f));
+  }
+
+  @Override
+  public String storedToIndexed(IndexableField f) {
+    throw new UnsupportedOperationException("Not supported with PointFields");
+  }
+  
+  @Override
+  public CharsRef indexedToReadable(BytesRef indexedForm, CharsRefBuilder charsRef) {
+    final String value = indexedToReadable(indexedForm);
+    charsRef.grow(value.length());
+    charsRef.setLength(value.length());
+    value.getChars(0, charsRef.length(), charsRef.chars(), 0);
+    return charsRef.get();
+  }
+  
+  @Override
+  public String indexedToReadable(String indexedForm) {
+    return indexedToReadable(new BytesRef(indexedForm));
+  }
+  
+  protected abstract String indexedToReadable(BytesRef indexedForm);
+  
+  protected boolean isFieldUsed(SchemaField field) {
+    boolean indexed = field.indexed();
+    boolean stored = field.stored();
+    boolean docValues = field.hasDocValues();
+
+    if (!indexed && !stored && !docValues) {
+      if (log.isTraceEnabled()) {
+        log.trace("Ignoring unindexed/unstored field: " + field);
+      }
+      return false;
+    }
+    return true;
+  }
+
+  @Override
+  public List<IndexableField> createFields(SchemaField sf, Object value, float boost) {
+    if (!(sf.hasDocValues() || sf.stored())) {
+      return Collections.singletonList(createField(sf, value, boost));
+    }
+    List<IndexableField> fields = new ArrayList<>();
+    final IndexableField field = createField(sf, value, boost);
+    fields.add(field);
+    
+    if (sf.hasDocValues()) {
+      if (sf.multiValued()) {
+        throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported. Field: '" + sf.getName() + "'");
+      } else {
+        final long bits;
+        if (field.numericValue() instanceof Integer || field.numericValue() instanceof Long) {
+          bits = field.numericValue().longValue();
+        } else if (field.numericValue() instanceof Float) {
+          bits = Float.floatToIntBits(field.numericValue().floatValue());
+        } else {
+          assert field.numericValue() instanceof Double;
+          bits = Double.doubleToLongBits(field.numericValue().doubleValue());
+        }
+        fields.add(new NumericDocValuesField(sf.getName(), bits));
+      }
+    }
+    if (sf.stored()) {
+      fields.add(getStoredField(sf, value));
+    }
+    return fields;
+  }
+
+  protected abstract StoredField getStoredField(SchemaField sf, Object value);
+
+  @Override
+  public void checkSchemaField(final SchemaField field) {
+    // PointFields support DocValues
+  }
+}

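createFields above stores the docValues side of a single-valued point as raw bits, so float and double go through their IEEE-754 bit representations. A sketch, assuming an illustrative stored+docValues field price_d:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.lucene.document.DoublePoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.document.StoredField;
    import org.apache.lucene.index.IndexableField;

    class PointCreateFieldsSketch {
      public static void main(String[] args) {
        double value = 9.99;
        List<IndexableField> fields = Arrays.asList(
            new DoublePoint("price_d", value),                                    // indexed point
            new NumericDocValuesField("price_d", Double.doubleToLongBits(value)), // raw IEEE-754 bits
            new StoredField("price_d", value));                                   // stored value
        fields.forEach(f -> System.out.println(f));
      }
    }
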
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/schema/SchemaField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaField.java b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
index bcd68c2..009e5fc 100644
--- a/solr/core/src/java/org/apache/solr/schema/SchemaField.java
+++ b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
@@ -170,6 +170,11 @@ public final class SchemaField extends FieldProperties implements IndexableField
                               "can not sort on multivalued field: " 
                               + getName());
     }
+    if (this.type.isPointField() && !hasDocValues()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
+                              "can not sort on a PointField without doc values: " 
+                              + getName());
+    }
   }
 
   /** 
@@ -191,6 +196,11 @@ public final class SchemaField extends FieldProperties implements IndexableField
                               "can not use FieldCache on multivalued field: " 
                               + getName());
     }
+    if (this.type.isPointField() && !hasDocValues()) {
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, 
+                              "Point fields can't use FieldCache. Use docValues=true for field: " 
+                              + getName());
+    }
     
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index 6d13b51..7c56311 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -98,6 +98,7 @@ import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.schema.BoolField;
 import org.apache.solr.schema.EnumField;
 import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.schema.PointField;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.TrieDateField;
 import org.apache.solr.schema.TrieDoubleField;
@@ -821,16 +822,39 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI
               continue;
             }
             Object newVal = val;
-            if (schemaField.getType() instanceof TrieIntField) {
-              newVal = val.intValue();
-            } else if (schemaField.getType() instanceof TrieFloatField) {
-              newVal = Float.intBitsToFloat(val.intValue());
-            } else if (schemaField.getType() instanceof TrieDoubleField) {
-              newVal = Double.longBitsToDouble(val);
-            } else if (schemaField.getType() instanceof TrieDateField) {
-              newVal = new Date(val);
-            } else if (schemaField.getType() instanceof EnumField) {
-              newVal = ((EnumField) schemaField.getType()).intValueToStringValue(val.intValue());
+            if (schemaField.getType().isPointField()) {
+              PointField.PointTypes type = ((PointField)schemaField.getType()).getType(); 
+              switch (type) {
+                case INTEGER:
+                  newVal = val.intValue();
+                  break;
+                case LONG:
+                  newVal = val.longValue();
+                  break;
+                case FLOAT:
+                  newVal = Float.intBitsToFloat(val.intValue());
+                  break;
+                case DOUBLE:
+                  newVal = Double.longBitsToDouble(val);
+                  break;
+                case DATE:
+                  newVal = new Date(val);
+                  break;
+                default:
+                  throw new AssertionError("Unexpected PointType: " + type);
+              }
+            } else {
+              if (schemaField.getType() instanceof TrieIntField) {
+                newVal = val.intValue();
+              } else if (schemaField.getType() instanceof TrieFloatField) {
+                newVal = Float.intBitsToFloat(val.intValue());
+              } else if (schemaField.getType() instanceof TrieDoubleField) {
+                newVal = Double.longBitsToDouble(val);
+              } else if (schemaField.getType() instanceof TrieDateField) {
+                newVal = new Date(val);
+              } else if (schemaField.getType() instanceof EnumField) {
+                newVal = ((EnumField) schemaField.getType()).intValueToStringValue(val.intValue());
+              }
             }
             doc.addField(fieldName, newVal);
             break;

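Reading values back reverses that packing: the long pulled from NumericDocValues is reinterpreted per the field's point type, as in the switch above. A standalone sketch of the decode step:

    import java.util.Date;

    class PointDocValuesDecodeSketch {
      static Object decode(long bits, String pointType) {
        switch (pointType) {
          case "INTEGER": return (int) bits;
          case "LONG":    return bits;
          case "FLOAT":   return Float.intBitsToFloat((int) bits);
          case "DOUBLE":  return Double.longBitsToDouble(bits);
          case "DATE":    return new Date(bits); // epoch millis
          default: throw new AssertionError("Unexpected point type: " + pointType);
        }
      }

      public static void main(String[] args) {
        System.out.println(decode(Double.doubleToLongBits(9.99), "DOUBLE")); // 9.99
      }
    }
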
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/search/TermQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/TermQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/TermQParserPlugin.java
index 99ef4c4..89b3d28 100644
--- a/solr/core/src/java/org/apache/solr/search/TermQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/TermQParserPlugin.java
@@ -50,10 +50,16 @@ public class TermQParserPlugin extends QParserPlugin {
         String fname = localParams.get(QueryParsing.F);
         FieldType ft = req.getSchema().getFieldTypeNoEx(fname);
         String val = localParams.get(QueryParsing.V);
-        BytesRefBuilder term = new BytesRefBuilder();
+        BytesRefBuilder term;
         if (ft != null) {
-          ft.readableToIndexed(val, term);
+          if (ft.isPointField()) {
+            return ft.getFieldQuery(this, req.getSchema().getField(fname), val);
+          } else {
+            term = new BytesRefBuilder();
+            ft.readableToIndexed(val, term);
+          }
         } else {
+          term = new BytesRefBuilder();
           term.copyChars(val);
         }
         return new TermQuery(new Term(fname, term.get()));
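
Illustration of the behavior change (the field name is made up, not from the
commit): for a request such as

    q={!term f=price_i_p}42

the parser now delegates to the point field type's getFieldQuery, which for
an IntPointField is expected to build an exact-match point query instead of
a TermQuery over readableToIndexed bytes, since point fields have no indexed
terms to match against.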

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java
index 3a60149..c407353 100644
--- a/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/TermsQParserPlugin.java
@@ -17,6 +17,7 @@
 package org.apache.solr.search;
 
 import java.util.Arrays;
+import java.util.Locale;
 import java.util.regex.Pattern;
 
 import org.apache.lucene.index.Term;
@@ -35,6 +36,7 @@ import org.apache.lucene.util.automaton.Automaton;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.PointField;
 
 /**
  * Finds documents whose specified field has any of the specified values. It's like
@@ -110,6 +112,14 @@ public class TermsQParserPlugin extends QParserPlugin {
           return new MatchNoDocsQuery();
         final String[] splitVals = sepIsSpace ? qstr.split("\\s+") : qstr.split(Pattern.quote(separator), -1);
         assert splitVals.length > 0;
+        
+        if (ft.isPointField()) {
+          if (localParams.get(METHOD) != null) {
+            throw new IllegalArgumentException(
+                String.format(Locale.ROOT, "Method '%s' not supported in TermsQParser when using PointFields", localParams.get(METHOD)));
+          }
+          return ((PointField)ft).getSetQuery(this, req.getSchema().getField(fname), Arrays.asList(splitVals));
+        }
 
         BytesRef[] bytesRefs = new BytesRef[splitVals.length];
         BytesRefBuilder term = new BytesRefBuilder();
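
Usage sketch (hypothetical field name): a request like

    q={!terms f=price_i_p}5,10,15

now resolves through PointField.getSetQuery, roughly equivalent to
IntPoint.newSetQuery("price_i_p", 5, 10, 15) for an int point field. Note
that combining it with an explicit method local param (e.g.
method=booleanQuery) is rejected with an IllegalArgumentException, as the
hunk above shows, because the term-oriented methods do not apply to point
fields.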

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/search/facet/FacetRange.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetRange.java b/solr/core/src/java/org/apache/solr/search/facet/FacetRange.java
index 99f6fce..900bbf7 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetRange.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetRange.java
@@ -30,6 +30,7 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.FacetParams;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.PointField;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.TrieDateField;
 import org.apache.solr.schema.TrieField;
@@ -141,7 +142,32 @@ class FacetRangeProcessor extends FacetProcessor<FacetRange> {
               (SolrException.ErrorCode.BAD_REQUEST,
                   "Expected numeric field type :" + sf);
       }
-    } else {
+    } else if (ft instanceof PointField) {
+      final PointField pfield = (PointField)ft;
+
+      switch (pfield.getType()) {
+        case FLOAT:
+          calc = new FloatCalc(sf);
+          break;
+        case DOUBLE:
+          calc = new DoubleCalc(sf);
+          break;
+        case INTEGER:
+          calc = new IntCalc(sf);
+          break;
+        case LONG:
+          calc = new LongCalc(sf);
+          break;
+        case DATE:
+          calc = new DateCalc(sf, null);
+          break;
+        default:
+          throw new SolrException
+              (SolrException.ErrorCode.BAD_REQUEST,
+                  "Expected numeric field type :" + sf);
+      }
+    } 
+    else {
       throw new SolrException
           (SolrException.ErrorCode.BAD_REQUEST,
               "Expected numeric field type :" + sf);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java b/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java
index b0d7007..3b7abdf 100644
--- a/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java
+++ b/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java
@@ -28,7 +28,11 @@ import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.spell.Dictionary;
 import org.apache.lucene.search.suggest.DocumentValueSourceDictionary;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.schema.DoublePointField;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.FloatPointField;
+import org.apache.solr.schema.IntPointField;
+import org.apache.solr.schema.LongPointField;
 import org.apache.solr.schema.TrieDoubleField;
 import org.apache.solr.schema.TrieFloatField;
 import org.apache.solr.schema.TrieIntField;
@@ -111,13 +115,13 @@ public class DocumentExpressionDictionaryFactory extends DictionaryFactory {
     SortField.Type type = null;
     String fieldTypeName = core.getLatestSchema().getField(sortFieldName).getType().getTypeName();
     FieldType ft = core.getLatestSchema().getFieldTypes().get(fieldTypeName);
-    if (ft instanceof TrieFloatField) {
+    if (ft instanceof TrieFloatField || ft instanceof FloatPointField) {
       type = SortField.Type.FLOAT;
-    } else if (ft instanceof TrieIntField) {
+    } else if (ft instanceof TrieIntField || ft instanceof IntPointField) {
       type = SortField.Type.INT;
-    } else if (ft instanceof TrieLongField) {
+    } else if (ft instanceof TrieLongField || ft instanceof LongPointField) {
       type = SortField.Type.LONG;
-    } else if (ft instanceof TrieDoubleField) {
+    } else if (ft instanceof TrieDoubleField || ft instanceof DoublePointField) {
       type = SortField.Type.DOUBLE;
     }
     return type;
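
A rough configuration sketch of where this matters (parameter names per the
Solr suggester documentation; the suggester and field names are made up): a
suggester whose weight is computed from an expression over a sort field that
may now be a point type:

    <lst name="suggester">
      <str name="name">exprSuggester</str>
      <str name="dictionaryImpl">DocumentExpressionDictionaryFactory</str>
      <str name="field">title</str>
      <str name="weightExpression">popularity_i_p * 2</str>
      <str name="sortField">popularity_i_p</str>
    </lst>

Before this change getSortFieldType would return null for the *PointField
types, so such a configuration could not resolve a sort type.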

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test-files/solr/collection1/conf/schema-distrib-interval-faceting.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-distrib-interval-faceting.xml b/solr/core/src/test-files/solr/collection1/conf/schema-distrib-interval-faceting.xml
index 79d200d..ff73fdc 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-distrib-interval-faceting.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-distrib-interval-faceting.xml
@@ -24,12 +24,18 @@
   <fieldType name="date" class="solr.TrieDateField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
   <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
   <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
+  
+  <!-- Point Fields -->
+  <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
+  <fieldType name="plong" class="solr.LongPointField" docValues="true"/>
+  <fieldType name="pdouble" class="solr.DoublePointField" docValues="true"/>
+  <fieldType name="pfloat" class="solr.FloatPointField" docValues="true"/>
 
   <field name="id" type="string" indexed="true" stored="true" docValues="false" multiValued="false" required="true"/>
   <field name="id_dv" type="string" indexed="false" stored="false" docValues="true" multiValued="false"
          required="true"/>
   <dynamicField name="*_i" type="int" indexed="true" stored="false" docValues="false"/>
-  <dynamicField name="*_i_dv" type="int" indexed="false" stored="false" docValues="true"/>
+  <dynamicField name="*_i_dv" type="${solr.tests.intClass:pint}" indexed="false" stored="false" docValues="true"/>
   <dynamicField name="*_is" type="int" indexed="true" stored="false" docValues="false" multiValued="true"/>
   <dynamicField name="*_is_dv" type="int" indexed="false" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_s" type="string" indexed="true" stored="false" docValues="false"/>
@@ -37,13 +43,13 @@
   <dynamicField name="*_ss" type="string" indexed="true" stored="false" docValues="false" multiValued="true"/>
   <dynamicField name="*_ss_dv" type="string" indexed="false" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_f" type="float" indexed="true" stored="false" docValues="false"/>
-  <dynamicField name="*_f_dv" type="float" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_f_dv" type="${solr.tests.floatClass:pfloat}" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_fs_dv" type="float" indexed="true" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_l" type="long" indexed="true" stored="false" docValues="false"/>
-  <dynamicField name="*_l_dv" type="long" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_l_dv" type="${solr.tests.longClass:plong}" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_ls_dv" type="long" indexed="true" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_d" type="double" indexed="true" stored="false" docValues="false"/>
-  <dynamicField name="*_d_dv" type="double" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_d_dv" type="${solr.tests.doubleClass:pdouble}" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_ds_dv" type="double" indexed="true" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_dt" type="date" indexed="true" stored="false" docValues="false"/>
   <dynamicField name="*_dt_dv" type="date" indexed="true" stored="false" docValues="true"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test-files/solr/collection1/conf/schema-docValuesFaceting.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-docValuesFaceting.xml b/solr/core/src/test-files/solr/collection1/conf/schema-docValuesFaceting.xml
index 113e868..673e7dd 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-docValuesFaceting.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-docValuesFaceting.xml
@@ -23,12 +23,17 @@
   <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
   <fieldType name="date" class="solr.TrieDateField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
   <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
+  <fieldType name="pint" class="solr.IntPointField"/>
+  <fieldType name="plong" class="solr.LongPointField"/>
+  <fieldType name="pdouble" class="solr.DoublePointField"/>
+  <fieldType name="pfloat" class="solr.FloatPointField"/>
 
   <field name="id" type="string" indexed="true" stored="true" docValues="false" multiValued="false" required="true"/>
   <field name="id_dv" type="string" indexed="false" stored="false" docValues="true" multiValued="false"
          required="true"/>
   <dynamicField name="*_i" type="int" indexed="true" stored="false" docValues="false"/>
   <dynamicField name="*_i_dv" type="int" indexed="false" stored="false" docValues="true"/>
+  <dynamicField name="*_i_p" type="pint" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_is" type="int" indexed="true" stored="false" docValues="false" multiValued="true"/>
   <dynamicField name="*_is_dv" type="int" indexed="false" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_s" type="string" indexed="true" stored="false" docValues="false"/>
@@ -37,14 +42,17 @@
   <dynamicField name="*_ss_dv" type="string" indexed="false" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_f" type="float" indexed="true" stored="false" docValues="false"/>
   <dynamicField name="*_f_dv" type="float" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_f_p" type="pfloat" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_fs" type="float" indexed="true" stored="false" docValues="false" multiValued="true"/>
   <dynamicField name="*_fs_dv" type="float" indexed="true" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_l" type="long" indexed="true" stored="false" docValues="false"/>
   <dynamicField name="*_l_dv" type="long" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_l_p" type="plong" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_ls" type="long" indexed="true" stored="false" docValues="false" multiValued="true"/>
   <dynamicField name="*_ls_dv" type="long" indexed="true" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_d" type="double" indexed="true" stored="false" docValues="false"/>
   <dynamicField name="*_d_dv" type="double" indexed="true" stored="false" docValues="true"/>
+  <dynamicField name="*_d_p" type="pdouble" indexed="true" stored="false" docValues="true"/>
   <dynamicField name="*_ds" type="double" indexed="true" stored="false" docValues="false" multiValued="true"/>
   <dynamicField name="*_ds_dv" type="double" indexed="true" stored="false" docValues="true" multiValued="true"/>
   <dynamicField name="*_dt" type="date" indexed="true" stored="false" docValues="false"/>
@@ -55,11 +63,15 @@
   <uniqueKey>id</uniqueKey>
 
   <copyField source="*_i" dest="*_i_dv"/>
+  <copyField source="*_i" dest="*_i_p"/>
   <copyField source="*_f" dest="*_f_dv"/>
+  <copyField source="*_f" dest="*_f_p"/>
   <copyField source="*_is" dest="*_is_dv"/>
   <copyField source="*_s" dest="*_s_dv"/>
   <copyField source="*_l" dest="*_l_dv"/>
+  <copyField source="*_l" dest="*_l_p"/>
   <copyField source="*_d" dest="*_d_dv"/>
+  <copyField source="*_d" dest="*_d_p"/>
   <copyField source="*_ss" dest="*_ss_dv"/>
   <copyField source="*_is" dest="*_is_dv"/>
   <copyField source="*_fs" dest="*_fs_dv"/>


[14/50] [abbrv] lucene-solr:apiv2: SOLR-9975: add SpellCheckComponentTest.testCollateExtendedResultsWithJsonNl method

Posted by no...@apache.org.
SOLR-9975: add SpellCheckComponentTest.testCollateExtendedResultsWithJsonNl method


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e816fbe2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e816fbe2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e816fbe2

Branch: refs/heads/apiv2
Commit: e816fbe233a9b667a9c30be63241c9400f5a0ebc
Parents: 637915b
Author: Christine Poerschke <cp...@apache.org>
Authored: Tue Jan 17 14:33:58 2017 +0000
Committer: Christine Poerschke <cp...@apache.org>
Committed: Tue Jan 17 15:55:51 2017 +0000

----------------------------------------------------------------------
 .../component/SpellCheckComponentTest.java      | 36 ++++++++++++++++++++
 1 file changed, 36 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e816fbe2/solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java
index 0e11d44..37d02d9 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java
@@ -184,6 +184,42 @@ public class SpellCheckComponentTest extends SolrTestCaseJ4 {
   
 
   @Test
+  public void testCollateExtendedResultsWithJsonNl() throws Exception {
+    final String q = "documemtsss broens";
+    final String jsonNl = "map";
+    final boolean collateExtendedResults = random().nextBoolean();
+    final List<String> testsList = new ArrayList<String>();
+    if (collateExtendedResults) {
+      testsList.add("/spellcheck/collations/collation/collationQuery=='document brown'");
+      testsList.add("/spellcheck/collations/collation/hits==0");
+      switch (jsonNl) {
+        case "map":
+          testsList.add("/spellcheck/collations/collation/misspellingsAndCorrections/documemtsss=='document'");
+          testsList.add("/spellcheck/collations/collation/misspellingsAndCorrections/broens=='brown'");
+          break;
+        default:
+          fail("unexpected json.nl choice: "+jsonNl);
+          break;
+      }
+    } else {
+      testsList.add("/spellcheck/collations/collation=='document brown'");
+    }
+    final String[] testsArray = new String[testsList.size()];
+    implTestCollateExtendedResultsWithJsonNl(q, jsonNl, collateExtendedResults, testsList.toArray(testsArray));
+  }
+
+  private void implTestCollateExtendedResultsWithJsonNl(String q, String jsonNl, boolean collateExtendedResults, String ... tests) throws Exception {
+    final SolrQueryRequest solrQueryRequest = req(
+        CommonParams.QT, rh,
+        CommonParams.Q, q,
+        "json.nl", jsonNl,
+        SpellCheckComponent.COMPONENT_NAME, "true",
+        SpellingParams.SPELLCHECK_COLLATE_EXTENDED_RESULTS, Boolean.toString(collateExtendedResults),
+        SpellingParams.SPELLCHECK_COLLATE, "true");
+    assertJQ(solrQueryRequest, tests);
+  }
+
+  @Test
   public void testCorrectSpelling() throws Exception {
     // Make sure correct spellings are signaled in the response
     assertJQ(req("json.nl","map", "qt",rh, SpellCheckComponent.COMPONENT_NAME, "true",


[36/50] [abbrv] lucene-solr:apiv2: LUCENE-7640: Fix test.

Posted by no...@apache.org.
LUCENE-7640: Fix test.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/85a05b54
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/85a05b54
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/85a05b54

Branch: refs/heads/apiv2
Commit: 85a05b546bee9ff7484372c44854d4fd66d63b36
Parents: e8fa599
Author: Adrien Grand <jp...@gmail.com>
Authored: Thu Jan 19 09:54:23 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Thu Jan 19 09:54:50 2017 +0100

----------------------------------------------------------------------
 .../lucene/codecs/lucene60/TestLucene60PointsFormat.java  | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/85a05b54/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
index 4287273..08dc6c6 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
@@ -31,7 +31,9 @@ import org.apache.lucene.index.BasePointsFormatTestCase;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.MockRandomMergePolicy;
 import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.SegmentWriteState;
@@ -97,7 +99,13 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
 
   public void testEstimatePointCount() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    // Avoid mockRandomMP since it may cause non-optimal merges that make the
+    // number of points per leaf hard to predict
+    while (iwc.getMergePolicy() instanceof MockRandomMergePolicy) {
+      iwc.setMergePolicy(newMergePolicy());
+    }
+    IndexWriter w = new IndexWriter(dir, iwc);
     byte[] pointValue = new byte[3];
     byte[] uniquePointValue = new byte[3];
     random().nextBytes(uniquePointValue);


[09/50] [abbrv] lucene-solr:apiv2: Various fixes and updates for index sorting on flush

Posted by no...@apache.org.
Various fixes and updates for index sorting on flush

* IndexWriter.validateIndexSort now throws a CorruptIndexException if a segment created by version >= 6.5.0 is not sorted (already applied in branch_6x)
* Removes unneeded check in AssertingLiveDocsFormat (already applied in branch_6x)
* Removes try/finally block when stored fields consumer finishes (already applied in branch_6x).


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1acd2ee2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1acd2ee2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1acd2ee2

Branch: refs/heads/apiv2
Commit: 1acd2ee2bbe2ccc3a0607df5447e6216f9b6eb9a
Parents: ed513fd
Author: Jim Ferenczi <ji...@elastic.co>
Authored: Tue Jan 17 14:22:47 2017 +0100
Committer: Jim Ferenczi <ji...@elastic.co>
Committed: Tue Jan 17 14:22:47 2017 +0100

----------------------------------------------------------------------
 .../org/apache/lucene/index/DefaultIndexingChain.java     |  5 +----
 .../src/java/org/apache/lucene/index/IndexWriter.java     | 10 +++++-----
 .../java/org/apache/lucene/index/SortingLeafReader.java   |  2 +-
 .../lucene/codecs/asserting/AssertingLiveDocsFormat.java  |  9 ++-------
 4 files changed, 9 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1acd2ee2/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
index 197ab31..b118c13 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
@@ -313,10 +313,7 @@ final class DefaultIndexingChain extends DocConsumer {
 
   @Override
   public void abort() {
-    try {
-      storedFieldsConsumer.abort();
-    } catch (Throwable t) {
-    }
+    storedFieldsConsumer.abort();
 
     try {
       // E.g. close any open files in the term vectors writer:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1acd2ee2/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 7f0b97c..0fc2e24 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -1034,17 +1034,17 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
 
   /** Confirms that the incoming index sort (if any) matches the existing index sort (if any).
    *  This is unfortunately just best effort, because it could be the old index only has unsorted flushed segments built
-   *  before {@link Version#LUCENE_7_0_0} (flushed segments are sorted in Lucene 7.0).  */
-  private void validateIndexSort() {
+   *  before {@link Version#LUCENE_6_5_0} (flushed segments are sorted in Lucene 7.0).  */
+  private void validateIndexSort() throws CorruptIndexException {
     Sort indexSort = config.getIndexSort();
     if (indexSort != null) {
       for(SegmentCommitInfo info : segmentInfos) {
         Sort segmentIndexSort = info.info.getIndexSort();
         if (segmentIndexSort != null && indexSort.equals(segmentIndexSort) == false) {
           throw new IllegalArgumentException("cannot change previous indexSort=" + segmentIndexSort + " (from segment=" + info + ") to new indexSort=" + indexSort);
-        } else if (segmentIndexSort == null) {
-          // Flushed segments are not sorted if they were built with a version prior to 7.0
-          assert info.info.getVersion().onOrAfter(Version.LUCENE_7_0_0) == false;
+        } else if (segmentIndexSort == null && info.info.getVersion().onOrAfter(Version.LUCENE_6_5_0)) {
+          // Flushed segments are not sorted if they were built with a version prior to 6.5.0
+          throw new CorruptIndexException("segment not sorted with indexSort=" + segmentIndexSort, info.info.toString());
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1acd2ee2/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
index b36b284..f24a4d0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -42,7 +42,7 @@ import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
 
 /**
  * An {@link org.apache.lucene.index.LeafReader} which supports sorting documents by a given
- * {@link Sort}. This is package private and is only used by Lucene fo BWC when it needs to merge
+ * {@link Sort}. This is package private and is only used by Lucene for BWC when it needs to merge
  * an unsorted flushed segment built by an older version (newly flushed segments are sorted since version 7.0).
  *
  * @lucene.experimental

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1acd2ee2/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java
index afc80d5..f4abb54 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingLiveDocsFormat.java
@@ -68,13 +68,8 @@ public class AssertingLiveDocsFormat extends LiveDocsFormat {
 
   @Override
   public void writeLiveDocs(MutableBits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException {
-    MutableBits raw = bits;
-    /**
-     * bits is not necessarily an AssertingMutableBits because index sorting needs to wrap it in a sorted view.
-     */
-    if (bits instanceof AssertingMutableBits) {
-      raw = (MutableBits) ((AssertingMutableBits) bits).in;
-    }
+    assert bits instanceof AssertingMutableBits;
+    MutableBits raw = (MutableBits) ((AssertingMutableBits)bits).in;
     check(raw, info.info.maxDoc(), info.getDelCount() + newDelCount);
     in.writeLiveDocs(raw, dir, info, newDelCount, context);
   }


[37/50] [abbrv] lucene-solr:apiv2: LUCENE-7645: Use JDK's Arrays.binarySearch in BaseCharFilter.

Posted by no...@apache.org.
LUCENE-7645: Use JDK's Arrays.binarySearch in BaseCharFilter.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a14d7936
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a14d7936
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a14d7936

Branch: refs/heads/apiv2
Commit: a14d79366f97ffde61b56aee2e2d9123ccadc8a7
Parents: 85a05b5
Author: Adrien Grand <jp...@gmail.com>
Authored: Thu Jan 19 11:27:24 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Thu Jan 19 11:27:24 2017 +0100

----------------------------------------------------------------------
 .../analysis/charfilter/BaseCharFilter.java     | 26 +++++---------------
 1 file changed, 6 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a14d7936/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java
index 48ffa48..4fba9fe 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/BaseCharFilter.java
@@ -41,31 +41,17 @@ public abstract class BaseCharFilter extends CharFilter {
   /** Retrieve the corrected offset. */
   @Override
   protected int correct(int currentOff) {
-    if (offsets == null || currentOff < offsets[0]) {
+    if (offsets == null) {
       return currentOff;
     }
-    
-    int hi = size - 1;
-    if(currentOff >= offsets[hi])
-      return currentOff + diffs[hi];
 
-    int lo = 0;
-    int mid = -1;
-    
-    while (hi >= lo) {
-      mid = (lo + hi) >>> 1;
-      if (currentOff < offsets[mid])
-        hi = mid - 1;
-      else if (currentOff > offsets[mid])
-        lo = mid + 1;
-      else
-        return currentOff + diffs[mid];
+    int index = Arrays.binarySearch(offsets, 0, size, currentOff);
+    if (index < -1) {
+      index = -2 - index;
     }
 
-    if (currentOff < offsets[mid])
-      return mid == 0 ? currentOff : currentOff + diffs[mid-1];
-    else
-      return currentOff + diffs[mid];
+    final int diff = index < 0 ? 0 : diffs[index];
+    return currentOff + diff;
   }
   
   protected int getLastCumulativeDiff() {
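
A quick refresher on the convention the new code relies on (illustration
only): when the key is absent, Arrays.binarySearch returns
(-(insertionPoint) - 1), so "-2 - index" recovers the index of the largest
stored offset below the key:

    int[] offsets = {3, 8, 15};
    int index = Arrays.binarySearch(offsets, 0, 3, 10); // no match -> -(2) - 1 = -3
    // -2 - (-3) == 1, i.e. offsets[1] == 8 is the nearest offset below 10.
    // A key below offsets[0] returns -1, which the "index < -1" guard leaves
    // alone, so the applied diff is 0 -- same as the old hand-rolled search.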


[17/50] [abbrv] lucene-solr:apiv2: SOLR-9977: Fix config bug in DistribDocExpirationUpdateProcessorTest that allowed false assumptions about when index version changes

Posted by no...@apache.org.
SOLR-9977: Fix config bug in DistribDocExpirationUpdateProcessorTest that allowed false assumptions about when index version changes


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9ee48aa8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9ee48aa8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9ee48aa8

Branch: refs/heads/apiv2
Commit: 9ee48aa857e15461dd6ec6482194141da72e0ba2
Parents: 0bdcfc2
Author: Chris Hostetter <ho...@apache.org>
Authored: Tue Jan 17 17:32:42 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Tue Jan 17 17:32:42 2017 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  3 ++
 .../solrconfig.snippet.randomindexconfig.xml    | 47 --------------------
 .../configsets/doc-expiry/conf/solrconfig.xml   | 15 ++++++-
 3 files changed, 16 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ee48aa8/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 42be8a2..5fd8a9e 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -95,6 +95,9 @@ Bug Fixes
 
 * SOLR-9976: Fix init bug in SegmentsInfoRequestHandlerTest (hossman)
 
+* SOLR-9977: Fix config bug in DistribDocExpirationUpdateProcessorTest that allowed false assumptions
+  about when index version changes (hossman)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ee48aa8/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.snippet.randomindexconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.snippet.randomindexconfig.xml b/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.snippet.randomindexconfig.xml
deleted file mode 100644
index ec5f54e..0000000
--- a/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.snippet.randomindexconfig.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0" ?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!-- 
-
-A solrconfig.xml snippet containing indexConfig settings for randomized testing.
-
--->
-<indexConfig>
-  <!-- this sys property is not set by SolrTestCaseJ4 because we ideally want to use
-       the RandomMergePolicy in all tests - but some tests expect very specific
-       Merge behavior, so those tests can set it as needed.
-  -->
-  <mergePolicy enable="${solr.tests.useMergePolicy:true}" class="${solr.tests.mergePolicy:org.apache.solr.util.RandomMergePolicy}" />
-  <mergePolicyFactory enable="${solr.tests.useMergePolicyFactory:true}" class="${solr.tests.mergePolicyFactory:org.apache.solr.util.RandomMergePolicyFactory}" />
-  
-  <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
-
-  <maxBufferedDocs>${solr.tests.maxBufferedDocs}</maxBufferedDocs>
-  <ramBufferSizeMB>${solr.tests.ramBufferSizeMB}</ramBufferSizeMB>
-
-  <mergeScheduler class="${solr.tests.mergeScheduler}" />
-
-  <writeLockTimeout>1000</writeLockTimeout>
-  <commitLockTimeout>10000</commitLockTimeout>
-
-  <!-- this sys property is not set by SolrTestCaseJ4 because almost all tests should
-       use the single process lockType for speed - but tests that explicitly need
-       to vary the lockType canset it as needed.
-  -->
-  <lockType>${solr.tests.lockType:single}</lockType>
-</indexConfig>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9ee48aa8/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.xml b/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.xml
index 18d16a3..2599744 100644
--- a/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.xml
+++ b/solr/core/src/test-files/solr/configsets/doc-expiry/conf/solrconfig.xml
@@ -25,14 +25,25 @@
 
   <dataDir>${solr.data.dir:}</dataDir>
 
+  <indexConfig>
+    <!-- NOTE: we do *NOT* want Randomized Merging for these tests,
+         because we need to be able to assert that index changes are only happening
+         on the shards we expected them to as a result of our deletes.
+         
+         (the random/mock merge classes can cause new readers to be opened after a commit
+         even if the index itself hasn't changed - ex: new segments file listing same exact segments
+         
+         Instead use Solr defaults for almost everything
+    -->
+    <lockType>${solr.tests.lockType:single}</lockType>
+  </indexConfig>
+  
   <directoryFactory name="DirectoryFactory"
                     class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
   <schemaFactory class="ClassicIndexSchemaFactory"/>
 
   <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
 
-  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
-
   <updateHandler class="solr.DirectUpdateHandler2">
     <updateLog>
       <str name="dir">${solr.ulog.dir:}</str>


[27/50] [abbrv] lucene-solr:apiv2: SOLR-8396: Add support for PointFields in Solr

Posted by no...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
index d8f3ae5..95c403a 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacets.java
@@ -34,6 +34,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.JSONTestUtil;
 import org.apache.solr.SolrTestCaseHS;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.request.macro.MacroExpander;
 import org.junit.AfterClass;
@@ -41,6 +42,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 @LuceneTestCase.SuppressCodecs({"Lucene3x","Lucene40","Lucene41","Lucene42","Lucene45","Appending"})
+@SuppressPointFields
 public class TestJsonFacets extends SolrTestCaseHS {
 
   private static SolrInstances servers;  // for distributed testing

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57934ba4/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index 61de56d..59e74d90 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -167,11 +167,18 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
   public static final String SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICYFACTORY = "solr.tests.useMergePolicyFactory";
   @Deprecated // remove solr.tests.useMergePolicy use with SOLR-8668
   public static final String SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICY = "solr.tests.useMergePolicy";
+  
+  /**
+   * The system property {@code "solr.tests.preferPointFields"} can be used to make tests use PointFields when possible. 
+   * PointFields will only be used if the schema used by the tests uses "${solr.tests.TYPEClass}" when defining fields. 
+   * If this environment variable is not set, those tests will use PointFields 50% of the times and TrieFields the rest.
+   */
+  public static final boolean PREFER_POINT_FIELDS = Boolean.getBoolean("solr.tests.preferPointFields");
 
   private static String coreName = DEFAULT_TEST_CORENAME;
 
   public static int DEFAULT_CONNECTION_TIMEOUT = 60000;  // default socket connection timeout in ms
-
+  
   protected void writeCoreProperties(Path coreDirectory, String corename) throws IOException {
     Properties props = new Properties();
     props.setProperty("name", corename);
@@ -215,6 +222,19 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
     public String bugUrl();
   }
   
+  /**
+   * Annotation for test classes that want to disable PointFields.
+   * PointFields will otherwise randomly used by some schemas.
+   */
+  @Documented
+  @Inherited
+  @Retention(RetentionPolicy.RUNTIME)
+  @Target(ElementType.TYPE)
+  public @interface SuppressPointFields {
+    /** Point to JIRA entry. */
+    public String bugUrl() default "None";
+  }
+  
   // these are meant to be accessed sequentially, but are volatile just to ensure any test
   // thread will read the latest value
   protected static volatile SSLTestConfig sslConfig;
@@ -419,10 +439,12 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
     lrf = h.getRequestFactory("standard", 0, 20, CommonParams.VERSION, "2.2");
   }
   
-  /** sets system properties based on 
+  /** 
+   * Sets system properties to allow generation of random configurations of
+   * solrconfig.xml and schema.xml. 
+   * Sets properties used on  
    * {@link #newIndexWriterConfig(org.apache.lucene.analysis.Analyzer)}
-   * 
-   * configs can use these system properties to vary the indexwriter settings
+   *  and base schema.xml (Point Fields)
    */
   public static void newRandomConfig() {
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
@@ -438,6 +460,20 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
       mergeSchedulerClass = "org.apache.lucene.index.ConcurrentMergeScheduler";
     }
     System.setProperty("solr.tests.mergeScheduler", mergeSchedulerClass);
+    if (RandomizedContext.current().getTargetClass().isAnnotationPresent(SuppressPointFields.class)
+        || (!PREFER_POINT_FIELDS && random().nextBoolean())) {
+      log.info("Using TrieFields");
+      System.setProperty("solr.tests.intClass", "int");
+      System.setProperty("solr.tests.longClass", "long");
+      System.setProperty("solr.tests.doubleClass", "double");
+      System.setProperty("solr.tests.floatClass", "float");
+    } else {
+      log.info("Using PointFields");
+      System.setProperty("solr.tests.intClass", "pint");
+      System.setProperty("solr.tests.longClass", "plong");
+      System.setProperty("solr.tests.doubleClass", "pdouble");
+      System.setProperty("solr.tests.floatClass", "pfloat");
+    }
   }
 
   public static Throwable getWrappedException(Throwable e) {
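
Usage is as shown in the TestJsonFacets hunk above: a test class that cannot
yet cope with point fields opts out via the annotation, e.g.

    @SuppressPointFields
    public class MyLegacyTest extends SolrTestCaseJ4 { ... }

(class name made up), while setting -Dsolr.tests.preferPointFields=true on
the test JVM makes all non-suppressed randomized schemas use the p* types.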


[34/50] [abbrv] lucene-solr:apiv2: SOLR-9885: Allow pre-startup Solr log management in Solr bin scripts to be disabled.

Posted by no...@apache.org.
SOLR-9885: Allow pre-startup Solr log management in Solr bin scripts to be disabled.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/075aec91
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/075aec91
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/075aec91

Branch: refs/heads/apiv2
Commit: 075aec91cd2c10e3f9a62adcf0feadc705c205ec
Parents: bb35732
Author: markrmiller <ma...@apache.org>
Authored: Thu Jan 19 03:07:09 2017 -0500
Committer: markrmiller <ma...@apache.org>
Committed: Thu Jan 19 03:07:09 2017 -0500

----------------------------------------------------------------------
 solr/CHANGES.txt     |  2 ++
 solr/bin/solr        | 11 ++++++-----
 solr/bin/solr.cmd    | 13 +++++++++----
 solr/bin/solr.in.cmd |  5 +++++
 solr/bin/solr.in.sh  |  5 +++++
 5 files changed, 27 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/075aec91/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index aab5116..c0fe505 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -102,6 +102,8 @@ New Features
 
 * SOLR-9926: Allow passing arbitrary java system properties to zkcli. (Hrishikesh Gadre via Mark Miller)
 
+* SOLR-9885: Allow pre-startup Solr log management in Solr bin scripts to be disabled. (Mano Kovacs via Mark Miller)
+
 Bug Fixes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/075aec91/solr/bin/solr
----------------------------------------------------------------------
diff --git a/solr/bin/solr b/solr/bin/solr
index fcf864b..c2d0feb 100755
--- a/solr/bin/solr
+++ b/solr/bin/solr
@@ -1480,11 +1480,12 @@ if [ ! -e "$SOLR_HOME" ]; then
   echo -e "\nSolr home directory $SOLR_HOME not found!\n"
   exit 1
 fi
-
-run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -remove_old_solr_logs 7 || echo "Failed removing old solr logs"
-run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -archive_gc_logs        || echo "Failed archiving old GC logs"
-run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -archive_console_logs   || echo "Failed archiving old console logs"
-run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -rotate_solr_logs 9     || echo "Failed rotating old solr logs"
+if [ "${SOLR_LOG_PRESTART_ROTATION:=true}" == "true" ]; then
+  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -remove_old_solr_logs 7 || echo "Failed removing old solr logs"
+  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -archive_gc_logs        || echo "Failed archiving old GC logs"
+  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -archive_console_logs   || echo "Failed archiving old console logs"
+  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -rotate_solr_logs 9     || echo "Failed rotating old solr logs"
+fi
 
 java_ver_out=`echo "$("$JAVA" -version 2>&1)"`
 JAVA_VERSION=`echo $java_ver_out | grep "java version" | awk '{ print substr($3, 2, length($3)-2); }'`

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/075aec91/solr/bin/solr.cmd
----------------------------------------------------------------------
diff --git a/solr/bin/solr.cmd b/solr/bin/solr.cmd
index 04398bc..732c2de 100644
--- a/solr/bin/solr.cmd
+++ b/solr/bin/solr.cmd
@@ -939,10 +939,15 @@ IF ERRORLEVEL 1 (
 )
 
 REM Clean up and rotate logs
-call :run_utils "-remove_old_solr_logs 7" || echo "Failed removing old solr logs"
-call :run_utils "-archive_gc_logs"        || echo "Failed archiving old GC logs"
-call :run_utils "-archive_console_logs"   || echo "Failed archiving old console logs"
-call :run_utils "-rotate_solr_logs 9"     || echo "Failed rotating old solr logs"
+IF [%SOLR_LOG_PRESTART_ROTATION%] == [] (
+  set SOLR_LOG_PRESTART_ROTATION=true
+)
+IF [%SOLR_LOG_PRESTART_ROTATION%] == [true] (
+  call :run_utils "-remove_old_solr_logs 7" || echo "Failed removing old solr logs"
+  call :run_utils "-archive_gc_logs"        || echo "Failed archiving old GC logs"
+  call :run_utils "-archive_console_logs"   || echo "Failed archiving old console logs"
+  call :run_utils "-rotate_solr_logs 9"     || echo "Failed rotating old solr logs"
+)
 
 IF NOT "%ZK_HOST%"=="" set SOLR_MODE=solrcloud
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/075aec91/solr/bin/solr.in.cmd
----------------------------------------------------------------------
diff --git a/solr/bin/solr.in.cmd b/solr/bin/solr.in.cmd
index d323434..e565c02 100644
--- a/solr/bin/solr.in.cmd
+++ b/solr/bin/solr.in.cmd
@@ -75,6 +75,11 @@ REM set SOLR_LOG_LEVEL=INFO
 REM Location where Solr should write logs to. Absolute or relative to solr start dir
 REM set SOLR_LOGS_DIR=logs
 
+REM Enables log rotation, cleanup, and archiving before starting Solr. Setting SOLR_LOG_PRESTART_ROTATION=false will skip start
+REM time rotation of logs, and the archiving of the last GC and console log files. It does not affect Log4j configuration. This
+REM pre-startup rotation may need to be disabled depending how much you customize the default logging setup.
+REM set SOLR_LOG_PRESTART_ROTATION=true
+
 REM Set the host interface to listen on. Jetty will listen on all interfaces (0.0.0.0) by default.
 REM This must be an IPv4 ("a.b.c.d") or bracketed IPv6 ("[x::y]") address, not a hostname!
 REM set SOLR_JETTY_HOST=0.0.0.0

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/075aec91/solr/bin/solr.in.sh
----------------------------------------------------------------------
diff --git a/solr/bin/solr.in.sh b/solr/bin/solr.in.sh
index e5dd0c9..5a9f807 100644
--- a/solr/bin/solr.in.sh
+++ b/solr/bin/solr.in.sh
@@ -91,6 +91,11 @@
 # Location where Solr should write logs to. Absolute or relative to solr start dir
 #SOLR_LOGS_DIR=logs
 
+# Enables log rotation, cleanup, and archiving during start. Setting SOLR_LOG_PRESTART_ROTATION=false will skip start
+# time rotation of logs, and the archiving of the last GC and console log files. It does not affect Log4j configuration.
+# This pre-startup rotation may need to be disabled depending how much you customize the default logging setup.
+#SOLR_LOG_PRESTART_ROTATION=true
+
 # Sets the port Solr binds to, default is 8983
 #SOLR_PORT=8983
 


[02/50] [abbrv] lucene-solr:apiv2: Remove unnecessary @Override annotation in CoreParser.java class.

Posted by no...@apache.org.
Remove unnecessary @Override annotation in CoreParser.java class.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/649c58de
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/649c58de
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/649c58de

Branch: refs/heads/apiv2
Commit: 649c58de0252ba608963e0fe699f8332c870b294
Parents: efc7ee0
Author: Christine Poerschke <cp...@apache.org>
Authored: Mon Jan 16 15:10:51 2017 +0000
Committer: Christine Poerschke <cp...@apache.org>
Committed: Mon Jan 16 15:29:30 2017 +0000

----------------------------------------------------------------------
 .../src/java/org/apache/lucene/queryparser/xml/CoreParser.java     | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/649c58de/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
index 1bf82ac..d8aa8ef 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
@@ -141,8 +141,6 @@ public class CoreParser implements QueryBuilder {
     return doc;
   }
 
-
-  @Override
   public Query getQuery(Element e) throws ParserException {
     return queryFactory.getQuery(e);
   }


[21/50] [abbrv] lucene-solr:apiv2: WordDelimiterGraphFilter can't correct offsets if a CharFilter has changed them

Posted by no...@apache.org.
WordDelimiterGraphFilter can't correct offsets if a CharFilter has changed them


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/57626c9a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/57626c9a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/57626c9a

Branch: refs/heads/apiv2
Commit: 57626c9a998b45b27d38dc3610c8455e309cff45
Parents: 188a19e
Author: Mike McCandless <mi...@apache.org>
Authored: Wed Jan 18 11:03:08 2017 -0500
Committer: Mike McCandless <mi...@apache.org>
Committed: Wed Jan 18 11:04:01 2017 -0500

----------------------------------------------------------------------
 .../test/org/apache/lucene/analysis/core/TestRandomChains.java    | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/57626c9a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
index 0bd5e0a..8953f9f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
@@ -80,6 +80,7 @@ import org.apache.lucene.analysis.miscellaneous.LimitTokenPositionFilter;
 import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter.StemmerOverrideMap;
 import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
 import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
 import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
 import org.apache.lucene.analysis.payloads.IdentityEncoder;
@@ -152,6 +153,8 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
           ValidatingTokenFilter.class, 
           // TODO: needs to be a tokenizer, doesnt handle graph inputs properly (a shingle or similar following will then cause pain)
           WordDelimiterFilter.class,
+          // Cannot correct offsets when a char filter had changed them:
+          WordDelimiterGraphFilter.class,
           // clones of core's filters:
           org.apache.lucene.analysis.core.StopFilter.class,
           org.apache.lucene.analysis.core.LowerCaseFilter.class)) {


[39/50] [abbrv] lucene-solr:apiv2: LUCENE-7640: Fix test bug.

Posted by no...@apache.org.
LUCENE-7640: Fix test bug.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a2131a9e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a2131a9e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a2131a9e

Branch: refs/heads/apiv2
Commit: a2131a9e1e3a22dec3ab2185c06999edac3e2f73
Parents: eba9390
Author: Adrien Grand <jp...@gmail.com>
Authored: Thu Jan 19 20:02:18 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Thu Jan 19 20:02:18 2017 +0100

----------------------------------------------------------------------
 lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a2131a9e/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
index f01f058..fecdaa5 100644
--- a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
+++ b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
@@ -1183,8 +1183,8 @@ public class TestBKD extends LuceneTestCase {
           
           @Override
           public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
-            if (StringHelper.compare(3, uniquePointValue, 0, maxPackedValue, 0) > 0 ||
-                StringHelper.compare(3, uniquePointValue, 0, minPackedValue, 0) < 0) {
+            if (StringHelper.compare(numBytesPerDim, uniquePointValue, 0, maxPackedValue, 0) > 0 ||
+                StringHelper.compare(numBytesPerDim, uniquePointValue, 0, minPackedValue, 0) < 0) {
               return Relation.CELL_OUTSIDE_QUERY;
             }
             return Relation.CELL_CROSSES_QUERY;


[06/50] [abbrv] lucene-solr:apiv2: LUCENE-7637: Require that all terms of a TermsQuery come from the same field.

Posted by no...@apache.org.
LUCENE-7637: Require that all terms of a TermsQuery come from the same field.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/43874fc5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/43874fc5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/43874fc5

Branch: refs/heads/apiv2
Commit: 43874fc5b5c7fe37c70524693ea2db4ef0e01f95
Parents: 86233cb
Author: Adrien Grand <jp...@gmail.com>
Authored: Tue Jan 17 08:45:28 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Tue Jan 17 08:51:58 2017 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  11 +-
 .../apache/lucene/search/TermInSetQuery.java    |  93 ++++----------
 .../lucene/search/TermInSetQueryTest.java       | 123 +++++++------------
 .../apache/lucene/facet/MultiFacetQuery.java    |  13 +-
 .../spatial/prefix/NumberRangeFacetsTest.java   |   8 +-
 5 files changed, 84 insertions(+), 164 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/43874fc5/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 59992ea..2e015a3 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -63,6 +63,14 @@ Other
 
 ======================= Lucene 6.5.0 =======================
 
+API Changes
+
+* LUCENE-7624: TermsQuery has been renamed as TermInSetQuery and moved to core.
+  (Alan Woodward)
+
+* LUCENE-7637: TermInSetQuery requires that all terms come from the same field.
+  (Adrien Grand)
+
 New Features
 
 * LUCENE-7623: Add FunctionScoreQuery and FunctionMatchQuery (Alan Woodward,
@@ -107,9 +115,6 @@ API Changes
 * LUCENE-7611: DocumentValueSourceDictionary now takes a LongValuesSource
   as a parameter, and the ValueSource equivalent is deprecated (Alan Woodward)
 
-* LUCENE-7624: TermsQuery has been renamed as TermInSetQuery and moved to core.
-  (Alan Woodward)
-
 New features
 
 * LUCENE-5867: Added BooleanSimilarity. (Robert Muir, Adrien Grand)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/43874fc5/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
index e1a1575..08fe3c3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
@@ -21,7 +21,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
@@ -73,39 +72,12 @@ public class TermInSetQuery extends Query implements Accountable {
   // Same threshold as MultiTermQueryConstantScoreWrapper
   static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;
 
-  private final boolean singleField; // whether all terms are from the same field
+  private final String field;
   private final PrefixCodedTerms termData;
   private final int termDataHashCode; // cached hashcode of termData
 
   /**
-   * Creates a new {@link TermInSetQuery} from the given collection. It
-   * can contain duplicate terms and multiple fields.
-   */
-  public TermInSetQuery(Collection<Term> terms) {
-    Term[] sortedTerms = terms.toArray(new Term[terms.size()]);
-    // already sorted if we are a SortedSet with natural order
-    boolean sorted = terms instanceof SortedSet && ((SortedSet<Term>)terms).comparator() == null;
-    if (!sorted) {
-      ArrayUtil.timSort(sortedTerms);
-    }
-    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
-    Set<String> fields = new HashSet<>();
-    Term previous = null;
-    for (Term term : sortedTerms) {
-      if (term.equals(previous) == false) {
-        fields.add(term.field());
-        builder.add(term);
-      }
-      previous = term;
-    }
-    singleField = fields.size() == 1;
-    termData = builder.finish();
-    termDataHashCode = termData.hashCode();
-  }
-
-  /**
-   * Creates a new {@link TermInSetQuery} from the given collection for
-   * a single field. It can contain duplicate terms.
+   * Creates a new {@link TermInSetQuery} from the given collection of terms.
    */
   public TermInSetQuery(String field, Collection<BytesRef> terms) {
     BytesRef[] sortedTerms = terms.toArray(new BytesRef[terms.size()]);
@@ -125,27 +97,18 @@ public class TermInSetQuery extends Query implements Accountable {
       builder.add(field, term);
       previous.copyBytes(term);
     }
-    singleField = true;
+    this.field = field;
     termData = builder.finish();
     termDataHashCode = termData.hashCode();
   }
 
   /**
-   * Creates a new {@link TermInSetQuery} from the given {@link BytesRef} array for
-   * a single field.
+   * Creates a new {@link TermInSetQuery} from the given array of terms.
    */
   public TermInSetQuery(String field, BytesRef...terms) {
     this(field, Arrays.asList(terms));
   }
 
-  /**
-   * Creates a new {@link TermInSetQuery} from the given array. The array can
-   * contain duplicate terms and multiple fields.
-   */
-  public TermInSetQuery(final Term... terms) {
-    this(Arrays.asList(terms));
-  }
-
   @Override
   public Query rewrite(IndexReader reader) throws IOException {
     final int threshold = Math.min(BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD, BooleanQuery.getMaxClauseCount());
@@ -167,6 +130,7 @@ public class TermInSetQuery extends Query implements Accountable {
   }
 
   private boolean equalsTo(TermInSetQuery other) {
+    // no need to check 'field' explicitly since it is encoded in 'termData'
     // termData might be heavy to compare so check the hash code first
     return termDataHashCode == other.termDataHashCode &&
         termData.equals(other.termData);
@@ -260,6 +224,15 @@ public class TermInSetQuery extends Query implements Accountable {
       private WeightOrDocIdSet rewrite(LeafReaderContext context) throws IOException {
         final LeafReader reader = context.reader();
 
+        final Fields fields = reader.fields();
+        Terms terms = fields.terms(field);
+        if (terms == null) {
+          return null;
+        }
+        TermsEnum termsEnum = terms.iterator();
+        PostingsEnum docs = null;
+        TermIterator iterator = termData.iterator();
+
         // We will first try to collect up to 'threshold' terms into 'matchingTerms'
         // if there are two many terms, we will fall back to building the 'builder'
         final int threshold = Math.min(BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD, BooleanQuery.getMaxClauseCount());
@@ -267,25 +240,9 @@ public class TermInSetQuery extends Query implements Accountable {
         List<TermAndState> matchingTerms = new ArrayList<>(threshold);
         DocIdSetBuilder builder = null;
 
-        final Fields fields = reader.fields();
-        String lastField = null;
-        Terms terms = null;
-        TermsEnum termsEnum = null;
-        PostingsEnum docs = null;
-        TermIterator iterator = termData.iterator();
         for (BytesRef term = iterator.next(); term != null; term = iterator.next()) {
-          String field = iterator.field();
-          // comparing references is fine here
-          if (field != lastField) {
-            terms = fields.terms(field);
-            if (terms == null) {
-              termsEnum = null;
-            } else {
-              termsEnum = terms.iterator();
-            }
-            lastField = field;
-          }
-          if (termsEnum != null && termsEnum.seekExact(term)) {
+          assert field.equals(iterator.field());
+          if (termsEnum.seekExact(term)) {
             if (matchingTerms == null) {
               docs = termsEnum.postings(docs, PostingsEnum.NONE);
               builder.add(docs);
@@ -293,15 +250,7 @@ public class TermInSetQuery extends Query implements Accountable {
               matchingTerms.add(new TermAndState(field, termsEnum));
             } else {
               assert matchingTerms.size() == threshold;
-              if (singleField) {
-                // common case: all terms are in the same field
-                // use an optimized builder that leverages terms stats to be more efficient
-                builder = new DocIdSetBuilder(reader.maxDoc(), terms);
-              } else {
-                // corner case: different fields
-                // don't make assumptions about the docs we will get
-                builder = new DocIdSetBuilder(reader.maxDoc());
-              }
+              builder = new DocIdSetBuilder(reader.maxDoc(), terms);
               docs = termsEnum.postings(docs, PostingsEnum.NONE);
               builder.add(docs);
               for (TermAndState t : matchingTerms) {
@@ -344,7 +293,9 @@ public class TermInSetQuery extends Query implements Accountable {
       @Override
       public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
         final WeightOrDocIdSet weightOrBitSet = rewrite(context);
-        if (weightOrBitSet.weight != null) {
+        if (weightOrBitSet == null) {
+          return null;
+        } else if (weightOrBitSet.weight != null) {
           return weightOrBitSet.weight.bulkScorer(context);
         } else {
           final Scorer scorer = scorer(weightOrBitSet.set);
@@ -358,7 +309,9 @@ public class TermInSetQuery extends Query implements Accountable {
       @Override
       public Scorer scorer(LeafReaderContext context) throws IOException {
         final WeightOrDocIdSet weightOrBitSet = rewrite(context);
-        if (weightOrBitSet.weight != null) {
+        if (weightOrBitSet == null) {
+          return null;
+        } else if (weightOrBitSet.weight != null) {
           return weightOrBitSet.weight.scorer(context);
         } else {
           return scorer(weightOrBitSet.set);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/43874fc5/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java b/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
index e694d97..3878d59 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
@@ -18,15 +18,12 @@ package org.apache.lucene.search;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field.Store;
@@ -53,25 +50,25 @@ public class TermInSetQueryTest extends LuceneTestCase {
 
   public void testDuel() throws IOException {
     final int iters = atLeast(2);
+    final String field = "f";
     for (int iter = 0; iter < iters; ++iter) {
-      final List<Term> allTerms = new ArrayList<>();
+      final List<BytesRef> allTerms = new ArrayList<>();
       final int numTerms = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 10));
       for (int i = 0; i < numTerms; ++i) {
-        final String field = usually() ? "f" : "g";
         final String value = TestUtil.randomAnalysisString(random(), 10, true);
-        allTerms.add(new Term(field, value));
+        allTerms.add(new BytesRef(value));
       }
       Directory dir = newDirectory();
       RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
       final int numDocs = atLeast(100);
       for (int i = 0; i < numDocs; ++i) {
         Document doc = new Document();
-        final Term term = allTerms.get(random().nextInt(allTerms.size()));
-        doc.add(new StringField(term.field(), term.text(), Store.NO));
+        final BytesRef term = allTerms.get(random().nextInt(allTerms.size()));
+        doc.add(new StringField(field, term, Store.NO));
         iw.addDocument(doc);
       }
       if (numTerms > 1 && random().nextBoolean()) {
-        iw.deleteDocuments(new TermQuery(allTerms.get(0)));
+        iw.deleteDocuments(new TermQuery(new Term(field, allTerms.get(0))));
       }
       iw.commit();
       final IndexReader reader = iw.getReader();
@@ -87,16 +84,16 @@ public class TermInSetQueryTest extends LuceneTestCase {
       for (int i = 0; i < 100; ++i) {
         final float boost = random().nextFloat() * 10;
         final int numQueryTerms = TestUtil.nextInt(random(), 1, 1 << TestUtil.nextInt(random(), 1, 8));
-        List<Term> queryTerms = new ArrayList<>();
+        List<BytesRef> queryTerms = new ArrayList<>();
         for (int j = 0; j < numQueryTerms; ++j) {
           queryTerms.add(allTerms.get(random().nextInt(allTerms.size())));
         }
         final BooleanQuery.Builder bq = new BooleanQuery.Builder();
-        for (Term t : queryTerms) {
-          bq.add(new TermQuery(t), Occur.SHOULD);
+        for (BytesRef t : queryTerms) {
+          bq.add(new TermQuery(new Term(field, t)), Occur.SHOULD);
         }
         final Query q1 = new ConstantScoreQuery(bq.build());
-        final Query q2 = new TermInSetQuery(queryTerms);
+        final Query q2 = new TermInSetQuery(field, queryTerms);
         assertSameMatches(searcher, new BoostQuery(q1, boost), new BoostQuery(q2, boost), true);
       }
 
@@ -118,103 +115,72 @@ public class TermInSetQueryTest extends LuceneTestCase {
     }
   }
 
-  private TermInSetQuery termsQuery(boolean singleField, Term...terms) {
-    return termsQuery(singleField, Arrays.asList(terms));
-  }
-
-  private TermInSetQuery termsQuery(boolean singleField, Collection<Term> termList) {
-    if (!singleField) {
-      return new TermInSetQuery(new ArrayList<>(termList));
-    }
-    final TermInSetQuery filter;
-    List<BytesRef> bytes = new ArrayList<>();
-    String field = null;
-    for (Term term : termList) {
-        bytes.add(term.bytes());
-        if (field != null) {
-          assertEquals(term.field(), field);
-        }
-        field = term.field();
-    }
-    assertNotNull(field);
-    filter = new TermInSetQuery(field, bytes);
-    return filter;
-  }
-
   public void testHashCodeAndEquals() {
     int num = atLeast(100);
-    final boolean singleField = random().nextBoolean();
-    List<Term> terms = new ArrayList<>();
-    Set<Term> uniqueTerms = new HashSet<>();
+    List<BytesRef> terms = new ArrayList<>();
+    Set<BytesRef> uniqueTerms = new HashSet<>();
     for (int i = 0; i < num; i++) {
-      String field = "field" + (singleField ? "1" : random().nextInt(100));
       String string = TestUtil.randomRealisticUnicodeString(random());
-      terms.add(new Term(field, string));
-      uniqueTerms.add(new Term(field, string));
-      TermInSetQuery left = termsQuery(singleField ? random().nextBoolean() : false, uniqueTerms);
+      terms.add(new BytesRef(string));
+      uniqueTerms.add(new BytesRef(string));
+      TermInSetQuery left = new TermInSetQuery("field", uniqueTerms);
       Collections.shuffle(terms, random());
-      TermInSetQuery right = termsQuery(singleField ? random().nextBoolean() : false, terms);
+      TermInSetQuery right = new TermInSetQuery("field", terms);
       assertEquals(right, left);
       assertEquals(right.hashCode(), left.hashCode());
       if (uniqueTerms.size() > 1) {
-        List<Term> asList = new ArrayList<>(uniqueTerms);
+        List<BytesRef> asList = new ArrayList<>(uniqueTerms);
         asList.remove(0);
-        TermInSetQuery notEqual = termsQuery(singleField ? random().nextBoolean() : false, asList);
+        TermInSetQuery notEqual = new TermInSetQuery("field", asList);
         assertFalse(left.equals(notEqual));
         assertFalse(right.equals(notEqual));
       }
     }
 
-    TermInSetQuery tq1 = new TermInSetQuery(new Term("thing", "apple"));
-    TermInSetQuery tq2 = new TermInSetQuery(new Term("thing", "orange"));
+    TermInSetQuery tq1 = new TermInSetQuery("thing", new BytesRef("apple"));
+    TermInSetQuery tq2 = new TermInSetQuery("thing", new BytesRef("orange"));
     assertFalse(tq1.hashCode() == tq2.hashCode());
 
     // different fields with the same term should have differing hashcodes
-    tq1 = new TermInSetQuery(new Term("thing1", "apple"));
-    tq2 = new TermInSetQuery(new Term("thing2", "apple"));
+    tq1 = new TermInSetQuery("thing", new BytesRef("apple"));
+    tq2 = new TermInSetQuery("thing2", new BytesRef("apple"));
     assertFalse(tq1.hashCode() == tq2.hashCode());
   }
 
-  public void testSingleFieldEquals() {
+  public void testSimpleEquals() {
     // Two terms with the same hash code
     assertEquals("AaAaBB".hashCode(), "BBBBBB".hashCode());
-    TermInSetQuery left = termsQuery(true, new Term("id", "AaAaAa"), new Term("id", "AaAaBB"));
-    TermInSetQuery right = termsQuery(true, new Term("id", "AaAaAa"), new Term("id", "BBBBBB"));
+    TermInSetQuery left = new TermInSetQuery("id", new BytesRef("AaAaAa"), new BytesRef("AaAaBB"));
+    TermInSetQuery right = new TermInSetQuery("id", new BytesRef("AaAaAa"), new BytesRef("BBBBBB"));
     assertFalse(left.equals(right));
   }
 
   public void testToString() {
-    TermInSetQuery termsQuery = new TermInSetQuery(new Term("field1", "a"),
-                                              new Term("field1", "b"),
-                                              new Term("field1", "c"));
+    TermInSetQuery termsQuery = new TermInSetQuery("field1",
+        new BytesRef("a"), new BytesRef("b"), new BytesRef("c"));
     assertEquals("field1:a field1:b field1:c", termsQuery.toString());
   }
 
   public void testDedup() {
-    Query query1 = new TermInSetQuery(new Term("foo", "bar"));
-    Query query2 = new TermInSetQuery(new Term("foo", "bar"), new Term("foo", "bar"));
+    Query query1 = new TermInSetQuery("foo", new BytesRef("bar"));
+    Query query2 = new TermInSetQuery("foo", new BytesRef("bar"), new BytesRef("bar"));
     QueryUtils.checkEqual(query1, query2);
   }
 
   public void testOrderDoesNotMatter() {
     // order of terms if different
-    Query query1 = new TermInSetQuery(new Term("foo", "bar"), new Term("foo", "baz"));
-    Query query2 = new TermInSetQuery(new Term("foo", "baz"), new Term("foo", "bar"));
-    QueryUtils.checkEqual(query1, query2);
-
-    // order of fields is different
-    query1 = new TermInSetQuery(new Term("foo", "bar"), new Term("bar", "bar"));
-    query2 = new TermInSetQuery(new Term("bar", "bar"), new Term("foo", "bar"));
+    Query query1 = new TermInSetQuery("foo", new BytesRef("bar"), new BytesRef("baz"));
+    Query query2 = new TermInSetQuery("foo", new BytesRef("baz"), new BytesRef("bar"));
     QueryUtils.checkEqual(query1, query2);
   }
 
   public void testRamBytesUsed() {
-    List<Term> terms = new ArrayList<>();
+    List<BytesRef> terms = new ArrayList<>();
     final int numTerms = 1000 + random().nextInt(1000);
     for (int i = 0; i < numTerms; ++i) {
-      terms.add(new Term("f", RandomStrings.randomUnicodeOfLength(random(), 10)));
+      terms.add(new BytesRef(RandomStrings.randomUnicodeOfLength(random(), 10)));
     }
-    TermInSetQuery query = new TermInSetQuery(terms);
+    TermInSetQuery query = new TermInSetQuery("f", terms);
     final long actualRamBytesUsed = RamUsageTester.sizeOf(query);
     final long expectedRamBytesUsed = query.ramBytesUsed();
     // error margin within 5%
@@ -281,43 +247,40 @@ public class TermInSetQueryTest extends LuceneTestCase {
 
   }
 
-  public void testPullOneTermsEnumPerField() throws Exception {
+  public void testPullOneTermsEnum() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
     doc.add(new StringField("foo", "1", Store.NO));
-    doc.add(new StringField("bar", "2", Store.NO));
-    doc.add(new StringField("baz", "3", Store.NO));
     w.addDocument(doc);
     DirectoryReader reader = w.getReader();
     w.close();
     final AtomicInteger counter = new AtomicInteger();
     DirectoryReader wrapped = new TermsCountingDirectoryReaderWrapper(reader, counter);
 
-    final List<Term> terms = new ArrayList<>();
-    final Set<String> fields = new HashSet<>();
+    final List<BytesRef> terms = new ArrayList<>();
     // enough terms to avoid the rewrite
     final int numTerms = TestUtil.nextInt(random(), TermInSetQuery.BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD + 1, 100);
     for (int i = 0; i < numTerms; ++i) {
-      final String field = RandomPicks.randomFrom(random(), new String[] {"foo", "bar", "baz"});
       final BytesRef term = new BytesRef(RandomStrings.randomUnicodeOfCodepointLength(random(), 10));
-      fields.add(field);
-      terms.add(new Term(field, term));
+      terms.add(term);
     }
 
-    new IndexSearcher(wrapped).count(new TermInSetQuery(terms));
-    assertEquals(fields.size(), counter.get());
+    assertEquals(0, new IndexSearcher(wrapped).count(new TermInSetQuery("bar", terms)));
+    assertEquals(0, counter.get()); // missing field
+    new IndexSearcher(wrapped).count(new TermInSetQuery("foo", terms));
+    assertEquals(1, counter.get());
     wrapped.close();
     dir.close();
   }
   
   public void testBinaryToString() {
-    TermInSetQuery query = new TermInSetQuery(new Term("field", new BytesRef(new byte[] { (byte) 0xff, (byte) 0xfe })));
+    TermInSetQuery query = new TermInSetQuery("field", new BytesRef(new byte[] { (byte) 0xff, (byte) 0xfe }));
     assertEquals("field:[ff fe]", query.toString());
   }
 
   public void testIsConsideredCostlyByQueryCache() throws IOException {
-    TermInSetQuery query = new TermInSetQuery(new Term("foo", "bar"), new Term("foo", "baz"));
+    TermInSetQuery query = new TermInSetQuery("foo", new BytesRef("bar"), new BytesRef("baz"));
     UsageTrackingQueryCachingPolicy policy = new UsageTrackingQueryCachingPolicy();
     assertFalse(policy.shouldCache(query));
     policy.onUse(query);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/43874fc5/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java
index a010709..72c2773 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/MultiFacetQuery.java
@@ -19,9 +19,9 @@ package org.apache.lucene.facet;
 import java.util.ArrayList;
 import java.util.Collection;
 
-import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermInSetQuery;
+import org.apache.lucene.util.BytesRef;
 
 /**
  * A multi-terms {@link Query} over a {@link FacetField}.
@@ -38,7 +38,7 @@ public class MultiFacetQuery extends TermInSetQuery {
    * Creates a new {@code MultiFacetQuery} filtering the query on the given dimension.
    */
   public MultiFacetQuery(final FacetsConfig facetsConfig, final String dimension, final String[]... paths) {
-    super(toTerms(facetsConfig.getDimConfig(dimension), dimension, paths));
+    super(facetsConfig.getDimConfig(dimension).indexFieldName, toTerms(dimension, paths));
   }
 
   /**
@@ -47,14 +47,13 @@ public class MultiFacetQuery extends TermInSetQuery {
    * <b>NOTE:</b>Uses FacetsConfig.DEFAULT_DIM_CONFIG.
    */
   public MultiFacetQuery(final String dimension, final String[]... paths) {
-    super(toTerms(FacetsConfig.DEFAULT_DIM_CONFIG, dimension, paths));
+    super(FacetsConfig.DEFAULT_DIM_CONFIG.indexFieldName, toTerms(dimension, paths));
   }
 
-  static Collection<Term> toTerms(final FacetsConfig.DimConfig dimConfig, final String dimension,
-          final String[]... paths) {
-    final Collection<Term> terms = new ArrayList<>(paths.length);
+  static Collection<BytesRef> toTerms(final String dimension, final String[]... paths) {
+    final Collection<BytesRef> terms = new ArrayList<>(paths.length);
     for (String[] path : paths)
-      terms.add(FacetQuery.toTerm(dimConfig, dimension, path));
+      terms.add(new BytesRef(FacetsConfig.pathToString(dimension, path)));
     return terms;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/43874fc5/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java
index bb26a2e..3cdf5e9 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/NumberRangeFacetsTest.java
@@ -24,7 +24,6 @@ import java.util.List;
 
 import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.TermInSetQuery;
@@ -36,6 +35,7 @@ import org.apache.lucene.spatial.prefix.tree.DateRangePrefixTree;
 import org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree;
 import org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.junit.Before;
 import org.junit.Test;
@@ -127,12 +127,12 @@ public class NumberRangeFacetsTest extends StrategyTestCase {
         Collections.shuffle(acceptFieldIds, random());
         acceptFieldIds = acceptFieldIds.subList(0, randomInt(acceptFieldIds.size()));
         if (!acceptFieldIds.isEmpty()) {
-          List<Term> terms = new ArrayList<>();
+          List<BytesRef> terms = new ArrayList<>();
           for (Integer acceptDocId : acceptFieldIds) {
-            terms.add(new Term("id", acceptDocId.toString()));
+            terms.add(new BytesRef(acceptDocId.toString()));
           }
 
-          topAcceptDocs = searchForDocBits(new TermInSetQuery(terms));
+          topAcceptDocs = searchForDocBits(new TermInSetQuery("id", terms));
         }
       }
 


[24/50] [abbrv] lucene-solr:apiv2: Fix changes entry formatting (sub-bullets wrongly promoted to top level items); don't interpret parenthesized text starting with 'e.g.' as an attribution.

Posted by no...@apache.org.
Fix changes entry formatting (sub-bullets wrongly promoted to top level items); don't interpret parenthesized text starting with 'e.g.' as an attribution.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/85061e39
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/85061e39
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/85061e39

Branch: refs/heads/apiv2
Commit: 85061e39e063f99fedcfeee1b2de6cdb8080daa4
Parents: 8c2ef3b
Author: Steve Rowe <sa...@apache.org>
Authored: Wed Jan 18 15:21:23 2017 -0500
Committer: Steve Rowe <sa...@apache.org>
Committed: Wed Jan 18 15:21:23 2017 -0500

----------------------------------------------------------------------
 lucene/site/changes/changes2html.pl |  3 ++-
 solr/CHANGES.txt                    | 16 ++++++++--------
 2 files changed, 10 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/85061e39/lucene/site/changes/changes2html.pl
----------------------------------------------------------------------
diff --git a/lucene/site/changes/changes2html.pl b/lucene/site/changes/changes2html.pl
index 11a0fab..5b866fc 100755
--- a/lucene/site/changes/changes2html.pl
+++ b/lucene/site/changes/changes2html.pl
@@ -26,7 +26,6 @@ use warnings;
 
 my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
 my $github_pull_request_prefix = 'https://github.com/apache/lucene-solr/pull/';
-my $bugzilla_url_prefix = 'http://issues.apache.org/bugzilla/show_bug.cgi?id=';
 my $month_regex = &setup_month_regex;
 my %month_nums = &setup_month_nums;
 my %lucene_bugzilla_jira_map = &setup_lucene_bugzilla_jira_map;
@@ -643,6 +642,7 @@ sub markup_trailing_attribution {
                            (?!inverse\ )
                            (?![Tt]he\ )
                            (?!use\ the\ bug\ number)
+                           (?!e\.?g\.?\b)
                      [^()"]+?\))\s*$}
                     {\n${extra_newline}<span class="attrib">$1</span>}x) {
     # If attribution is not found, then look for attribution with a
@@ -668,6 +668,7 @@ sub markup_trailing_attribution {
                       (?!inverse\ )
                       (?![Tt]he\ )
                       (?!use\ the\ bug\ number)
+                      (?!e\.?g\.?\b)
                  [^()"]+?\)))
                 ((?:\.|(?i:\.?\s*Issue\s+\d{3,}|LUCENE-\d+)\.?)\s*)$}
               {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/85061e39/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index cfd7a4c..1f32c24 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -225,15 +225,15 @@ New Features
 
 * SOLR-9805: Use metrics-jvm library to instrument jvm internals such as GC, memory usage and others. (shalin)
 
-* SOLR-9812: SOLR-9911, SOLR-9960: Added a new /admin/metrics API to return all metrics collected by Solr via API.
+* SOLR-9812, SOLR-9911, SOLR-9960: Added a new /admin/metrics API to return all metrics collected by Solr via API.
   API supports four optional multi-valued parameters:
-  * 'group' (all,jvm,jetty,node,core),
-  * 'type' (all,counter,timer,gauge,histogram),
-  * 'prefix' that filters the returned metrics,
-  * 'registry' that selects one or more registries by prefix (eg. solr.jvm,solr.core.collection1)
-  Example: http://localhost:8983/solr/admin/metrics?group=jvm,jetty&type=counter
-  Example: http://localhost:8983/solr/admin/metrics?group=jvm&prefix=buffers,os
-  Example: http://localhost:8983/solr/admin/metrics?registry=solr.node,solr.core&prefix=ADMIN
+  - 'group' (all,jvm,jetty,node,core),
+  - 'type' (all,counter,timer,gauge,histogram),
+  - 'prefix' that filters the returned metrics,
+  - 'registry' that selects one or more registries by prefix (eg. solr.jvm,solr.core.collection1)
+  - Example: http://localhost:8983/solr/admin/metrics?group=jvm,jetty&type=counter
+  - Example: http://localhost:8983/solr/admin/metrics?group=jvm&prefix=buffers,os
+  - Example: http://localhost:8983/solr/admin/metrics?registry=solr.node,solr.core&prefix=ADMIN
   (shalin, ab)
 
 * SOLR-9884: Add version to segments handler output (Steven Bower via Erick Erickson)
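
The effect of the new (?!e\.?g\.?\b) lookahead can be checked in isolation; a
minimal sketch of the same pattern in Java's regex dialect (the script itself is
Perl, and the sample strings are illustrative):

  import java.util.regex.Pattern;

  class AttributionCheck {
    public static void main(String[] args) {
      Pattern attrib = Pattern.compile("(\\((?!e\\.?g\\.?\\b)[^()\"]+?\\))\\s*$");
      // a plain trailing "(name)" is still treated as an attribution...
      System.out.println(attrib.matcher("Fix the parser. (shalin)").find());          // true
      // ...but a parenthesized "eg."/"e.g." expansion no longer is
      System.out.println(attrib.matcher("selects registries (eg. solr.jvm)").find()); // false
    }
  }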


[20/50] [abbrv] lucene-solr:apiv2: LUCENE-7640: Fix test bug.

Posted by no...@apache.org.
LUCENE-7640: Fix test bug.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/188a19e6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/188a19e6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/188a19e6

Branch: refs/heads/apiv2
Commit: 188a19e67e8e4a9d2c7f0e596eb0820b80770d98
Parents: 71aa463
Author: Adrien Grand <jp...@gmail.com>
Authored: Wed Jan 18 16:59:26 2017 +0100
Committer: Adrien Grand <jp...@gmail.com>
Committed: Wed Jan 18 16:59:26 2017 +0100

----------------------------------------------------------------------
 .../apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/188a19e6/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
index 3a08bfa..4287273 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene60/TestLucene60PointsFormat.java
@@ -256,8 +256,8 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
           @Override
           public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
             for (int dim = 0; dim < 2; ++dim) {
-              if (StringHelper.compare(3, uniquePointValue[0], 0, maxPackedValue, dim * 3) > 0 ||
-                  StringHelper.compare(3, uniquePointValue[0], 0, minPackedValue, dim * 3) < 0) {
+              if (StringHelper.compare(3, uniquePointValue[dim], 0, maxPackedValue, dim * 3) > 0 ||
+                  StringHelper.compare(3, uniquePointValue[dim], 0, minPackedValue, dim * 3) < 0) {
                 return Relation.CELL_OUTSIDE_QUERY;
               }
             }
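
The fix matters because packed values are laid out dimension-major, so both the
per-dimension value array and the byte offset have to be indexed by dim; a
minimal sketch with 3 bytes per dimension (arrays are illustrative):

  import org.apache.lucene.util.StringHelper;

  class PackedValueCompare {
    public static void main(String[] args) {
      // packed layout: [dim0: 3 bytes][dim1: 3 bytes]
      byte[] maxPackedValue = {0, 0, 1,  0, 0, 4};
      byte[][] uniquePointValue = {{0, 0, 1}, {0, 0, 9}};
      int dim = 1;
      // compare dim 1 of the point against dim 1's slice of the packed value
      int cmp = StringHelper.compare(3, uniquePointValue[dim], 0, maxPackedValue, dim * 3);
      System.out.println(cmp > 0); // true: 9 > 4 in the second dimension
    }
  }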


[31/50] [abbrv] lucene-solr:apiv2: SOLR-9926: Allow passing arbitrary java system properties to zkcli.

Posted by no...@apache.org.
SOLR-9926: Allow passing arbitrary java system properties to zkcli.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9f58b6cd
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9f58b6cd
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9f58b6cd

Branch: refs/heads/apiv2
Commit: 9f58b6cd177f72b226c83adbb965cfe08d61d2fb
Parents: 57934ba
Author: markrmiller <ma...@apache.org>
Authored: Wed Jan 18 21:23:36 2017 -0500
Committer: markrmiller <ma...@apache.org>
Committed: Wed Jan 18 21:23:36 2017 -0500

----------------------------------------------------------------------
 solr/CHANGES.txt                            | 2 ++
 solr/server/scripts/cloud-scripts/zkcli.bat | 2 +-
 solr/server/scripts/cloud-scripts/zkcli.sh  | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9f58b6cd/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 82c3d2b..aab5116 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -100,6 +100,8 @@ New Features
 * SOLR-9836: Add ability to recover from leader when index corruption is detected on SolrCore creation.
   (Mike Drob via Mark Miller)
 
+* SOLR-9926: Allow passing arbitrary java system properties to zkcli. (Hrishikesh Gadre via Mark Miller)
+
 Bug Fixes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9f58b6cd/solr/server/scripts/cloud-scripts/zkcli.bat
----------------------------------------------------------------------
diff --git a/solr/server/scripts/cloud-scripts/zkcli.bat b/solr/server/scripts/cloud-scripts/zkcli.bat
index 0e4359c..c372685 100644
--- a/solr/server/scripts/cloud-scripts/zkcli.bat
+++ b/solr/server/scripts/cloud-scripts/zkcli.bat
@@ -21,5 +21,5 @@ REM  -DzkCredentialsProvider=org.apache.solr.common.cloud.VMParamsSingleSetCrede
 REM  -DzkDigestUsername=admin-user -DzkDigestPassword=CHANGEME-ADMIN-PASSWORD ^
 REM  -DzkDigestReadonlyUsername=readonly-user -DzkDigestReadonlyPassword=CHANGEME-READONLY-PASSWORD
 
-"%JVM%" %SOLR_ZK_CREDS_AND_ACLS% -Dlog4j.configuration="%LOG4J_CONFIG%" ^
+"%JVM%" %SOLR_ZK_CREDS_AND_ACLS% %ZKCLI_JVM_FLAGS% -Dlog4j.configuration="%LOG4J_CONFIG%" ^
 -classpath "%SDIR%\..\..\solr-webapp\webapp\WEB-INF\lib\*;%SDIR%\..\..\lib\ext\*" org.apache.solr.cloud.ZkCLI %*

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9f58b6cd/solr/server/scripts/cloud-scripts/zkcli.sh
----------------------------------------------------------------------
diff --git a/solr/server/scripts/cloud-scripts/zkcli.sh b/solr/server/scripts/cloud-scripts/zkcli.sh
index e37b6da..df43265 100755
--- a/solr/server/scripts/cloud-scripts/zkcli.sh
+++ b/solr/server/scripts/cloud-scripts/zkcli.sh
@@ -21,6 +21,6 @@ fi
 #  -DzkDigestUsername=admin-user -DzkDigestPassword=CHANGEME-ADMIN-PASSWORD \
 #  -DzkDigestReadonlyUsername=readonly-user -DzkDigestReadonlyPassword=CHANGEME-READONLY-PASSWORD"
 
-PATH=$JAVA_HOME/bin:$PATH $JVM $SOLR_ZK_CREDS_AND_ACLS  -Dlog4j.configuration=$log4j_config \
+PATH=$JAVA_HOME/bin:$PATH $JVM $SOLR_ZK_CREDS_AND_ACLS $ZKCLI_JVM_FLAGS -Dlog4j.configuration=$log4j_config \
 -classpath "$sdir/../../solr-webapp/webapp/WEB-INF/lib/*:$sdir/../../lib/ext/*" org.apache.solr.cloud.ZkCLI ${1+"$@"}
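
With this change, extra JVM system properties reach zkcli through the new
ZKCLI_JVM_FLAGS variable; a hypothetical invocation (the property name and
zkhost are illustrative, not from this patch):

  ZKCLI_JVM_FLAGS="-Dmy.custom.property=value" \
    server/scripts/cloud-scripts/zkcli.sh -zkhost localhost:2181 -cmd list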
 


[47/50] [abbrv] lucene-solr:apiv2: SOLR-10011: Refactor PointField & TrieField to now have a common base class, NumericFieldType.

Posted by no...@apache.org.
SOLR-10011: Refactor PointField & TrieField to now have a common base class, NumericFieldType.

  The TrieField.TrieTypes and PointField.PointTypes are now consolidated to NumericFieldType.NumberType. This
  refactoring also fixes a bug whereby PointFields were not using DocValues for range queries on
  indexed=false, docValues=true fields.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/285a1013
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/285a1013
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/285a1013

Branch: refs/heads/apiv2
Commit: 285a1013ad04dd1cd5e5e41ffa93a87fe862c152
Parents: 49fa7b0
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Sun Jan 22 04:27:11 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Sun Jan 22 04:27:11 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   5 +
 .../apache/solr/schema/DoublePointField.java    |  11 +-
 .../org/apache/solr/schema/FloatPointField.java |  11 +-
 .../org/apache/solr/schema/IntPointField.java   |  10 +-
 .../org/apache/solr/schema/LongPointField.java  |  11 +-
 .../apache/solr/schema/NumericFieldType.java    | 151 ++++++++++++++++
 .../java/org/apache/solr/schema/PointField.java |  28 +--
 .../org/apache/solr/schema/TrieDateField.java   |   2 +-
 .../org/apache/solr/schema/TrieDoubleField.java |   2 +-
 .../java/org/apache/solr/schema/TrieField.java  | 179 +++----------------
 .../org/apache/solr/schema/TrieFloatField.java  |   2 +-
 .../org/apache/solr/schema/TrieIntField.java    |   2 +-
 .../org/apache/solr/schema/TrieLongField.java   |   2 +-
 .../apache/solr/search/SolrIndexSearcher.java   |   3 +-
 .../distributed/command/GroupConverter.java     |   2 +-
 .../solr/collection1/conf/schema-point.xml      |   4 +
 .../org/apache/solr/schema/TestPointFields.java |   8 +
 17 files changed, 239 insertions(+), 194 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 748125a..abd2983 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -80,6 +80,11 @@ Other Changes
 ----------------------
 * SOLR-8396: Add support for PointFields in Solr (Ishan Chattopadhyaya, Tomás Fernández Löbbe)
 
+* SOLR-10011: Refactor PointField & TrieField to now have a common base class, NumericFieldType. The
+  TrieField.TrieTypes and PointField.PointTypes are now consolidated to NumericFieldType.NumberType. This
+  refactoring also fixes a bug whereby PointFields were not using DocValues for range queries for
+  indexed=false, docValues=true fields.
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/DoublePointField.java b/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
index c393dfe..b9a7311 100644
--- a/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/DoublePointField.java
@@ -45,6 +45,10 @@ public class DoublePointField extends PointField implements DoubleValueFieldType
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  public DoublePointField() {
+    type = NumberType.DOUBLE;
+  }
+
   @Override
   public Object toNativeType(Object val) {
     if (val == null) return null;
@@ -54,7 +58,7 @@ public class DoublePointField extends PointField implements DoubleValueFieldType
   }
 
   @Override
-  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+  public Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
       boolean maxInclusive) {
     double actualMin, actualMax;
     if (min == null) {
@@ -179,9 +183,4 @@ public class DoublePointField extends PointField implements DoubleValueFieldType
   protected StoredField getStoredField(SchemaField sf, Object value) {
     return new StoredField(sf.getName(), (Double) this.toNativeType(value));
   }
-
-  @Override
-  public PointTypes getType() {
-    return PointTypes.DOUBLE;
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/FloatPointField.java b/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
index 766c6e9..7b866fc 100644
--- a/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/FloatPointField.java
@@ -45,6 +45,10 @@ public class FloatPointField extends PointField implements FloatValueFieldType {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  public FloatPointField() {
+    type = NumberType.FLOAT;
+  }
+
   @Override
   public Object toNativeType(Object val) {
     if (val == null) return null;
@@ -54,7 +58,7 @@ public class FloatPointField extends PointField implements FloatValueFieldType {
   }
 
   @Override
-  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+  public Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
       boolean maxInclusive) {
     float actualMin, actualMax;
     if (min == null) {
@@ -179,9 +183,4 @@ public class FloatPointField extends PointField implements FloatValueFieldType {
   protected StoredField getStoredField(SchemaField sf, Object value) {
     return new StoredField(sf.getName(), (Float) this.toNativeType(value));
   }
-  
-  @Override
-  public PointTypes getType() {
-    return PointTypes.FLOAT;
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/IntPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/IntPointField.java b/solr/core/src/java/org/apache/solr/schema/IntPointField.java
index 2271282..3e74241 100644
--- a/solr/core/src/java/org/apache/solr/schema/IntPointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/IntPointField.java
@@ -44,6 +44,10 @@ public class IntPointField extends PointField implements IntValueFieldType {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  public IntPointField() {
+    type = NumberType.INTEGER;
+  }
+
   @Override
   public Object toNativeType(Object val) {
     if (val == null) return null;
@@ -58,7 +62,7 @@ public class IntPointField extends PointField implements IntValueFieldType {
   }
 
   @Override
-  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+  public Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
       boolean maxInclusive) {
     int actualMin, actualMax;
     if (min == null) {
@@ -179,8 +183,4 @@ public class IntPointField extends PointField implements IntValueFieldType {
     return new StoredField(sf.getName(), (Integer) this.toNativeType(value));
   }
 
-  @Override
-  public PointTypes getType() {
-    return PointTypes.INTEGER;
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/LongPointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/LongPointField.java b/solr/core/src/java/org/apache/solr/schema/LongPointField.java
index f3fca3c..80f3cf7 100644
--- a/solr/core/src/java/org/apache/solr/schema/LongPointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/LongPointField.java
@@ -44,6 +44,10 @@ public class LongPointField extends PointField implements LongValueFieldType {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  public LongPointField() {
+    type = NumberType.LONG;
+  }
+
   @Override
   public Object toNativeType(Object val) {
     if (val == null) return null;
@@ -58,7 +62,7 @@ public class LongPointField extends PointField implements LongValueFieldType {
   }
 
   @Override
-  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+  public Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
       boolean maxInclusive) {
     long actualMin, actualMax;
     if (min == null) {
@@ -178,9 +182,4 @@ public class LongPointField extends PointField implements LongValueFieldType {
   protected StoredField getStoredField(SchemaField sf, Object value) {
     return new StoredField(sf.getName(), (Long) this.toNativeType(value));
   }
-
-  @Override
-  public PointTypes getType() {
-    return PointTypes.LONG;
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java b/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java
new file mode 100644
index 0000000..404693d
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/NumericFieldType.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.schema;
+
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.search.FunctionRangeQuery;
+import org.apache.solr.search.QParser;
+import org.apache.solr.search.function.ValueSourceRangeFilter;
+import org.apache.solr.util.DateMathParser;
+
+public abstract class NumericFieldType extends PrimitiveFieldType {
+
+  public static enum NumberType {
+    INTEGER,
+    LONG,
+    FLOAT,
+    DOUBLE,
+    DATE
+  }
+
+  protected NumberType type;
+
+  /**
+   * @return the type of this field
+   */
+  final public NumberType getType() {
+    return type;
+  }
+
+  private static long FLOAT_NEGATIVE_INFINITY_BITS = (long)Float.floatToIntBits(Float.NEGATIVE_INFINITY);
+  private static long DOUBLE_NEGATIVE_INFINITY_BITS = Double.doubleToLongBits(Double.NEGATIVE_INFINITY);
+  private static long FLOAT_POSITIVE_INFINITY_BITS = (long)Float.floatToIntBits(Float.POSITIVE_INFINITY);
+  private static long DOUBLE_POSITIVE_INFINITY_BITS = Double.doubleToLongBits(Double.POSITIVE_INFINITY);
+  private static long FLOAT_MINUS_ZERO_BITS = (long)Float.floatToIntBits(-0f);
+  private static long DOUBLE_MINUS_ZERO_BITS = Double.doubleToLongBits(-0d);
+  private static long FLOAT_ZERO_BITS = (long)Float.floatToIntBits(0f);
+  private static long DOUBLE_ZERO_BITS = Double.doubleToLongBits(0d);
+
+  protected Query getDocValuesRangeQuery(QParser parser, SchemaField field, String min, String max,
+      boolean minInclusive, boolean maxInclusive) {
+    assert field.hasDocValues() && !field.multiValued();
+    
+    switch (getType()) {
+      case INTEGER:
+        return numericDocValuesRangeQuery(field.getName(),
+              min == null ? null : (long) Integer.parseInt(min),
+              max == null ? null : (long) Integer.parseInt(max),
+              minInclusive, maxInclusive);
+      case FLOAT:
+        return getRangeQueryForFloatDoubleDocValues(field, min, max, minInclusive, maxInclusive);
+      case LONG:
+        return numericDocValuesRangeQuery(field.getName(),
+              min == null ? null : Long.parseLong(min),
+              max == null ? null : Long.parseLong(max),
+              minInclusive, maxInclusive);
+      case DOUBLE:
+        return getRangeQueryForFloatDoubleDocValues(field, min, max, minInclusive, maxInclusive);
+      case DATE:
+        return numericDocValuesRangeQuery(field.getName(),
+              min == null ? null : DateMathParser.parseMath(null, min).getTime(),
+              max == null ? null : DateMathParser.parseMath(null, max).getTime(),
+              minInclusive, maxInclusive);
+      default:
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for point field");
+    }
+  }
+  
+  protected Query getRangeQueryForFloatDoubleDocValues(SchemaField sf, String min, String max, boolean minInclusive, boolean maxInclusive) {
+    Query query;
+    String fieldName = sf.getName();
+
+    Number minVal = min == null ? null : getType() == NumberType.FLOAT ? Float.parseFloat(min): Double.parseDouble(min);
+    Number maxVal = max == null ? null : getType() == NumberType.FLOAT ? Float.parseFloat(max): Double.parseDouble(max);
+    
+    Long minBits = 
+        min == null ? null : getType() == NumberType.FLOAT ? (long) Float.floatToIntBits(minVal.floatValue()): Double.doubleToLongBits(minVal.doubleValue());
+    Long maxBits = 
+        max == null ? null : getType() == NumberType.FLOAT ? (long) Float.floatToIntBits(maxVal.floatValue()): Double.doubleToLongBits(maxVal.doubleValue());
+    
+    long negativeInfinityBits = getType() == NumberType.FLOAT ? FLOAT_NEGATIVE_INFINITY_BITS : DOUBLE_NEGATIVE_INFINITY_BITS;
+    long positiveInfinityBits = getType() == NumberType.FLOAT ? FLOAT_POSITIVE_INFINITY_BITS : DOUBLE_POSITIVE_INFINITY_BITS;
+    long minusZeroBits = getType() == NumberType.FLOAT ? FLOAT_MINUS_ZERO_BITS : DOUBLE_MINUS_ZERO_BITS;
+    long zeroBits = getType() == NumberType.FLOAT ? FLOAT_ZERO_BITS : DOUBLE_ZERO_BITS;
+    
+    // If min is negative (or -0d) and max is positive (or +0d), then issue a FunctionRangeQuery
+    if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) && 
+        (maxVal == null || (maxVal.doubleValue() > 0d || maxBits == zeroBits))) {
+
+      ValueSource vs = getValueSource(sf, null);
+      query = new FunctionRangeQuery(new ValueSourceRangeFilter(vs, min, max, minInclusive, maxInclusive));
+
+    } else { // If both max and min are negative (or -0d), then issue range query with max and min reversed
+      if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) &&
+          (maxVal != null && (maxVal.doubleValue() < 0d || maxBits == minusZeroBits))) {
+        query = numericDocValuesRangeQuery
+            (fieldName, maxBits, (min == null ? negativeInfinityBits : minBits), maxInclusive, minInclusive);
+      } else { // If both max and min are positive, then issue range query
+        query = numericDocValuesRangeQuery
+            (fieldName, minBits, (max == null ? positiveInfinityBits : maxBits), minInclusive, maxInclusive);
+      }
+    }
+    return query;
+  }
+  
+  public static Query numericDocValuesRangeQuery(
+      String field,
+      Number lowerValue, Number upperValue,
+      boolean lowerInclusive, boolean upperInclusive) {
+
+    long actualLowerValue = Long.MIN_VALUE;
+    if (lowerValue != null) {
+      actualLowerValue = lowerValue.longValue();
+      if (lowerInclusive == false) {
+        if (actualLowerValue == Long.MAX_VALUE) {
+          return new MatchNoDocsQuery();
+        }
+        ++actualLowerValue;
+      }
+    }
+
+    long actualUpperValue = Long.MAX_VALUE;
+    if (upperValue != null) {
+      actualUpperValue = upperValue.longValue();
+      if (upperInclusive == false) {
+        if (actualUpperValue == Long.MIN_VALUE) {
+          return new MatchNoDocsQuery();
+        }
+        --actualUpperValue;
+      }
+    }
+    return NumericDocValuesField.newRangeQuery(field, actualLowerValue, actualUpperValue);
+  }
+}
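
A minimal sketch (values are illustrative) of why getRangeQueryForFloatDoubleDocValues
above swaps min and max for all-negative ranges: the raw IEEE-754 bit patterns, as
stored in numeric doc values, sort ascending for positive floats but descending for
negative ones:

  class FloatBitsOrder {
    public static void main(String[] args) {
      long bitsMinus5 = (long) Float.floatToIntBits(-5f);
      long bitsMinus1 = (long) Float.floatToIntBits(-1f);
      long bitsPlus1  = (long) Float.floatToIntBits(1f);
      long bitsPlus5  = (long) Float.floatToIntBits(5f);
      System.out.println(bitsMinus5 > bitsMinus1); // true: order reversed below zero
      System.out.println(bitsPlus1  < bitsPlus5);  // true: order preserved above zero
    }
  }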

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/PointField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/PointField.java b/solr/core/src/java/org/apache/solr/schema/PointField.java
index a2dd8a8..9b1ed38 100644
--- a/solr/core/src/java/org/apache/solr/schema/PointField.java
+++ b/solr/core/src/java/org/apache/solr/schema/PointField.java
@@ -49,15 +49,7 @@ import org.slf4j.LoggerFactory;
  * {@code DocValues} are supported for single-value cases ({@code NumericDocValues}).
  * {@code FieldCache} is not supported for {@code PointField}s, so sorting, faceting, etc on these fields require the use of {@code docValues="true"} in the schema.
  */
-public abstract class PointField extends PrimitiveFieldType {
-  
-  public enum PointTypes {
-    INTEGER,
-    LONG,
-    FLOAT,
-    DOUBLE,
-    DATE
-  }
+public abstract class PointField extends NumericFieldType {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
@@ -117,11 +109,6 @@ public abstract class PointField extends PrimitiveFieldType {
     return false;
   }
 
-  /**
-   * @return the type of this field
-   */
-  public abstract PointTypes getType();
-  
   @Override
   public abstract Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVals);
 
@@ -137,6 +124,19 @@ public abstract class PointField extends PrimitiveFieldType {
 
   protected abstract Query getExactQuery(SchemaField field, String externalVal);
 
+  public abstract Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+      boolean maxInclusive);
+
+  @Override
+  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
+      boolean maxInclusive) {
+    if (!field.indexed() && field.hasDocValues() && !field.multiValued()) {
+      return getDocValuesRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
+    } else {
+      return getPointRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
+    }
+  }
+
   @Override
   public String storedToReadable(IndexableField f) {
     return toExternal(f);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieDateField.java b/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
index 209c581..77980a7 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
@@ -84,7 +84,7 @@ import org.apache.solr.util.DateMathParser;
  */
 public class TrieDateField extends TrieField implements DateValueFieldType {
   {
-    this.type = TrieTypes.DATE;
+    this.type = NumberType.DATE;
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
index 7faa38c..b610e6e 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
@@ -52,7 +52,7 @@ import org.apache.lucene.util.mutable.MutableValueDouble;
  */
 public class TrieDoubleField extends TrieField implements DoubleValueFieldType {
   {
-    type=TrieTypes.DOUBLE;
+    type = NumberType.DOUBLE;
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/TrieField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieField.java b/solr/core/src/java/org/apache/solr/schema/TrieField.java
index 57dbeff..e470155 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieField.java
@@ -43,7 +43,6 @@ import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
-import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedSetSelector;
@@ -56,9 +55,7 @@ import org.apache.lucene.util.mutable.MutableValueDate;
 import org.apache.lucene.util.mutable.MutableValueLong;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
-import org.apache.solr.search.FunctionRangeQuery;
 import org.apache.solr.search.QParser;
-import org.apache.solr.search.function.ValueSourceRangeFilter;
 import org.apache.solr.uninverting.UninvertingReader.Type;
 import org.apache.solr.util.DateMathParser;
 import org.slf4j.Logger;
@@ -84,12 +81,11 @@ import org.slf4j.LoggerFactory;
  * @see org.apache.lucene.legacy.LegacyNumericRangeQuery
  * @since solr 1.4
  */
-public class TrieField extends PrimitiveFieldType {
+public class TrieField extends NumericFieldType {
   public static final int DEFAULT_PRECISION_STEP = 8;
 
   protected int precisionStepArg = TrieField.DEFAULT_PRECISION_STEP;  // the one passed in or defaulted
   protected int precisionStep;     // normalized
-  protected TrieTypes type;
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
@@ -107,7 +103,7 @@ public class TrieField extends PrimitiveFieldType {
 
     if (t != null) {
       try {
-        type = TrieTypes.valueOf(t.toUpperCase(Locale.ROOT));
+        type = NumberType.valueOf(t.toUpperCase(Locale.ROOT));
       } catch (IllegalArgumentException e) {
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                 "Invalid type specified in schema.xml for field: " + args.get("name"), e);
@@ -139,7 +135,7 @@ public class TrieField extends PrimitiveFieldType {
       }
 
       // normal stored case
-      return (type == TrieTypes.DATE) ? new Date(val.longValue()) : val;
+      return (type == NumberType.DATE) ? new Date(val.longValue()) : val;
     } else {
       // multi-valued numeric docValues currently use SortedSet on the indexed terms.
       BytesRef term = f.binaryValue();
@@ -340,13 +336,6 @@ public class TrieField extends PrimitiveFieldType {
     return precisionStepArg;
   }
 
-  /**
-   * @return the type of this field
-   */
-  public TrieTypes getType() {
-    return type;
-  }
-
   @Override
   public LegacyNumericType getNumericType() {
     switch (type) {
@@ -372,66 +361,41 @@ public class TrieField extends PrimitiveFieldType {
     }
     int ps = precisionStep;
     Query query;
-    final boolean matchOnly = field.hasDocValues() && !field.indexed();
+
+    if (field.hasDocValues() && !field.indexed()) {
+      return getDocValuesRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
+    }
+
     switch (type) {
       case INTEGER:
-        if (matchOnly) {
-          query = numericDocValuesRangeQuery(field.getName(),
-                min == null ? null : Integer.parseInt(min),
-                max == null ? null : Integer.parseInt(max),
-                minInclusive, maxInclusive);
-        } else {
-          query = LegacyNumericRangeQuery.newIntRange(field.getName(), ps,
-              min == null ? null : Integer.parseInt(min),
-              max == null ? null : Integer.parseInt(max),
-              minInclusive, maxInclusive);
-        }
+        query = LegacyNumericRangeQuery.newIntRange(field.getName(), ps,
+            min == null ? null : Integer.parseInt(min),
+            max == null ? null : Integer.parseInt(max),
+            minInclusive, maxInclusive);
         break;
       case FLOAT:
-        if (matchOnly) {
-          return getRangeQueryForFloatDoubleDocValues(field, min, max, minInclusive, maxInclusive);
-        } else {
-          query = LegacyNumericRangeQuery.newFloatRange(field.getName(), ps,
-              min == null ? null : Float.parseFloat(min),
-              max == null ? null : Float.parseFloat(max),
-              minInclusive, maxInclusive);
-        }
+        query = LegacyNumericRangeQuery.newFloatRange(field.getName(), ps,
+            min == null ? null : Float.parseFloat(min),
+            max == null ? null : Float.parseFloat(max),
+            minInclusive, maxInclusive);
         break;
       case LONG:
-        if (matchOnly) {
-          query = numericDocValuesRangeQuery(field.getName(),
-                min == null ? null : Long.parseLong(min),
-                max == null ? null : Long.parseLong(max),
-                minInclusive, maxInclusive);
-        } else {
-          query = LegacyNumericRangeQuery.newLongRange(field.getName(), ps,
-              min == null ? null : Long.parseLong(min),
-              max == null ? null : Long.parseLong(max),
-              minInclusive, maxInclusive);
-        }
+        query = LegacyNumericRangeQuery.newLongRange(field.getName(), ps,
+            min == null ? null : Long.parseLong(min),
+            max == null ? null : Long.parseLong(max),
+            minInclusive, maxInclusive);
         break;
       case DOUBLE:
-        if (matchOnly) {
-          return getRangeQueryForFloatDoubleDocValues(field, min, max, minInclusive, maxInclusive);
-        } else {
-          query = LegacyNumericRangeQuery.newDoubleRange(field.getName(), ps,
-              min == null ? null : Double.parseDouble(min),
-              max == null ? null : Double.parseDouble(max),
-              minInclusive, maxInclusive);
-        }
+        query = LegacyNumericRangeQuery.newDoubleRange(field.getName(), ps,
+            min == null ? null : Double.parseDouble(min),
+            max == null ? null : Double.parseDouble(max),
+            minInclusive, maxInclusive);
         break;
       case DATE:
-        if (matchOnly) {
-          query = numericDocValuesRangeQuery(field.getName(),
-                min == null ? null : DateMathParser.parseMath(null, min).getTime(),
-                max == null ? null : DateMathParser.parseMath(null, max).getTime(),
-                minInclusive, maxInclusive);
-        } else {
-          query = LegacyNumericRangeQuery.newLongRange(field.getName(), ps,
-              min == null ? null : DateMathParser.parseMath(null, min).getTime(),
-              max == null ? null : DateMathParser.parseMath(null, max).getTime(),
-              minInclusive, maxInclusive);
-        }
+        query = LegacyNumericRangeQuery.newLongRange(field.getName(), ps,
+            min == null ? null : DateMathParser.parseMath(null, min).getTime(),
+            max == null ? null : DateMathParser.parseMath(null, max).getTime(),
+            minInclusive, maxInclusive);
         break;
       default:
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field");
@@ -440,81 +404,6 @@ public class TrieField extends PrimitiveFieldType {
     return query;
   }
 
-  private static Query numericDocValuesRangeQuery(
-      String field,
-      Number lowerValue, Number upperValue,
-      boolean lowerInclusive, boolean upperInclusive) {
-
-    long actualLowerValue = Long.MIN_VALUE;
-    if (lowerValue != null) {
-      actualLowerValue = lowerValue.longValue();
-      if (lowerInclusive == false) {
-        if (actualLowerValue == Long.MAX_VALUE) {
-          return new MatchNoDocsQuery();
-        }
-        ++actualLowerValue;
-      }
-    }
-
-    long actualUpperValue = Long.MAX_VALUE;
-    if (upperValue != null) {
-      actualUpperValue = upperValue.longValue();
-      if (upperInclusive == false) {
-        if (actualUpperValue == Long.MIN_VALUE) {
-          return new MatchNoDocsQuery();
-        }
-        --actualUpperValue;
-      }
-    }
-    return NumericDocValuesField.newRangeQuery(field, actualLowerValue, actualUpperValue);
-  }
-
-  private static long FLOAT_NEGATIVE_INFINITY_BITS = (long)Float.floatToIntBits(Float.NEGATIVE_INFINITY);
-  private static long DOUBLE_NEGATIVE_INFINITY_BITS = Double.doubleToLongBits(Double.NEGATIVE_INFINITY);
-  private static long FLOAT_POSITIVE_INFINITY_BITS = (long)Float.floatToIntBits(Float.POSITIVE_INFINITY);
-  private static long DOUBLE_POSITIVE_INFINITY_BITS = Double.doubleToLongBits(Double.POSITIVE_INFINITY);
-  private static long FLOAT_MINUS_ZERO_BITS = (long)Float.floatToIntBits(-0f);
-  private static long DOUBLE_MINUS_ZERO_BITS = Double.doubleToLongBits(-0d);
-  private static long FLOAT_ZERO_BITS = (long)Float.floatToIntBits(0f);
-  private static long DOUBLE_ZERO_BITS = Double.doubleToLongBits(0d);
-
-  private Query getRangeQueryForFloatDoubleDocValues(SchemaField sf, String min, String max, boolean minInclusive, boolean maxInclusive) {
-    Query query;
-    String fieldName = sf.getName();
-
-    Number minVal = min == null ? null : type == TrieTypes.FLOAT ? Float.parseFloat(min): Double.parseDouble(min);
-    Number maxVal = max == null ? null : type == TrieTypes.FLOAT ? Float.parseFloat(max): Double.parseDouble(max);
-    
-    Long minBits = 
-        min == null ? null : type == TrieTypes.FLOAT ? (long) Float.floatToIntBits(minVal.floatValue()): Double.doubleToLongBits(minVal.doubleValue());
-    Long maxBits = 
-        max == null ? null : type == TrieTypes.FLOAT ? (long) Float.floatToIntBits(maxVal.floatValue()): Double.doubleToLongBits(maxVal.doubleValue());
-    
-    long negativeInfinityBits = type == TrieTypes.FLOAT ? FLOAT_NEGATIVE_INFINITY_BITS : DOUBLE_NEGATIVE_INFINITY_BITS;
-    long positiveInfinityBits = type == TrieTypes.FLOAT ? FLOAT_POSITIVE_INFINITY_BITS : DOUBLE_POSITIVE_INFINITY_BITS;
-    long minusZeroBits = type == TrieTypes.FLOAT ? FLOAT_MINUS_ZERO_BITS : DOUBLE_MINUS_ZERO_BITS;
-    long zeroBits = type == TrieTypes.FLOAT ? FLOAT_ZERO_BITS : DOUBLE_ZERO_BITS;
-    
-    // If min is negative (or -0d) and max is positive (or +0d), then issue a FunctionRangeQuery
-    if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) && 
-        (maxVal == null || (maxVal.doubleValue() > 0d || maxBits == zeroBits))) {
-
-      ValueSource vs = getValueSource(sf, null);
-      query = new FunctionRangeQuery(new ValueSourceRangeFilter(vs, min, max, minInclusive, maxInclusive));
-
-    } else { // If both max and min are negative (or -0d), then issue range query with max and min reversed
-      if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) &&
-          (maxVal != null && (maxVal.doubleValue() < 0d || maxBits == minusZeroBits))) {
-        query = numericDocValuesRangeQuery
-            (fieldName, maxBits, (min == null ? negativeInfinityBits : minBits), maxInclusive, minInclusive);
-      } else { // If both max and min are positive, then issue range query
-        query = numericDocValuesRangeQuery
-            (fieldName, minBits, (max == null ? positiveInfinityBits : maxBits), minInclusive, maxInclusive);
-      }
-    }
-    return query;
-  }
-
   @Override
   public Query getFieldQuery(QParser parser, SchemaField field, String externalVal) {
     if (!field.indexed() && field.hasDocValues()) {
@@ -579,7 +468,7 @@ public class TrieField extends PrimitiveFieldType {
 
   @Override
   public String toExternal(IndexableField f) {
-    return (type == TrieTypes.DATE)
+    return (type == NumberType.DATE)
       ? ((Date) toObject(f)).toInstant().toString()
       : toObject(f).toString();
   }
@@ -792,15 +681,6 @@ public class TrieField extends PrimitiveFieldType {
     }
   }
 
-  public enum TrieTypes {
-    INTEGER,
-    LONG,
-    FLOAT,
-    DOUBLE,
-    DATE
-  }
-
-
   static final String INT_PREFIX = new String(new char[]{LegacyNumericUtils.SHIFT_START_INT});
   static final String LONG_PREFIX = new String(new char[]{LegacyNumericUtils.SHIFT_START_LONG});
 
@@ -863,7 +743,6 @@ class TrieDateFieldSource extends LongFieldSource {
   public long externalToLong(String extVal) {
     return DateMathParser.parseMath(null, extVal).getTime();
   }
-
 }
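
Note: one subtlety behind the getRangeQueryForFloatDoubleDocValues logic removed above: Float.floatToIntBits and Double.doubleToLongBits preserve numeric order for non-negative values but reverse it for negative ones, which is why the old code swapped min and max when both bounds were negative. A self-contained illustration (demo class name hypothetical):

public class FloatBitsOrderDemo {
  public static void main(String[] args) {
    // Positive floats: larger value means larger (signed) bit pattern.
    System.out.println(Float.floatToIntBits(1.0f) < Float.floatToIntBits(2.0f));   // true
    // Negative floats: -2.0f < -1.0f, yet bits(-1.0f) < bits(-2.0f) as ints,
    // i.e. the bit order is reversed, hence the swapped range endpoints.
    System.out.println(Float.floatToIntBits(-1.0f) < Float.floatToIntBits(-2.0f)); // true
  }
}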
 
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
index 13b9141..b069810 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
@@ -52,7 +52,7 @@ import org.apache.lucene.util.mutable.MutableValueFloat;
  */
 public class TrieFloatField extends TrieField implements FloatValueFieldType {
   {
-    type=TrieTypes.FLOAT;
+    type = NumberType.FLOAT;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieIntField.java b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
index d89dd0d..6d4d7cd 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
@@ -45,7 +45,7 @@ import org.apache.lucene.util.mutable.MutableValueInt;
  */
 public class TrieIntField extends TrieField implements IntValueFieldType {
   {
-    type=TrieTypes.INTEGER;
+    type = NumberType.INTEGER;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieLongField.java b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
index c3a5440..a93d0ce 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
@@ -45,7 +45,7 @@ import org.apache.lucene.util.mutable.MutableValueLong;
  */
 public class TrieLongField extends TrieField implements LongValueFieldType {
   {
-    type=TrieTypes.LONG;
+    type = NumberType.LONG;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index 7c56311..3f7d511 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -98,6 +98,7 @@ import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.schema.BoolField;
 import org.apache.solr.schema.EnumField;
 import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.schema.NumericFieldType;
 import org.apache.solr.schema.PointField;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.TrieDateField;
@@ -823,7 +824,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI
             }
             Object newVal = val;
             if (schemaField.getType().isPointField()) {
-              PointField.PointTypes type = ((PointField)schemaField.getType()).getType(); 
+              NumericFieldType.NumberType type = ((PointField)schemaField.getType()).getType(); 
               switch (type) {
                 case INTEGER:
                   newVal = val.intValue();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/GroupConverter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/GroupConverter.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/GroupConverter.java
index 2a5827d..a9849d5 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/GroupConverter.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/GroupConverter.java
@@ -70,7 +70,7 @@ class GroupConverter {
     for (SearchGroup<BytesRef> original : values) {
       SearchGroup<MutableValue> converted = new SearchGroup<MutableValue>();
       converted.sortValues = original.sortValues; // ?
-      TrieField.TrieTypes type = ((TrieField)fieldType).getType();
+      TrieField.NumberType type = ((TrieField)fieldType).getType();
       final MutableValue v;
       switch (type) {
         case INTEGER:

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/test-files/solr/collection1/conf/schema-point.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-point.xml b/solr/core/src/test-files/solr/collection1/conf/schema-point.xml
index ca37ff5..053d39b 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-point.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-point.xml
@@ -79,6 +79,10 @@
    <dynamicField name="*_p_l_dv_ns"  type="plong"    indexed="true"  stored="false" docValues="true" useDocValuesAsStored="true"/>
    <dynamicField name="*_p_d_dv_ns"  type="pdouble"    indexed="true"  stored="false" docValues="true" useDocValuesAsStored="true"/>
    <dynamicField name="*_p_f_dv_ns"  type="pfloat"    indexed="true"  stored="false" docValues="true" useDocValuesAsStored="true"/>
+   <dynamicField name="*_p_i_ni_ns_dv" type="pint"    indexed="false"  stored="false" docValues="true" useDocValuesAsStored="true"/>
+   <dynamicField name="*_p_l_ni_ns_dv" type="plong"   indexed="false"  stored="false" docValues="true" useDocValuesAsStored="true"/>
+   <dynamicField name="*_p_d_ni_ns_dv" type="pdouble" indexed="false"  stored="false" docValues="true" useDocValuesAsStored="true"/>
+   <dynamicField name="*_p_f_ni_ns_dv" type="pfloat"  indexed="false"  stored="false" docValues="true" useDocValuesAsStored="true"/>
 
  </fields>
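
Note: the four *_p_*_ni_ns_dv patterns added above define docValues-only point fields (neither indexed nor stored), which is exactly the combination that exercises the new docValues range path. A hedged SolrJ sketch of how such a field might be queried; the field name, core URL, and setup below are hypothetical:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;

public class DocValuesOnlyRangeDemo {
  public static void main(String[] args) throws Exception {
    // "age_p_i_ni_ns_dv" matches the pint pattern above, so range queries
    // on it must be answered from docValues rather than the points index.
    try (SolrClient client =
             new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", "1");
      doc.addField("age_p_i_ni_ns_dv", 42);
      client.add(doc);
      client.commit();
      long hits = client.query(new SolrQuery("age_p_i_ni_ns_dv:[40 TO 50]"))
                        .getResults().getNumFound();
      System.out.println(hits); // expected: 1
    }
  }
}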
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/285a1013/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
index 91a7b49..75d142d 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestPointFields.java
@@ -59,6 +59,7 @@ public class TestPointFields extends SolrTestCaseJ4 {
     doTestIntPointFieldExactQuery("number_p_i", false);
     doTestIntPointFieldExactQuery("number_p_i_mv", false);
     doTestIntPointFieldExactQuery("number_p_i_ni_dv", false);
+    doTestIntPointFieldExactQuery("number_p_i_ni_ns_dv", false);
     // uncomment once MultiValued docValues are supported in PointFields
     //    doTestIntPointFieldExactQuery("number_p_i_ni_mv_dv", false);
   }
@@ -74,6 +75,7 @@ public class TestPointFields extends SolrTestCaseJ4 {
   @Test
   public void testIntPointFieldRangeQuery() throws Exception {
     doTestIntPointFieldRangeQuery("number_p_i", "int", false);
+    doTestIntPointFieldRangeQuery("number_p_i_ni_ns_dv", "int", false);
   }
   
   @Test
@@ -235,6 +237,7 @@ public class TestPointFields extends SolrTestCaseJ4 {
     doTestFloatPointFieldExactQuery("number_p_d");
     doTestFloatPointFieldExactQuery("number_p_d_mv");
     doTestFloatPointFieldExactQuery("number_p_d_ni_dv");
+    doTestFloatPointFieldExactQuery("number_p_d_ni_ns_dv");
    // TODO enable once MultiValued docValues are supported with PointFields
 //    doTestFloatPointFieldExactQuery("number_p_d_ni_mv_dv");
   }
@@ -258,6 +261,7 @@ public class TestPointFields extends SolrTestCaseJ4 {
   @Test
   public void testDoublePointFieldRangeQuery() throws Exception {
     doTestFloatPointFieldRangeQuery("number_p_d", "double", true);
+    doTestFloatPointFieldRangeQuery("number_p_d_ni_ns_dv", "double", true);
   }
   
   @Test
@@ -457,6 +461,7 @@ public class TestPointFields extends SolrTestCaseJ4 {
     doTestFloatPointFieldExactQuery("number_p_f");
     doTestFloatPointFieldExactQuery("number_p_f_mv");
     doTestFloatPointFieldExactQuery("number_p_f_ni_dv");
+    doTestFloatPointFieldExactQuery("number_p_f_ni_ns_dv");
 //    doTestFloatPointFieldExactQuery("number_p_f_ni_mv_dv");
   }
   
@@ -479,6 +484,7 @@ public class TestPointFields extends SolrTestCaseJ4 {
   @Test
   public void testFloatPointFieldRangeQuery() throws Exception {
     doTestFloatPointFieldRangeQuery("number_p_f", "float", false);
+    doTestFloatPointFieldRangeQuery("number_p_f_ni_ns_dv", "float", false);
   }
   
   @Test
@@ -551,6 +557,7 @@ public class TestPointFields extends SolrTestCaseJ4 {
     doTestIntPointFieldExactQuery("number_p_l", true);
     doTestIntPointFieldExactQuery("number_p_l_mv", true);
     doTestIntPointFieldExactQuery("number_p_l_ni_dv", true);
+    doTestIntPointFieldExactQuery("number_p_l_ni_ns_dv", true);
 //    doTestIntPointFieldExactQuery("number_p_i_ni_mv_dv", true);
   }
   
@@ -565,6 +572,7 @@ public class TestPointFields extends SolrTestCaseJ4 {
   @Test
   public void testLongPointFieldRangeQuery() throws Exception {
     doTestIntPointFieldRangeQuery("number_p_l", "long", true);
+    doTestIntPointFieldRangeQuery("number_p_l_ni_ns_dv", "long", true);
   }
   
   @Test