Posted to commits@lucene.apache.org by cp...@apache.org on 2017/05/30 11:28:10 UTC

[01/25] lucene-solr:jira/solr-8668: SOLR-10741: Factor out createSliceShardsStr method from HttpShardHandler.prepDistributed. (Domenico Fabio Marino via Christine Poerschke)

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-8668 8143752f7 -> 36faceaf5


SOLR-10741: Factor out createSliceShardsStr method from HttpShardHandler.prepDistributed.
(Domenico Fabio Marino via Christine Poerschke)


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e7099e4b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e7099e4b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e7099e4b

Branch: refs/heads/jira/solr-8668
Commit: e7099e4bf51bd87ed95a188c474be869c222379d
Parents: 4106e1b
Author: Christine Poerschke <cp...@apache.org>
Authored: Fri May 26 10:51:41 2017 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Fri May 26 10:52:57 2017 +0100

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  3 +++
 .../handler/component/HttpShardHandler.java     | 26 +++++++++++---------
 2 files changed, 18 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e7099e4b/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 1da3fe0..d2f42e6 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -263,6 +263,9 @@ Other Changes
 * SOLR-10659: Remove ResponseBuilder.getSortSpec use in SearchGroupShardResponseProcessor.
   (Judith Silverman via Christine Poerschke)
 
+* SOLR-10741: Factor out createSliceShardsStr method from HttpShardHandler.prepDistributed.
+  (Domenico Fabio Marino via Christine Poerschke)
+
 ==================  6.6.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e7099e4b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
index 4ec3b79..bc620b6 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
@@ -449,17 +449,7 @@ public class HttpShardHandler extends ShardHandler {
           }
         }
         // And now recreate the | delimited list of equivalent servers
-        final StringBuilder sliceShardsStr = new StringBuilder();
-        boolean first = true;
-        for (String shardUrl : shardUrls) {
-          if (first) {
-            first = false;
-          } else {
-            sliceShardsStr.append('|');
-          }
-          sliceShardsStr.append(shardUrl);
-        }
-        rb.shards[i] = sliceShardsStr.toString();
+        rb.shards[i] = createSliceShardsStr(shardUrls);
       }
     }
     String shards_rows = params.get(ShardParams.SHARDS_ROWS);
@@ -472,6 +462,20 @@ public class HttpShardHandler extends ShardHandler {
     }
   }
 
+  private static String createSliceShardsStr(final List<String> shardUrls) {
+    final StringBuilder sliceShardsStr = new StringBuilder();
+    boolean first = true;
+    for (String shardUrl : shardUrls) {
+      if (first) {
+        first = false;
+      } else {
+        sliceShardsStr.append('|');
+      }
+      sliceShardsStr.append(shardUrl);
+    }
+    return sliceShardsStr.toString();
+  }
+
 
   private void addSlices(Map<String,Slice> target, ClusterState state, SolrParams params, String collectionName, String shardKeys, boolean multiCollection) {
     DocCollection coll = state.getCollection(collectionName);
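
For reference, the extracted helper simply '|'-joins the per-slice shard URLs.
A minimal standalone sketch of that behavior (the URLs below are made up for
illustration; String.join is mentioned only to state the equivalence, it is
not what the patch uses):

    import java.util.Arrays;
    import java.util.List;

    public class SliceShardsStrDemo {
      public static void main(String[] args) {
        List<String> shardUrls = Arrays.asList(
            "http://host1:8983/solr/collection1",   // hypothetical replica URL
            "http://host2:8983/solr/collection1");  // hypothetical replica URL

        // Same first/append loop as the extracted createSliceShardsStr method:
        StringBuilder sliceShardsStr = new StringBuilder();
        boolean first = true;
        for (String shardUrl : shardUrls) {
          if (first) {
            first = false;
          } else {
            sliceShardsStr.append('|');
          }
          sliceShardsStr.append(shardUrl);
        }

        // Behaviorally identical to String.join("|", shardUrls):
        System.out.println(sliceShardsStr);
        System.out.println(sliceShardsStr.toString()
            .equals(String.join("|", shardUrls)));   // true
      }
    }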


[06/25] lucene-solr:jira/solr-8668: LUCENE-7845: RPT query by point (or simple date interval) optimization

Posted by cp...@apache.org.
LUCENE-7845: RPT query by point (or simple date interval) optimization


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d4f87b4a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d4f87b4a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d4f87b4a

Branch: refs/heads/jira/solr-8668
Commit: d4f87b4a36ca50c4361d7ec4e0858b18d9eaebe8
Parents: b23aab5
Author: David Smiley <ds...@apache.org>
Authored: Fri May 26 16:00:28 2017 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Fri May 26 16:00:28 2017 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  4 ++
 .../prefix/NumberRangePrefixTreeStrategy.java   | 24 ++++++++---
 .../spatial/prefix/PrefixTreeStrategy.java      | 20 +++++++--
 .../prefix/RecursivePrefixTreeStrategy.java     | 41 +++++++++++++++++--
 .../spatial/prefix/DateNRStrategyTest.java      | 43 +++++++++++---------
 5 files changed, 100 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d4f87b4a/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index a45c11a..0dfc709 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -88,6 +88,10 @@ Optimizations
   values using different numbers of bits per value if this proves to save
   storage. (Adrien Grand)
 
+* LUCENE-7845: Enhance spatial-extras RecursivePrefixTreeStrategy queries when the
+  query is a point (for 2D) or is a simple date interval (e.g. 1 month).  When
+  the strategy is marked as pointsOnly, the result is a TermQuery. (David Smiley)
+
 Other
 
 * LUCENE-7328: Remove LegacyNumericEncoding from GeoPointField. (Nick Knize)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d4f87b4a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java
index c727c0d..8367644 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/NumberRangePrefixTreeStrategy.java
@@ -18,18 +18,17 @@ package org.apache.lucene.spatial.prefix;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
-import org.locationtech.spatial4j.shape.Point;
-import org.locationtech.spatial4j.shape.Shape;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.spatial.prefix.tree.Cell;
 import org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree;
 import org.apache.lucene.util.Bits;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
 
 import static org.apache.lucene.spatial.prefix.tree.NumberRangePrefixTree.UnitNRShape;
 
@@ -57,9 +56,22 @@ public class NumberRangePrefixTreeStrategy extends RecursivePrefixTreeStrategy {
   }
 
   @Override
-  protected Iterator<Cell> createCellIteratorToIndex(Shape shape, int detailLevel, Iterator<Cell> reuse) {
-    //levels doesn't actually matter; NumberRange based Shapes have their own "level".
-    return super.createCellIteratorToIndex(shape, grid.getMaxLevels(), reuse);
+  protected boolean isPointShape(Shape shape) {
+    if (shape instanceof NumberRangePrefixTree.UnitNRShape) {
+      return ((NumberRangePrefixTree.UnitNRShape)shape).getLevel() == grid.getMaxLevels();
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  protected boolean isGridAlignedShape(Shape shape) {
+    // any UnitNRShape other than the world is a single cell/term
+    if (shape instanceof NumberRangePrefixTree.UnitNRShape) {
+      return ((NumberRangePrefixTree.UnitNRShape)shape).getLevel() > 0;
+    } else {
+      return false;
+    }
   }
 
   /** Unsupported. */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d4f87b4a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
index e9f43fd..43851c7 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
@@ -21,8 +21,6 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.locationtech.spatial4j.shape.Point;
-import org.locationtech.spatial4j.shape.Shape;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexOptions;
@@ -34,6 +32,10 @@ import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.util.ShapeFieldCacheDistanceValueSource;
 import org.apache.lucene.util.Bits;
+import org.locationtech.spatial4j.shape.Circle;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Rectangle;
+import org.locationtech.spatial4j.shape.Shape;
 
 /**
  * An abstract SpatialStrategy based on {@link SpatialPrefixTree}. The two
@@ -163,7 +165,7 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy {
   }
 
   protected Iterator<Cell> createCellIteratorToIndex(Shape shape, int detailLevel, Iterator<Cell> reuse) {
-    if (pointsOnly && !(shape instanceof Point)) {
+    if (pointsOnly && !isPointShape(shape)) {
       throw new IllegalArgumentException("pointsOnly is true yet a " + shape.getClass() + " is given for indexing");
     }
     return grid.getTreeCellIterator(shape, detailLevel);//TODO should take a re-use iterator
@@ -205,4 +207,16 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy {
                                    Shape inputShape, final int facetLevel, int maxCells) throws IOException {
     return HeatmapFacetCounter.calcFacets(this, context, topAcceptDocs, inputShape, facetLevel, maxCells);
   }
+
+  protected boolean isPointShape(Shape shape) {
+    if (shape instanceof Point) {
+      return true;
+    } else if (shape instanceof Circle) {
+      return ((Circle) shape).getRadius() == 0.0;
+    } else if (shape instanceof Rectangle) {
+      Rectangle rect = (Rectangle) shape;
+      return rect.getWidth() == 0.0 && rect.getHeight() == 0.0;
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d4f87b4a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
index d3d1626..7c79200 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
@@ -20,9 +20,9 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import org.locationtech.spatial4j.shape.Point;
-import org.locationtech.spatial4j.shape.Shape;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.spatial.prefix.tree.Cell;
 import org.apache.lucene.spatial.prefix.tree.CellIterator;
 import org.apache.lucene.spatial.prefix.tree.LegacyCell;
@@ -30,6 +30,7 @@ import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
+import org.locationtech.spatial4j.shape.Shape;
 
 /**
  * A {@link PrefixTreeStrategy} which uses {@link AbstractVisitingPrefixTreeQuery}.
@@ -121,7 +122,7 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy {
 
   @Override
   protected Iterator<Cell> createCellIteratorToIndex(Shape shape, int detailLevel, Iterator<Cell> reuse) {
-    if (shape instanceof Point || !pruneLeafyBranches)
+    if (!pruneLeafyBranches || isGridAlignedShape(shape))
       return super.createCellIteratorToIndex(shape, detailLevel, reuse);
 
     List<Cell> cells = new ArrayList<>(4096);
@@ -177,6 +178,9 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy {
     int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, distErrPct));
 
     if (op == SpatialOperation.Intersects) {
+      if (isGridAlignedShape(args.getShape())) {
+        return makeGridShapeIntersectsQuery(args.getShape());
+      }
       return new IntersectsPrefixTreeQuery(
           shape, getFieldName(), grid, detailLevel, prefixGridScanLevel);
     } else if (op == SpatialOperation.IsWithin) {
@@ -189,4 +193,35 @@ public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy {
     }
     throw new UnsupportedSpatialOperation(op);
   }
+
+  /**
+   * A quick check of the shape to see if it is perfectly aligned to a grid.
+   * Points always are, since they are indivisible.  It's okay to return false
+   * if the shape actually is aligned; this is an optimization hint.
+   */
+  protected boolean isGridAlignedShape(Shape shape) {
+    return isPointShape(shape);
+  }
+
+  /** {@link #makeQuery(SpatialArgs)} specialized for the query being a grid square. */
+  protected Query makeGridShapeIntersectsQuery(Shape gridShape) {
+    assert isGridAlignedShape(gridShape);
+    if (isPointsOnly()) {
+      // Awesome; this will be equivalent to a TermQuery.
+      Iterator<Cell> cellIterator = grid.getTreeCellIterator(gridShape, grid.getMaxLevels());
+      // get last cell
+      Cell cell = cellIterator.next();
+      while (cellIterator.hasNext()) {
+        int prevLevel = cell.getLevel();
+        cell = cellIterator.next();
+        assert prevLevel < cell.getLevel();
+      }
+      return new TermQuery(new Term(getFieldName(), cell.getTokenBytesWithLeaf(null)));
+    } else {
+      // Well there could be parent cells. But we can reduce the "scan level" which will be slower for a point query.
+      // TODO: AVPTQ will still scan the bottom nonetheless; file an issue to eliminate that
+      return new IntersectsPrefixTreeQuery(
+          gridShape, getFieldName(), grid, getGrid().getMaxLevels(), getGrid().getMaxLevels() + 1);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d4f87b4a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/DateNRStrategyTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/DateNRStrategyTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/DateNRStrategyTest.java
index 77c2529..54296da 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/DateNRStrategyTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/DateNRStrategyTest.java
@@ -27,7 +27,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.locationtech.spatial4j.shape.Shape;
 
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt;
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
 
 public class DateNRStrategyTest extends RandomSpatialOpStrategyTestCase {
@@ -42,17 +42,8 @@ public class DateNRStrategyTest extends RandomSpatialOpStrategyTestCase {
   public void setUp() throws Exception {
     super.setUp();
     tree = DateRangePrefixTree.INSTANCE;
-    if (randomBoolean()) {
-      strategy = new NumberRangePrefixTreeStrategy(tree, "dateRange");
-    } else {
-      //Test the format that existed <= Lucene 5.0
-      strategy = new NumberRangePrefixTreeStrategy(tree, "dateRange") {
-        @Override
-        protected CellToBytesRefIterator newCellToBytesRefIterator() {
-          return new CellToBytesRefIterator50();
-        }
-      };
-    }
+    strategy = new NumberRangePrefixTreeStrategy(tree, "dateRange");
+    ((NumberRangePrefixTreeStrategy)strategy).setPointsOnly(randomInt() % 5 == 0);
     Calendar tmpCal = tree.newCal();
     int randomCalWindowField = randomIntBetween(Calendar.YEAR, Calendar.MILLISECOND);
     tmpCal.add(randomCalWindowField, 2_000);
@@ -79,15 +70,16 @@ public class DateNRStrategyTest extends RandomSpatialOpStrategyTestCase {
 
   @Test
   public void testWithinSame() throws IOException {
-    final Calendar cal = tree.newCal();
+    Shape shape = randomIndexedShape();
     testOperation(
-        tree.toShape(cal),
+        shape,
         SpatialOperation.IsWithin,
-        tree.toShape(cal), true);//is within itself
+        shape, true);//is within itself
   }
 
   @Test
   public void testWorld() throws IOException {
+    ((NumberRangePrefixTreeStrategy)strategy).setPointsOnly(false);
     testOperation(
         tree.toShape(tree.newCal()),//world matches everything
         SpatialOperation.Contains,
@@ -96,6 +88,7 @@ public class DateNRStrategyTest extends RandomSpatialOpStrategyTestCase {
 
   @Test
   public void testBugInitIterOptimization() throws Exception {
+    ((NumberRangePrefixTreeStrategy)strategy).setPointsOnly(false);
     //bug due to fast path initIter() optimization
     testOperation(
         tree.parseShape("[2014-03-27T23 TO 2014-04-01T01]"),
@@ -114,6 +107,21 @@ public class DateNRStrategyTest extends RandomSpatialOpStrategyTestCase {
 
   @Override
   protected Shape randomIndexedShape() {
+    if (((NumberRangePrefixTreeStrategy)strategy).isPointsOnly()) {
+      Calendar cal = tree.newCal();
+      cal.setTimeInMillis(random().nextLong());
+      return tree.toShape(cal);
+    } else {
+      return randomShape();
+    }
+  }
+
+  @Override
+  protected Shape randomQueryShape() {
+    return randomShape();
+  }
+
+  private Shape randomShape() {
     Calendar cal1 = randomCalendar();
     UnitNRShape s1 = tree.toShape(cal1);
     if (rarely()) {
@@ -144,9 +152,4 @@ public class DateNRStrategyTest extends RandomSpatialOpStrategyTestCase {
     }
     return cal;
   }
-
-  @Override
-  protected Shape randomQueryShape() {
-    return randomIndexedShape();
-  }
 }
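
To make the new fast path concrete: with a pointsOnly strategy, an Intersects
query for a single date should reduce to a single-term lookup. A hedged usage
sketch (the "dateRange" field name and the class name are illustrative, not
from the patch; per the change above, the printed query is expected to be a
TermQuery):

    import java.util.Calendar;

    import org.apache.lucene.search.Query;
    import org.apache.lucene.spatial.prefix.NumberRangePrefixTreeStrategy;
    import org.apache.lucene.spatial.prefix.tree.DateRangePrefixTree;
    import org.apache.lucene.spatial.query.SpatialArgs;
    import org.apache.lucene.spatial.query.SpatialOperation;

    public class PointFastPathDemo {
      public static void main(String[] args) {
        DateRangePrefixTree tree = DateRangePrefixTree.INSTANCE;
        NumberRangePrefixTreeStrategy strategy =
            new NumberRangePrefixTreeStrategy(tree, "dateRange");
        strategy.setPointsOnly(true); // every indexed value is a single instant

        Calendar cal = tree.newCal();
        cal.setTimeInMillis(0);       // fully specified down to milliseconds

        // A max-level UnitNRShape is grid-aligned, so makeQuery should take
        // the makeGridShapeIntersectsQuery branch and return a TermQuery.
        Query q = strategy.makeQuery(
            new SpatialArgs(SpatialOperation.Intersects, tree.toShape(cal)));
        System.out.println(q.getClass().getSimpleName() + ": " + q);
      }
    }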


[11/25] lucene-solr:jira/solr-8668: SOLR-10004: Placing the experimental tag properly

Posted by cp...@apache.org.
SOLR-10004: Placing the experimental tag properly


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/4944ddc3
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/4944ddc3
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/4944ddc3

Branch: refs/heads/jira/solr-8668
Commit: 4944ddc305ba731bb9011b82bed5a99e36403601
Parents: 45b26e3
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Sat May 27 03:40:32 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Sat May 27 03:41:32 2017 +0530

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/update/PeerSync.java  | 3 ++-
 solr/core/src/java/org/apache/solr/update/UpdateLog.java | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4944ddc3/solr/core/src/java/org/apache/solr/update/PeerSync.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/PeerSync.java b/solr/core/src/java/org/apache/solr/update/PeerSync.java
index c6bfbbf..7371a94 100644
--- a/solr/core/src/java/org/apache/solr/update/PeerSync.java
+++ b/solr/core/src/java/org/apache/solr/update/PeerSync.java
@@ -66,9 +66,10 @@ import static org.apache.solr.update.processor.DistributedUpdateProcessor.Distri
 import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
 
 /**
- * @lucene.experimental
 * This class is useful for performing peer-to-peer synchronization of recently indexed update commands during
 * the recovery process.
+ *
+ * @lucene.experimental
  */
 public class PeerSync implements SolrMetricProducer {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/4944ddc3/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 4928aa1..bb7f5f5 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -81,9 +81,10 @@ import static org.apache.solr.update.processor.DistributingUpdateProcessorFactor
 
 
 /** 
- * @lucene.experimental 
  * This holds references to the transaction logs and pointers for the document IDs to their
  * exact positions in the transaction logs.
+ *
+ * @lucene.experimental
  */
 public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer {
   private static final long STATUS_TIME = TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);


[19/25] lucene-solr:jira/solr-8668: LUCENE-7850: Move support for legacy numerics to solr/.

Posted by cp...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieLongField.java b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
index a93d0ce..56b964f 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
@@ -23,7 +23,7 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.LongDocValues;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/search/LegacyNumericRangeQueryBuilder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/LegacyNumericRangeQueryBuilder.java b/solr/core/src/java/org/apache/solr/search/LegacyNumericRangeQueryBuilder.java
index a2d0ed6..931634f 100644
--- a/solr/core/src/java/org/apache/solr/search/LegacyNumericRangeQueryBuilder.java
+++ b/solr/core/src/java/org/apache/solr/search/LegacyNumericRangeQueryBuilder.java
@@ -17,8 +17,8 @@
 package org.apache.solr.search;
 
 import org.apache.lucene.search.Query;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
@@ -26,10 +26,10 @@ import org.apache.lucene.queryparser.xml.builders.PointRangeQueryBuilder;
 import org.w3c.dom.Element;
 
 /**
- * Creates a {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The table below specifies the required
+ * Creates a {@link org.apache.solr.legacy.LegacyNumericRangeQuery}. The table below specifies the required
  * attributes and the defaults if optional attributes are omitted. For more
  * detail on what each of the attributes actually do, consult the documentation
- * for {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}:
+ * for {@link org.apache.solr.legacy.LegacyNumericRangeQuery}:
  * <table summary="supported attributes">
  * <tr>
  * <th>Attribute name</th>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/search/QueryParsing.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/QueryParsing.java b/solr/core/src/java/org/apache/solr/search/QueryParsing.java
index 381276c..bbce610 100644
--- a/solr/core/src/java/org/apache/solr/search/QueryParsing.java
+++ b/solr/core/src/java/org/apache/solr/search/QueryParsing.java
@@ -17,7 +17,7 @@
 package org.apache.solr.search;
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java b/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
index d526cf3..fa6e87c 100644
--- a/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
+++ b/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
@@ -34,7 +34,7 @@ import org.apache.lucene.util.Bits;
  * Constrains search results to only match those which also match a provided
  * query.  
  *
- * <p> This could be used, for example, with a {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} on a suitably
+ * <p> This could be used, for example, with a {@link org.apache.solr.legacy.LegacyNumericRangeQuery} on a suitably
  * formatted date field to implement date filtering.  One could re-use a single
  * CachingWrapperFilter(QueryWrapperFilter) that matches, e.g., only documents modified 
  * within the last week.  This would only need to be reconstructed once per day.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java b/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
index 3ff432d..17b7d3b 100644
--- a/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
@@ -25,7 +25,7 @@ import java.util.regex.Pattern;
 
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.mlt.MoreLikeThis;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java b/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
index dea161d..cc87e09 100644
--- a/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
@@ -16,7 +16,7 @@
  */
 package org.apache.solr.search.mlt;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.mlt.MoreLikeThis;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java b/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java
index 89e6f0b..87f5f4c 100644
--- a/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java
+++ b/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java
@@ -27,7 +27,7 @@ import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -159,8 +159,8 @@ public interface FieldCache {
   };
   
   /**
-   * A parser instance for int values encoded by {@link org.apache.lucene.legacy.LegacyNumericUtils}, e.g. when indexed
-   * via {@link org.apache.lucene.legacy.LegacyIntField}/{@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
+   * A parser instance for int values encoded by {@link org.apache.solr.legacy.LegacyNumericUtils}, e.g. when indexed
+   * via {@link org.apache.solr.legacy.LegacyIntField}/{@link org.apache.solr.legacy.LegacyNumericTokenStream}.
    * @deprecated Index with points and use {@link #INT_POINT_PARSER} instead.
    */
   @Deprecated
@@ -182,8 +182,8 @@ public interface FieldCache {
   };
 
   /**
-   * A parser instance for float values encoded with {@link org.apache.lucene.legacy.LegacyNumericUtils}, e.g. when indexed
-   * via {@link org.apache.lucene.legacy.LegacyFloatField}/{@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
+   * A parser instance for float values encoded with {@link org.apache.solr.legacy.LegacyNumericUtils}, e.g. when indexed
+   * via {@link org.apache.solr.legacy.LegacyFloatField}/{@link org.apache.solr.legacy.LegacyNumericTokenStream}.
    * @deprecated Index with points and use {@link #FLOAT_POINT_PARSER} instead.
    */
   @Deprecated
@@ -207,8 +207,8 @@ public interface FieldCache {
   };
 
   /**
-   * A parser instance for long values encoded by {@link org.apache.lucene.legacy.LegacyNumericUtils}, e.g. when indexed
-   * via {@link org.apache.lucene.legacy.LegacyLongField}/{@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
+   * A parser instance for long values encoded by {@link org.apache.solr.legacy.LegacyNumericUtils}, e.g. when indexed
+   * via {@link org.apache.solr.legacy.LegacyLongField}/{@link org.apache.solr.legacy.LegacyNumericTokenStream}.
    * @deprecated Index with points and use {@link #LONG_POINT_PARSER} instead.
    */
   @Deprecated
@@ -229,8 +229,8 @@ public interface FieldCache {
   };
 
   /**
-   * A parser instance for double values encoded with {@link org.apache.lucene.legacy.LegacyNumericUtils}, e.g. when indexed
-   * via {@link org.apache.lucene.legacy.LegacyDoubleField}/{@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
+   * A parser instance for double values encoded with {@link org.apache.solr.legacy.LegacyNumericUtils}, e.g. when indexed
+   * via {@link org.apache.solr.legacy.LegacyDoubleField}/{@link org.apache.solr.legacy.LegacyNumericTokenStream}.
    * @deprecated Index with points and use {@link #DOUBLE_POINT_PARSER} instead.
    */
   @Deprecated
@@ -277,7 +277,7 @@ public interface FieldCache {
    * @param parser
    *          Computes long for string values. May be {@code null} if the
    *          requested field was indexed as {@link NumericDocValuesField} or
-   *          {@link org.apache.lucene.legacy.LegacyLongField}.
+   *          {@link org.apache.solr.legacy.LegacyLongField}.
    * @return The values in the given field for each document.
    * @throws IOException
    *           If any error occurs.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
index 7006b4a..7158e31 100644
--- a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
+++ b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
@@ -87,7 +87,7 @@ public class UninvertingReader extends FilterLeafReader {
      */
     DOUBLE_POINT,
     /** 
-     * Single-valued Integer, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyIntField})
+     * Single-valued Integer, (e.g. indexed with {@link org.apache.solr.legacy.LegacyIntField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link NumericDocValuesField}.
@@ -96,7 +96,7 @@ public class UninvertingReader extends FilterLeafReader {
     @Deprecated
     LEGACY_INTEGER,
     /** 
-     * Single-valued Long, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyLongField})
+     * Single-valued Long, (e.g. indexed with {@link org.apache.solr.legacy.LegacyLongField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link NumericDocValuesField}.
@@ -105,7 +105,7 @@ public class UninvertingReader extends FilterLeafReader {
     @Deprecated
     LEGACY_LONG,
     /** 
-     * Single-valued Float, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyFloatField})
+     * Single-valued Float, (e.g. indexed with {@link org.apache.solr.legacy.LegacyFloatField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link NumericDocValuesField}.
@@ -114,7 +114,7 @@ public class UninvertingReader extends FilterLeafReader {
     @Deprecated
     LEGACY_FLOAT,
     /** 
-     * Single-valued Double, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyDoubleField})
+     * Single-valued Double, (e.g. indexed with {@link org.apache.solr.legacy.LegacyDoubleField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link NumericDocValuesField}.
@@ -144,28 +144,28 @@ public class UninvertingReader extends FilterLeafReader {
      */
     SORTED_SET_BINARY,
     /** 
-     * Multi-valued Integer, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyIntField})
+     * Multi-valued Integer, (e.g. indexed with {@link org.apache.solr.legacy.LegacyIntField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link SortedSetDocValuesField}.
      */
     SORTED_SET_INTEGER,
     /** 
-     * Multi-valued Float, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyFloatField})
+     * Multi-valued Float, (e.g. indexed with {@link org.apache.solr.legacy.LegacyFloatField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link SortedSetDocValuesField}.
      */
     SORTED_SET_FLOAT,
     /** 
-     * Multi-valued Long, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyLongField})
+     * Multi-valued Long, (e.g. indexed with {@link org.apache.solr.legacy.LegacyLongField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link SortedSetDocValuesField}.
      */
     SORTED_SET_LONG,
     /** 
-     * Multi-valued Double, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyDoubleField})
+     * Multi-valued Double, (e.g. indexed with {@link org.apache.solr.legacy.LegacyDoubleField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link SortedSetDocValuesField}.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/update/VersionInfo.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/VersionInfo.java b/solr/core/src/java/org/apache/solr/update/VersionInfo.java
index 061e7f6..67b4042 100644
--- a/solr/core/src/java/org/apache/solr/update/VersionInfo.java
+++ b/solr/core/src/java/org/apache/solr/update/VersionInfo.java
@@ -25,7 +25,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.IndexSearcher;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/legacy/TestLegacyField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/legacy/TestLegacyField.java b/solr/core/src/test/org/apache/solr/legacy/TestLegacyField.java
new file mode 100644
index 0000000..5cfac9a
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/legacy/TestLegacyField.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestLegacyField extends LuceneTestCase {
+  
+  public void testLegacyDoubleField() throws Exception {
+    Field fields[] = new Field[] {
+        new LegacyDoubleField("foo", 5d, Field.Store.NO),
+        new LegacyDoubleField("foo", 5d, Field.Store.YES)
+    };
+
+    for (Field field : fields) {
+      trySetByteValue(field);
+      trySetBytesValue(field);
+      trySetBytesRefValue(field);
+      field.setDoubleValue(6d); // ok
+      trySetIntValue(field);
+      trySetFloatValue(field);
+      trySetLongValue(field);
+      trySetReaderValue(field);
+      trySetShortValue(field);
+      trySetStringValue(field);
+      trySetTokenStreamValue(field);
+    
+      assertEquals(6d, field.numericValue().doubleValue(), 0.0d);
+    }
+  }
+  
+  public void testLegacyFloatField() throws Exception {
+    Field fields[] = new Field[] {
+        new LegacyFloatField("foo", 5f, Field.Store.NO),
+        new LegacyFloatField("foo", 5f, Field.Store.YES)
+    };
+
+    for (Field field : fields) {
+      trySetByteValue(field);
+      trySetBytesValue(field);
+      trySetBytesRefValue(field);
+      trySetDoubleValue(field);
+      trySetIntValue(field);
+      field.setFloatValue(6f); // ok
+      trySetLongValue(field);
+      trySetReaderValue(field);
+      trySetShortValue(field);
+      trySetStringValue(field);
+      trySetTokenStreamValue(field);
+      
+      assertEquals(6f, field.numericValue().floatValue(), 0.0f);
+    }
+  }
+  
+  public void testLegacyIntField() throws Exception {
+    Field fields[] = new Field[] {
+        new LegacyIntField("foo", 5, Field.Store.NO),
+        new LegacyIntField("foo", 5, Field.Store.YES)
+    };
+
+    for (Field field : fields) {
+      trySetByteValue(field);
+      trySetBytesValue(field);
+      trySetBytesRefValue(field);
+      trySetDoubleValue(field);
+      field.setIntValue(6); // ok
+      trySetFloatValue(field);
+      trySetLongValue(field);
+      trySetReaderValue(field);
+      trySetShortValue(field);
+      trySetStringValue(field);
+      trySetTokenStreamValue(field);
+      
+      assertEquals(6, field.numericValue().intValue());
+    }
+  }
+  
+  public void testLegacyLongField() throws Exception {
+    Field fields[] = new Field[] {
+        new LegacyLongField("foo", 5L, Field.Store.NO),
+        new LegacyLongField("foo", 5L, Field.Store.YES)
+    };
+
+    for (Field field : fields) {
+      trySetByteValue(field);
+      trySetBytesValue(field);
+      trySetBytesRefValue(field);
+      trySetDoubleValue(field);
+      trySetIntValue(field);
+      trySetFloatValue(field);
+      field.setLongValue(6); // ok
+      trySetReaderValue(field);
+      trySetShortValue(field);
+      trySetStringValue(field);
+      trySetTokenStreamValue(field);
+      
+      assertEquals(6L, field.numericValue().longValue());
+    }
+  }
+  
+  private void trySetByteValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setByteValue((byte) 10);
+    });
+  }
+
+  private void trySetBytesValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setBytesValue(new byte[] { 5, 5 });
+    });
+  }
+  
+  private void trySetBytesRefValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setBytesValue(new BytesRef("bogus"));
+    });
+  }
+  
+  private void trySetDoubleValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setDoubleValue(Double.MAX_VALUE);
+    });
+  }
+  
+  private void trySetIntValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setIntValue(Integer.MAX_VALUE);
+    });
+  }
+  
+  private void trySetLongValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setLongValue(Long.MAX_VALUE);
+    });
+  }
+  
+  private void trySetFloatValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setFloatValue(Float.MAX_VALUE);
+    });
+  }
+  
+  private void trySetReaderValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setReaderValue(new StringReader("BOO!"));
+    });
+  }
+  
+  private void trySetShortValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setShortValue(Short.MAX_VALUE);
+    });
+  }
+  
+  private void trySetStringValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setStringValue("BOO!");
+    });
+  }
+  
+  private void trySetTokenStreamValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setTokenStream(new CannedTokenStream(new Token("foo", 0, 3)));
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/legacy/TestLegacyFieldReuse.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/legacy/TestLegacyFieldReuse.java b/solr/core/src/test/org/apache/solr/legacy/TestLegacyFieldReuse.java
new file mode 100644
index 0000000..39d8d01
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/legacy/TestLegacyFieldReuse.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyNumericTokenStream;
+import org.apache.solr.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute;
+
+/** test tokenstream reuse by DefaultIndexingChain */
+public class TestLegacyFieldReuse extends BaseTokenStreamTestCase {
+  
+  public void testNumericReuse() throws IOException {
+    LegacyIntField legacyIntField = new LegacyIntField("foo", 5, Field.Store.NO);
+    
+    // passing null
+    TokenStream ts = legacyIntField.tokenStream(null, null);
+    assertTrue(ts instanceof LegacyNumericTokenStream);
+    assertEquals(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, ((LegacyNumericTokenStream)ts).getPrecisionStep());
+    assertNumericContents(5, ts);
+
+    // now reuse previous stream
+    legacyIntField = new LegacyIntField("foo", 20, Field.Store.NO);
+    TokenStream ts2 = legacyIntField.tokenStream(null, ts);
+    assertSame(ts, ts2);
+    assertNumericContents(20, ts);
+    
+    // pass a bogus stream and ensure it's still ok
+    legacyIntField = new LegacyIntField("foo", 2343, Field.Store.NO);
+    TokenStream bogus = new CannedTokenStream(new Token("bogus", 0, 5));
+    ts = legacyIntField.tokenStream(null, bogus);
+    assertNotSame(bogus, ts);
+    assertNumericContents(2343, ts);
+    
+    // pass another bogus stream (numeric, but different precision step!)
+    legacyIntField = new LegacyIntField("foo", 42, Field.Store.NO);
+    assert 3 != LegacyNumericUtils.PRECISION_STEP_DEFAULT;
+    bogus = new LegacyNumericTokenStream(3);
+    ts = legacyIntField.tokenStream(null, bogus);
+    assertNotSame(bogus, ts);
+    assertNumericContents(42, ts);
+  }
+   
+  private void assertNumericContents(int value, TokenStream ts) throws IOException {
+    assertTrue(ts instanceof LegacyNumericTokenStream);
+    LegacyNumericTermAttribute numericAtt = ts.getAttribute(LegacyNumericTermAttribute.class);
+    ts.reset();
+    boolean seen = false;
+    while (ts.incrementToken()) {
+      if (numericAtt.getShift() == 0) {
+        assertEquals(value, numericAtt.getRawValue());
+        seen = true;
+      }
+    }
+    ts.end();
+    ts.close();
+    assertTrue(seen);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/legacy/TestLegacyNumericUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/legacy/TestLegacyNumericUtils.java b/solr/core/src/test/org/apache/solr/legacy/TestLegacyNumericUtils.java
new file mode 100644
index 0000000..a87e28a
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/legacy/TestLegacyNumericUtils.java
@@ -0,0 +1,571 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Random;
+
+import org.apache.solr.legacy.LegacyNumericUtils;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LongBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+
+public class TestLegacyNumericUtils extends LuceneTestCase {
+
+  public void testLongConversionAndOrdering() throws Exception {
+    // generate a series of encoded longs, each numerically one bigger than the one before
+    BytesRefBuilder last = new BytesRefBuilder();
+    BytesRefBuilder act = new BytesRefBuilder();
+    for (long l=-100000L; l<100000L; l++) {
+      LegacyNumericUtils.longToPrefixCoded(l, 0, act);
+      if (last!=null) {
+        // test if smaller
+        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
+        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
+      }
+      // test if back and forward conversion works
+      assertEquals("forward and back conversion should generate same long", l, LegacyNumericUtils.prefixCodedToLong(act.get()));
+      // next step
+      last.copyBytes(act);
+    }
+  }
+
+  public void testIntConversionAndOrdering() throws Exception {
+    // generate a series of encoded ints, each numerically one bigger than the one before
+    BytesRefBuilder act = new BytesRefBuilder();
+    BytesRefBuilder last = new BytesRefBuilder();
+    for (int i=-100000; i<100000; i++) {
+      LegacyNumericUtils.intToPrefixCoded(i, 0, act);
+      if (last!=null) {
+        // test if smaller
+        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
+        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
+      }
+      // test if back and forward conversion works
+      assertEquals("forward and back conversion should generate same int", i, LegacyNumericUtils.prefixCodedToInt(act.get()));
+      // next step
+      last.copyBytes(act.get());
+    }
+  }
+
+  public void testLongSpecialValues() throws Exception {
+    long[] vals=new long[]{
+      Long.MIN_VALUE, Long.MIN_VALUE+1, Long.MIN_VALUE+2, -5003400000000L,
+      -4000L, -3000L, -2000L, -1000L, -1L, 0L, 1L, 10L, 300L, 50006789999999999L, Long.MAX_VALUE-2, Long.MAX_VALUE-1, Long.MAX_VALUE
+    };
+    BytesRefBuilder[] prefixVals = new BytesRefBuilder[vals.length];
+    
+    for (int i=0; i<vals.length; i++) {
+      prefixVals[i] = new BytesRefBuilder();
+      LegacyNumericUtils.longToPrefixCoded(vals[i], 0, prefixVals[i]);
+      
+      // check forward and back conversion
+      assertEquals( "forward and back conversion should generate same long", vals[i], LegacyNumericUtils.prefixCodedToLong(prefixVals[i].get()) );
+
+      // test if decoding values as int fails correctly
+      final int index = i;
+      expectThrows(NumberFormatException.class, () -> {
+        LegacyNumericUtils.prefixCodedToInt(prefixVals[index].get());
+      });
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<prefixVals.length; i++) {
+      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
+    }
+        
+    // check the prefix encoding: lower precision should make the difference from the original value equal to the removed lower bits
+    final BytesRefBuilder ref = new BytesRefBuilder();
+    for (int i=0; i<vals.length; i++) {
+      for (int j=0; j<64; j++) {
+        LegacyNumericUtils.longToPrefixCoded(vals[i], j, ref);
+        long prefixVal= LegacyNumericUtils.prefixCodedToLong(ref.get());
+        long mask=(1L << j) - 1L;
+        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
+      }
+    }
+  }
+
+  public void testIntSpecialValues() throws Exception {
+    int[] vals=new int[]{
+      Integer.MIN_VALUE, Integer.MIN_VALUE+1, Integer.MIN_VALUE+2, -64765767,
+      -4000, -3000, -2000, -1000, -1, 0, 1, 10, 300, 765878989, Integer.MAX_VALUE-2, Integer.MAX_VALUE-1, Integer.MAX_VALUE
+    };
+    BytesRefBuilder[] prefixVals=new BytesRefBuilder[vals.length];
+    
+    for (int i=0; i<vals.length; i++) {
+      prefixVals[i] = new BytesRefBuilder();
+      LegacyNumericUtils.intToPrefixCoded(vals[i], 0, prefixVals[i]);
+      
+      // check forward and back conversion
+      assertEquals( "forward and back conversion should generate same int", vals[i], LegacyNumericUtils.prefixCodedToInt(prefixVals[i].get()) );
+      
+      // test if decoding values as long fails correctly
+      final int index = i;
+      expectThrows(NumberFormatException.class, () -> {
+        LegacyNumericUtils.prefixCodedToLong(prefixVals[index].get());
+      });
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<prefixVals.length; i++) {
+      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
+    }
+    
+    // check the prefix encoding: lower precision should make the difference from the original value equal to the removed lower bits
+    final BytesRefBuilder ref = new BytesRefBuilder();
+    for (int i=0; i<vals.length; i++) {
+      for (int j=0; j<32; j++) {
+        LegacyNumericUtils.intToPrefixCoded(vals[i], j, ref);
+        int prefixVal= LegacyNumericUtils.prefixCodedToInt(ref.get());
+        int mask=(1 << j) - 1;
+        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
+      }
+    }
+  }
+
+  public void testDoubles() throws Exception {
+    double[] vals=new double[]{
+      Double.NEGATIVE_INFINITY, -2.3E25, -1.0E15, -1.0, -1.0E-1, -1.0E-2, -0.0, 
+      +0.0, 1.0E-2, 1.0E-1, 1.0, 1.0E15, 2.3E25, Double.POSITIVE_INFINITY, Double.NaN
+    };
+    long[] longVals=new long[vals.length];
+    
+    // check forward and back conversion
+    for (int i=0; i<vals.length; i++) {
+      longVals[i]= NumericUtils.doubleToSortableLong(vals[i]);
+      assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], NumericUtils.sortableLongToDouble(longVals[i]))==0 );
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<longVals.length; i++) {
+      assertTrue( "check sort order", longVals[i-1] < longVals[i] );
+    }
+  }
+
+  public static final double[] DOUBLE_NANs = {
+    Double.NaN,
+    Double.longBitsToDouble(0x7ff0000000000001L),
+    Double.longBitsToDouble(0x7fffffffffffffffL),
+    Double.longBitsToDouble(0xfff0000000000001L),
+    Double.longBitsToDouble(0xffffffffffffffffL)
+  };
+
+  public void testSortableDoubleNaN() {
+    final long plusInf = NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
+    for (double nan : DOUBLE_NANs) {
+      assertTrue(Double.isNaN(nan));
+      final long sortable = NumericUtils.doubleToSortableLong(nan);
+      assertTrue("Double not sorted correctly: " + nan + ", long repr: " 
+          + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
+    }
+  }
+  
+  public void testFloats() throws Exception {
+    float[] vals=new float[]{
+      Float.NEGATIVE_INFINITY, -2.3E25f, -1.0E15f, -1.0f, -1.0E-1f, -1.0E-2f, -0.0f, 
+      +0.0f, 1.0E-2f, 1.0E-1f, 1.0f, 1.0E15f, 2.3E25f, Float.POSITIVE_INFINITY, Float.NaN
+    };
+    int[] intVals=new int[vals.length];
+    
+    // check forward and back conversion
+    for (int i=0; i<vals.length; i++) {
+      intVals[i]= NumericUtils.floatToSortableInt(vals[i]);
+      assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], NumericUtils.sortableIntToFloat(intVals[i]))==0 );
+    }
+    
+    // check sort order (intVals should be ascending)
+    for (int i=1; i<intVals.length; i++) {
+      assertTrue( "check sort order", intVals[i-1] < intVals[i] );
+    }
+  }
+
+  public static final float[] FLOAT_NANs = {
+    Float.NaN,
+    Float.intBitsToFloat(0x7f800001),
+    Float.intBitsToFloat(0x7fffffff),
+    Float.intBitsToFloat(0xff800001),
+    Float.intBitsToFloat(0xffffffff)
+  };
+
+  public void testSortableFloatNaN() {
+    final int plusInf = NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
+    for (float nan : FLOAT_NANs) {
+      assertTrue(Float.isNaN(nan));
+      final int sortable = NumericUtils.floatToSortableInt(nan);
+      assertTrue("Float not sorted correctly: " + nan + ", int repr: " 
+          + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
+    }
+  }
+
+  // INFO: Tests for trieCodeLong()/trieCodeInt() are not needed because they are implicitly tested by the range filter tests
+  
+  /** Note: The neededBounds Iterable must contain unsigned values (this makes it easier to understand what's happening) */
+  private void assertLongRangeSplit(final long lower, final long upper, int precisionStep,
+    final boolean useBitSet, final Iterable<Long> expectedBounds, final Iterable<Integer> expectedShifts
+  ) {
+    // Cannot use FixedBitSet since the range length could exceed the int range:
+    final LongBitSet bits=useBitSet ? new LongBitSet(upper-lower+1) : null;
+    final Iterator<Long> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
+    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
+
+    LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
+      @Override
+      public void addRange(long min, long max, int shift) {
+        assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
+        if (useBitSet) for (long l = min; l <= max; l++) {
+          assertFalse("ranges should not overlap", bits.getAndSet(l - lower));
+          // extra exit condition to prevent overflow on MAX_VALUE
+          if (l == max) break;
+        }
+        if (neededBounds == null || neededShifts == null)
+          return;
+        // make unsigned longs for easier display and understanding
+        min ^= 0x8000000000000000L;
+        max ^= 0x8000000000000000L;
+        //System.out.println("0x"+Long.toHexString(min>>>shift)+"L,0x"+Long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
+        assertEquals("shift", neededShifts.next().intValue(), shift);
+        assertEquals("inner min bound", neededBounds.next().longValue(), min >>> shift);
+        assertEquals("inner max bound", neededBounds.next().longValue(), max >>> shift);
+      }
+    }, precisionStep, lower, upper);
+    
+    if (useBitSet) {
+      // after flipping all bits in the range, the cardinality should be zero
+      bits.flip(0,upper-lower+1);
+      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
+    }
+  }
+  
+  /** LUCENE-2541: LegacyNumericRangeQuery errors with endpoints near long min and max values */
+  public void testLongExtremeValues() throws Exception {
+    // upper end extremes
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 1, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 2, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 6, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 8, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 64, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+
+    assertLongRangeSplit(Long.MAX_VALUE-0xfL, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xfffffffffffffffL,0xfffffffffffffffL
+    ), Arrays.asList(
+      4
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE-0x10L, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xffffffffffffffefL,0xffffffffffffffefL,
+      0xfffffffffffffffL,0xfffffffffffffffL
+    ), Arrays.asList(
+      0, 4
+    ));
+
+    // lower end extremes
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 1, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 2, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 4, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 6, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 8, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 64, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0xfL, 4, true, Arrays.asList(
+      0x000000000000000L,0x000000000000000L
+    ), Arrays.asList(
+      4
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0x10L, 4, true, Arrays.asList(
+      0x0000000000000010L,0x0000000000000010L,
+      0x000000000000000L,0x000000000000000L
+    ), Arrays.asList(
+      0, 4
+    ));
+  }
+  
+  public void testRandomSplit() throws Exception {
+    long num = (long) atLeast(10);
+    for (long i=0; i < num; i++) {
+      executeOneRandomSplit(random());
+    }
+  }
+  
+  private void executeOneRandomSplit(final Random random) throws Exception {
+    long lower = randomLong(random);
+    long len = random.nextInt(16384*1024); // not too large bitsets, else OOME!
+    while (lower + len < lower) { // overflow
+      lower >>= 1;
+    }
+    assertLongRangeSplit(lower, lower + len, random.nextInt(64) + 1, true, null, null);
+  }
+  
+  private long randomLong(final Random random) {
+    long val;
+    switch(random.nextInt(4)) {
+      case 0:
+        val = 1L << (random.nextInt(63)); //  patterns like 0x000000100000 (-1 yields patterns like 0x0000fff)
+        break;
+      case 1:
+        val = -1L << (random.nextInt(63)); // patterns like 0xfffff00000
+        break;
+      default:
+        val = random.nextLong();
+    }
+
+    val += random.nextInt(5)-2;
+
+    if (random.nextBoolean()) {
+      if (random.nextBoolean()) val += random.nextInt(100)-50;
+      if (random.nextBoolean()) val = ~val;
+      if (random.nextBoolean()) val = val<<1;
+      if (random.nextBoolean()) val = val>>>1;
+    }
+
+    return val;
+  }
+  
+  public void testSplitLongRange() throws Exception {
+    // a hard-coded "standard" range
+    assertLongRangeSplit(-5000L, 9500L, 4, true, Arrays.asList(
+      0x7fffffffffffec78L,0x7fffffffffffec7fL,
+      0x8000000000002510L,0x800000000000251cL,
+      0x7fffffffffffec8L, 0x7fffffffffffecfL,
+      0x800000000000250L, 0x800000000000250L,
+      0x7fffffffffffedL,  0x7fffffffffffefL,
+      0x80000000000020L,  0x80000000000024L,
+      0x7ffffffffffffL,   0x8000000000001L
+    ), Arrays.asList(
+      0, 0,
+      4, 4,
+      8, 8,
+      12
+    ));
+    
+    // the same with no range splitting
+    assertLongRangeSplit(-5000L, 9500L, 64, true, Arrays.asList(
+      0x7fffffffffffec78L,0x800000000000251cL
+    ), Arrays.asList(
+      0
+    ));
+    
+    // this tests optimized range splitting, if one of the inner bounds
+    // is also the bound of the next lower precision, it should be used completely
+    assertLongRangeSplit(0L, 1024L+63L, 4, true, Arrays.asList(
+      0x800000000000040L, 0x800000000000043L,
+      0x80000000000000L,  0x80000000000003L
+    ), Arrays.asList(
+      4, 8
+    ));
+    
+    // the full long range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 8, false, Arrays.asList(
+      0x00L,0xffL
+    ), Arrays.asList(
+      56
+    ));
+
+    // the same with precisionStep=4
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 4, false, Arrays.asList(
+      0x0L,0xfL
+    ), Arrays.asList(
+      60
+    ));
+
+    // the same with precisionStep=2
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 2, false, Arrays.asList(
+      0x0L,0x3L
+    ), Arrays.asList(
+      62
+    ));
+
+    // the same with precisionStep=1
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 1, false, Arrays.asList(
+      0x0L,0x1L
+    ), Arrays.asList(
+      63
+    ));
+
+    // an inverse range should produce no sub-ranges
+    assertLongRangeSplit(9500L, -5000L, 4, false, Collections.<Long>emptyList(), Collections.<Integer>emptyList());    
+
+    // a 0-length range should reproduce the range itself
+    assertLongRangeSplit(9500L, 9500L, 4, false, Arrays.asList(
+      0x800000000000251cL,0x800000000000251cL
+    ), Arrays.asList(
+      0
+    ));
+  }
+
+  /** Note: The neededBounds Iterable must contain unsigned values (this makes it easier to understand what's happening) */
+  private void assertIntRangeSplit(final int lower, final int upper, int precisionStep,
+    final boolean useBitSet, final Iterable<Integer> expectedBounds, final Iterable<Integer> expectedShifts
+  ) {
+    final FixedBitSet bits=useBitSet ? new FixedBitSet(upper-lower+1) : null;
+    final Iterator<Integer> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
+    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
+    
+    LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
+      @Override
+      public void addRange(int min, int max, int shift) {
+        assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
+        if (useBitSet) for (int i = min; i <= max; i++) {
+          assertFalse("ranges should not overlap", bits.getAndSet(i - lower));
+          // extra exit condition to prevent overflow on MAX_VALUE
+          if (i == max) break;
+        }
+        if (neededBounds == null || neededShifts == null)
+          return;
+        // make unsigned ints for easier display and understanding
+        min ^= 0x80000000;
+        max ^= 0x80000000;
+        //System.out.println("0x"+Integer.toHexString(min>>>shift)+",0x"+Integer.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
+        assertEquals("shift", neededShifts.next().intValue(), shift);
+        assertEquals("inner min bound", neededBounds.next().intValue(), min >>> shift);
+        assertEquals("inner max bound", neededBounds.next().intValue(), max >>> shift);
+      }
+    }, precisionStep, lower, upper);
+    
+    if (useBitSet) {
+      // after flipping all bits in the range, the cardinality should be zero
+      bits.flip(0, upper-lower+1);
+      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
+    }
+  }
+  
+  public void testSplitIntRange() throws Exception {
+    // a hard-coded "standard" range
+    assertIntRangeSplit(-5000, 9500, 4, true, Arrays.asList(
+      0x7fffec78,0x7fffec7f,
+      0x80002510,0x8000251c,
+      0x7fffec8, 0x7fffecf,
+      0x8000250, 0x8000250,
+      0x7fffed,  0x7fffef,
+      0x800020,  0x800024,
+      0x7ffff,   0x80001
+    ), Arrays.asList(
+      0, 0,
+      4, 4,
+      8, 8,
+      12
+    ));
+    
+    // the same with no range splitting
+    assertIntRangeSplit(-5000, 9500, 32, true, Arrays.asList(
+      0x7fffec78,0x8000251c
+    ), Arrays.asList(
+      0
+    ));
+    
+    // this tests optimized range splitting, if one of the inner bounds
+    // is also the bound of the next lower precision, it should be used completely
+    assertIntRangeSplit(0, 1024+63, 4, true, Arrays.asList(
+      0x8000040, 0x8000043,
+      0x800000,  0x800003
+    ), Arrays.asList(
+      4, 8
+    ));
+    
+    // the full int range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 8, false, Arrays.asList(
+      0x00,0xff
+    ), Arrays.asList(
+      24
+    ));
+
+    // the same with precisionStep=4
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 4, false, Arrays.asList(
+      0x0,0xf
+    ), Arrays.asList(
+      28
+    ));
+
+    // the same with precisionStep=2
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 2, false, Arrays.asList(
+      0x0,0x3
+    ), Arrays.asList(
+      30
+    ));
+
+    // the same with precisionStep=1
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 1, false, Arrays.asList(
+      0x0,0x1
+    ), Arrays.asList(
+      31
+    ));
+
+    // an inverse range should produce no sub-ranges
+    assertIntRangeSplit(9500, -5000, 4, false, Collections.<Integer>emptyList(), Collections.<Integer>emptyList());    
+
+    // a 0-length range should reproduce the range itself
+    assertIntRangeSplit(9500, 9500, 4, false, Arrays.asList(
+      0x8000251c,0x8000251c
+    ), Arrays.asList(
+      0
+    ));
+  }
+
+}
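
A minimal standalone sketch of the prefix-coding round trip the tests above exercise
(the class name is hypothetical; only LegacyNumericUtils methods that appear in this
patch are assumed):

  import org.apache.lucene.util.BytesRefBuilder;
  import org.apache.solr.legacy.LegacyNumericUtils;

  public class PrefixCodedSketch {
    public static void main(String[] args) {
      int value = 9500;
      int shift = 4;
      BytesRefBuilder bytes = new BytesRefBuilder();
      // encode the value with its lowest `shift` bits removed
      LegacyNumericUtils.intToPrefixCoded(value, shift, bytes);
      int decoded = LegacyNumericUtils.prefixCodedToInt(bytes.get());
      int mask = (1 << shift) - 1;
      // the decoded value differs from the original by exactly the removed low bits
      System.out.println((value & mask) == (value - decoded)); // true
    }
  }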

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/legacy/TestLegacyTerms.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/legacy/TestLegacyTerms.java b/solr/core/src/test/org/apache/solr/legacy/TestLegacyTerms.java
new file mode 100644
index 0000000..d91ba88
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/legacy/TestLegacyTerms.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.solr.legacy.LegacyDoubleField;
+import org.apache.solr.legacy.LegacyFloatField;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyLongField;
+import org.apache.solr.legacy.LegacyNumericUtils;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+
+public class TestLegacyTerms extends LuceneTestCase {
+
+  public void testEmptyIntFieldMinMax() throws Exception {
+    assertNull(LegacyNumericUtils.getMinInt(EMPTY_TERMS));
+    assertNull(LegacyNumericUtils.getMaxInt(EMPTY_TERMS));
+  }
+  
+  public void testIntFieldMinMax() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int numDocs = atLeast(100);
+    int minValue = Integer.MAX_VALUE;
+    int maxValue = Integer.MIN_VALUE;
+    for(int i=0;i<numDocs;i++ ){
+      Document doc = new Document();
+      int num = random().nextInt();
+      minValue = Math.min(num, minValue);
+      maxValue = Math.max(num, maxValue);
+      doc.add(new LegacyIntField("field", num, Field.Store.NO));
+      w.addDocument(doc);
+    }
+    
+    IndexReader r = w.getReader();
+    Terms terms = MultiFields.getTerms(r, "field");
+    assertEquals(Integer.valueOf(minValue), LegacyNumericUtils.getMinInt(terms));
+    assertEquals(Integer.valueOf(maxValue), LegacyNumericUtils.getMaxInt(terms));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testEmptyLongFieldMinMax() throws Exception {
+    assertNull(LegacyNumericUtils.getMinLong(EMPTY_TERMS));
+    assertNull(LegacyNumericUtils.getMaxLong(EMPTY_TERMS));
+  }
+  
+  public void testLongFieldMinMax() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int numDocs = atLeast(100);
+    long minValue = Long.MAX_VALUE;
+    long maxValue = Long.MIN_VALUE;
+    for(int i=0;i<numDocs;i++ ){
+      Document doc = new Document();
+      long num = random().nextLong();
+      minValue = Math.min(num, minValue);
+      maxValue = Math.max(num, maxValue);
+      doc.add(new LegacyLongField("field", num, Field.Store.NO));
+      w.addDocument(doc);
+    }
+    
+    IndexReader r = w.getReader();
+
+    Terms terms = MultiFields.getTerms(r, "field");
+    assertEquals(Long.valueOf(minValue), LegacyNumericUtils.getMinLong(terms));
+    assertEquals(Long.valueOf(maxValue), LegacyNumericUtils.getMaxLong(terms));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testFloatFieldMinMax() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int numDocs = atLeast(100);
+    float minValue = Float.POSITIVE_INFINITY;
+    float maxValue = Float.NEGATIVE_INFINITY;
+    for(int i=0;i<numDocs;i++ ){
+      Document doc = new Document();
+      float num = random().nextFloat();
+      minValue = Math.min(num, minValue);
+      maxValue = Math.max(num, maxValue);
+      doc.add(new LegacyFloatField("field", num, Field.Store.NO));
+      w.addDocument(doc);
+    }
+    
+    IndexReader r = w.getReader();
+    Terms terms = MultiFields.getTerms(r, "field");
+    assertEquals(minValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms)), 0.0f);
+    assertEquals(maxValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms)), 0.0f);
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testDoubleFieldMinMax() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int numDocs = atLeast(100);
+    double minValue = Double.POSITIVE_INFINITY;
+    double maxValue = Double.NEGATIVE_INFINITY;
+    for(int i=0;i<numDocs;i++ ){
+      Document doc = new Document();
+      double num = random().nextDouble();
+      minValue = Math.min(num, minValue);
+      maxValue = Math.max(num, maxValue);
+      doc.add(new LegacyDoubleField("field", num, Field.Store.NO));
+      w.addDocument(doc);
+    }
+    
+    IndexReader r = w.getReader();
+
+    Terms terms = MultiFields.getTerms(r, "field");
+
+    assertEquals(minValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)), 0.0);
+    assertEquals(maxValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)), 0.0);
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  /**
+   * A completely empty Terms instance that has no terms in it and supports no optional statistics
+   */
+  private static Terms EMPTY_TERMS = new Terms() {
+    public TermsEnum iterator() { return TermsEnum.EMPTY; }
+    public long size() { return -1; }
+    public long getSumTotalTermFreq() { return -1; }
+    public long getSumDocFreq() { return -1; }
+    public int getDocCount() { return -1; }
+    public boolean hasFreqs() { return false; }
+    public boolean hasOffsets() { return false; }
+    public boolean hasPositions() { return false; }
+    public boolean hasPayloads() { return false; }
+  };
+}
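
As a side note to the float/double min/max tests above: term-level min/max come back
in the sortable-int encoding and must be converted, which a tiny sketch can show
(hypothetical class name; only NumericUtils methods used in this patch are assumed):

  import org.apache.lucene.util.NumericUtils;

  public class SortableFloatSketch {
    public static void main(String[] args) {
      int lo = NumericUtils.floatToSortableInt(-1.5f);
      int hi = NumericUtils.floatToSortableInt(2.5f);
      // the int encoding preserves float ordering, so getMinInt/getMaxInt stay meaningful
      System.out.println(lo < hi); // true
      System.out.println(NumericUtils.sortableIntToFloat(lo)); // -1.5
    }
  }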

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/legacy/TestMultiValuedNumericRangeQuery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/legacy/TestMultiValuedNumericRangeQuery.java b/solr/core/src/test/org/apache/solr/legacy/TestMultiValuedNumericRangeQuery.java
new file mode 100644
index 0000000..80b1524
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/legacy/TestMultiValuedNumericRangeQuery.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import java.util.Locale;
+import java.text.DecimalFormat;
+import java.text.DecimalFormatSymbols;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
+
+  /** Tests LegacyNumericRangeQuery on a multi-valued field (multiple numeric values per document).
+   * This test ensures that a classical TermRangeQuery returns exactly the same document numbers as
+   * LegacyNumericRangeQuery (see SOLR-1322 for discussion) and that the multiple precision terms per numeric
+   * value do not interfere with multiple numeric values.
+   */
+  public void testMultiValuedNRQ() throws Exception {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+        newIndexWriterConfig(new MockAnalyzer(random()))
+        .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
+    
+    DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.ROOT));
+    
+    int num = atLeast(500);
+    for (int l = 0; l < num; l++) {
+      Document doc = new Document();
+      for (int m=0, c=random().nextInt(10); m<=c; m++) {
+        int value = random().nextInt(Integer.MAX_VALUE);
+        doc.add(newStringField("asc", format.format(value), Field.Store.NO));
+        doc.add(new LegacyIntField("trie", value, Field.Store.NO));
+      }
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher searcher=newSearcher(reader);
+    num = atLeast(50);
+    for (int i = 0; i < num; i++) {
+      int lower=random().nextInt(Integer.MAX_VALUE);
+      int upper=random().nextInt(Integer.MAX_VALUE);
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
+      TermRangeQuery cq=TermRangeQuery.newStringRange("asc", format.format(lower), format.format(upper), true, true);
+      LegacyNumericRangeQuery<Integer> tq= LegacyNumericRangeQuery.newIntRange("trie", lower, upper, true, true);
+      TopDocs trTopDocs = searcher.search(cq, 1);
+      TopDocs nrTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
+    }
+    reader.close();
+    directory.close();
+  }
+  
+}
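
The oracle in this test relies on zero-padding: fixed-width decimal strings sort
lexicographically in the same order as their numeric values, so TermRangeQuery over
the "asc" field must match LegacyNumericRangeQuery over "trie". A short sketch of
just that property (hypothetical class name, illustrative values):

  import java.text.DecimalFormat;
  import java.text.DecimalFormatSymbols;
  import java.util.Locale;

  public class PaddedFormatSketch {
    public static void main(String[] args) {
      DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.ROOT));
      String a = format.format(42);   // "00000000042"
      String b = format.format(1000); // "00000001000"
      // lexicographic order equals numeric order thanks to the fixed width
      System.out.println(a.compareTo(b) < 0); // true
    }
  }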

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/legacy/TestNumericRangeQuery32.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/legacy/TestNumericRangeQuery32.java b/solr/core/src/test/org/apache/solr/legacy/TestNumericRangeQuery32.java
new file mode 100644
index 0000000..5c02913
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/legacy/TestNumericRangeQuery32.java
@@ -0,0 +1,461 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestNumericRangeQuery32 extends LuceneTestCase {
+  // distance of entries
+  private static int distance;
+  // shift the starting of the values to the left, to also have negative values:
+  private static final int startOffset = - 1 << 15;
+  // number of docs to generate for testing
+  private static int noDocs;
+  
+  private static Directory directory = null;
+  private static IndexReader reader = null;
+  private static IndexSearcher searcher = null;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    noDocs = atLeast(4096);
+    distance = (1 << 30) / noDocs;
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+        newIndexWriterConfig(new MockAnalyzer(random()))
+        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
+        .setMergePolicy(newLogMergePolicy()));
+    
+    final LegacyFieldType storedInt = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
+    storedInt.setStored(true);
+    storedInt.freeze();
+
+    final LegacyFieldType storedInt8 = new LegacyFieldType(storedInt);
+    storedInt8.setNumericPrecisionStep(8);
+
+    final LegacyFieldType storedInt4 = new LegacyFieldType(storedInt);
+    storedInt4.setNumericPrecisionStep(4);
+
+    final LegacyFieldType storedInt2 = new LegacyFieldType(storedInt);
+    storedInt2.setNumericPrecisionStep(2);
+
+    final LegacyFieldType storedIntNone = new LegacyFieldType(storedInt);
+    storedIntNone.setNumericPrecisionStep(Integer.MAX_VALUE);
+
+    final LegacyFieldType unstoredInt = LegacyIntField.TYPE_NOT_STORED;
+
+    final LegacyFieldType unstoredInt8 = new LegacyFieldType(unstoredInt);
+    unstoredInt8.setNumericPrecisionStep(8);
+
+    final LegacyFieldType unstoredInt4 = new LegacyFieldType(unstoredInt);
+    unstoredInt4.setNumericPrecisionStep(4);
+
+    final LegacyFieldType unstoredInt2 = new LegacyFieldType(unstoredInt);
+    unstoredInt2.setNumericPrecisionStep(2);
+
+    LegacyIntField
+      field8 = new LegacyIntField("field8", 0, storedInt8),
+      field4 = new LegacyIntField("field4", 0, storedInt4),
+      field2 = new LegacyIntField("field2", 0, storedInt2),
+      fieldNoTrie = new LegacyIntField("field"+Integer.MAX_VALUE, 0, storedIntNone),
+      ascfield8 = new LegacyIntField("ascfield8", 0, unstoredInt8),
+      ascfield4 = new LegacyIntField("ascfield4", 0, unstoredInt4),
+      ascfield2 = new LegacyIntField("ascfield2", 0, unstoredInt2);
+    
+    Document doc = new Document();
+    // add fields that have a distance, to test general functionality
+    doc.add(field8); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
+    // add ascending fields with a distance of 1, beginning at -noDocs/2, to test correct range splitting and inclusive/exclusive bounds
+    doc.add(ascfield8); doc.add(ascfield4); doc.add(ascfield2);
+    
+    // Add a series of noDocs docs with increasing int values
+    for (int l=0; l<noDocs; l++) {
+      int val=distance*l+startOffset;
+      field8.setIntValue(val);
+      field4.setIntValue(val);
+      field2.setIntValue(val);
+      fieldNoTrie.setIntValue(val);
+
+      val=l-(noDocs/2);
+      ascfield8.setIntValue(val);
+      ascfield4.setIntValue(val);
+      ascfield2.setIntValue(val);
+      writer.addDocument(doc);
+    }
+  
+    reader = writer.getReader();
+    searcher=newSearcher(reader);
+    writer.close();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // set the theoretical maximum term count for 8bit (see docs for the number)
+    // super.tearDown will restore the default
+    BooleanQuery.setMaxClauseCount(3*255*2 + 255);
+  }
+  
+  /** test for both constant score and boolean query, the other tests only use the constant score mode */
+  private void testRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
+    LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+    for (byte i=0; i<2; i++) {
+      TopDocs topDocs;
+      String type;
+      switch (i) {
+        case 0:
+          type = " (constant score filter rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
+          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+          break;
+        case 1:
+          type = " (constant score boolean rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
+          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+          break;
+        default:
+          return;
+      }
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      assertEquals("Score doc count"+type, count, sd.length );
+      Document doc=searcher.doc(sd[0].doc);
+      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().intValue());
+      doc=searcher.doc(sd[sd.length-1].doc);
+      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().intValue());
+    }
+  }
+
+  @Test
+  public void testRange_8bit() throws Exception {
+    testRange(8);
+  }
+  
+  @Test
+  public void testRange_4bit() throws Exception {
+    testRange(4);
+  }
+  
+  @Test
+  public void testRange_2bit() throws Exception {
+    testRange(2);
+  }
+  
+  @Test
+  public void testOneMatchQuery() throws Exception {
+    LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", 1, sd.length );
+  }
+  
+  private void testLeftOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int upper=(count-1)*distance + (distance/3) + startOffset;
+    LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
+    
+    q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
+    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
+  }
+  
+  @Test
+  public void testLeftOpenRange_8bit() throws Exception {
+    testLeftOpenRange(8);
+  }
+  
+  @Test
+  public void testLeftOpenRange_4bit() throws Exception {
+    testLeftOpenRange(4);
+  }
+  
+  @Test
+  public void testLeftOpenRange_2bit() throws Exception {
+    testLeftOpenRange(2);
+  }
+  
+  private void testRightOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int lower=(count-1)*distance + (distance/3) +startOffset;
+    LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue());
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
+
+    q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
+    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue() );
+  }
+  
+  @Test
+  public void testRightOpenRange_8bit() throws Exception {
+    testRightOpenRange(8);
+  }
+  
+  @Test
+  public void testRightOpenRange_4bit() throws Exception {
+    testRightOpenRange(4);
+  }
+  
+  @Test
+  public void testRightOpenRange_2bit() throws Exception {
+    testRightOpenRange(2);
+  }
+  
+  @Test
+  public void testInfiniteValues() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+      newIndexWriterConfig(new MockAnalyzer(random())));
+    Document doc = new Document();
+    doc.add(new LegacyFloatField("float", Float.NEGATIVE_INFINITY, Field.Store.NO));
+    doc.add(new LegacyIntField("int", Integer.MIN_VALUE, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LegacyFloatField("float", Float.POSITIVE_INFINITY, Field.Store.NO));
+    doc.add(new LegacyIntField("int", Integer.MAX_VALUE, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LegacyFloatField("float", 0.0f, Field.Store.NO));
+    doc.add(new LegacyIntField("int", 0, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    for (float f : TestLegacyNumericUtils.FLOAT_NANs) {
+      doc = new Document();
+      doc.add(new LegacyFloatField("float", f, Field.Store.NO));
+      writer.addDocument(doc);
+    }
+    
+    writer.close();
+    
+    IndexReader r = DirectoryReader.open(dir);
+    IndexSearcher s = newSearcher(r);
+    
+    Query q= LegacyNumericRangeQuery.newIntRange("int", null, null, true, true);
+    TopDocs topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q= LegacyNumericRangeQuery.newIntRange("int", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", null, null, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NaN, Float.NaN, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", TestLegacyNumericUtils.FLOAT_NANs.length,  topDocs.scoreDocs.length );
+
+    r.close();
+    dir.close();
+  }
+  
+  private void testRangeSplit(int precisionStep) throws Exception {
+    String field="ascfield"+precisionStep;
+    // 10 random tests
+    int num = TestUtil.nextInt(random(), 10, 20);
+    for (int i = 0; i < num; i++) {
+      int lower=(int)(random().nextDouble()*noDocs - noDocs/2);
+      int upper=(int)(random().nextDouble()*noDocs - noDocs/2);
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
+      // test inclusive range
+      Query tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+      TopDocs tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+      // test exclusive range
+      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
+      // test left exclusive range
+      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+      // test right exclusive range
+      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+    }
+  }
+
+  @Test
+  public void testRangeSplit_8bit() throws Exception {
+    testRangeSplit(8);
+  }
+  
+  @Test
+  public void testRangeSplit_4bit() throws Exception {
+    testRangeSplit(4);
+  }
+  
+  @Test
+  public void testRangeSplit_2bit() throws Exception {
+    testRangeSplit(2);
+  }
+  
+  /** we fake a float test using the sortable int-to-float conversion of NumericUtils */
+  private void testFloatRange(int precisionStep) throws Exception {
+    final String field="ascfield"+precisionStep;
+    final int lower=-1000, upper=+2000;
+    
+    Query tq= LegacyNumericRangeQuery.newFloatRange(field, precisionStep,
+        NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
+    TopDocs tTopDocs = searcher.search(tq, 1);
+    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+  }
+
+  @Test
+  public void testFloatRange_8bit() throws Exception {
+    testFloatRange(8);
+  }
+  
+  @Test
+  public void testFloatRange_4bit() throws Exception {
+    testFloatRange(4);
+  }
+  
+  @Test
+  public void testFloatRange_2bit() throws Exception {
+    testFloatRange(2);
+  }
+  
+  @Test
+  public void testEqualsAndHash() throws Exception {
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test2", 4, 10, 20, false, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test3", 4, 10, 20, true, false));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test4", 4, 10, 20, false, false));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test5", 4, 10, null, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test6", 4, null, 20, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test7", 4, null, null, true, true));
+    QueryUtils.checkEqual(
+      LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test9", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test9", 8, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test10a", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test10b", 4, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test11", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test11", 4, 20, 10, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, false, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test13", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
+    );
+    // the following produces a hash collision, because an Integer and a Long of the same value can have the same hashCode, so only test equality:
+    Query q1 = LegacyNumericRangeQuery.newIntRange("test14", 4, 10, 20, true, true);
+    Query q2 = LegacyNumericRangeQuery.newLongRange("test14", 4, 10L, 20L, true, true);
+    assertFalse(q1.equals(q2));
+    assertFalse(q2.equals(q1));
+  }
+  
+}
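
The hash-collision remark at the end of testEqualsAndHash is easy to verify directly:
for small non-negative values, boxed Integer and Long hash identically (a quick
sketch, not part of the patch):

  public class HashCollisionSketch {
    public static void main(String[] args) {
      // Integer.hashCode(v) == v, and Long.hashCode(v) == (int)(v ^ (v >>> 32)),
      // which also collapses to v for small non-negative values such as 10
      System.out.println(Integer.valueOf(10).hashCode()); // 10
      System.out.println(Long.valueOf(10L).hashCode());   // 10
    }
  }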


[04/25] lucene-solr:jira/solr-8668: SOLR-10754: Add hist Stream Evaluator

Posted by cp...@apache.org.
SOLR-10754: Add hist Stream Evaluator


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/d1436c48
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/d1436c48
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/d1436c48

Branch: refs/heads/jira/solr-8668
Commit: d1436c48230795ed77611466dce6bf7ab850442d
Parents: 3e70745
Author: Joel Bernstein <jb...@apache.org>
Authored: Fri May 26 13:41:35 2017 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Fri May 26 13:41:35 2017 -0400

----------------------------------------------------------------------
 .../org/apache/solr/handler/StreamHandler.java  |  1 +
 .../solrj/io/eval/HistogramEvaluator.java       | 98 ++++++++++++++++++++
 .../solrj/io/stream/StreamExpressionTest.java   | 51 +++++++++-
 3 files changed, 148 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d1436c48/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index cbb9910..f5ccbc8 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -191,6 +191,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
       .withFunctionName("finddelay", FindDelayEvaluator.class)
       .withFunctionName("sequence", SequenceEvaluator.class)
       .withFunctionName("array", ArrayEvaluator.class)
+      .withFunctionName("hist", HistogramEvaluator.class)
 
       // metrics
          .withFunctionName("min", MinMetric.class)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d1436c48/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HistogramEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HistogramEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HistogramEvaluator.java
new file mode 100644
index 0000000..c691698
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HistogramEvaluator.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.math3.random.EmpiricalDistribution;
+import org.apache.commons.math3.stat.descriptive.SummaryStatistics;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
+import org.apache.solr.client.solrj.io.stream.expr.Expressible;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class HistogramEvaluator extends ComplexEvaluator implements Expressible {
+
+  private static final long serialVersionUID = 1;
+
+  public HistogramEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  public List<Map> evaluate(Tuple tuple) throws IOException {
+
+    StreamEvaluator colEval1 = subEvaluators.get(0);
+
+    List<Number> numbers1 = (List<Number>)colEval1.evaluate(tuple);
+    double[] column1 = new double[numbers1.size()];
+
+    for(int i=0; i<numbers1.size(); i++) {
+      column1[i] = numbers1.get(i).doubleValue();
+    }
+
+    int bins = 10;
+    if(subEvaluators.size() == 2) {
+      StreamEvaluator binsEval = subEvaluators.get(1);
+      Number binsNum = (Number) binsEval.evaluate(tuple);
+      bins = binsNum.intValue();
+    }
+
+    EmpiricalDistribution empiricalDistribution = new EmpiricalDistribution(bins);
+    empiricalDistribution.load(column1);
+
+    List<Map> binList = new ArrayList();
+
+    List<SummaryStatistics> summaries = empiricalDistribution.getBinStats();
+    for(SummaryStatistics statisticalSummary : summaries) {
+      Map map = new HashMap<>();
+      map.put("max", statisticalSummary.getMax());
+      map.put("mean", statisticalSummary.getMean());
+      map.put("min", statisticalSummary.getMin());
+      map.put("stdev", statisticalSummary.getStandardDeviation());
+      map.put("sum", statisticalSummary.getSum());
+      map.put("N", statisticalSummary.getN());
+      map.put("var", statisticalSummary.getVariance());
+      binList.add(map);
+    }
+
+    return binList;
+  }
+
+
+
+  @Override
+  public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
+    StreamExpression expression = new StreamExpression(factory.getFunctionName(getClass()));
+    return expression;
+  }
+
+  @Override
+  public Explanation toExplanation(StreamFactory factory) throws IOException {
+    return new Explanation(nodeId.toString())
+        .withExpressionType(ExpressionType.EVALUATOR)
+        .withFunctionName(factory.getFunctionName(getClass()))
+        .withImplementingClass(getClass().getName())
+        .withExpression(toExpression(factory).toString());
+  }
+}
\ No newline at end of file
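
HistogramEvaluator delegates all binning to commons-math3; the core of what hist()
computes can be sketched standalone (assuming only the commons-math3 classes already
imported above; data chosen to mirror the sequence(100, 0, 1) test below):

  import org.apache.commons.math3.random.EmpiricalDistribution;
  import org.apache.commons.math3.stat.descriptive.SummaryStatistics;

  public class HistSketch {
    public static void main(String[] args) {
      double[] data = new double[100];
      for (int i = 0; i < data.length; i++) {
        data[i] = i; // the values 0..99
      }
      EmpiricalDistribution dist = new EmpiricalDistribution(10); // 10 bins
      dist.load(data);
      // each bin carries per-bin summary statistics, which hist() maps into tuples
      for (SummaryStatistics bin : dist.getBinStats()) {
        System.out.println("N=" + bin.getN() + " min=" + bin.getMin() + " mean=" + bin.getMean());
      }
    }
  }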

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/d1436c48/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index c570d95..6c15197 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -5227,6 +5227,53 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     }
   }
 
+  @Test
+  public void testHist() throws Exception {
+    String expr = "hist(sequence(100, 0, 1), 10)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", expr);
+    paramsLoc.set("qt", "/stream");
+
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Map> hist = (List<Map>)tuples.get(0).get("return-value");
+    assertTrue(hist.size() == 10);
+    for(int i=0; i<hist.size(); i++) {
+      Map stats = hist.get(i);
+      assertTrue(((Number)stats.get("N")).intValue() == 10);
+      assertTrue(((Number)stats.get("min")).intValue() == 10*i);
+      assertTrue(((Number)stats.get("var")).doubleValue() == 9.166666666666666);
+      assertTrue(((Number)stats.get("stdev")).doubleValue() == 3.0276503540974917);
+    }
+
+    expr = "hist(sequence(100, 0, 1), 5)";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", expr);
+    paramsLoc.set("qt", "/stream");
+
+    solrStream = new SolrStream(url, paramsLoc);
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    hist = (List<Map>)tuples.get(0).get("return-value");
+    assertTrue(hist.size() == 5);
+    for(int i=0; i<hist.size(); i++) {
+      Map stats = hist.get(i);
+      assertTrue(((Number)stats.get("N")).intValue() == 20);
+      assertTrue(((Number)stats.get("min")).intValue() == 20*i);
+      assertTrue(((Number)stats.get("var")).doubleValue() == 35);
+      assertTrue(((Number)stats.get("stdev")).doubleValue() == 5.916079783099616);
+    }
+  }
+
+
+
+
 
 
   @Test
@@ -5747,7 +5794,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     solrStream.setStreamContext(context);
     List<Tuple> tuples = getTuples(solrStream);
     assertTrue(tuples.size() == 1);
-    List<Number> out = (List<Number>)tuples.get(0).get("out");
+    List<Number> out = (List<Number>)tuples.get(0).get("return-value");
     assertTrue(out.size() == 6);
     assertTrue(out.get(0).intValue() == 1);
     assertTrue(out.get(1).intValue() == 2);
@@ -5764,7 +5811,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     solrStream.setStreamContext(context);
     tuples = getTuples(solrStream);
     assertTrue(tuples.size() == 1);
-    out = (List<Number>)tuples.get(0).get("out");
+    out = (List<Number>)tuples.get(0).get("return-value");
     assertTrue(out.size() == 6);
     assertTrue(out.get(0).doubleValue() == 1.122D);
     assertTrue(out.get(1).doubleValue() == 2.222D);


[17/25] lucene-solr:jira/solr-8668: SOLR-10719: Creating a core.properties fails if the parent of core.properties is a symlinked directory

Posted by cp...@apache.org.
SOLR-10719: Creating a core.properties fails if the parent of core.properties is a symlinked directory


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/412e4ae2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/412e4ae2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/412e4ae2

Branch: refs/heads/jira/solr-8668
Commit: 412e4ae2c192a5b444dd63220e5918f4e7fd47be
Parents: 963f43f
Author: Erick Erickson <er...@apache.org>
Authored: Mon May 29 20:05:03 2017 -0700
Committer: Erick Erickson <er...@apache.org>
Committed: Mon May 29 20:08:44 2017 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  3 +++
 .../apache/solr/core/CorePropertiesLocator.java |  5 ++++-
 .../java/org/apache/solr/util/FileUtils.java    | 20 ++++++++++++++++++++
 3 files changed, 27 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/412e4ae2/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c413cf8..3661037 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -241,6 +241,9 @@ Bug Fixes
 * SOLR-10723 JSON Facet API: resize() implemented incorrectly for CountSlotAcc, HllAgg.NumericAcc
   resulting in exceptions when using a hashing faceting method and sorting by hll(numeric_field).
   (yonik)
+  
+* SOLR-10719: Creating a core.properties fails if the parent of core.properties is a symlinked directory
+  (Erick Erickson)
 
 Optimizations
 ----------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/412e4ae2/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java b/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java
index e942c9b..99c101b 100644
--- a/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java
+++ b/solr/core/src/java/org/apache/solr/core/CorePropertiesLocator.java
@@ -39,6 +39,7 @@ import java.util.stream.Collectors;
 
 import com.google.common.collect.Lists;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.util.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,13 +86,15 @@ public class CorePropertiesLocator implements CoresLocator {
   private void writePropertiesFile(CoreDescriptor cd, Path propfile)  {
     Properties p = buildCoreProperties(cd);
     try {
-      Files.createDirectories(propfile.getParent());
+      FileUtils.createDirectories(propfile.getParent()); // Handling for symlinks.
       try (Writer os = new OutputStreamWriter(Files.newOutputStream(propfile), StandardCharsets.UTF_8)) {
         p.store(os, "Written by CorePropertiesLocator");
       }
     }
     catch (IOException e) {
       logger.error("Couldn't persist core properties to {}: {}", propfile, e.getMessage());
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
+          "Couldn't persist core properties to " + propfile.toAbsolutePath().toString() + " : " + e.getMessage());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/412e4ae2/solr/core/src/java/org/apache/solr/util/FileUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/FileUtils.java b/solr/core/src/java/org/apache/solr/util/FileUtils.java
index 09db4f0..2046262 100644
--- a/solr/core/src/java/org/apache/solr/util/FileUtils.java
+++ b/solr/core/src/java/org/apache/solr/util/FileUtils.java
@@ -18,6 +18,10 @@ package org.apache.solr.util;
 
 import java.io.*;
 import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import org.apache.commons.io.FileExistsException;
 
 /**
  *
@@ -96,4 +100,20 @@ public class FileUtils {
   public static boolean fileExists(String filePathString) {
     return new File(filePathString).exists();
   }
+
+  // Files.createDirectories has odd behavior if the path itself is a symlink that already
+  // exists, _even if it is a symlink to a directory_.
+  //
+  // Oddly, if the path to be created merely contains a symlink at an intermediate level,
+  // Files.createDirectories works just fine.
+  //
+  // This method works around that issue.
+  public static Path createDirectories(Path path) throws IOException {
+    if (Files.exists(path) && Files.isSymbolicLink(path)) {
+      Path real = path.toRealPath();
+      if (Files.isDirectory(real)) return real;
+      throw new FileExistsException("Tried to create a directory at an existing non-directory symlink: " + path.toString());
+    }
+    return Files.createDirectories(path);
+  }
 }
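
As a side note for readers of this patch, the behavior being worked around is easy to
reproduce. The following standalone sketch is hypothetical and not part of the commit;
it assumes a filesystem where symbolic links are supported:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class SymlinkCreateDirsDemo {
      public static void main(String[] args) throws IOException {
        Path target = Files.createTempDirectory("real-dir");    // an actual directory
        Path link = target.getParent().resolve("link-to-dir");
        Files.deleteIfExists(link);
        Files.createSymbolicLink(link, target);                 // link -> real-dir

        // Per the comment in the patch, Files.createDirectories(link) fails here even
        // though the link resolves to an existing directory.

        // The workaround from FileUtils.createDirectories: resolve the symlink first.
        if (Files.exists(link) && Files.isSymbolicLink(link)) {
          Path real = link.toRealPath();
          System.out.println("resolves to a directory: " + Files.isDirectory(real));
        }
      }
    }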


[18/25] lucene-solr:jira/solr-8668: LUCENE-7850: Move support for legacy numerics to solr/.

Posted by cp...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/legacy/TestNumericRangeQuery64.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/legacy/TestNumericRangeQuery64.java b/solr/core/src/test/org/apache/solr/legacy/TestNumericRangeQuery64.java
new file mode 100644
index 0000000..99d4261
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/legacy/TestNumericRangeQuery64.java
@@ -0,0 +1,490 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestNumericRangeQuery64 extends LuceneTestCase {
+  // distance of entries
+  private static long distance;
+  // shift the starting of the values to the left, to also have negative values:
+  private static final long startOffset = - 1L << 31;
+  // number of docs to generate for testing
+  private static int noDocs;
+  
+  private static Directory directory = null;
+  private static IndexReader reader = null;
+  private static IndexSearcher searcher = null;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    noDocs = atLeast(4096);
+    distance = (1L << 60) / noDocs;
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+        newIndexWriterConfig(new MockAnalyzer(random()))
+        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
+        .setMergePolicy(newLogMergePolicy()));
+
+    final LegacyFieldType storedLong = new LegacyFieldType(LegacyLongField.TYPE_NOT_STORED);
+    storedLong.setStored(true);
+    storedLong.freeze();
+
+    final LegacyFieldType storedLong8 = new LegacyFieldType(storedLong);
+    storedLong8.setNumericPrecisionStep(8);
+
+    final LegacyFieldType storedLong4 = new LegacyFieldType(storedLong);
+    storedLong4.setNumericPrecisionStep(4);
+
+    final LegacyFieldType storedLong6 = new LegacyFieldType(storedLong);
+    storedLong6.setNumericPrecisionStep(6);
+
+    final LegacyFieldType storedLong2 = new LegacyFieldType(storedLong);
+    storedLong2.setNumericPrecisionStep(2);
+
+    final LegacyFieldType storedLongNone = new LegacyFieldType(storedLong);
+    storedLongNone.setNumericPrecisionStep(Integer.MAX_VALUE);
+
+    final LegacyFieldType unstoredLong = LegacyLongField.TYPE_NOT_STORED;
+
+    final LegacyFieldType unstoredLong8 = new LegacyFieldType(unstoredLong);
+    unstoredLong8.setNumericPrecisionStep(8);
+
+    final LegacyFieldType unstoredLong6 = new LegacyFieldType(unstoredLong);
+    unstoredLong6.setNumericPrecisionStep(6);
+
+    final LegacyFieldType unstoredLong4 = new LegacyFieldType(unstoredLong);
+    unstoredLong4.setNumericPrecisionStep(4);
+
+    final LegacyFieldType unstoredLong2 = new LegacyFieldType(unstoredLong);
+    unstoredLong2.setNumericPrecisionStep(2);
+
+    LegacyLongField
+      field8 = new LegacyLongField("field8", 0L, storedLong8),
+      field6 = new LegacyLongField("field6", 0L, storedLong6),
+      field4 = new LegacyLongField("field4", 0L, storedLong4),
+      field2 = new LegacyLongField("field2", 0L, storedLong2),
+      fieldNoTrie = new LegacyLongField("field"+Integer.MAX_VALUE, 0L, storedLongNone),
+      ascfield8 = new LegacyLongField("ascfield8", 0L, unstoredLong8),
+      ascfield6 = new LegacyLongField("ascfield6", 0L, unstoredLong6),
+      ascfield4 = new LegacyLongField("ascfield4", 0L, unstoredLong4),
+      ascfield2 = new LegacyLongField("ascfield2", 0L, unstoredLong2);
+
+    Document doc = new Document();
+    // add fields, that have a distance to test general functionality
+    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
+    // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
+    doc.add(ascfield8); doc.add(ascfield6); doc.add(ascfield4); doc.add(ascfield2);
+    
+    // Add a series of noDocs docs with increasing long values, by updating the fields
+    for (int l=0; l<noDocs; l++) {
+      long val=distance*l+startOffset;
+      field8.setLongValue(val);
+      field6.setLongValue(val);
+      field4.setLongValue(val);
+      field2.setLongValue(val);
+      fieldNoTrie.setLongValue(val);
+
+      val=l-(noDocs/2);
+      ascfield8.setLongValue(val);
+      ascfield6.setLongValue(val);
+      ascfield4.setLongValue(val);
+      ascfield2.setLongValue(val);
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    searcher=newSearcher(reader);
+    writer.close();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // set the theoretical maximum term count for 8bit (see docs for the number)
+    // super.tearDown will restore the default
+    BooleanQuery.setMaxClauseCount(7*255*2 + 255);
+  }
+  
+  /** test for constant score + boolean query + filter; the other tests only use the constant score mode */
+  private void testRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
+    LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+    for (byte i=0; i<2; i++) {
+      TopDocs topDocs;
+      String type;
+      switch (i) {
+        case 0:
+          type = " (constant score filter rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
+          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+          break;
+        case 1:
+          type = " (constant score boolean rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
+          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+          break;
+        default:
+          return;
+      }
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      assertEquals("Score doc count"+type, count, sd.length );
+      Document doc=searcher.doc(sd[0].doc);
+      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().longValue() );
+      doc=searcher.doc(sd[sd.length-1].doc);
+      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+    }
+  }
+
+  @Test
+  public void testRange_8bit() throws Exception {
+    testRange(8);
+  }
+  
+  @Test
+  public void testRange_6bit() throws Exception {
+    testRange(6);
+  }
+  
+  @Test
+  public void testRange_4bit() throws Exception {
+    testRange(4);
+  }
+  
+  @Test
+  public void testRange_2bit() throws Exception {
+    testRange(2);
+  }
+  
+  @Test
+  public void testOneMatchQuery() throws Exception {
+    LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", 1, sd.length );
+  }
+  
+  private void testLeftOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long upper=(count-1)*distance + (distance/3) + startOffset;
+    LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+
+    q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
+    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+  }
+  
+  @Test
+  public void testLeftOpenRange_8bit() throws Exception {
+    testLeftOpenRange(8);
+  }
+  
+  @Test
+  public void testLeftOpenRange_6bit() throws Exception {
+    testLeftOpenRange(6);
+  }
+  
+  @Test
+  public void testLeftOpenRange_4bit() throws Exception {
+    testLeftOpenRange(4);
+  }
+  
+  @Test
+  public void testLeftOpenRange_2bit() throws Exception {
+    testLeftOpenRange(2);
+  }
+  
+  private void testRightOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long lower=(count-1)*distance + (distance/3) +startOffset;
+    LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+
+    q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
+    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+  }
+  
+  @Test
+  public void testRightOpenRange_8bit() throws Exception {
+    testRightOpenRange(8);
+  }
+  
+  @Test
+  public void testRightOpenRange_6bit() throws Exception {
+    testRightOpenRange(6);
+  }
+  
+  @Test
+  public void testRightOpenRange_4bit() throws Exception {
+    testRightOpenRange(4);
+  }
+  
+  @Test
+  public void testRightOpenRange_2bit() throws Exception {
+    testRightOpenRange(2);
+  }
+  
+  @Test
+  public void testInfiniteValues() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+      newIndexWriterConfig(new MockAnalyzer(random())));
+    Document doc = new Document();
+    doc.add(new LegacyDoubleField("double", Double.NEGATIVE_INFINITY, Field.Store.NO));
+    doc.add(new LegacyLongField("long", Long.MIN_VALUE, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LegacyDoubleField("double", Double.POSITIVE_INFINITY, Field.Store.NO));
+    doc.add(new LegacyLongField("long", Long.MAX_VALUE, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LegacyDoubleField("double", 0.0, Field.Store.NO));
+    doc.add(new LegacyLongField("long", 0L, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    for (double d : TestLegacyNumericUtils.DOUBLE_NANs) {
+      doc = new Document();
+      doc.add(new LegacyDoubleField("double", d, Field.Store.NO));
+      writer.addDocument(doc);
+    }
+    
+    writer.close();
+    
+    IndexReader r = DirectoryReader.open(dir);
+    IndexSearcher s = newSearcher(r);
+    
+    Query q= LegacyNumericRangeQuery.newLongRange("long", null, null, true, true);
+    TopDocs topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q= LegacyNumericRangeQuery.newLongRange("long", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NaN, Double.NaN, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", TestLegacyNumericUtils.DOUBLE_NANs.length,  topDocs.scoreDocs.length );
+
+    r.close();
+    dir.close();
+  }
+  
+  private void testRangeSplit(int precisionStep) throws Exception {
+    String field="ascfield"+precisionStep;
+    // 10 random tests
+    int num = TestUtil.nextInt(random(), 10, 20);
+    for (int i = 0; i < num; i++) {
+      long lower=(long)(random().nextDouble()*noDocs - noDocs/2);
+      long upper=(long)(random().nextDouble()*noDocs - noDocs/2);
+      if (lower>upper) {
+        long a=lower; lower=upper; upper=a;
+      }
+      // test inclusive range
+      Query tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+      TopDocs tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+      // test exclusive range
+      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
+      // test left exclusive range
+      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+      // test right exclusive range
+      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+    }
+  }
+
+  @Test
+  public void testRangeSplit_8bit() throws Exception {
+    testRangeSplit(8);
+  }
+  
+  @Test
+  public void testRangeSplit_6bit() throws Exception {
+    testRangeSplit(6);
+  }
+  
+  @Test
+  public void testRangeSplit_4bit() throws Exception {
+    testRangeSplit(4);
+  }
+  
+  @Test
+  public void testRangeSplit_2bit() throws Exception {
+    testRangeSplit(2);
+  }
+  
+  /** we fake a double test using long2double conversion of LegacyNumericUtils */
+  private void testDoubleRange(int precisionStep) throws Exception {
+    final String field="ascfield"+precisionStep;
+    final long lower=-1000L, upper=+2000L;
+    
+    Query tq= LegacyNumericRangeQuery.newDoubleRange(field, precisionStep,
+        NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
+    TopDocs tTopDocs = searcher.search(tq, 1);
+    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+  }
+
+  @Test
+  public void testDoubleRange_8bit() throws Exception {
+    testDoubleRange(8);
+  }
+  
+  @Test
+  public void testDoubleRange_6bit() throws Exception {
+    testDoubleRange(6);
+  }
+  
+  @Test
+  public void testDoubleRange_4bit() throws Exception {
+    testDoubleRange(4);
+  }
+  
+  @Test
+  public void testDoubleRange_2bit() throws Exception {
+    testDoubleRange(2);
+  }
+  
+  @Test
+  public void testEqualsAndHash() throws Exception {
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test2", 4, 10L, 20L, false, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test3", 4, 10L, 20L, true, false));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test4", 4, 10L, 20L, false, false));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test5", 4, 10L, null, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test6", 4, null, 20L, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test7", 4, null, null, true, true));
+    QueryUtils.checkEqual(
+      LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test9", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test9", 8, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test10a", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test10b", 4, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test11", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test11", 4, 20L, 10L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, false, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test13", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
+    );
+     // difference to int range is tested in TestNumericRangeQuery32
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/legacy/TestNumericTokenStream.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/legacy/TestNumericTokenStream.java b/solr/core/src/test/org/apache/solr/legacy/TestNumericTokenStream.java
new file mode 100644
index 0000000..b2e3781
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/legacy/TestNumericTokenStream.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.solr.legacy.LegacyNumericTokenStream;
+import org.apache.solr.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericTokenStream.LegacyNumericTermAttributeImpl;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl;
+
+@Deprecated
+public class TestNumericTokenStream extends BaseTokenStreamTestCase {
+
+  final long lvalue = random().nextLong();
+  final int ivalue = random().nextInt();
+
+  public void testLongStream() throws Exception {
+    @SuppressWarnings("resource")
+    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setLongValue(lvalue);
+    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
+    assertNotNull(bytesAtt);
+    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
+    assertNotNull(typeAtt);
+    final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
+    assertNotNull(numericAtt);
+    stream.reset();
+    assertEquals(64, numericAtt.getValueSize());
+    for (int shift=0; shift<64; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
+      assertTrue("New token is available", stream.incrementToken());
+      assertEquals("Shift value wrong", shift, numericAtt.getShift());
+      assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), LegacyNumericUtils.prefixCodedToLong(bytesAtt.getBytesRef()));
+      assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
+      assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
+    }
+    assertFalse("More tokens available", stream.incrementToken());
+    stream.end();
+    stream.close();
+  }
+
+  public void testIntStream() throws Exception {
+    @SuppressWarnings("resource")
+    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setIntValue(ivalue);
+    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
+    assertNotNull(bytesAtt);
+    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
+    assertNotNull(typeAtt);
+    final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
+    assertNotNull(numericAtt);
+    stream.reset();
+    assertEquals(32, numericAtt.getValueSize());
+    for (int shift=0; shift<32; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
+      assertTrue("New token is available", stream.incrementToken());
+      assertEquals("Shift value wrong", shift, numericAtt.getShift());
+      assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), LegacyNumericUtils.prefixCodedToInt(bytesAtt.getBytesRef()));
+      assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
+      assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
+    }
+    assertFalse("More tokens available", stream.incrementToken());
+    stream.end();
+    stream.close();
+  }
+  
+  public void testNotInitialized() throws Exception {
+    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
+    
+    expectThrows(IllegalStateException.class, () -> {
+      stream.reset();
+    });
+
+    expectThrows(IllegalStateException.class, () -> {
+      stream.incrementToken();
+    });
+    
+    stream.close();
+  }
+  
+  public static interface TestAttribute extends CharTermAttribute {}
+  public static class TestAttributeImpl extends CharTermAttributeImpl implements TestAttribute {}
+  
+  public void testCTA() throws Exception {
+    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
+    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
+      stream.addAttribute(CharTermAttribute.class);
+    });
+    assertTrue(e.getMessage().startsWith("LegacyNumericTokenStream does not support"));
+
+    e = expectThrows(IllegalArgumentException.class, () -> {
+      stream.addAttribute(TestAttribute.class);
+    });
+    assertTrue(e.getMessage().startsWith("LegacyNumericTokenStream does not support"));
+    stream.close();
+  }
+  
+  /** LUCENE-7027 */
+  public void testCaptureStateAfterExhausted() throws Exception {
+    // default precstep
+    try (LegacyNumericTokenStream stream=new LegacyNumericTokenStream()) {
+      // int
+      stream.setIntValue(ivalue);
+      stream.reset();
+      while (stream.incrementToken());
+      stream.captureState();
+      stream.end();
+      stream.captureState();
+      // long
+      stream.setLongValue(lvalue);
+      stream.reset();
+      while (stream.incrementToken());
+      stream.captureState();
+      stream.end();
+      stream.captureState();
+    }
+    // huge precstep
+    try (LegacyNumericTokenStream stream=new LegacyNumericTokenStream(Integer.MAX_VALUE)) {
+      // int
+      stream.setIntValue(ivalue);
+      stream.reset();
+      while (stream.incrementToken());
+      stream.captureState();
+      stream.end();
+      stream.captureState();
+      // long
+      stream.setLongValue(lvalue);
+      stream.reset();
+      while (stream.incrementToken());
+      stream.captureState();
+      stream.end();
+      stream.captureState();
+    }
+  }
+  
+  public void testAttributeClone() throws Exception {
+    LegacyNumericTermAttributeImpl att = new LegacyNumericTermAttributeImpl();
+    att.init(lvalue, 64, 8, 0); // set some value, to make getBytesRef() work
+    LegacyNumericTermAttributeImpl copy = assertCloneIsEqual(att);
+    assertNotSame(att.getBytesRef(), copy.getBytesRef());
+    LegacyNumericTermAttributeImpl copy2 = assertCopyIsEqual(att);
+    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
+    
+    // LUCENE-7027 test
+    att.init(lvalue, 64, 8, 64); // Exhausted TokenStream -> should return empty BytesRef
+    assertEquals(new BytesRef(), att.getBytesRef());
+    copy = assertCloneIsEqual(att);
+    assertEquals(new BytesRef(), copy.getBytesRef());
+    assertNotSame(att.getBytesRef(), copy.getBytesRef());
+    copy2 = assertCopyIsEqual(att);
+    assertEquals(new BytesRef(), copy2.getBytesRef());
+    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
+  }
+  
+  public static <T extends AttributeImpl> T assertCloneIsEqual(T att) {
+    @SuppressWarnings("unchecked")
+    T clone = (T) att.clone();
+    assertEquals("Clone must be equal", att, clone);
+    assertEquals("Clone's hashcode must be equal", att.hashCode(), clone.hashCode());
+    return clone;
+  }
+
+  public static <T extends AttributeImpl> T assertCopyIsEqual(T att) throws Exception {
+    @SuppressWarnings("unchecked")
+    T copy = (T) att.getClass().newInstance();
+    att.copyTo(copy);
+    assertEquals("Copied instance must be equal", att, copy);
+    assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
+    return copy;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/search/TestLegacyNumericRangeQueryBuilder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestLegacyNumericRangeQueryBuilder.java b/solr/core/src/test/org/apache/solr/search/TestLegacyNumericRangeQueryBuilder.java
index 3e147c2..a083bfd 100644
--- a/solr/core/src/test/org/apache/solr/search/TestLegacyNumericRangeQueryBuilder.java
+++ b/solr/core/src/test/org/apache/solr/search/TestLegacyNumericRangeQueryBuilder.java
@@ -18,7 +18,7 @@ package org.apache.solr.search;
 
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.w3c.dom.Document;
 import org.xml.sax.SAXException;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
index e995f1e..53caf77 100644
--- a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
@@ -17,7 +17,7 @@
 package org.apache.solr.search;
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.search.*;
 import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial.java b/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial.java
index 8cd96ae..895fb83 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSolr4Spatial.java
@@ -24,7 +24,7 @@ import org.locationtech.spatial4j.context.SpatialContext;
 import org.locationtech.spatial4j.distance.DistanceUtils;
 import org.locationtech.spatial4j.shape.Point;
 import org.locationtech.spatial4j.shape.Rectangle;
-import org.apache.lucene.spatial.bbox.BBoxStrategy;
+import org.apache.solr.legacy.BBoxStrategy;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.core.SolrCore;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java b/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
index b3a70ae..f7918d7 100644
--- a/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
+++ b/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
@@ -29,8 +29,8 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.legacy.LegacyFloatField;
-import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyFloatField;
+import org.apache.solr.legacy.LegacyIntField;
 import org.apache.lucene.queries.function.FunctionQuery;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
index 69b89b4..873c095 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
@@ -43,9 +43,9 @@ import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyLongField;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyLongField;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java b/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java
index d53f610..9588e67 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java
@@ -37,10 +37,10 @@ import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.legacy.LegacyDoubleField;
-import org.apache.lucene.legacy.LegacyFloatField;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.solr.legacy.LegacyDoubleField;
+import org.apache.solr.legacy.LegacyFloatField;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyLongField;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.IndexSearcher;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java b/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java
index e38e193..b75bab5 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java
@@ -32,10 +32,10 @@ import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.legacy.LegacyDoubleField;
-import org.apache.lucene.legacy.LegacyFloatField;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.solr.legacy.LegacyDoubleField;
+import org.apache.solr.legacy.LegacyFloatField;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyLongField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java
index 6fed73b..b9392b7 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java
@@ -23,9 +23,9 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.legacy.LegacyFieldType;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyFieldType;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java
index 2f341b7..61a3579 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java
@@ -23,9 +23,9 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.legacy.LegacyFieldType;
-import org.apache.lucene.legacy.LegacyLongField;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyFieldType;
+import org.apache.solr.legacy.LegacyLongField;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java b/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
index e9e94e2..f140ce2 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
@@ -40,10 +40,10 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.legacy.LegacyFieldType;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyLongField;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyFieldType;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyLongField;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;


[24/25] lucene-solr:jira/solr-8668: LUCENE-7850: Move support for legacy numerics to solr/.

Posted by cp...@apache.org.
LUCENE-7850: Move support for legacy numerics to solr/.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/759fa42b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/759fa42b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/759fa42b

Branch: refs/heads/jira/solr-8668
Commit: 759fa42b62779404c576710a2e589a6993aaf990
Parents: 412e4ae
Author: Adrien Grand <jp...@gmail.com>
Authored: Wed May 24 16:30:25 2017 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Tue May 30 10:33:08 2017 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   2 +
 lucene/MIGRATE.txt                              |   6 +
 .../apache/lucene/legacy/LegacyDoubleField.java | 174 -----
 .../org/apache/lucene/legacy/LegacyField.java   |  90 ---
 .../apache/lucene/legacy/LegacyFieldType.java   | 149 ----
 .../apache/lucene/legacy/LegacyFloatField.java  | 174 -----
 .../apache/lucene/legacy/LegacyIntField.java    | 175 -----
 .../apache/lucene/legacy/LegacyLongField.java   | 184 -----
 .../lucene/legacy/LegacyNumericRangeQuery.java  | 537 --------------
 .../lucene/legacy/LegacyNumericTokenStream.java | 357 ----------
 .../apache/lucene/legacy/LegacyNumericType.java |  34 -
 .../lucene/legacy/LegacyNumericUtils.java       | 510 --------------
 .../lucene/legacy/doc-files/nrq-formula-1.png   | Bin 3171 -> 0 bytes
 .../lucene/legacy/doc-files/nrq-formula-2.png   | Bin 3694 -> 0 bytes
 .../org/apache/lucene/legacy/package-info.java  |  21 -
 .../index/TestBackwardsCompatibility.java       |  52 --
 .../apache/lucene/legacy/TestLegacyField.java   | 186 -----
 .../lucene/legacy/TestLegacyFieldReuse.java     |  81 ---
 .../lucene/legacy/TestLegacyNumericUtils.java   | 571 ---------------
 .../apache/lucene/legacy/TestLegacyTerms.java   | 164 -----
 .../TestMultiValuedNumericRangeQuery.java       |  84 ---
 .../lucene/legacy/TestNumericRangeQuery32.java  | 461 ------------
 .../lucene/legacy/TestNumericRangeQuery64.java  | 490 -------------
 .../lucene/legacy/TestNumericTokenStream.java   | 188 -----
 .../lucene/spatial/bbox/BBoxStrategy.java       |  59 +-
 .../prefix/BytesRefIteratorTokenStream.java     |   2 -
 .../spatial/vector/PointVectorStrategy.java     |  52 --
 .../lucene/spatial/DistanceStrategyTest.java    |   3 -
 .../lucene/spatial/QueryEqualsHashCodeTest.java |   2 -
 .../lucene/spatial/bbox/TestBBoxStrategy.java   |  33 +-
 .../spatial/vector/TestPointVectorStrategy.java |   7 +-
 .../solr/analytics/util/AnalyticsParsers.java   |   2 +-
 .../util/valuesource/DateFieldSource.java       |   2 +-
 .../org/apache/solr/legacy/BBoxStrategy.java    | 706 +++++++++++++++++++
 .../org/apache/solr/legacy/BBoxValueSource.java | 135 ++++
 .../apache/solr/legacy/DistanceValueSource.java | 133 ++++
 .../apache/solr/legacy/LegacyDoubleField.java   | 174 +++++
 .../org/apache/solr/legacy/LegacyField.java     |  90 +++
 .../org/apache/solr/legacy/LegacyFieldType.java | 149 ++++
 .../apache/solr/legacy/LegacyFloatField.java    | 174 +++++
 .../org/apache/solr/legacy/LegacyIntField.java  | 175 +++++
 .../org/apache/solr/legacy/LegacyLongField.java | 184 +++++
 .../solr/legacy/LegacyNumericRangeQuery.java    | 537 ++++++++++++++
 .../solr/legacy/LegacyNumericTokenStream.java   | 357 ++++++++++
 .../apache/solr/legacy/LegacyNumericType.java   |  34 +
 .../apache/solr/legacy/LegacyNumericUtils.java  | 510 ++++++++++++++
 .../apache/solr/legacy/PointVectorStrategy.java | 292 ++++++++
 .../solr/legacy/doc-files/nrq-formula-1.png     | Bin 0 -> 3171 bytes
 .../solr/legacy/doc-files/nrq-formula-2.png     | Bin 0 -> 3694 bytes
 .../org/apache/solr/legacy/package-info.java    |  21 +
 .../java/org/apache/solr/schema/BBoxField.java  |   4 +-
 .../java/org/apache/solr/schema/EnumField.java  |  10 +-
 .../schema/SpatialPointVectorFieldType.java     |   4 +-
 .../org/apache/solr/schema/TrieDoubleField.java |   2 +-
 .../java/org/apache/solr/schema/TrieField.java  |  24 +-
 .../org/apache/solr/schema/TrieFloatField.java  |   2 +-
 .../org/apache/solr/schema/TrieIntField.java    |   2 +-
 .../org/apache/solr/schema/TrieLongField.java   |   2 +-
 .../search/LegacyNumericRangeQueryBuilder.java  |   8 +-
 .../org/apache/solr/search/QueryParsing.java    |   2 +-
 .../apache/solr/search/QueryWrapperFilter.java  |   2 +-
 .../apache/solr/search/mlt/CloudMLTQParser.java |   2 +-
 .../solr/search/mlt/SimpleMLTQParser.java       |   2 +-
 .../org/apache/solr/uninverting/FieldCache.java |  20 +-
 .../solr/uninverting/UninvertingReader.java     |  16 +-
 .../org/apache/solr/update/VersionInfo.java     |   2 +-
 .../org/apache/solr/legacy/TestLegacyField.java | 186 +++++
 .../solr/legacy/TestLegacyFieldReuse.java       |  81 +++
 .../solr/legacy/TestLegacyNumericUtils.java     | 571 +++++++++++++++
 .../org/apache/solr/legacy/TestLegacyTerms.java | 164 +++++
 .../TestMultiValuedNumericRangeQuery.java       |  84 +++
 .../solr/legacy/TestNumericRangeQuery32.java    | 461 ++++++++++++
 .../solr/legacy/TestNumericRangeQuery64.java    | 490 +++++++++++++
 .../solr/legacy/TestNumericTokenStream.java     | 188 +++++
 .../TestLegacyNumericRangeQueryBuilder.java     |   2 +-
 .../solr/search/TestMaxScoreQueryParser.java    |   2 +-
 .../apache/solr/search/TestSolr4Spatial.java    |   2 +-
 .../solr/search/function/TestOrdValues.java     |   4 +-
 .../solr/uninverting/TestDocTermOrds.java       |   6 +-
 .../solr/uninverting/TestFieldCacheSort.java    |   8 +-
 .../solr/uninverting/TestLegacyFieldCache.java  |   8 +-
 .../solr/uninverting/TestNumericTerms32.java    |   6 +-
 .../solr/uninverting/TestNumericTerms64.java    |   6 +-
 .../solr/uninverting/TestUninvertingReader.java |   8 +-
 84 files changed, 5991 insertions(+), 4913 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 0dfc709..0c1d351 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -57,6 +57,8 @@ API Changes
   instead, which derived from the UH.  WholeBreakIterator and
   CustomSeparatorBreakIterator were moved to UH's package. (David Smiley)
 
+* LUCENE-7850: Removed support for legacy numerics. (Adrien Grand)
+
 Bug Fixes
 
 * LUCENE-7626: IndexWriter will no longer accept broken token offsets

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/MIGRATE.txt
----------------------------------------------------------------------
diff --git a/lucene/MIGRATE.txt b/lucene/MIGRATE.txt
index c7936a4..89b2d76 100644
--- a/lucene/MIGRATE.txt
+++ b/lucene/MIGRATE.txt
@@ -74,3 +74,9 @@ collecting TopDocs for each group, but instead takes a GroupReducer that will
 perform any type of reduction on the top groups collected on a first-pass.  To
 reproduce the old behaviour of SecondPassGroupingCollector, you should instead
 use TopGroupsCollector.
+
+## Removed legacy numerics (LUCENE-7850)
+
+Support for legacy numerics has been removed since legacy numerics had been
+deprecated since Lucene 6.0. Points should be used instead, see
+org.apache.lucene.index.PointValues for an introduction.
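
As a concrete (hypothetical) illustration of this migration note, a minimal sketch of the
points-based replacement might look as follows; the field name "price" and the in-memory
directory setup are illustrative assumptions, not part of the patch:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class PointsMigrationSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
          Document doc = new Document();
          // Previously: doc.add(new LegacyLongField("price", 42L, Field.Store.NO));
          doc.add(new LongPoint("price", 42L));
          writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
          IndexSearcher searcher = new IndexSearcher(reader);
          // Previously: LegacyNumericRangeQuery.newLongRange("price", 10L, 100L, true, true)
          Query q = LongPoint.newRangeQuery("price", 10L, 100L);
          System.out.println("hits: " + searcher.search(q, 10).totalHits);
        }
      }
    }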

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyDoubleField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyDoubleField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyDoubleField.java
deleted file mode 100644
index e98a4f0..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyDoubleField.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoublePoint;
-import org.apache.lucene.index.IndexOptions;
-
-
-/**
- * <p>
- * Field that indexes <code>double</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LegacyDoubleField(name, 6.0, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LegacyDoubleField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LegacyDoubleField field = new LegacyDoubleField(name, 0.0, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setDoubleValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
- * LegacyFloatField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>LegacyDoubleField</code>, use {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- * To sort according to a
- * <code>LegacyDoubleField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>LegacyDoubleField</code>
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as an <code>LegacyDoubleField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LegacyDoubleField</code>.</p>
- *
- * <p>A <code>LegacyDoubleField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 16, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link LegacyFieldType} and invoke the {@link
- * LegacyFieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The format of
- * indexed values is described in {@link org.apache.lucene.legacy.LegacyNumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * org.apache.lucene.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @deprecated Please use {@link DoublePoint} instead
- *
- * @since 2.9
- */
-
-@Deprecated
-public final class LegacyDoubleField extends LegacyField {
-  
-  /** 
-   * Type for a LegacyDoubleField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(LegacyNumericType.DOUBLE);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LegacyDoubleField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(LegacyNumericType.DOUBLE);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LegacyDoubleField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
-   *  @param name field name
-   *  @param value 64-bit double value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null. 
-   */
-  public LegacyDoubleField(String name, double value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Double.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  LegacyFieldType}. 
-   *  @param name field name
-   *  @param value 64-bit double value
-   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
-   *         of {@link LegacyNumericType#DOUBLE}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a DOUBLE numericType()
-   */
-  public LegacyDoubleField(String name, double value, LegacyFieldType type) {
-    super(name, type);
-    if (type.numericType() != LegacyNumericType.DOUBLE) {
-      throw new IllegalArgumentException("type.numericType() must be DOUBLE but got " + type.numericType());
-    }
-    fieldsData = Double.valueOf(value);
-  }
-}
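
The deprecation note above points at DoublePoint as the replacement. A minimal
migration sketch, assuming a hypothetical field name "weight" and wrapper class;
the extra doc-values and stored fields are needed because a point by itself is
neither sortable nor retrievable:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.DoubleDocValuesField;
    import org.apache.lucene.document.DoublePoint;
    import org.apache.lucene.document.StoredField;

    class DoublePointMigration {
      static Document migrated(double value) {
        Document doc = new Document();
        // Before: doc.add(new LegacyDoubleField("weight", value, Field.Store.YES));
        doc.add(new DoublePoint("weight", value));          // range queries
        doc.add(new DoubleDocValuesField("weight", value)); // sorting
        doc.add(new StoredField("weight", value));          // stored retrieval
        return doc;
      }
    }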

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyField.java
deleted file mode 100644
index 87ac0e5..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyField.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexOptions;
-
-/**
- * Field extension with support for legacy numerics
- * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
- */
-@Deprecated
-public class LegacyField extends Field {
-
-  /**
-   * Expert: creates a field with no initial value.
-   * Intended only for custom LegacyField subclasses.
-   * @param name field name
-   * @param type field type
-   * @throws IllegalArgumentException if either the name or type
-   *         is null.
-   */
-  public LegacyField(String name, LegacyFieldType type) {
-    super(name, type);
-  }
-  
-  @Override
-  public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) {
-    if (fieldType().indexOptions() == IndexOptions.NONE) {
-      // Not indexed
-      return null;
-    }
-    final LegacyFieldType fieldType = (LegacyFieldType) fieldType();
-    final LegacyNumericType numericType = fieldType.numericType();
-    if (numericType != null) {
-      if (!(reuse instanceof LegacyNumericTokenStream && ((LegacyNumericTokenStream)reuse).getPrecisionStep() == fieldType.numericPrecisionStep())) {
-        // lazy init the TokenStream as it is heavy to instantiate
-        // (attributes,...) if not needed (stored field loading)
-        reuse = new LegacyNumericTokenStream(fieldType.numericPrecisionStep());
-      }
-      final LegacyNumericTokenStream nts = (LegacyNumericTokenStream) reuse;
-      // initialize value in TokenStream
-      final Number val = (Number) fieldsData;
-      switch (numericType) {
-      case INT:
-        nts.setIntValue(val.intValue());
-        break;
-      case LONG:
-        nts.setLongValue(val.longValue());
-        break;
-      case FLOAT:
-        nts.setFloatValue(val.floatValue());
-        break;
-      case DOUBLE:
-        nts.setDoubleValue(val.doubleValue());
-        break;
-      default:
-        throw new AssertionError("Should never get here");
-      }
-      return reuse;
-    }
-    return super.tokenStream(analyzer, reuse);
-  }
-  
-  @Override
-  public void setTokenStream(TokenStream tokenStream) {
-    final LegacyFieldType fieldType = (LegacyFieldType) fieldType();
-    if (fieldType.numericType() != null) {
-      throw new IllegalArgumentException("cannot set private TokenStream on numeric fields");
-    }
-    super.setTokenStream(tokenStream);
-  }
-
-}
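
The reuse branch in tokenStream() above is what makes the per-document reuse
pattern recommended in the Legacy*Field javadocs cheap: the heavy
LegacyNumericTokenStream is created once and only re-filled. A sketch of that
pattern, assuming an already-open IndexWriter (class and method names are
hypothetical):

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.legacy.LegacyLongField;

    class ReusePattern {
      static void indexAll(IndexWriter writer, long[] values) throws IOException {
        // One field and one document, re-filled per value; tokenStream() then
        // hands back the same LegacyNumericTokenStream on every addDocument call.
        LegacyLongField field = new LegacyLongField("id", 0L, Field.Store.NO);
        Document doc = new Document();
        doc.add(field);
        for (long value : values) {
          field.setLongValue(value);
          writer.addDocument(doc);
        }
      }
    }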

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFieldType.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFieldType.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFieldType.java
deleted file mode 100644
index 1f4b0af..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFieldType.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.index.IndexOptions;
-
-/**
- * FieldType extension with support for legacy numerics
- * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
- */
-@Deprecated
-public final class LegacyFieldType extends FieldType {
-  private LegacyNumericType numericType;
-  private int numericPrecisionStep = LegacyNumericUtils.PRECISION_STEP_DEFAULT;
-
-  /**
-   * Create a new mutable LegacyFieldType with all of the properties from <code>ref</code>
-   */
-  public LegacyFieldType(LegacyFieldType ref) {
-    super(ref);
-    this.numericType = ref.numericType;
-    this.numericPrecisionStep = ref.numericPrecisionStep;
-  }
-  
-  /**
-   * Create a new FieldType with default properties.
-   */
-  public LegacyFieldType() {
-  }
-  
-  /**
-   * Specifies the field's numeric type.
-   * @param type numeric type, or null if the field has no numeric type.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #numericType()
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public void setNumericType(LegacyNumericType type) {
-    checkIfFrozen();
-    numericType = type;
-  }
-  
-  /** 
-   * LegacyNumericType: if non-null then the field's value will be indexed
-   * numerically so that {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} can be used at
-   * search time. 
-   * <p>
-   * The default is <code>null</code> (no numeric type) 
-   * @see #setNumericType(LegacyNumericType)
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public LegacyNumericType numericType() {
-    return numericType;
-  }
-  
-  /**
-   * Sets the numeric precision step for the field.
-   * @param precisionStep numeric precision step for the field
-   * @throws IllegalArgumentException if precisionStep is less than 1. 
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #numericPrecisionStep()
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public void setNumericPrecisionStep(int precisionStep) {
-    checkIfFrozen();
-    if (precisionStep < 1) {
-      throw new IllegalArgumentException("precisionStep must be >= 1 (got " + precisionStep + ")");
-    }
-    this.numericPrecisionStep = precisionStep;
-  }
-  
-  /** 
-   * Precision step for numeric field. 
-   * <p>
-   * This has no effect if {@link #numericType()} returns null.
-   * <p>
-   * The default is {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT}
-   * @see #setNumericPrecisionStep(int)
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public int numericPrecisionStep() {
-    return numericPrecisionStep;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = super.hashCode();
-    result = prime * result + numericPrecisionStep;
-    result = prime * result + ((numericType == null) ? 0 : numericType.hashCode());
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (!super.equals(obj)) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) return false;
-    LegacyFieldType other = (LegacyFieldType) obj;
-    if (numericPrecisionStep != other.numericPrecisionStep) return false;
-    if (numericType != other.numericType) return false;
-    return true;
-  }
-
-  /** Prints a Field for human consumption. */
-  @Override
-  public String toString() {
-    StringBuilder result = new StringBuilder();
-    result.append(super.toString());
-    if (indexOptions() != IndexOptions.NONE) {
-      if (result.length() > 0) {
-        result.append(",");
-      }
-      if (numericType != null) {
-        result.append(",numericType=");
-        result.append(numericType);
-        result.append(",numericPrecisionStep=");
-        result.append(numericPrecisionStep);
-      }
-    }
-    return result.toString();
-  }
-}
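
Because every setter above calls checkIfFrozen(), a custom precision step must
be configured before freeze(). A minimal sketch of the "create a custom
LegacyFieldType" advice from the field javadocs; the constant name and the step
value 6 are arbitrary examples, and the same step has to be passed to
LegacyNumericRangeQuery at search time:

    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.legacy.LegacyDoubleField;
    import org.apache.lucene.legacy.LegacyFieldType;
    import org.apache.lucene.legacy.LegacyNumericType;

    class CustomStepType {
      static final LegacyFieldType DOUBLE_STEP_6 = new LegacyFieldType();
      static {
        DOUBLE_STEP_6.setTokenized(true);
        DOUBLE_STEP_6.setOmitNorms(true);
        DOUBLE_STEP_6.setIndexOptions(IndexOptions.DOCS);
        DOUBLE_STEP_6.setNumericType(LegacyNumericType.DOUBLE);
        DOUBLE_STEP_6.setNumericPrecisionStep(6); // must match the query-time step
        DOUBLE_STEP_6.freeze();                   // setters throw IllegalStateException from here on
      }

      static LegacyDoubleField field(String name, double value) {
        return new LegacyDoubleField(name, value, DOUBLE_STEP_6);
      }
    }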

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFloatField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFloatField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFloatField.java
deleted file mode 100644
index ea3b84a..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFloatField.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FloatPoint;
-import org.apache.lucene.index.IndexOptions;
-
-/**
- * <p>
- * Field that indexes <code>float</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LegacyFloatField(name, 6.0F, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LegacyFloatField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LegacyFloatField field = new LegacyFloatField(name, 0.0F, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setFloatValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
- * LegacyDoubleField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>LegacyFloatField</code>, use {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- * To sort according to a
- * <code>LegacyFloatField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>LegacyFloatField</code>
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as a <code>LegacyFloatField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LegacyFloatField</code>.</p>
- *
- * <p>A <code>LegacyFloatField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 8, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link LegacyFieldType} and invoke the {@link
- * LegacyFieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The format of
- * indexed values is described in {@link org.apache.lucene.legacy.LegacyNumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * org.apache.lucene.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @deprecated Please use {@link FloatPoint} instead
- *
- * @since 2.9
- */
-
-@Deprecated
-public final class LegacyFloatField extends LegacyField {
-  
-  /** 
-   * Type for a LegacyFloatField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(LegacyNumericType.FLOAT);
-    TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LegacyFloatField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(LegacyNumericType.FLOAT);
-    TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LegacyFloatField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   *  @param name field name
- *  @param value 32-bit float value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public LegacyFloatField(String name, float value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Float.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  LegacyFieldType}. 
-   *  @param name field name
-   *  @param value 32-bit float value
-   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
-   *         of {@link LegacyNumericType#FLOAT}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a FLOAT numericType()
-   */
-  public LegacyFloatField(String name, float value, LegacyFieldType type) {
-    super(name, type);
-    if (type.numericType() != LegacyNumericType.FLOAT) {
-      throw new IllegalArgumentException("type.numericType() must be FLOAT but got " + type.numericType());
-    }
-    fieldsData = Float.valueOf(value);
-  }
-}
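
On the query side the migration is equally mechanical. A sketch contrasting the
two forms, reusing the "weight" example from the LegacyNumericRangeQuery javadoc
below; note that point range bounds are always inclusive, so exclusive bounds
are expressed with Math.nextUp/Math.nextDown instead of boolean flags:

    import org.apache.lucene.document.FloatPoint;
    import org.apache.lucene.legacy.LegacyNumericRangeQuery;
    import org.apache.lucene.search.Query;

    class FloatRangeMigration {
      // Legacy trie-encoded form; both bounds inclusive:
      static Query legacy() {
        return LegacyNumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
      }
      // Equivalent query against a FloatPoint-indexed "weight" field:
      static Query points() {
        return FloatPoint.newRangeQuery("weight", 0.03f, 0.10f);
      }
    }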

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyIntField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyIntField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyIntField.java
deleted file mode 100644
index e3ae965..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyIntField.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.index.IndexOptions;
-
-/**
- * <p>
- * Field that indexes <code>int</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LegacyIntField(name, 6, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LegacyIntField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LegacyIntField field = new LegacyIntField(name, 6, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setIntValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LegacyLongField}, {@link LegacyFloatField}, {@link
- * LegacyDoubleField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>LegacyIntField</code>, use {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- * To sort according to a
- * <code>LegacyIntField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#INT}. <code>LegacyIntField</code>
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as a <code>LegacyIntField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LegacyIntField</code>.</p>
- *
- * <p>A <code>LegacyIntField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 8, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link LegacyFieldType} and invoke the {@link
- * LegacyFieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The format of
- * indexed values is described in {@link org.apache.lucene.legacy.LegacyNumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * org.apache.lucene.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @deprecated Please use {@link IntPoint} instead
- *
- * @since 2.9
- */
-
-@Deprecated
-public final class LegacyIntField extends LegacyField {
-  
-  /** 
-   * Type for a LegacyIntField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(LegacyNumericType.INT);
-    TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LegacyIntField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(LegacyNumericType.INT);
-    TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LegacyIntField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   *  @param name field name
-   *  @param value 32-bit integer value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public LegacyIntField(String name, int value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Integer.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  LegacyFieldType}. 
-   *  @param name field name
-   *  @param value 32-bit integer value
-   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
-   *         of {@link LegacyNumericType#INT}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have an INT numericType()
-   */
-  public LegacyIntField(String name, int value, LegacyFieldType type) {
-    super(name, type);
-    if (type.numericType() != LegacyNumericType.INT) {
-      throw new IllegalArgumentException("type.numericType() must be INT but got " + type.numericType());
-    }
-    fieldsData = Integer.valueOf(value);
-  }
-}
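
For the sorting use case mentioned in the javadoc above, a minimal sketch; it
assumes the hypothetical "year" field was additionally indexed with numeric doc
values, which none of the Legacy*Field types add on their own:

    import java.io.IOException;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.TopDocs;

    class SortByIntField {
      static TopDocs topTenByYear(IndexSearcher searcher) throws IOException {
        // "The normal numeric sort types" from the javadoc above:
        Sort sort = new Sort(new SortField("year", SortField.Type.INT));
        return searcher.search(new MatchAllDocsQuery(), 10, sort);
      }
    }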

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyLongField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyLongField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyLongField.java
deleted file mode 100644
index 3e20b44..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyLongField.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.LongPoint;
-import org.apache.lucene.index.IndexOptions;
-
-
-/**
- * <p>
- * Field that indexes <code>long</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LegacyLongField(name, 6L, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LegacyLongField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LegacyLongField field = new LegacyLongField(name, 0L, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setLongValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LegacyIntField}, {@link LegacyFloatField}, {@link
- * LegacyDoubleField}.
- *
- * Any type that can be converted to long can also be
- * indexed.  For example, date/time values represented by a
- * {@link java.util.Date} can be translated into a long
- * value using the {@link java.util.Date#getTime} method.  If you
- * don't need millisecond precision, you can quantize the
- * value, either by dividing the result of
- * {@link java.util.Date#getTime} or using the separate getters
- * (for year, month, etc.) to construct an <code>int</code> or
- * <code>long</code> value.
- *
- * <p>To perform range querying or filtering against a
- * <code>LegacyLongField</code>, use {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- * To sort according to a
- * <code>LegacyLongField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LegacyLongField</code>
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.
- *
- * <p>You may add the same field name as a <code>LegacyLongField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LegacyLongField</code>.
- *
- * <p>A <code>LegacyLongField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 16, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link LegacyFieldType} and invoke the {@link
- * LegacyFieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The format of
- * indexed values is described in {@link org.apache.lucene.legacy.LegacyNumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed.
- *
- * <p>More advanced users can instead use {@link
- * org.apache.lucene.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @deprecated Please use {@link LongPoint} instead
- *
- * @since 2.9
- */
-
-@Deprecated
-public final class LegacyLongField extends LegacyField {
-  
-  /** 
-   * Type for a LegacyLongField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(LegacyNumericType.LONG);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LegacyLongField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(LegacyNumericType.LONG);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LegacyLongField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
-   *  @param name field name
-   *  @param value 64-bit long value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public LegacyLongField(String name, long value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Long.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  LegacyFieldType}. 
-   *  @param name field name
-   *  @param value 64-bit long value
-   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
-   *         of {@link LegacyNumericType#LONG}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a LONG numericType()
-   */
-  public LegacyLongField(String name, long value, LegacyFieldType type) {
-    super(name, type);
-    if (type.numericType() != LegacyNumericType.LONG) {
-      throw new IllegalArgumentException("type.numericType() must be LONG but got " + type.numericType());
-    }
-    fieldsData = Long.valueOf(value);
-  }
-}
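
The date-to-long translation described in the javadoc above, as a concrete
sketch; the field names and the day-level quantization are arbitrary choices:

    import java.util.Date;
    import java.util.concurrent.TimeUnit;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.legacy.LegacyLongField;

    class DateAsLong {
      static Document withTimestamp(Date date) {
        Document doc = new Document();
        // Full millisecond precision, straight from Date#getTime():
        doc.add(new LegacyLongField("created", date.getTime(), Field.Store.NO));
        // Quantized to whole days when millisecond precision is not needed:
        long days = TimeUnit.MILLISECONDS.toDays(date.getTime());
        doc.add(new LegacyLongField("createdDay", days, Field.Store.NO));
        return doc;
      }
    }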

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericRangeQuery.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericRangeQuery.java
deleted file mode 100644
index f172a20..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericRangeQuery.java
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.Objects;
-
-import org.apache.lucene.document.DoublePoint;
-import org.apache.lucene.document.FloatPoint;
-import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.LongPoint;
-import org.apache.lucene.index.FilteredTermsEnum;
-import org.apache.lucene.index.PointValues;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.MultiTermQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermRangeQuery;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.index.Term; // for javadocs
-
-/**
- * <p>A {@link Query} that matches numeric values within a
- * specified range.  To use this, you must first index the
- * numeric values using {@link org.apache.lucene.legacy.LegacyIntField}, {@link
- * org.apache.lucene.legacy.LegacyFloatField}, {@link org.apache.lucene.legacy.LegacyLongField} or {@link org.apache.lucene.legacy.LegacyDoubleField} (expert: {@link
- * org.apache.lucene.legacy.LegacyNumericTokenStream}).  If your terms are instead textual,
- * you should use {@link TermRangeQuery}.</p>
- *
- * <p>You create a new LegacyNumericRangeQuery with the static
- * factory methods, eg:
- *
- * <pre class="prettyprint">
- * Query q = LegacyNumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
- * </pre>
- *
- * matches all documents whose float valued "weight" field
- * ranges from 0.03 to 0.10, inclusive.
- *
- * <p>The performance of LegacyNumericRangeQuery is much better
- * than the corresponding {@link TermRangeQuery} because the
- * number of terms that must be searched is usually far
- * smaller, thanks to trie indexing, described below.</p>
- *
- * <p>You can optionally specify a <a
- * href="#precisionStepDesc"><code>precisionStep</code></a>
- * when creating this query.  This is necessary if you've
- * changed this configuration from its per-type default
- * ({@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} = 16 for <code>long</code>/<code>double</code>,
- * {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} = 8 for <code>int</code>/<code>float</code>)
- * during indexing.  Lower values consume more disk space but speed
- * up searching.  Suitable values are between <b>1</b> and
- * <b>8</b>. A good starting point to test is <b>4</b>.
- * See <a href="#precisionStepDesc">below</a> for details.
- *
- * <p>This query defaults to {@linkplain
- * MultiTermQuery#CONSTANT_SCORE_REWRITE}.
- * With precision steps of &le;4, this query can be run with
- * one of the BooleanQuery rewrite methods without changing
- * BooleanQuery's default max clause count.
- *
- * <br><h3>How it works</h3>
- *
- * <p>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
- * where this algorithm was described (referred to as <code>TrieRangeQuery</code>):
- *
- * <blockquote><strong>Schindler, U, Diepenbroek, M</strong>, 2008.
- * <em>Generic XML-based Framework for Metadata Portals.</em>
- * Computers &amp; Geosciences 34 (12), 1947-1955.
- * <a href="http://dx.doi.org/10.1016/j.cageo.2008.02.023"
- * target="_blank">doi:10.1016/j.cageo.2008.02.023</a></blockquote>
- *
- * <p><em>A quote from this paper:</em> Because Apache Lucene is a full-text
- * search engine and not a conventional database, it cannot handle numerical ranges
- * (e.g., field value is inside user defined bounds, even dates are numerical values).
- * We have developed an extension to Apache Lucene that stores
- * the numerical values in a special string-encoded format with variable precision
- * (all numerical values like doubles, longs, floats, and ints are converted to
- * lexicographic sortable string representations and stored with different precisions
- * (for a more detailed description of how the values are stored,
- * see {@link org.apache.lucene.legacy.LegacyNumericUtils}). A range is then divided recursively into multiple intervals for searching:
- * The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
- * while the boundaries are matched more exactly. This reduces the number of terms dramatically.</p>
- *
- * <p>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
- * uses a lowest precision of 1 byte, the index contains only a maximum of 256 distinct values in the
- * lowest precision. Overall, a range could consist of a theoretical maximum of
- * <code>7*255*2 + 255 = 3825</code> distinct terms (when there is a term for every distinct value of an
- * 8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
- * because it would always be possible to reduce the full 256 values to one term with degraded precision).
- * In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
- * and a uniform value distribution).</p>
- *
- * <h3><a name="precisionStepDesc">Precision Step</a></h3>
- * <p>You can choose any <code>precisionStep</code> when encoding values.
- * Lower step values mean more precision levels and so more terms in the index (and the index gets larger). The number
- * of indexed terms per value (these are generated by {@link org.apache.lucene.legacy.LegacyNumericTokenStream}) is:
- * <p style="font-family:serif">
- * &nbsp;&nbsp;indexedTermsPerValue = <b>ceil</b><big>(</big>bitsPerValue / precisionStep<big>)</big>
- * </p>
- * As the lower precision terms are shared by many values, the additional terms only
- * slightly grow the term dictionary (approx. 7% for <code>precisionStep=4</code>), but have a larger
- * impact on the postings (the postings file will have more entries, as every document is linked to
- * <code>indexedTermsPerValue</code> terms instead of one). The formula to estimate the growth
- * of the term dictionary in comparison to one term per value:
- * <p>
- * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
- * &nbsp;&nbsp;<img src="doc-files/nrq-formula-1.png" alt="\mathrm{termDictOverhead} = \sum\limits_{i=0}^{\mathrm{indexedTermsPerValue}-1} \frac{1}{2^{\mathrm{precisionStep}\cdot i}}">
- * </p>
- * <p>On the other hand, if the <code>precisionStep</code> is smaller, the maximum number of terms to match reduces,
- * which optimizes query speed. The formula to calculate the maximum number of terms that will be visited while
- * executing the query is:
- * <p>
- * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
- * &nbsp;&nbsp;<img src="doc-files/nrq-formula-2.png" alt="\mathrm{maxQueryTerms} = \left[ \left( \mathrm{indexedTermsPerValue} - 1 \right) \cdot \left(2^\mathrm{precisionStep} - 1 \right) \cdot 2 \right] + \left( 2^\mathrm{precisionStep} - 1 \right)">
- * </p>
- * <p>For longs stored using a precision step of 4, <code>maxQueryTerms = 15*15*2 + 15 = 465</code>, and for a precision
- * step of 2, <code>maxQueryTerms = 31*3*2 + 3 = 189</code>. But the gain in search speed is offset by more seeking
- * in the term enum of the index. Because of this, the ideal <code>precisionStep</code> value can only
- * be determined by testing. <b>Important:</b> You can index with a lower precision step value and test search speed
- * using a multiple of the original step value.</p>
- *
- * <p>Good values for <code>precisionStep</code> depend on usage and data type:
- * <ul>
- *  <li>The defaults are <b>16</b> for <em>64 bit</em> data types <em>(long, double)</em> and <b>8</b> for <em>32 bit</em> data types <em>(int, float)</em>; they are used when no <code>precisionStep</code> is given.
- *  <li>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.
- *  <li>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.
- *  <li>For low cardinality fields larger precision steps are good. If the cardinality is &lt; 100, it is
- *  fair to use {@link Integer#MAX_VALUE} (see below).
- *  <li>Steps <b>&ge;64</b> for <em>long/double</em> and <b>&ge;32</b> for <em>int/float</em> produce one token
- *  per value in the index, and querying is as slow as a conventional {@link TermRangeQuery}. But they can be used
- *  to produce fields that are solely used for sorting (in this case simply use {@link Integer#MAX_VALUE} as
- *  <code>precisionStep</code>). Using {@link org.apache.lucene.legacy.LegacyIntField},
- *  {@link org.apache.lucene.legacy.LegacyLongField}, {@link org.apache.lucene.legacy.LegacyFloatField} or {@link org.apache.lucene.legacy.LegacyDoubleField} for sorting
- *  is ideal, because building the field cache is much faster than with text-only numbers.
- *  These fields have one term per value and therefore also work with term enumeration for building distinct lists
- *  (e.g. facets / preselected values to search for).
- *  Sorting is also possible with range query optimized fields using one of the above <code>precisionSteps</code>.
- * </ul>
- *
- * <p>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
- * that {@link TermRangeQuery} in boolean rewrite mode (with raised {@link BooleanQuery} clause count)
- * took about 30-40 secs to complete, {@link TermRangeQuery} in constant score filter rewrite mode took 5 secs
- * and executing this class took &lt;100ms to complete (on an Opteron64 machine, Java 1.5, 8 bit
- * precision step). This query type was developed for a geographic portal, where the performance for
- * e.g. bounding boxes or exact date/time stamps is important.</p>
- *
- * @deprecated Instead index with {@link IntPoint}, {@link LongPoint}, {@link FloatPoint}, {@link DoublePoint}, and
- *             create range queries with {@link IntPoint#newRangeQuery(String, int, int) IntPoint.newRangeQuery()},
- *             {@link LongPoint#newRangeQuery(String, long, long) LongPoint.newRangeQuery()},
- *             {@link FloatPoint#newRangeQuery(String, float, float) FloatPoint.newRangeQuery()},
- *             {@link DoublePoint#newRangeQuery(String, double, double) DoublePoint.newRangeQuery()} respectively.
- *             See {@link PointValues} for background information on Points.
- *
- * @since 2.9
- **/
-
-@Deprecated
-public final class LegacyNumericRangeQuery<T extends Number> extends MultiTermQuery {
-
-  private LegacyNumericRangeQuery(final String field, final int precisionStep, final LegacyNumericType dataType,
-                                  T min, T max, final boolean minInclusive, final boolean maxInclusive) {
-    super(field);
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    this.precisionStep = precisionStep;
-    this.dataType = Objects.requireNonNull(dataType, "LegacyNumericType must not be null");
-    this.min = min;
-    this.max = max;
-    this.minInclusive = minInclusive;
-    this.maxInclusive = maxInclusive;
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>long</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Long> newLongRange(final String field, final int precisionStep,
-    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>long</code>
-   * range using the default <code>precisionStep</code> {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Long> newLongRange(final String field,
-    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries an <code>int</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Integer> newIntRange(final String field, final int precisionStep,
-    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries an <code>int</code>
-   * range using the default <code>precisionStep</code> {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Integer> newIntRange(final String field,
-    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>double</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
-   * with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field, final int precisionStep,
-    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>double</code>
-   * range using the default <code>precisionStep</code> {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
-   * with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field,
-    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>float</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
-   * with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Float> newFloatRange(final String field, final int precisionStep,
-    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>float</code>
-   * range using the default <code>precisionStep</code> {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
-   * with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Float> newFloatRange(final String field,
-    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
-  }
-
-  @Override @SuppressWarnings("unchecked")
-  protected TermsEnum getTermsEnum(final Terms terms, AttributeSource atts) throws IOException {
-    // very strange: java.lang.Number itself is not Comparable, but all subclasses used here are
-    if (min != null && max != null && ((Comparable<T>) min).compareTo(max) > 0) {
-      return TermsEnum.EMPTY;
-    }
-    return new NumericRangeTermsEnum(terms.iterator());
-  }
-
-  /** Returns <code>true</code> if the lower endpoint is inclusive */
-  public boolean includesMin() { return minInclusive; }
-  
-  /** Returns <code>true</code> if the upper endpoint is inclusive */
-  public boolean includesMax() { return maxInclusive; }
-
-  /** Returns the lower value of this range query */
-  public T getMin() { return min; }
-
-  /** Returns the upper value of this range query */
-  public T getMax() { return max; }
-  
-  /** Returns the precision step. */
-  public int getPrecisionStep() { return precisionStep; }
-  
-  @Override
-  public String toString(final String field) {
-    final StringBuilder sb = new StringBuilder();
-    if (!getField().equals(field)) sb.append(getField()).append(':');
-    return sb.append(minInclusive ? '[' : '{')
-      .append((min == null) ? "*" : min.toString())
-      .append(" TO ")
-      .append((max == null) ? "*" : max.toString())
-      .append(maxInclusive ? ']' : '}')
-      .toString();
-  }
-
-  @Override
-  @SuppressWarnings({"unchecked","rawtypes"})
-  public final boolean equals(final Object o) {
-    if (o==this) return true;
-    if (!super.equals(o))
-      return false;
-    if (o instanceof LegacyNumericRangeQuery) {
-      final LegacyNumericRangeQuery q=(LegacyNumericRangeQuery)o;
-      return (
-        (q.min == null ? min == null : q.min.equals(min)) &&
-        (q.max == null ? max == null : q.max.equals(max)) &&
-        minInclusive == q.minInclusive &&
-        maxInclusive == q.maxInclusive &&
-        precisionStep == q.precisionStep
-      );
-    }
-    return false;
-  }
-
-  @Override
-  public final int hashCode() {
-    int hash = super.hashCode();
-    hash = 31 * hash + precisionStep;
-    hash = 31 * hash + Objects.hashCode(min);
-    hash = 31 * hash + Objects.hashCode(max);
-    hash = 31 * hash + Objects.hashCode(minInclusive);
-    hash = 31 * hash + Objects.hashCode(maxInclusive);
-    return hash;
-  }
-
-  // members (package private, to be also fast accessible by NumericRangeTermEnum)
-  final int precisionStep;
-  final LegacyNumericType dataType;
-  final T min, max;
-  final boolean minInclusive,maxInclusive;
-
-  // used to handle float/double infinity correctly
-  static final long LONG_NEGATIVE_INFINITY =
-    NumericUtils.doubleToSortableLong(Double.NEGATIVE_INFINITY);
-  static final long LONG_POSITIVE_INFINITY =
-    NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
-  static final int INT_NEGATIVE_INFINITY =
-    NumericUtils.floatToSortableInt(Float.NEGATIVE_INFINITY);
-  static final int INT_POSITIVE_INFINITY =
-    NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
-
-  /**
-   * Subclass of FilteredTermsEnum for enumerating all terms that match the
-   * sub-ranges for trie range queries, using flex API.
-   * <p>
-   * WARNING: This term enumeration is not guaranteed to be always ordered by
-   * {@link Term#compareTo}.
-   * The ordering depends on how {@link org.apache.lucene.legacy.LegacyNumericUtils#splitLongRange} and
-   * {@link org.apache.lucene.legacy.LegacyNumericUtils#splitIntRange} generate the sub-ranges. For
-   * {@link MultiTermQuery}, ordering is not relevant.
-   */
-  private final class NumericRangeTermsEnum extends FilteredTermsEnum {
-
-    private BytesRef currentLowerBound, currentUpperBound;
-
-    private final LinkedList<BytesRef> rangeBounds = new LinkedList<>();
-
-    NumericRangeTermsEnum(final TermsEnum tenum) {
-      super(tenum);
-      switch (dataType) {
-        case LONG:
-        case DOUBLE: {
-          // lower
-          long minBound;
-          if (dataType == LegacyNumericType.LONG) {
-            minBound = (min == null) ? Long.MIN_VALUE : min.longValue();
-          } else {
-            assert dataType == LegacyNumericType.DOUBLE;
-            minBound = (min == null) ? LONG_NEGATIVE_INFINITY
-              : NumericUtils.doubleToSortableLong(min.doubleValue());
-          }
-          if (!minInclusive && min != null) {
-            if (minBound == Long.MAX_VALUE) break;
-            minBound++;
-          }
-          
-          // upper
-          long maxBound;
-          if (dataType == LegacyNumericType.LONG) {
-            maxBound = (max == null) ? Long.MAX_VALUE : max.longValue();
-          } else {
-            assert dataType == LegacyNumericType.DOUBLE;
-            maxBound = (max == null) ? LONG_POSITIVE_INFINITY
-              : NumericUtils.doubleToSortableLong(max.doubleValue());
-          }
-          if (!maxInclusive && max != null) {
-            if (maxBound == Long.MIN_VALUE) break;
-            maxBound--;
-          }
-          
-          LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
-            @Override
-            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-              rangeBounds.add(minPrefixCoded);
-              rangeBounds.add(maxPrefixCoded);
-            }
-          }, precisionStep, minBound, maxBound);
-          break;
-        }
-          
-        case INT:
-        case FLOAT: {
-          // lower
-          int minBound;
-          if (dataType == LegacyNumericType.INT) {
-            minBound = (min == null) ? Integer.MIN_VALUE : min.intValue();
-          } else {
-            assert dataType == LegacyNumericType.FLOAT;
-            minBound = (min == null) ? INT_NEGATIVE_INFINITY
-              : NumericUtils.floatToSortableInt(min.floatValue());
-          }
-          if (!minInclusive && min != null) {
-            if (minBound == Integer.MAX_VALUE) break;
-            minBound++;
-          }
-          
-          // upper
-          int maxBound;
-          if (dataType == LegacyNumericType.INT) {
-            maxBound = (max == null) ? Integer.MAX_VALUE : max.intValue();
-          } else {
-            assert dataType == LegacyNumericType.FLOAT;
-            maxBound = (max == null) ? INT_POSITIVE_INFINITY
-              : NumericUtils.floatToSortableInt(max.floatValue());
-          }
-          if (!maxInclusive && max != null) {
-            if (maxBound == Integer.MIN_VALUE) break;
-            maxBound--;
-          }
-          
-          LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
-            @Override
-            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-              rangeBounds.add(minPrefixCoded);
-              rangeBounds.add(maxPrefixCoded);
-            }
-          }, precisionStep, minBound, maxBound);
-          break;
-        }
-          
-        default:
-          // should never happen
-          throw new IllegalArgumentException("Invalid LegacyNumericType");
-      }
-    }
-    
-    private void nextRange() {
-      assert rangeBounds.size() % 2 == 0;
-
-      currentLowerBound = rangeBounds.removeFirst();
-      assert currentUpperBound == null || currentUpperBound.compareTo(currentLowerBound) <= 0 :
-        "The current upper bound must be <= the new lower bound";
-      
-      currentUpperBound = rangeBounds.removeFirst();
-    }
-    
-    @Override
-    protected final BytesRef nextSeekTerm(BytesRef term) {
-      while (rangeBounds.size() >= 2) {
-        nextRange();
-        
-        // if the new upper bound is before the term parameter, the sub-range is never a hit
-        if (term != null && term.compareTo(currentUpperBound) > 0)
-          continue;
-        // never seek backwards, so use current term if lower bound is smaller
-        return (term != null && term.compareTo(currentLowerBound) > 0) ?
-          term : currentLowerBound;
-      }
-      
-      // no more sub-range enums available
-      assert rangeBounds.isEmpty();
-      currentLowerBound = currentUpperBound = null;
-      return null;
-    }
-    
-    @Override
-    protected final AcceptStatus accept(BytesRef term) {
-      while (currentUpperBound == null || term.compareTo(currentUpperBound) > 0) {
-        if (rangeBounds.isEmpty())
-          return AcceptStatus.END;
-        // peek next sub-range, only seek if the current term is smaller than next lower bound
-        if (term.compareTo(rangeBounds.getFirst()) < 0)
-          return AcceptStatus.NO_AND_SEEK;
-        // step forward to the next range without seeking, as the next lower range bound is less than or equal to the current term
-        nextRange();
-      }
-      return AcceptStatus.YES;
-    }
-
-  }
-  
-}
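
For context, a minimal usage sketch of the factory methods deleted above (field name and
bounds are hypothetical); per the javadoc, passing null for a bound makes the range half-open:

    import org.apache.lucene.legacy.LegacyNumericRangeQuery;
    import org.apache.lucene.search.Query;

    // Closed range using the default precisionStep: 10.0 <= price <= 100.0
    Query closed = LegacyNumericRangeQuery.newDoubleRange("price", 10.0, 100.0, true, true);

    // Half-open range: a null upper bound means "unbounded above", i.e. price >= 10.0
    Query atLeast = LegacyNumericRangeQuery.newDoubleRange("price", 10.0, null, true, false);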


[09/25] lucene-solr:jira/solr-8668: SOLR-10233: Correctly set maxShardsPerNode in BackupRestoreTestCase when using createNodeSet and replica types

Posted by cp...@apache.org.
SOLR-10233: Correctly set maxShardsPerNode in BackupRestoreTestCase when using createNodeSet and replica types


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/8f92fb47
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/8f92fb47
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/8f92fb47

Branch: refs/heads/jira/solr-8668
Commit: 8f92fb4722709bec34b4c0330afb7cabba86e350
Parents: 9fbc9db
Author: Tomas Fernandez Lobbe <tf...@apache.org>
Authored: Fri May 26 14:49:18 2017 -0700
Committer: Tomas Fernandez Lobbe <tf...@apache.org>
Committed: Fri May 26 14:49:18 2017 -0700

----------------------------------------------------------------------
 .../apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/8f92fb47/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
index a6d130e..f86322d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AbstractCloudBackupRestoreTestCase.java
@@ -244,6 +244,7 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
         // may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more)
         restore.setMaxShardsPerNode((int)Math.ceil(backupCollection.getReplicas().size()/cluster.getJettySolrRunners().size()));
       }
+      
 
       if (rarely()) { // Try with createNodeSet configuration
         int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
@@ -255,7 +256,11 @@ public abstract class AbstractCloudBackupRestoreTestCase extends SolrCloudTestCa
         restore.setCreateNodeSet(String.join(",", nodeStrs));
         restore.setCreateNodeSetShuffle(usually());
         // we need to double maxShardsPerNode value since we reduced number of available nodes by half.
-        restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
+        if (restore.getMaxShardsPerNode() != null) {
+          restore.setMaxShardsPerNode(restore.getMaxShardsPerNode() * 2);
+        } else {
+          restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
+        }
       }
 
       Properties props = new Properties();
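
A side note on the first hunk above: both operands of the division are ints, so the quotient
is truncated before Math.ceil runs, and the ceil is effectively a no-op. A sketch of the
difference (replica and node counts are hypothetical):

    int replicas = 5;  // e.g. backupCollection.getReplicas().size()
    int nodes    = 2;  // e.g. cluster.getJettySolrRunners().size()
    int truncated = (int) Math.ceil(replicas / nodes);          // int division happens first: 2
    int ceiling   = (int) Math.ceil(replicas / (double) nodes); // true ceiling: 3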


[10/25] lucene-solr:jira/solr-8668: SOLR-10735: Fixing Windows space directory issue

Posted by cp...@apache.org.
SOLR-10735: Fixing Windows space directory issue


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/45b26e32
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/45b26e32
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/45b26e32

Branch: refs/heads/jira/solr-8668
Commit: 45b26e322f1e173c8a19f07700e64daa5475da84
Parents: 8f92fb4
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Sat May 27 03:30:27 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Sat May 27 03:30:27 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                     | 3 +++
 solr/core/src/java/org/apache/solr/util/SolrCLI.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/45b26e32/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index cd383d1..7d15bef 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -477,6 +477,9 @@ Bug Fixes
  "lucene"/standard query parser, should require " TO " in range queries,
   and accept "TO" as endpoints in range queries. (hossman, Steve Rowe)
 
+* SOLR-10735: Windows script (solr.cmd) didn't work properly with a directory containing spaces. Adding quotations
+  to fix (Uwe Schindler, janhoy, Tomas Fernandez-Lobbe, Ishan Chattopadhyaya) 
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/45b26e32/solr/core/src/java/org/apache/solr/util/SolrCLI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/SolrCLI.java b/solr/core/src/java/org/apache/solr/util/SolrCLI.java
index 97fdf1e..51ab5d7 100644
--- a/solr/core/src/java/org/apache/solr/util/SolrCLI.java
+++ b/solr/core/src/java/org/apache/solr/util/SolrCLI.java
@@ -2941,7 +2941,7 @@ public class SolrCLI {
         solrHome = solrHome.substring(cwdPath.length()+1);
 
       String startCmd =
-          String.format(Locale.ROOT, "%s start %s -p %d -s \"%s\" %s %s %s %s %s %s",
+          String.format(Locale.ROOT, "\"%s\" start %s -p %d -s \"%s\" %s %s %s %s %s %s",
               callScript, cloudModeArg, port, solrHome, hostArg, zkHostArg, memArg, forceArg, extraArgs, addlOptsArg);
       startCmd = startCmd.replaceAll("\\s+", " ").trim(); // for pretty printing
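
The fix above wraps the script path in quotes so a path such as C:\Program Files\solr
survives shell tokenization. A small stand-alone illustration (path and port hypothetical):

    import java.util.Locale;

    String callScript = "C:\\Program Files\\solr\\bin\\solr.cmd";
    // Unquoted, the shell splits on the space and tries to run "C:\Program":
    String broken = String.format(Locale.ROOT, "%s start -p %d", callScript, 8983);
    // Quoted, the whole path passes through as a single token:
    String fixed  = String.format(Locale.ROOT, "\"%s\" start -p %d", callScript, 8983);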
 


[15/25] lucene-solr:jira/solr-8668: SOLR-10004: Making the description of the failure more clear

Posted by cp...@apache.org.
SOLR-10004: Making the description of the failure more clear


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/cb97ad78
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/cb97ad78
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/cb97ad78

Branch: refs/heads/jira/solr-8668
Commit: cb97ad787af8ea616ebb024b9afcfcffd3216de3
Parents: 906679a
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Mon May 29 22:52:28 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Mon May 29 22:54:26 2017 +0530

----------------------------------------------------------------------
 dev-tools/scripts/checkJavaDocs.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cb97ad78/dev-tools/scripts/checkJavaDocs.py
----------------------------------------------------------------------
diff --git a/dev-tools/scripts/checkJavaDocs.py b/dev-tools/scripts/checkJavaDocs.py
index ae2b440..355bbdd 100644
--- a/dev-tools/scripts/checkJavaDocs.py
+++ b/dev-tools/scripts/checkJavaDocs.py
@@ -296,7 +296,7 @@ def checkSummary(fullPath):
         print()
         print(fullPath)
         printed = True
-      print('  missing: %s' % unescapeHTML(lastHREF))
+      print('  missing description: %s' % unescapeHTML(lastHREF))
       anyMissing = True
     elif lineLower.find('licensed to the apache software foundation') != -1 or lineLower.find('copyright 2004 the apache software foundation') != -1:
       if not printed:


[12/25] lucene-solr:jira/solr-8668: SOLR-10758: move CHANGES entry under 6.6 section after backport

Posted by cp...@apache.org.
SOLR-10758: move CHANGES entry under 6.6 section after backport


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1d2acdbe
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1d2acdbe
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1d2acdbe

Branch: refs/heads/jira/solr-8668
Commit: 1d2acdbea788bfbc75e4a1a475cf1395c31bd569
Parents: 4944ddc
Author: Steve Rowe <sa...@gmail.com>
Authored: Fri May 26 18:39:41 2017 -0400
Committer: Steve Rowe <sa...@gmail.com>
Committed: Fri May 26 18:39:41 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1d2acdbe/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7d15bef..7ad4122 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -247,11 +247,6 @@ Optimizations
   so that the second phase which would normally involve calculating the domain for the bucket
   can be skipped entirely, leading to large performance improvements. (yonik)
 
-Ref Guide
-----------------------
-
-* SOLR-10758: Modernize the Solr ref guide's Chinese language analysis coverage. (Steve Rowe)
-
 Other Changes
 ----------------------
 
@@ -480,6 +475,11 @@ Bug Fixes
 * SOLR-10735: Windows script (solr.cmd) didn't work properly with a directory containing spaces. Adding quotations
   to fix (Uwe Schindler, janhoy, Tomas Fernandez-Lobbe, Ishan Chattopadhyaya) 
 
+Ref Guide
+----------------------
+
+* SOLR-10758: Modernize the Solr ref guide's Chinese language analysis coverage. (Steve Rowe)
+
 Other Changes
 ----------------------
 


[03/25] lucene-solr:jira/solr-8668: SOLR-10747: Change the tuple field to return-value

Posted by cp...@apache.org.
SOLR-10747: Change the tuple field to return-value


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/3e70745c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/3e70745c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/3e70745c

Branch: refs/heads/jira/solr-8668
Commit: 3e70745c79efeedd03beebb76b8266eb67a784ae
Parents: 81821b2
Author: Joel Bernstein <jb...@apache.org>
Authored: Fri May 26 11:21:51 2017 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Fri May 26 11:21:51 2017 -0400

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/handler/StreamHandler.java      | 2 +-
 .../apache/solr/client/solrj/io/stream/StreamExpressionTest.java   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3e70745c/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index dc097be..cbb9910 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -305,7 +305,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
       StreamExpression streamExpression = StreamExpressionParser.parse(params.get("expr"));
       if(this.streamFactory.isEvaluator(streamExpression)) {
         StreamExpression tupleExpression = new StreamExpression("tuple");
-        tupleExpression.addParameter(new StreamExpressionNamedParameter("out", streamExpression));
+        tupleExpression.addParameter(new StreamExpressionNamedParameter("return-value", streamExpression));
         tupleStream = this.streamFactory.constructStream(tupleExpression);
       } else {
         tupleStream = this.streamFactory.constructStream(streamExpression);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/3e70745c/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index cbd57b8..c570d95 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -5220,7 +5220,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     solrStream.setStreamContext(context);
     List<Tuple> tuples = getTuples(solrStream);
     assertTrue(tuples.size() == 1);
-    List<Number> sequence = (List<Number>)tuples.get(0).get("out");
+    List<Number> sequence = (List<Number>)tuples.get(0).get("return-value");
     assertTrue(sequence.size() == 20);
     for(int i=0; i<sequence.size(); i++) {
       assertTrue(sequence.get(i).intValue() == i);
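
After this rename, a bare evaluator expression sent to /stream gets wrapped in
tuple(return-value=...), so clients read the result under the "return-value" key. A hedged
SolrJ sketch (URL, collection, and expression are made up; signatures per the 6.x streaming
client as far as I recall):

    import java.util.List;
    import org.apache.solr.client.solrj.io.Tuple;
    import org.apache.solr.client.solrj.io.stream.SolrStream;
    import org.apache.solr.common.params.ModifiableSolrParams;

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("qt", "/stream");
    params.set("expr", "sequence(20, 0, 1)"); // a pure evaluator, no stream source
    SolrStream stream = new SolrStream("http://localhost:8983/solr/mycollection", params);
    try {
      stream.open();
      Tuple tuple = stream.read();
      List<?> sequence = (List<?>) tuple.get("return-value");
    } finally {
      stream.close();
    }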


[13/25] lucene-solr:jira/solr-8668: SOLR-10755: delete/refactor many solrj deprecations

Posted by cp...@apache.org.
SOLR-10755: delete/refactor many solrj deprecations


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/bc973ecd
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/bc973ecd
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/bc973ecd

Branch: refs/heads/jira/solr-8668
Commit: bc973ecdcfacf39440da06b86139c77935e1e92e
Parents: 1d2acdb
Author: Chris Hostetter <ho...@apache.org>
Authored: Fri May 26 19:04:31 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Fri May 26 19:04:31 2017 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   2 +
 .../solr/highlight/DefaultSolrHighlighter.java  |   8 +-
 .../org/apache/solr/schema/IndexSchema.java     |   2 +-
 .../apache/solr/cloud/TestConfigSetsAPI.java    |   5 +-
 .../solr/cloud/TestMiniSolrCloudClusterSSL.java |   2 +-
 .../TestSolrCloudWithDelegationTokens.java      |   2 +-
 .../hadoop/TestDelegationWithHadoopAuth.java    |   2 +-
 .../solr/client/solrj/impl/CloudSolrClient.java | 271 +------------------
 .../solrj/impl/ConcurrentUpdateSolrClient.java  |  51 +---
 .../impl/DelegationTokenHttpSolrClient.java     |  24 +-
 .../solr/client/solrj/impl/HttpSolrClient.java  |  43 +--
 .../client/solrj/impl/LBHttpSolrClient.java     |  26 +-
 .../impl/SolrHttpClientContextBuilder.java      |   8 -
 .../solrj/io/graph/ShortestPathStream.java      |  25 --
 .../client/solrj/io/stream/CloudSolrStream.java |  21 --
 .../client/solrj/io/stream/FacetStream.java     |  16 --
 .../client/solrj/io/stream/RandomStream.java    |   4 +-
 .../solr/client/solrj/io/stream/SolrStream.java |  14 -
 .../solr/client/solrj/io/stream/SqlStream.java  |   2 -
 .../client/solrj/io/stream/StatsStream.java     |  10 -
 .../client/solrj/io/stream/TopicStream.java     |  19 --
 .../solr/common/params/HighlightParams.java     |   2 -
 .../apache/solr/common/util/ExecutorUtil.java   |  41 ---
 .../org/apache/solr/common/util/NamedList.java  |  23 +-
 .../solr/common/util/SimpleOrderedMap.java      |   9 +-
 .../embedded/SolrExampleStreamingTest.java      |   2 +-
 .../solrj/impl/BasicHttpSolrClientTest.java     |   2 +-
 .../CloudSolrClientMultiConstructorTest.java    |   4 +-
 .../impl/ConcurrentUpdateSolrClientTest.java    |  23 +-
 .../solrj/io/graph/GraphExpressionTest.java     |   2 +-
 .../java/org/apache/solr/SolrTestCaseJ4.java    | 108 +++++---
 31 files changed, 147 insertions(+), 626 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7ad4122..d4e6eac 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -196,6 +196,8 @@ Other Changes
 
 * SOLR-10710: Fix LTR failing tests. (Diego Ceccarelli via Tomás Fernández Löbbe)
 
+* SOLR-10755: delete/refactor many solrj deprecations (hossman)
+
 ==================  6.7.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
index 7e56ee4..18d9b44 100644
--- a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
@@ -84,6 +84,12 @@ import org.slf4j.LoggerFactory;
 public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInfoInitialized
 {
 
+  /** 
+   * This constant was formerly part of HighlightParams.  After deprecation it was removed so clients 
+   * would no longer use it, but we still support it server side.
+   */
+  private static final String USE_FVH = HighlightParams.HIGHLIGHT + ".useFastVectorHighlighter";
+  
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   protected final SolrCore solrCore;
@@ -492,7 +498,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf
     boolean methodFvh =
         HighlightComponent.HighlightMethod.FAST_VECTOR.getMethodName().equals(
             params.getFieldParam(schemaField.getName(), HighlightParams.METHOD))
-        || params.getFieldBool(schemaField.getName(), HighlightParams.USE_FVH, false);
+        || params.getFieldBool(schemaField.getName(), USE_FVH, false);
     if (!methodFvh) return false;
     boolean termPosOff = schemaField.storeTermPositions() && schemaField.storeTermOffsets();
     if (!termPosOff) {
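
The hunk above keeps the removed hl.useFastVectorHighlighter parameter working on the server
while hl.method remains the supported spelling. A hedged SolrJ sketch of the two spellings
(query and field names hypothetical):

    import org.apache.solr.client.solrj.SolrQuery;

    SolrQuery q = new SolrQuery("features:lucene");
    q.set("hl", true);
    q.set("hl.fl", "features");
    q.set("hl.method", "fastVector");              // supported spelling
    // q.set("hl.useFastVectorHighlighter", true); // legacy spelling, still honored server side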

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
index 13385b7..04f64d5 100644
--- a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
@@ -373,7 +373,7 @@ public class IndexSchema {
   void persist(Writer writer) throws IOException {
     final SolrQueryResponse response = new SolrQueryResponse();
     response.add(IndexSchema.SCHEMA, getNamedPropertyValues());
-    final NamedList args = new NamedList(Arrays.<Object>asList("indent", "on"));
+    final SolrParams args = (new ModifiableSolrParams()).set("indent", "on");
     final LocalSolrQueryRequest req = new LocalSolrQueryRequest(null, args);
     final SchemaXmlWriter schemaXmlWriter = new SchemaXmlWriter(writer, req, response);
     schemaXmlWriter.setEmitManagedSchemaDoNotEditWarning(true);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
index 6c20ccc..875c0ef 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
@@ -279,8 +279,7 @@ public class TestConfigSetsAPI extends SolrTestCaseJ4 {
 
   @Test
   public void testUploadErrors() throws Exception {
-    final SolrClient solrClient = new HttpSolrClient(
-        solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString());
+    final SolrClient solrClient = getHttpSolrClient(solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString());
 
     ByteBuffer emptyData = ByteBuffer.allocate(0);
 
@@ -504,7 +503,7 @@ public class TestConfigSetsAPI extends SolrTestCaseJ4 {
 
   private void xsltRequest(String collection) throws SolrServerException, IOException {
     String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
-    try (HttpSolrClient client = new HttpSolrClient(baseUrl + "/" + collection)) {
+    try (HttpSolrClient client = getHttpSolrClient(baseUrl + "/" + collection)) {
       String xml = 
           "<random>" +
               " <document>" +

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java
index a53b39f..98f952a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestMiniSolrCloudClusterSSL.java
@@ -332,7 +332,7 @@ public class TestMiniSolrCloudClusterSSL extends SolrTestCaseJ4 {
     // that "optimize" the test client construction in a way that would prevent us from finding bugs with
     // regular HttpSolrClient instantiation.
     if (random().nextBoolean()) {
-      return new HttpSolrClient(url);
+      return (new HttpSolrClient.Builder(url)).build();
     } // else...
     return getHttpSolrClient(url);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java
index f8f3f7e..ddbbb64 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSolrCloudWithDelegationTokens.java
@@ -394,7 +394,7 @@ public class TestSolrCloudWithDelegationTokens extends SolrTestCaseJ4 {
     }
 
     ss = new HttpSolrClient.Builder(solrClientPrimary.getBaseURL().toString())
-        .withDelegationToken(token)
+        .withKerberosDelegationToken(token)
         .withResponseParser(solrClientPrimary.getParser())
         .build();
     try {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java b/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
index c799296..37d9cdf 100644
--- a/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
+++ b/solr/core/src/test/org/apache/solr/security/hadoop/TestDelegationWithHadoopAuth.java
@@ -380,7 +380,7 @@ public class TestDelegationWithHadoopAuth extends SolrCloudTestCase {
     }
 
     ss = new HttpSolrClient.Builder(primarySolrClient.getBaseURL().toString())
-        .withDelegationToken(token)
+        .withKerberosDelegationToken(token)
         .withResponseParser(primarySolrClient.getParser())
         .build();
     try {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
index ff7b06a..1271655 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
@@ -20,7 +20,6 @@ import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.net.ConnectException;
 import java.net.SocketException;
-import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -239,168 +238,6 @@ public class CloudSolrClient extends SolrClient {
    * of the SolrCloud state. If there is a fully redundant Zookeeper quorum and
    * SolrCloud has enough replicas for every shard in a collection, there is no
    * single point of failure. Updates will be sent to shard leaders by default.
-   * 
-   * @param zkHost
-   *          The client endpoint of the zookeeper quorum containing the cloud
-   *          state. The full specification for this string is one or more comma
-   *          separated HOST:PORT values, followed by an optional chroot value
-   *          that starts with a forward slash. Using a chroot allows multiple
-   *          applications to coexist in one ensemble. For full details, see the
-   *          Zookeeper documentation. Some examples:
-   *          <p>
-   *          "host1:2181"
-   *          <p>
-   *          "host1:2181,host2:2181,host3:2181/mysolrchroot"
-   *          <p>
-   *          "zoo1.example.com:2181,zoo2.example.com:2181,zoo3.example.com:2181"
-   *          
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public CloudSolrClient(String zkHost) {
-    this.stateProvider = new ZkClientClusterStateProvider(zkHost);
-      this.clientIsInternal = true;
-      this.myClient = HttpClientUtil.createClient(null);
-      this.lbClient = new LBHttpSolrClient.Builder()
-          .withHttpClient(myClient)
-          .build();
-      this.lbClient.setRequestWriter(new BinaryRequestWriter());
-      this.lbClient.setParser(new BinaryResponseParser());
-      this.updatesToLeaders = true;
-      this.directUpdatesToLeadersOnly = false;
-      shutdownLBHttpSolrServer = true;
-      lbClient.addQueryParams(STATE_VERSION);
-  }
-
-  /**
-   * Create a new client object that connects to Zookeeper and is always aware
-   * of the SolrCloud state. If there is a fully redundant Zookeeper quorum and
-   * SolrCloud has enough replicas for every shard in a collection, there is no
-   * single point of failure. Updates will be sent to shard leaders by default.
-   *
-   * @param zkHost
-   *          The client endpoint of the zookeeper quorum containing the cloud
-   *          state. The full specification for this string is one or more comma
-   *          separated HOST:PORT values, followed by an optional chroot value
-   *          that starts with a forward slash. Using a chroot allows multiple
-   *          applications to coexist in one ensemble. For full details, see the
-   *          Zookeeper documentation. Some examples:
-   *          <p>
-   *          "host1:2181"
-   *          <p>
-   *          "host1:2181,host2:2181,host3:2181/mysolrchroot"
-   *          <p>
-   *          "zoo1.example.com:2181,zoo2.example.com:2181,zoo3.example.com:2181"
-   * @param httpClient
-   *          the {@link HttpClient} instance to be used for all requests. The
-   *          provided httpClient should use a multi-threaded connection manager.
-   *          
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public CloudSolrClient(String zkHost, HttpClient httpClient) {
-    this.stateProvider = new ZkClientClusterStateProvider(zkHost);
-    this.clientIsInternal = httpClient == null;
-    this.myClient = httpClient == null ? HttpClientUtil.createClient(null) : httpClient;
-    this.lbClient = createLBHttpSolrClient(myClient);
-    this.updatesToLeaders = true;
-    this.directUpdatesToLeadersOnly = false;
-    shutdownLBHttpSolrServer = true;
-    lbClient.addQueryParams(STATE_VERSION);
-  }
-  
-  /**
-   * Create a new client object using multiple string values in a Collection
-   * instead of a standard zkHost connection string. Note that this method will
-   * not be used if there is only one String argument - that will use
-   * {@link #CloudSolrClient(String)} instead.
-   * 
-   * @param zkHosts
-   *          A Java Collection (List, Set, etc) of HOST:PORT strings, one for
-   *          each host in the zookeeper ensemble. Note that with certain
-   *          Collection types like HashSet, the order of hosts in the final
-   *          connect string may not be in the same order you added them.
-   * @param chroot
-   *          A chroot value for zookeeper, starting with a forward slash. If no
-   *          chroot is required, use null.
-   * @throws IllegalArgumentException
-   *           if the chroot value does not start with a forward slash.
-   * @see #CloudSolrClient(String)
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public CloudSolrClient(Collection<String> zkHosts, String chroot) {
-    this(zkHosts, chroot, null);
-  }
-
-  /**
-   * Create a new client object using multiple string values in a Collection
-   * instead of a standard zkHost connection string. Note that this method will
-   * not be used if there is only one String argument - that will use
-   * {@link #CloudSolrClient(String)} instead.
-   *
-   * @param zkHosts
-   *          A Java Collection (List, Set, etc) of HOST:PORT strings, one for
-   *          each host in the zookeeper ensemble. Note that with certain
-   *          Collection types like HashSet, the order of hosts in the final
-   *          connect string may not be in the same order you added them.
-   * @param chroot
-   *          A chroot value for zookeeper, starting with a forward slash. If no
-   *          chroot is required, use null.
-   * @param httpClient
-   *          the {@link HttpClient} instance to be used for all requests. The provided httpClient should use a
-   *          multi-threaded connection manager.
-   * @throws IllegalArgumentException
-   *           if the chroot value does not start with a forward slash.
-   * @see #CloudSolrClient(String)
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public CloudSolrClient(Collection<String> zkHosts, String chroot, HttpClient httpClient) {
-    this.stateProvider = new ZkClientClusterStateProvider(zkHosts, chroot);
-    this.clientIsInternal = httpClient == null;
-    this.myClient = httpClient == null ? HttpClientUtil.createClient(null) : httpClient;
-    this.lbClient = createLBHttpSolrClient(myClient);
-    this.updatesToLeaders = true;
-    this.directUpdatesToLeadersOnly = false;
-    shutdownLBHttpSolrServer = true;
-  }
-  
-  /**
-   * Create a new client object that connects to Zookeeper and is always aware
-   * of the SolrCloud state. If there is a fully redundant Zookeeper quorum and
-   * SolrCloud has enough replicas for every shard in a collection, there is no
-   * single point of failure. Updates will be sent to shard leaders by default.
-   * 
-   * @param zkHosts
-   *          A Java Collection (List, Set, etc) of HOST:PORT strings, one for
-   *          each host in the zookeeper ensemble. Note that with certain
-   *          Collection types like HashSet, the order of hosts in the final
-   *          connect string may not be in the same order you added them.
-   * @param chroot
-   *          A chroot value for zookeeper, starting with a forward slash. If no
-   *          chroot is required, use null.
-   * @param httpClient
-   *          the {@link HttpClient} instance to be used for all requests. The provided httpClient should use a
-   *          multi-threaded connection manager.  If null, a default HttpClient will be used.
-   * @param lbSolrClient
-   *          LBHttpSolrClient instance for requests.  If null, a default LBHttpSolrClient will be used.
-   * @param updatesToLeaders
-   *          If true, sends updates to shard leaders.
-   *
-   * @deprecated use {@link Builder} instead.  This will soon be a protected method, and will only
-   * be available for use in implementing subclasses.
-   */
-  @Deprecated
-  public CloudSolrClient(Collection<String> zkHosts, String chroot, HttpClient httpClient, LBHttpSolrClient lbSolrClient, boolean updatesToLeaders) {
-    this(zkHosts, chroot, null, httpClient, lbSolrClient, null, updatesToLeaders, false, null);
-  }
-
-  /**
-   * Create a new client object that connects to Zookeeper and is always aware
-   * of the SolrCloud state. If there is a fully redundant Zookeeper quorum and
-   * SolrCloud has enough replicas for every shard in a collection, there is no
-   * single point of failure. Updates will be sent to shard leaders by default.
    *
    * @param zkHosts
    *          A Java Collection (List, Set, etc) of HOST:PORT strings, one for
@@ -467,46 +304,6 @@ public class CloudSolrClient extends SolrClient {
     this.updatesToLeaders = updatesToLeaders;
     this.directUpdatesToLeadersOnly = directUpdatesToLeadersOnly;
   }
-  
-  /**
-   * @param zkHost
-   *          A zookeeper client endpoint.
-   * @param updatesToLeaders
-   *          If true, sends updates only to shard leaders.
-   * @see #CloudSolrClient(String) for full description and details on zkHost
-   * @deprecated use {@link CloudSolrClient.Builder} instead.
-   */
-  @Deprecated
-  public CloudSolrClient(String zkHost, boolean updatesToLeaders) {
-    this(zkHost, updatesToLeaders, null);
-  }
-
-  /**
-   * @param zkHost
-   *          A zookeeper client endpoint.
-   * @param updatesToLeaders
-   *          If true, sends updates only to shard leaders.
-   * @param httpClient
-   *          the {@link HttpClient} instance to be used for all requests. The provided httpClient should use a
-   *          multi-threaded connection manager.
-   * @see #CloudSolrClient(String) for full description and details on zkHost
-   * @deprecated use {@link CloudSolrClient.Builder} instead.
-   */
-  @Deprecated
-  public CloudSolrClient(String zkHost, boolean updatesToLeaders, HttpClient httpClient) {
-    this.stateProvider = new ZkClientClusterStateProvider(zkHost);
-    this.clientIsInternal = httpClient == null;
-    this.myClient = httpClient == null ? HttpClientUtil.createClient(null) : httpClient;
-    this.lbClient = new LBHttpSolrClient.Builder()
-        .withHttpClient(myClient)
-        .build();
-    this.lbClient.setRequestWriter(new BinaryRequestWriter());
-    this.lbClient.setParser(new BinaryResponseParser());
-    this.updatesToLeaders = updatesToLeaders;
-    this.directUpdatesToLeadersOnly = false;
-    shutdownLBHttpSolrServer = true;
-    lbClient.addQueryParams(STATE_VERSION);
-  }
 
   /** Sets the cache TTL for cached DocCollection objects. This is only applicable for collections which are persisted outside of clusterstate.json
    * @param seconds ttl value in seconds
@@ -515,40 +312,6 @@ public class CloudSolrClient extends SolrClient {
     assert seconds > 0;
     this.collectionStateCache.timeToLive = seconds * 1000L;
   }
-
-  /**
-   * @param zkHost
-   *          A zookeeper client endpoint.
-   * @param lbClient
-   *          LBHttpSolrServer instance for requests.
-   * @see #CloudSolrClient(String) for full description and details on zkHost
-   * @deprecated use {@link CloudSolrClient.Builder} instead.
-   */
-  @Deprecated
-  public CloudSolrClient(String zkHost, LBHttpSolrClient lbClient) {
-    this(zkHost, lbClient, true);
-  }
-  
-  /**
-   * @param zkHost
-   *          A zookeeper client endpoint.
-   * @param lbClient
-   *          LBHttpSolrServer instance for requests.
-   * @param updatesToLeaders
-   *          If true, sends updates only to shard leaders.
-   * @see #CloudSolrClient(String) for full description and details on zkHost
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public CloudSolrClient(String zkHost, LBHttpSolrClient lbClient, boolean updatesToLeaders) {
-    this.lbClient = lbClient;
-    this.stateProvider = new ZkClientClusterStateProvider(zkHost);
-    this.updatesToLeaders = updatesToLeaders;
-    this.directUpdatesToLeadersOnly = false;
-    shutdownLBHttpSolrServer = false;
-    this.clientIsInternal = false;
-    lbClient.addQueryParams(STATE_VERSION);
-  }
   
   public ResponseParser getParser() {
     return lbClient.getParser();
@@ -660,25 +423,6 @@ public class CloudSolrClient extends SolrClient {
     this.parallelUpdates = parallelUpdates;
   }
 
-  /**
-   * Upload a set of config files to Zookeeper and give it a name
-   *
-   * NOTE: You should only allow trusted users to upload configs.  If you
-   * are allowing client access to zookeeper, you should protect the
-   * /configs node against unauthorised write access.
-   *
-   *  @deprecated Please use {@link ZkClientClusterStateProvider#uploadConfig(Path, String)} instead
-   *
-   * @param configPath {@link java.nio.file.Path} to the config files
-   * @param configName the name of the config
-   * @throws IOException if an IO error occurs
-   */
-  @Deprecated
-  public void uploadConfig(Path configPath, String configName) throws IOException {
-    stateProvider.connect();
-    assertZKStateProvider().uploadConfig(configPath, configName);
-  }
-
   private ZkClientClusterStateProvider assertZKStateProvider() {
     if (stateProvider instanceof ZkClientClusterStateProvider) {
       return (ZkClientClusterStateProvider) stateProvider;
@@ -686,20 +430,7 @@ public class CloudSolrClient extends SolrClient {
     throw new IllegalArgumentException("This client does not use ZK");
 
   }
-
-  /**
-   * Download a named config from Zookeeper to a location on the filesystem
-   *
-   * @deprecated Please use {@link ZkClientClusterStateProvider#downloadConfig(String, Path)} instead
-   * @param configName    the name of the config
-   * @param downloadPath  the path to write config files to
-   * @throws IOException  if an I/O exception occurs
-   */
-  @Deprecated
-  public void downloadConfig(String configName, Path downloadPath) throws IOException {
-    assertZKStateProvider().downloadConfig(configName, downloadPath);
-  }
-
+  
   /**
    * Block until a collection state matches a predicate, or a timeout
    *

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
index bc37c13..d6675f2 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
@@ -96,56 +96,11 @@ public class ConcurrentUpdateSolrClient extends SolrClient {
   AtomicInteger emptyQueueLoops;
   
   /**
-   * Uses an internally managed HttpClient instance.
-   * 
-   * @param solrServerUrl
-   *          The Solr server URL
-   * @param queueSize
-   *          The buffer size before the documents are sent to the server
-   * @param threadCount
-   *          The number of background threads used to empty the queue
-   *          
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public ConcurrentUpdateSolrClient(String solrServerUrl, int queueSize,
-                                    int threadCount) {
-    this(solrServerUrl, null, queueSize, threadCount);
-    shutdownExecutor = true;
-    internalHttpClient = true;
-  }
-  
-  /**
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public ConcurrentUpdateSolrClient(String solrServerUrl,
-                                    HttpClient client, int queueSize, int threadCount) {
-    this(solrServerUrl, client, queueSize, threadCount, ExecutorUtil.newMDCAwareCachedThreadPool(
-        new SolrjNamedThreadFactory("concurrentUpdateScheduler")));
-    shutdownExecutor = true;
-  }
-
-  /**
-   * Uses the supplied HttpClient to send documents to the Solr server.
-   * 
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public ConcurrentUpdateSolrClient(String solrServerUrl,
-                                    HttpClient client, int queueSize, int threadCount, ExecutorService es) {
-    this(solrServerUrl, client, queueSize, threadCount, es, false);
-  }
-  
-  /**
    * Uses the supplied HttpClient to send documents to the Solr server.
-   * 
-   * @deprecated use {@link Builder} instead.  This will soon be a
-   * protected method, and will only be available for use in implementing subclasses.
    */
-  @Deprecated
-  public ConcurrentUpdateSolrClient(String solrServerUrl,
-                                    HttpClient client, int queueSize, int threadCount, ExecutorService es, boolean streamDeletes) {
+  protected ConcurrentUpdateSolrClient(String solrServerUrl,
+                                       HttpClient client, int queueSize, int threadCount,
+                                       ExecutorService es, boolean streamDeletes) {
     this.internalHttpClient = (client == null);
     this.client = new HttpSolrClient.Builder(solrServerUrl)
         .withHttpClient(client)
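
With the public constructors gone, the Builder is the one way in. A hedged sketch (URL and
tuning values hypothetical; withQueueSize/withThreadCount per the 6.x Builder API):

    import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;

    ConcurrentUpdateSolrClient client =
        new ConcurrentUpdateSolrClient.Builder("http://localhost:8983/solr/mycollection")
            .withQueueSize(100)   // docs buffered before a flush to the server
            .withThreadCount(4)   // background threads draining the queue
            .build();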

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/impl/DelegationTokenHttpSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/DelegationTokenHttpSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/DelegationTokenHttpSolrClient.java
index ab8175d..fc83391 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/DelegationTokenHttpSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/DelegationTokenHttpSolrClient.java
@@ -35,21 +35,15 @@ public class DelegationTokenHttpSolrClient extends HttpSolrClient {
   public final static String DELEGATION_TOKEN_PARAM = "delegation";
 
   /**
-   * This constructor is deprecated in favor of passing delegation token via
-   * {@linkplain org.apache.solr.client.solrj.impl.HttpSolrClient.Builder#withInvariantParams(ModifiableSolrParams)}.
-   *
-   * @param baseURL The base url to communicate with the Solr server
-   * @param client Http client instance to use for communication
-   * @param parser Response parser instance to use to decode response from Solr server
-   * @param allowCompression Should compression be allowed ?
-   * @param delegationToken The delegation token string.
+   * Package protected constructor for use by 
+   * {@linkplain org.apache.solr.client.solrj.impl.HttpSolrClient.Builder}.
+   * @lucene.internal
    */
-  @Deprecated
-  public DelegationTokenHttpSolrClient(String baseURL,
-                                       HttpClient client,
-                                       ResponseParser parser,
-                                       boolean allowCompression,
-                                       String delegationToken) {
+  DelegationTokenHttpSolrClient(String baseURL,
+                                HttpClient client,
+                                ResponseParser parser,
+                                boolean allowCompression,
+                                String delegationToken) {
     super(baseURL, client, parser, allowCompression);
     if (delegationToken == null) {
       throw new IllegalArgumentException("Delegation token cannot be null");
@@ -100,4 +94,4 @@ public class DelegationTokenHttpSolrClient extends HttpSolrClient {
     }
     super.setQueryParams(queryParams);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrClient.java
index dea1711..c1e9576 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrClient.java
@@ -158,40 +158,7 @@ public class HttpSolrClient extends SolrClient {
   private volatile Integer connectionTimeout;
   private volatile Integer soTimeout;
   
-  /**
-   * @param baseURL
-   *          The URL of the Solr server. For example, "
-   *          <code>http://localhost:8983/solr/</code>" if you are using the
-   *          standard distribution Solr webapp on your local machine.
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public HttpSolrClient(String baseURL) {
-    this(baseURL, null, new BinaryResponseParser());
-  }
-  
-  /**
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public HttpSolrClient(String baseURL, HttpClient client) {
-    this(baseURL, client, new BinaryResponseParser());
-  }
-  
-  /**
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public HttpSolrClient(String baseURL, HttpClient client, ResponseParser parser) {
-    this(baseURL, client, parser, false);
-  }
-  
-  /**
-   * @deprecated use {@link Builder} instead.  This will soon be a 'protected'
-   * method, and will only be available for use in implementing subclasses.
-   */
-  @Deprecated
-  public HttpSolrClient(String baseURL, HttpClient client, ResponseParser parser, boolean allowCompression) {
+  protected HttpSolrClient(String baseURL, HttpClient client, ResponseParser parser, boolean allowCompression) {
     this.baseUrl = baseURL;
     if (baseUrl.endsWith("/")) {
       baseUrl = baseUrl.substring(0, baseUrl.length() - 1);
@@ -840,14 +807,6 @@ public class HttpSolrClient extends SolrClient {
      * Use a delegation token for authenticating via the KerberosPlugin
      */
     public Builder withKerberosDelegationToken(String delegationToken) {
-      return withDelegationToken(delegationToken);
-    }
-
-    @Deprecated
-    /**
-     * @deprecated use {@link withKerberosDelegationToken(String)} instead
-     */
-    public Builder withDelegationToken(String delegationToken) {
       if (this.invariantParams.get(DelegationTokenHttpSolrClient.DELEGATION_TOKEN_PARAM) != null) {
         throw new IllegalStateException(DelegationTokenHttpSolrClient.DELEGATION_TOKEN_PARAM + " is already defined!");
       }
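
Matching the test updates earlier in this commit, the surviving way to attach a delegation
token (base URL and token value hypothetical):

    import org.apache.solr.client.solrj.impl.HttpSolrClient;

    String token = "...";  // obtained from the KerberosPlugin
    HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr")
        .withKerberosDelegationToken(token)
        .build();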

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
index 8dc2fd9..7706bf6 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
@@ -237,28 +237,9 @@ public class LBHttpSolrClient extends SolrClient {
   }
 
   /**
-   * @deprecated use {@link Builder} instead.
-   */
-  @Deprecated
-  public LBHttpSolrClient(String... solrServerUrls) throws MalformedURLException {
-    this(null, solrServerUrls);
-  }
-  
-  /**
-   * The provided httpClient should use a multi-threaded connection manager
-   * @deprecated use {@link Builder} instead.
-   */ 
-  @Deprecated
-  public LBHttpSolrClient(HttpClient httpClient, String... solrServerUrl) {
-    this(httpClient, new BinaryResponseParser(), solrServerUrl);
-  }
-
-  /**
    * The provided httpClient should use a multi-threaded connection manager
-   * @deprecated use {@link Builder} instead.  This will soon be a protected
-   * method and will only be available for use in implementing subclasses.
    */
-  public LBHttpSolrClient(HttpSolrClient.Builder httpSolrClientBuilder,
+  protected LBHttpSolrClient(HttpSolrClient.Builder httpSolrClientBuilder,
                           HttpClient httpClient, String... solrServerUrl) {
     clientIsInternal = httpClient == null;
     this.httpSolrClientBuilder = httpSolrClientBuilder;
@@ -275,11 +256,8 @@ public class LBHttpSolrClient extends SolrClient {
 
   /**
    * The provided httpClient should use a multi-threaded connection manager
-   * @deprecated use {@link Builder} instead.  This will soon be a protected
-   * method and will only be available for use in implementing subclasses.
    */
-  @Deprecated
-  public LBHttpSolrClient(HttpClient httpClient, ResponseParser parser, String... solrServerUrl) {
+  protected LBHttpSolrClient(HttpClient httpClient, ResponseParser parser, String... solrServerUrl) {
     clientIsInternal = (httpClient == null);
     this.httpClient = httpClient == null ? constructClient(solrServerUrl) : httpClient;
     this.parser = parser;

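The removed LBHttpSolrClient constructors likewise map onto the equivalent Builder calls; a minimal sketch with placeholder URLs:

    import org.apache.solr.client.solrj.impl.LBHttpSolrClient;

    static LBHttpSolrClient buildLbClient() {
      return new LBHttpSolrClient.Builder()
          .withBaseSolrUrls("http://host1:8983/solr", "http://host2:8983/solr")  // placeholders
          .build();
    }
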
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrHttpClientContextBuilder.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrHttpClientContextBuilder.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrHttpClientContextBuilder.java
index f57848d..7ae98e5 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrHttpClientContextBuilder.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrHttpClientContextBuilder.java
@@ -77,14 +77,6 @@ public class SolrHttpClientContextBuilder {
     return credentialsProviderProvider;
   }
 
-  /**
-   * @deprecated use {@link #createContext(Object)}
-   */
-  @Deprecated
-  public HttpClientContext createContext() {
-    return createContext(null);
-  }
-  
   public HttpClientContext createContext(Object userToken) {
     HttpClientContext context = new HttpClientContext();
     if (getCredentialsProviderProvider() != null) {

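The removed no-arg createContext() delegated directly to createContext(null), so existing callers need only pass an explicit null user token; a sketch, where 'builder' stands in for however the SolrHttpClientContextBuilder instance is already obtained:

    import org.apache.http.client.protocol.HttpClientContext;

    // null user token: exactly what the removed no-arg overload passed through
    HttpClientContext context = builder.createContext(null);
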
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java
index 5075330..03595c2 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java
@@ -46,7 +46,6 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionValue;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
-import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ExecutorUtil;
@@ -72,30 +71,6 @@ public class ShortestPathStream extends TupleStream implements Expressible {
   private int threads;
   private SolrParams queryParams;
 
-  @Deprecated
-  public ShortestPathStream(String zkHost,
-                            String collection,
-                            String fromNode,
-                            String toNode,
-                            String fromField,
-                            String toField,
-                            Map queryParams,
-                            int joinBatchSize,
-                            int threads,
-                            int maxDepth) {
-
-    init(zkHost,
-        collection,
-        fromNode,
-        toNode,
-        fromField,
-        toField,
-        new MapSolrParams(queryParams),
-        joinBatchSize,
-        threads,
-        maxDepth);
-  }
-
   public ShortestPathStream(String zkHost,
                             String collection,
                             String fromNode,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java
index 6d1764a..62e4079 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java
@@ -51,7 +51,6 @@ import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ExecutorUtil;
@@ -89,32 +88,12 @@ public class CloudSolrStream extends TupleStream implements Expressible {
     
   }
 
-
-  /**
-   * @param zkHost         Zookeeper ensemble connection string
-   * @param collectionName Name of the collection to operate on
-   * @param params         Map&lt;String, String&gt; of parameter/value pairs
-   * @throws IOException Something went wrong
-   *                     <p>
-   *                     This form does not allow specifying multiple clauses, say "fq" clauses, use the form that
-   *                     takes a SolrParams. Transition code can call the preferred method that takes SolrParams
-   *                     by calling CloudSolrStream(zkHost, collectionName,
-   *                     new ModifiableSolrParams(SolrParams.toMultiMap(new NamedList(Map&lt;String, String&gt;)));
-   * @deprecated         Use the constructor that has a SolrParams obj rather than a Map
-   */
-
-  @Deprecated
-  public CloudSolrStream(String zkHost, String collectionName, Map params) throws IOException {
-    init(collectionName, zkHost, new MapSolrParams(params));
-  }
-
   /**
    * @param zkHost         Zookeeper ensemble connection string
    * @param collectionName Name of the collection to operate on
    * @param params         Map&lt;String, String[]&gt; of parameter/value pairs
    * @throws IOException Something went wrong
    */
-
   public CloudSolrStream(String zkHost, String collectionName, SolrParams params) throws IOException {
     init(collectionName, zkHost, params);
   }

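The same Map-to-SolrParams constructor removal repeats below for ShortestPathStream, FacetStream, SolrStream, StatsStream and TopicStream; in each case the deleted body shows the migration, wrapping the old Map in a MapSolrParams (or the ModifiableSolrParams form spelled out in the deleted Javadoc). A minimal sketch with placeholder zkHost, collection and query parameters:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
    import org.apache.solr.common.params.MapSolrParams;
    import org.apache.solr.common.params.SolrParams;

    static CloudSolrStream openStream() throws IOException {
      Map<String, String> props = new HashMap<>();
      props.put("q", "*:*");      // illustrative parameters only
      props.put("fl", "id");
      props.put("sort", "id asc");
      SolrParams params = new MapSolrParams(props);  // wraps the Map the old constructor took
      return new CloudSolrStream("zkhost:2181", "collection1", params);  // placeholder zkHost/collection
    }
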
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
index 0180764..c5bd56b 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
@@ -46,7 +46,6 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 import org.apache.solr.client.solrj.io.stream.metrics.Bucket;
 import org.apache.solr.client.solrj.io.stream.metrics.Metric;
 import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -72,21 +71,6 @@ public class FacetStream extends TupleStream implements Expressible  {
   protected transient SolrClientCache cache;
   protected transient CloudSolrClient cloudSolrClient;
 
-  /*
-   *
-   * @deprecated. Use the form that takes a SolrParams rather than Map&ltString, String&gt;
-   */
-  @Deprecated
-  public FacetStream(String zkHost,
-                     String collection,
-                     Map<String, String> props,
-                     Bucket[] buckets,
-                     Metric[] metrics,
-                     FieldComparator[] bucketSorts,
-                     int bucketSizeLimit) throws IOException {
-    init(collection, new MapSolrParams(props), buckets, bucketSorts, metrics, bucketSizeLimit, zkHost);
-  }
-
   public FacetStream(String zkHost,
                      String collection,
                      SolrParams params,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java
index 5ba485d..269a800 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RandomStream.java
@@ -174,7 +174,7 @@ public class RandomStream extends TupleStream implements Expressible  {
     if(cache != null) {
       cloudSolrClient = cache.getCloudSolrClient(zkHost);
     } else {
-      cloudSolrClient = new CloudSolrClient(zkHost);
+      cloudSolrClient = (new CloudSolrClient.Builder()).withZkHost(zkHost).build();
     }
 
     ModifiableSolrParams params = getParams(this.props);
@@ -236,4 +236,4 @@ public class RandomStream extends TupleStream implements Expressible  {
   public StreamComparator getStreamSort() {
     return null;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SolrStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SolrStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SolrStream.java
index 31d1913..ab029af 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SolrStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SolrStream.java
@@ -41,7 +41,6 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamExplanation;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -74,19 +73,6 @@ public class SolrStream extends TupleStream {
   /**
    * @param baseUrl Base URL of the stream.
    * @param params  Map&lt;String, String&gt; of parameters
-   * @deprecated, use the form that thakes SolrParams. Existing code can use
-   * new ModifiableSolrParams(SolrParams.toMultiMap(new NamedList(params)))
-   * for existing calls that use Map&lt;String, String&gt;
-   */
-  @Deprecated
-  public SolrStream(String baseUrl, Map params) {
-    this.baseUrl = baseUrl;
-    this.params = new ModifiableSolrParams(new MapSolrParams(params));
-  }
-
-  /**
-   * @param baseUrl Base URL of the stream.
-   * @param params  Map&lt;String, String&gt; of parameters
    */
 
   public SolrStream(String baseUrl, SolrParams params) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SqlStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SqlStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SqlStream.java
index d7c10e4..d2296b7 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SqlStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SqlStream.java
@@ -60,9 +60,7 @@ public class SqlStream extends TupleStream implements Expressible {
    *                     takes a SolrParams. Transition code can call the preferred method that takes SolrParams
    *                     by calling CloudSolrStream(zkHost, collectionName,
    *                     new ModifiableSolrParams(SolrParams.toMultiMap(new NamedList(Map&lt;String, String&gt;)));
-   * @deprecated         Use the constructor that has a SolrParams obj rather than a Map
    */
-  
   public SqlStream(String zkHost, String collectionName, SolrParams params) throws IOException {
     init(collectionName, zkHost, params);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java
index cb46db4..c20429d 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java
@@ -42,7 +42,6 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 import org.apache.solr.client.solrj.io.stream.metrics.Metric;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -63,15 +62,6 @@ public class StatsStream extends TupleStream implements Expressible  {
   protected transient CloudSolrClient cloudSolrClient;
   protected StreamContext streamContext;
 
-  // Use StatsStream(String, String, SolrParams, Metric[]
-  @Deprecated
-  public StatsStream(String zkHost,
-                     String collection,
-                     Map<String, String> props,
-                     Metric[] metrics) {
-    init(zkHost, collection, new MapSolrParams(props), metrics);
-  }
-
   public StatsStream(String zkHost,
                      String collection,
                      SolrParams params,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
index 58063d0..7fff901 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TopicStream.java
@@ -55,7 +55,6 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ExecutorUtil;
@@ -83,24 +82,6 @@ public class TopicStream extends CloudSolrStream implements Expressible  {
   private String checkpointCollection;
   private long initialCheckpoint = -1;
 
-  // Use TopicStream that takes a SolrParams
-  @Deprecated
-  public TopicStream(String zkHost,
-                     String checkpointCollection,
-                     String collection,
-                     String id,
-                     long initialCheckpoint,
-                     long checkpointEvery,
-                     Map<String, String> params) {
-    init(zkHost,
-         checkpointCollection,
-         collection,
-         id,
-         initialCheckpoint,
-         checkpointEvery,
-         new MapSolrParams(params));
-  }
-
   public TopicStream(String zkHost,
                      String checkpointCollection,
                      String collection,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java b/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java
index ef254cc..e09a2dc 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/HighlightParams.java
@@ -24,8 +24,6 @@ public interface HighlightParams {
   // primary
   public static final String HIGHLIGHT   = "hl";
   public static final String METHOD      = HIGHLIGHT+".method"; // original|fastVector|postings|unified
-  @Deprecated // see hl.method
-  public static final String USE_FVH     = HIGHLIGHT + ".useFastVectorHighlighter";
   public static final String FIELDS      = HIGHLIGHT+".fl";
   public static final String SNIPPETS    = HIGHLIGHT+".snippets";
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java b/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java
index 5f307a8..a045726 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/ExecutorUtil.java
@@ -71,47 +71,6 @@ public class ExecutorUtil {
     public void clean(AtomicReference<?> ctx);
   }
 
-  // ** This will interrupt the threads! ** Lucene and Solr do not like this because it can close channels, so only use
-  // this if you know what you are doing - you probably want shutdownAndAwaitTermination.
-  // Marked as Deprecated to discourage use.
-  @Deprecated
-  public static void shutdownWithInterruptAndAwaitTermination(ExecutorService pool) {
-    pool.shutdownNow(); // Cancel currently executing tasks - NOTE: this interrupts!
-    boolean shutdown = false;
-    while (!shutdown) {
-      try {
-        // Wait a while for existing tasks to terminate
-        shutdown = pool.awaitTermination(60, TimeUnit.SECONDS);
-      } catch (InterruptedException ie) {
-        // Preserve interrupt status
-        Thread.currentThread().interrupt();
-      }
-    }
-  }
-  
-  // ** This will interrupt the threads! ** Lucene and Solr do not like this because it can close channels, so only use
-  // this if you know what you are doing - you probably want shutdownAndAwaitTermination.
-  // Marked as Deprecated to discourage use.
-  @Deprecated
-  public static void shutdownAndAwaitTerminationWithInterrupt(ExecutorService pool) {
-    pool.shutdown(); // Disable new tasks from being submitted
-    boolean shutdown = false;
-    boolean interrupted = false;
-    while (!shutdown) {
-      try {
-        // Wait a while for existing tasks to terminate
-        shutdown = pool.awaitTermination(60, TimeUnit.SECONDS);
-      } catch (InterruptedException ie) {
-        // Preserve interrupt status
-        Thread.currentThread().interrupt();
-      }
-      if (!shutdown && !interrupted) {
-        pool.shutdownNow(); // Cancel currently executing tasks - NOTE: this interrupts!
-        interrupted = true;
-      }
-    }
-  }
-
   public static void shutdownAndAwaitTermination(ExecutorService pool) {
     pool.shutdown(); // Disable new tasks from being submitted
     boolean shutdown = false;

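With the interrupting variants removed, shutdownAndAwaitTermination is the remaining supported shutdown path; a usage sketch against a plain JDK pool:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.solr.common.util.ExecutorUtil;

    ExecutorService pool = Executors.newFixedThreadPool(2);
    try {
      pool.submit(() -> { /* work */ });
    } finally {
      ExecutorUtil.shutdownAndAwaitTermination(pool);  // waits for tasks; does not interrupt them
    }
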
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java b/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java
index 0313d3a..d34d8e7 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java
@@ -122,12 +122,17 @@ public class NamedList<T> implements Cloneable, Serializable, Iterable<Map.Entry
    * When using this constructor, runtime type safety is only guaranteed if
    * all even numbered elements of the input list are of type "T".
    * </p>
-   *
-   * @param nameValuePairs underlying List which should be used to implement a NamedList
-   * @deprecated Use {@link #NamedList(java.util.Map.Entry[])} for the NamedList instantiation
+   * <p>
+   * This method is package protected and exists solely so SimpleOrderedMap and clone() can utilize it
+   * </p>
+   * <p>
+   * TODO: this method was formerly public, now that it's not we can change the impl details of 
+   * this class to be based on a Map.Entry[] 
+   * </p>
+   * @lucene.internal
+   * @see #nameValueMapToList
    */
-  @Deprecated
-  public NamedList(List<Object> nameValuePairs) {
+  NamedList(List<Object> nameValuePairs) {
     nvPairs=nameValuePairs;
   }
 
@@ -136,12 +141,14 @@ public class NamedList<T> implements Cloneable, Serializable, Iterable<Map.Entry
    * indexed elements (0,2,4. ..etc) are Strings and odd elements (1,3,5,) are of
    * the type "T".
    *
-   * @return Modified List as per the above description
-   * @deprecated This a temporary placeholder method until the guts of the class
+   * <p>
+   * NOTE: This is a temporary placeholder method until the guts of the class
    * are actually replaced by List&lt;String, ?&gt;.
+   * </p>
+   *
+   * @return Modified List as per the above description
    * @see <a href="https://issues.apache.org/jira/browse/SOLR-912">SOLR-912</a>
    */
-  @Deprecated
   private List<Object> nameValueMapToList(Map.Entry<String, ? extends T>[] nameValuePairs) {
     List<Object> result = new ArrayList<>();
     for (Map.Entry<String, ?> ent : nameValuePairs) {

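With the List&lt;Object&gt; constructor now package-private, code outside the package builds a NamedList via the long-standing no-arg constructor plus add(String, T), or via the Map.Entry[] constructor the old Javadoc pointed to; a minimal sketch:

    import org.apache.solr.common.util.NamedList;

    NamedList<Object> nl = new NamedList<>();
    nl.add("status", "OK");  // illustrative entries
    nl.add("QTime", 5);
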
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java b/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java
index 3fee6da..701cdc4 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java
@@ -50,10 +50,15 @@ public class SimpleOrderedMap<T> extends NamedList<T> {
    * Creates an instance backed by an explicitly specified list of
    * pairwise names/values.
    *
+   * <p>
+   * TODO: this method was formerly public, now that it's not we can change the impl details of 
+   * this class to be based on a Map.Entry[] 
+   * </p>
+   *
    * @param nameValuePairs underlying List which should be used to implement a SimpleOrderedMap; modifying this List will affect the SimpleOrderedMap.
+   * @lucene.internal
    */
-  @Deprecated
-  public SimpleOrderedMap(List<Object> nameValuePairs) {
+  private SimpleOrderedMap(List<Object> nameValuePairs) {
     super(nameValuePairs);
   }
   

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java
index 02ed7be..c2314f8 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java
@@ -50,7 +50,7 @@ public class SolrExampleStreamingTest extends SolrExampleTests {
     public Throwable lastError = null;
 
     public ErrorTrackingConcurrentUpdateSolrClient(String solrServerUrl, int queueSize, int threadCount) {
-      super(solrServerUrl, queueSize, threadCount);
+      super(solrServerUrl, null, queueSize, threadCount, null, false);
     }
     
     @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrClientTest.java
index 06ae8b8..4addce3 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrClientTest.java
@@ -839,7 +839,7 @@ public class BasicHttpSolrClientTest extends SolrJettyTestBase {
 
     try(HttpSolrClient createdClient = new HttpSolrClient.Builder()
         .withBaseSolrUrl(jetty.getBaseUrl().toString())
-        .withDelegationToken("mydt")
+        .withKerberosDelegationToken("mydt")
         .withInvariantParams(SolrTestCaseJ4.params(DelegationTokenHttpSolrClient.DELEGATION_TOKEN_PARAM, "mydt"))
         .build()) {
       fail();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
index 3a132d7..e1831f9 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientMultiConstructorTest.java
@@ -67,7 +67,7 @@ public class CloudSolrClientMultiConstructorTest extends LuceneTestCase {
       clientChroot = "/mychroot";
     }
 
-    try (CloudSolrClient client = new CloudSolrClient(hosts, clientChroot)) {
+    try (CloudSolrClient client = (new CloudSolrClient.Builder()).withZkHost(hosts).withZkChroot(clientChroot).build()) {
       assertEquals(sb.toString(), client.getZkHost());
     }
 
@@ -77,6 +77,6 @@ public class CloudSolrClientMultiConstructorTest extends LuceneTestCase {
   public void testBadChroot() {
     hosts = new ArrayList<>();
     hosts.add("host1:2181");
-    new CloudSolrClient(hosts, "foo");
+    (new CloudSolrClient.Builder()).withZkHost(hosts).withZkChroot("foo").build();
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java
index 24f08d2..4b061d5 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java
@@ -194,7 +194,11 @@ public class ConcurrentUpdateSolrClientTest extends SolrJettyTestBase {
     int cussThreadCount = 2;
     int cussQueueSize = 10;
 
-    try (ConcurrentUpdateSolrClient concurrentClient = new ConcurrentUpdateSolrClient(jetty.getBaseUrl().toString(), cussQueueSize, cussThreadCount)) {
+    try (ConcurrentUpdateSolrClient concurrentClient
+         = (new ConcurrentUpdateSolrClient.Builder(jetty.getBaseUrl().toString()))
+         .withQueueSize(cussQueueSize)
+         .withThreadCount(cussThreadCount).build()) {
+      
       SolrInputDocument doc = new SolrInputDocument();
       doc.addField("id", "collection");
       concurrentClient.add("collection1", doc);
@@ -203,7 +207,11 @@ public class ConcurrentUpdateSolrClientTest extends SolrJettyTestBase {
       assertEquals(1, concurrentClient.query("collection1", new SolrQuery("id:collection")).getResults().getNumFound());
     }
 
-    try (ConcurrentUpdateSolrClient concurrentClient = new ConcurrentUpdateSolrClient(jetty.getBaseUrl().toString() + "/collection1", cussQueueSize, cussThreadCount)) {
+    try (ConcurrentUpdateSolrClient concurrentClient
+         = (new ConcurrentUpdateSolrClient.Builder(jetty.getBaseUrl().toString() + "/collection1"))
+         .withQueueSize(cussQueueSize)
+         .withThreadCount(cussThreadCount).build()) {
+         
       assertEquals(1, concurrentClient.query(new SolrQuery("id:collection")).getResults().getNumFound());
     }
 
@@ -218,7 +226,10 @@ public class ConcurrentUpdateSolrClientTest extends SolrJettyTestBase {
     int numRunnables = 5;
     int expected = numDocs * numRunnables;
 
-    try (ConcurrentUpdateSolrClient concurrentClient = new ConcurrentUpdateSolrClient(jetty.getBaseUrl().toString(), cussQueueSize, cussThreadCount)) {
+    try (ConcurrentUpdateSolrClient concurrentClient
+         = (new ConcurrentUpdateSolrClient.Builder(jetty.getBaseUrl().toString()))
+         .withQueueSize(cussQueueSize)
+         .withThreadCount(cussThreadCount).build()) {
       concurrentClient.setPollQueueTime(0);
 
       // ensure it doesn't block where there's nothing to do yet
@@ -246,7 +257,11 @@ public class ConcurrentUpdateSolrClientTest extends SolrJettyTestBase {
       concurrentClient.shutdownNow();
     }
 
-    try (ConcurrentUpdateSolrClient concurrentClient = new ConcurrentUpdateSolrClient(jetty.getBaseUrl().toString() + "/collection1", cussQueueSize, cussThreadCount)) {
+    try (ConcurrentUpdateSolrClient concurrentClient
+         = (new ConcurrentUpdateSolrClient.Builder(jetty.getBaseUrl().toString() + "/collection1"))
+         .withQueueSize(cussQueueSize)
+         .withThreadCount(cussThreadCount).build()) {
+
       assertEquals(expected, concurrentClient.query(new SolrQuery("*:*")).getResults().getNumFound());
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
index 33781ef..9b11783 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/graph/GraphExpressionTest.java
@@ -862,7 +862,7 @@ public class GraphExpressionTest extends SolrCloudTestCase {
     JettySolrRunner runner = runners.get(0);
     String url = runner.getBaseUrl().toString();
 
-    HttpSolrClient client = new HttpSolrClient(url);
+    HttpSolrClient client = getHttpSolrClient(url);
     ModifiableSolrParams params = new ModifiableSolrParams();
 
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bc973ecd/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index a55e2ea..419f94f 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -2222,7 +2222,14 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
   public static Object skewed(Object likely, Object unlikely) {
     return (0 == TestUtil.nextInt(random(), 0, 9)) ? unlikely : likely;
   }
-  
+
+  /**
+   * A variant of {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} that will randomize which nodes receive updates
+   * unless otherwise specified by the caller.
+   *
+   * @see #sendDirectUpdatesToAnyShardReplica
+   * @see #sendDirectUpdatesToShardLeadersOnly
+   */
   public static class CloudSolrClientBuilder extends CloudSolrClient.Builder {
 
     private boolean configuredDUTflag = false;
@@ -2267,30 +2274,35 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
     }
   }
 
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly
+   */ 
   public static CloudSolrClient getCloudSolrClient(String zkHost) {
-    if (random().nextBoolean()) {
-      return new CloudSolrClient(zkHost);
-    }
     return new CloudSolrClientBuilder()
         .withZkHost(zkHost)
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly
+   */ 
   public static CloudSolrClient getCloudSolrClient(String zkHost, HttpClient httpClient) {
-    if (random().nextBoolean()) {
-      return new CloudSolrClient(zkHost, httpClient);
-    }
     return new CloudSolrClientBuilder()
         .withZkHost(zkHost)
         .withHttpClient(httpClient)
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly
+   */ 
   public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly) {
-    if (random().nextBoolean()) {
-      return new CloudSolrClient(zkHost, shardLeadersOnly);
-    }
-    
     if (shardLeadersOnly) {
       return new CloudSolrClientBuilder()
           .withZkHost(zkHost)
@@ -2303,11 +2315,12 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly
+   */ 
   public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, HttpClient httpClient) {
-    if (random().nextBoolean()) {
-      return new CloudSolrClient(zkHost, shardLeadersOnly, httpClient);
-    }
-    
     if (shardLeadersOnly) {
       return new CloudSolrClientBuilder()
           .withZkHost(zkHost)
@@ -2322,20 +2335,24 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient.Builder} class directly
+   */ 
   public static ConcurrentUpdateSolrClient getConcurrentUpdateSolrClient(String baseSolrUrl, int queueSize, int threadCount) {
-    if (random().nextBoolean()) {
-      return new ConcurrentUpdateSolrClient(baseSolrUrl, queueSize, threadCount);
-    }
     return new ConcurrentUpdateSolrClient.Builder(baseSolrUrl)
         .withQueueSize(queueSize)
         .withThreadCount(threadCount)
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient.Builder} class directly
+   */ 
   public static ConcurrentUpdateSolrClient getConcurrentUpdateSolrClient(String baseSolrUrl, HttpClient httpClient, int queueSize, int threadCount) {
-    if (random().nextBoolean()) {
-      return new ConcurrentUpdateSolrClient(baseSolrUrl, httpClient, queueSize, threadCount);
-    }
     return new ConcurrentUpdateSolrClient.Builder(baseSolrUrl)
         .withHttpClient(httpClient)
         .withQueueSize(queueSize)
@@ -2343,30 +2360,35 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder} class directly
+   */ 
   public static LBHttpSolrClient getLBHttpSolrClient(HttpClient client, String... solrUrls) {
-    if (random().nextBoolean()) {
-      return new LBHttpSolrClient(client, solrUrls);
-    }
-    
     return new LBHttpSolrClient.Builder()
         .withHttpClient(client)
         .withBaseSolrUrls(solrUrls)
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder} class directly
+   */ 
   public static LBHttpSolrClient getLBHttpSolrClient(String... solrUrls) throws MalformedURLException {
-    if (random().nextBoolean()) {
-      return new LBHttpSolrClient(solrUrls);
-    }
     return new LBHttpSolrClient.Builder()
         .withBaseSolrUrls(solrUrls)
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly
+   */ 
   public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient, ResponseParser responseParser, boolean compression) {
-    if(random().nextBoolean()) {
-      return new HttpSolrClient(url, httpClient, responseParser, compression);
-    }
     return new Builder(url)
         .withHttpClient(httpClient)
         .withResponseParser(responseParser)
@@ -2374,29 +2396,35 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly
+   */ 
   public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient, ResponseParser responseParser) {
-    if(random().nextBoolean()) {
-      return new HttpSolrClient(url, httpClient, responseParser);
-    }
     return new Builder(url)
         .withHttpClient(httpClient)
         .withResponseParser(responseParser)
         .build();
   }
   
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly
+   */ 
   public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient) {
-    if(random().nextBoolean()) {
-      return new HttpSolrClient(url, httpClient);
-    }
     return new Builder(url)
         .withHttpClient(httpClient)
         .build();
   }
 
+  /**
+   * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
+   * Tests that do not wish to have any randomized behavior should use the 
+   * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly
+   */ 
   public static HttpSolrClient getHttpSolrClient(String url) {
-    if(random().nextBoolean()) {
-      return new HttpSolrClient(url);
-    }
     return new Builder(url)
         .build();
   }


[07/25] lucene-solr:jira/solr-8668: SOLR-10758: Point to Lib Directives in SolrConfig page from the Traditional Chinese ICUTokenizer paragraph.

Posted by cp...@apache.org.
SOLR-10758: Point to Lib Directives in SolrConfig page from the Traditional Chinese ICUTokenizer paragraph.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6bbdfbc7
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6bbdfbc7
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6bbdfbc7

Branch: refs/heads/jira/solr-8668
Commit: 6bbdfbc7c1b64a4ad399509c4d77b30741c5845c
Parents: d4f87b4
Author: Steve Rowe <sa...@gmail.com>
Authored: Fri May 26 16:04:22 2017 -0400
Committer: Steve Rowe <sa...@gmail.com>
Committed: Fri May 26 16:04:49 2017 -0400

----------------------------------------------------------------------
 solr/solr-ref-guide/src/language-analysis.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6bbdfbc7/solr/solr-ref-guide/src/language-analysis.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/language-analysis.adoc b/solr/solr-ref-guide/src/language-analysis.adoc
index c55a0cd..c82cd61 100644
--- a/solr/solr-ref-guide/src/language-analysis.adoc
+++ b/solr/solr-ref-guide/src/language-analysis.adoc
@@ -510,7 +510,7 @@ Solr can stem Catalan using the Snowball Porter Stemmer with an argument of `lan
 [[LanguageAnalysis-TraditionalChinese]]
 === Traditional Chinese
 
-The default configuration of the <<tokenizers.adoc#Tokenizers-ICUTokenizer,ICU Tokenizer>> is suitable for Traditional Chinese text.  It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.  To use this tokenizer, see `solr/contrib/analysis-extras/README.txt` for instructions on which jars you need to add to your `solr_home/lib`.
+The default configuration of the <<tokenizers.adoc#Tokenizers-ICUTokenizer,ICU Tokenizer>> is suitable for Traditional Chinese text.  It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.  To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<lib-directives-in-solrconfig.adoc#lib-directives-in-solrconfig,Lib Directives in SolrConfig>>). See the `solr/contrib/analysis-extras/README.txt` for information on which jars you need to add to your `SOLR_HOME/lib`.
 
 <<tokenizers.adoc#Tokenizers-StandardTokenizer,Standard Tokenizer>> can also be used to tokenize Traditional Chinese text.  Following the Word Break rules from the Unicode Text Segmentation algorithm, it produces one token per Chinese character.  When combined with <<LanguageAnalysis-CJKBigramFilter,CJK Bigram Filter>>, overlapping bigrams of Chinese characters are formed.
  


[21/25] lucene-solr:jira/solr-8668: LUCENE-7850: Move support for legacy numerics to solr/.

Posted by cp...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java b/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java
new file mode 100644
index 0000000..c919eb8
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/BBoxStrategy.java
@@ -0,0 +1,706 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import org.apache.lucene.document.DoubleDocValuesField;
+import org.apache.lucene.document.DoublePoint;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.spatial.SpatialStrategy;
+import org.apache.lucene.spatial.bbox.BBoxOverlapRatioValueSource;
+import org.apache.lucene.spatial.query.SpatialArgs;
+import org.apache.lucene.spatial.query.SpatialOperation;
+import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
+import org.apache.lucene.spatial.util.DistanceToShapeValueSource;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.NumericUtils;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Rectangle;
+import org.locationtech.spatial4j.shape.Shape;
+
+
+/**
+ * A SpatialStrategy for indexing and searching Rectangles by storing their
+ * coordinates in numeric fields. It supports all {@link SpatialOperation}s and
+ * has a custom overlap relevancy. It is based on GeoPortal's <a
+ * href="http://geoportal.svn.sourceforge.net/svnroot/geoportal/Geoportal/trunk/src/com/esri/gpt/catalog/lucene/SpatialClauseAdapter.java">SpatialClauseAdapter</a>.
+ * <p>
+ * <b>Characteristics:</b>
+ * <br>
+ * <ul>
+ * <li>Only indexes Rectangles; just one per field value. Other shapes can be provided
+ * and the bounding box will be used.</li>
+ * <li>Can query only by a Rectangle. Providing other shapes is an error.</li>
+ * <li>Supports most {@link SpatialOperation}s but not Overlaps.</li>
+ * <li>Uses the DocValues API for any sorting / relevancy.</li>
+ * </ul>
+ * <p>
+ * <b>Implementation:</b>
+ * <p>
+ * This uses 4 double fields for minX, maxX, minY, maxY
+ * and a boolean to mark a dateline cross. Depending on the particular {@link
+ * SpatialOperation}s, there are a variety of range queries on {@link DoublePoint}s to be
+ * done.
+ * The {@link #makeOverlapRatioValueSource(org.locationtech.spatial4j.shape.Rectangle, double)}
+ * works by calculating the query bbox overlap percentage against the indexed
+ * shape overlap percentage. The indexed shape's coordinates are retrieved from
+ * {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.
+ *
+ * @lucene.experimental
+ */
+public class BBoxStrategy extends SpatialStrategy {
+
+  // note: we use a FieldType to articulate the options we want on the field.  We don't use it as-is with a Field, we
+  //  create more than one Field.
+
+  /**
+   * pointValues, docValues, and nothing else.
+   */
+  public static FieldType DEFAULT_FIELDTYPE;
+
+  @Deprecated
+  public static LegacyFieldType LEGACY_FIELDTYPE;
+  static {
+    // Default: pointValues + docValues
+    FieldType type = new FieldType();
+    type.setDimensions(1, Double.BYTES);//pointValues (assume Double)
+    type.setDocValuesType(DocValuesType.NUMERIC);//docValues
+    type.setStored(false);
+    type.freeze();
+    DEFAULT_FIELDTYPE = type;
+    // Legacy default: legacyNumerics + docValues
+    LegacyFieldType legacyType = new LegacyFieldType();
+    legacyType.setIndexOptions(IndexOptions.DOCS);
+    legacyType.setNumericType(LegacyNumericType.DOUBLE);
+    legacyType.setNumericPrecisionStep(8);// same as solr default
+    legacyType.setDocValuesType(DocValuesType.NUMERIC);//docValues
+    legacyType.setStored(false);
+    legacyType.freeze();
+    LEGACY_FIELDTYPE = legacyType;
+  }
+
+  public static final String SUFFIX_MINX = "__minX";
+  public static final String SUFFIX_MAXX = "__maxX";
+  public static final String SUFFIX_MINY = "__minY";
+  public static final String SUFFIX_MAXY = "__maxY";
+  public static final String SUFFIX_XDL  = "__xdl";
+
+  /*
+   * The Bounding Box gets stored as four fields for x/y min/max and a flag
+   * that says if the box crosses the dateline (xdl).
+   */
+  final String field_bbox;
+  final String field_minX;
+  final String field_minY;
+  final String field_maxX;
+  final String field_maxY;
+  final String field_xdl; // crosses dateline
+
+  private final FieldType optionsFieldType;//from constructor; aggregate field type used to express all options
+  private final int fieldsLen;
+  private final boolean hasStored;
+  private final boolean hasDocVals;
+  private final boolean hasPointVals;
+  // equiv to "hasLegacyNumerics":
+  private final LegacyFieldType legacyNumericFieldType; // not stored; holds precision step.
+  private final FieldType xdlFieldType;
+
+  /**
+   * Creates a new {@link BBoxStrategy} instance that uses {@link DoublePoint} and {@link DoublePoint#newRangeQuery}
+   */
+  public static BBoxStrategy newInstance(SpatialContext ctx, String fieldNamePrefix) {
+    return new BBoxStrategy(ctx, fieldNamePrefix, DEFAULT_FIELDTYPE);
+  }
+
+  /**
+   * Creates a new {@link BBoxStrategy} instance that uses {@link LegacyDoubleField} for backwards compatibility
+   * @deprecated LegacyNumerics will be removed
+   */
+  @Deprecated
+  public static BBoxStrategy newLegacyInstance(SpatialContext ctx, String fieldNamePrefix) {
+    return new BBoxStrategy(ctx, fieldNamePrefix, LEGACY_FIELDTYPE);
+  }
+
+  /**
+   * Creates this strategy.
+   * {@code fieldType} is used to customize the indexing options of the 4 number fields, and to a lesser degree the XDL
+   * field too. Search requires pointValues (or legacy numerics), and relevancy requires docValues. If these features
+   * aren't needed then disable them.
+   */
+  public BBoxStrategy(SpatialContext ctx, String fieldNamePrefix, FieldType fieldType) {
+    super(ctx, fieldNamePrefix);
+    field_bbox = fieldNamePrefix;
+    field_minX = fieldNamePrefix + SUFFIX_MINX;
+    field_maxX = fieldNamePrefix + SUFFIX_MAXX;
+    field_minY = fieldNamePrefix + SUFFIX_MINY;
+    field_maxY = fieldNamePrefix + SUFFIX_MAXY;
+    field_xdl = fieldNamePrefix + SUFFIX_XDL;
+
+    fieldType.freeze();
+    this.optionsFieldType = fieldType;
+
+    int numQuads = 0;
+    if ((this.hasStored = fieldType.stored())) {
+      numQuads++;
+    }
+    if ((this.hasDocVals = fieldType.docValuesType() != DocValuesType.NONE)) {
+      numQuads++;
+    }
+    if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
+      numQuads++;
+    }
+    if (fieldType.indexOptions() != IndexOptions.NONE && fieldType instanceof LegacyFieldType && ((LegacyFieldType)fieldType).numericType() != null) {
+      if (hasPointVals) {
+        throw new IllegalArgumentException("pointValues and LegacyNumericType are mutually exclusive");
+      }
+      final LegacyFieldType legacyType = (LegacyFieldType) fieldType;
+      if (legacyType.numericType() != LegacyNumericType.DOUBLE) {
+        throw new IllegalArgumentException(getClass() + " does not support " + legacyType.numericType());
+      }
+      numQuads++;
+      legacyNumericFieldType = new LegacyFieldType(LegacyDoubleField.TYPE_NOT_STORED);
+      legacyNumericFieldType.setNumericPrecisionStep(legacyType.numericPrecisionStep());
+      legacyNumericFieldType.freeze();
+    } else {
+      legacyNumericFieldType = null;
+    }
+
+    if (hasPointVals || legacyNumericFieldType != null) { // if we have an index...
+      xdlFieldType = new FieldType(StringField.TYPE_NOT_STORED);
+      xdlFieldType.setIndexOptions(IndexOptions.DOCS);
+      xdlFieldType.freeze();
+    } else {
+      xdlFieldType = null;
+    }
+
+    this.fieldsLen = numQuads * 4 + (xdlFieldType != null ? 1 : 0);
+  }
+
+  /** Returns a field type representing the set of field options. This is identical to what was passed into the
+   * constructor.  It's frozen. */
+  public FieldType getFieldType() {
+    return optionsFieldType;
+  }
+
+  //---------------------------------
+  // Indexing
+  //---------------------------------
+
+  @Override
+  public Field[] createIndexableFields(Shape shape) {
+    return createIndexableFields(shape.getBoundingBox());
+  }
+
+  private Field[] createIndexableFields(Rectangle bbox) {
+    Field[] fields = new Field[fieldsLen];
+    int idx = -1;
+    if (hasStored) {
+      fields[++idx] = new StoredField(field_minX, bbox.getMinX());
+      fields[++idx] = new StoredField(field_minY, bbox.getMinY());
+      fields[++idx] = new StoredField(field_maxX, bbox.getMaxX());
+      fields[++idx] = new StoredField(field_maxY, bbox.getMaxY());
+    }
+    if (hasDocVals) {
+      fields[++idx] = new DoubleDocValuesField(field_minX, bbox.getMinX());
+      fields[++idx] = new DoubleDocValuesField(field_minY, bbox.getMinY());
+      fields[++idx] = new DoubleDocValuesField(field_maxX, bbox.getMaxX());
+      fields[++idx] = new DoubleDocValuesField(field_maxY, bbox.getMaxY());
+    }
+    if (hasPointVals) {
+      fields[++idx] = new DoublePoint(field_minX, bbox.getMinX());
+      fields[++idx] = new DoublePoint(field_minY, bbox.getMinY());
+      fields[++idx] = new DoublePoint(field_maxX, bbox.getMaxX());
+      fields[++idx] = new DoublePoint(field_maxY, bbox.getMaxY());
+    }
+    if (legacyNumericFieldType != null) {
+      fields[++idx] = new LegacyDoubleField(field_minX, bbox.getMinX(), legacyNumericFieldType);
+      fields[++idx] = new LegacyDoubleField(field_minY, bbox.getMinY(), legacyNumericFieldType);
+      fields[++idx] = new LegacyDoubleField(field_maxX, bbox.getMaxX(), legacyNumericFieldType);
+      fields[++idx] = new LegacyDoubleField(field_maxY, bbox.getMaxY(), legacyNumericFieldType);
+    }
+    if (xdlFieldType != null) {
+      fields[++idx] = new Field(field_xdl, bbox.getCrossesDateLine()?"T":"F", xdlFieldType);
+    }
+    assert idx == fields.length - 1;
+    return fields;
+  }
+
+
+  //---------------------------------
+  // Value Source / Relevancy
+  //---------------------------------
+
+  /**
+   * Provides access to each rectangle per document as a ValueSource in which
+   * {@link org.apache.lucene.queries.function.FunctionValues#objectVal(int)} returns a {@link
+   * Shape}.
+   */ //TODO raise to SpatialStrategy
+  public ValueSource makeShapeValueSource() {
+    return new BBoxValueSource(this);
+  }
+
+  @Override
+  public ValueSource makeDistanceValueSource(Point queryPoint, double multiplier) {
+    //TODO if makeShapeValueSource gets lifted to the top; this could become a generic impl.
+    return new DistanceToShapeValueSource(makeShapeValueSource(), queryPoint, multiplier, ctx);
+  }
+
+  /** Returns a similarity based on {@link BBoxOverlapRatioValueSource}. This is just a
+   * convenience method. */
+  public ValueSource makeOverlapRatioValueSource(Rectangle queryBox, double queryTargetProportion) {
+    return new BBoxOverlapRatioValueSource(
+        makeShapeValueSource(), ctx.isGeo(), queryBox, queryTargetProportion, 0.0);
+  }
+
+  //---------------------------------
+  // Query Building
+  //---------------------------------
+
+  //  Utility on SpatialStrategy?
+//  public Query makeQueryWithValueSource(SpatialArgs args, ValueSource valueSource) {
+//    return new CustomScoreQuery(makeQuery(args), new FunctionQuery(valueSource));
+  //or...
+//  return new BooleanQuery.Builder()
+//      .add(new FunctionQuery(valueSource), BooleanClause.Occur.MUST)//matches everything and provides score
+//      .add(filterQuery, BooleanClause.Occur.FILTER)//filters (score isn't used)
+//  .build();
+//  }
+
+  @Override
+  public Query makeQuery(SpatialArgs args) {
+    Shape shape = args.getShape();
+    if (!(shape instanceof Rectangle))
+      throw new UnsupportedOperationException("Can only query by Rectangle, not " + shape);
+
+    Rectangle bbox = (Rectangle) shape;
+    Query spatial;
+
+    // Useful for understanding Relations:
+    // http://edndoc.esri.com/arcsde/9.1/general_topics/understand_spatial_relations.htm
+    SpatialOperation op = args.getOperation();
+         if( op == SpatialOperation.BBoxIntersects ) spatial = makeIntersects(bbox);
+    else if( op == SpatialOperation.BBoxWithin     ) spatial = makeWithin(bbox);
+    else if( op == SpatialOperation.Contains       ) spatial = makeContains(bbox);
+    else if( op == SpatialOperation.Intersects     ) spatial = makeIntersects(bbox);
+    else if( op == SpatialOperation.IsEqualTo      ) spatial = makeEquals(bbox);
+    else if( op == SpatialOperation.IsDisjointTo   ) spatial = makeDisjoint(bbox);
+    else if( op == SpatialOperation.IsWithin       ) spatial = makeWithin(bbox);
+    else { //no Overlaps support yet
+        throw new UnsupportedSpatialOperation(op);
+    }
+    return new ConstantScoreQuery(spatial);
+  }
+
+  /**
+   * Constructs a query to retrieve documents that fully contain the input envelope.
+   *
+   * @return the spatial query
+   */
+  Query makeContains(Rectangle bbox) {
+
+    // general case
+    // docMinX <= queryExtent.getMinX() AND docMinY <= queryExtent.getMinY() AND docMaxX >= queryExtent.getMaxX() AND docMaxY >= queryExtent.getMaxY()
+
+    // Y conditions
+    // docMinY <= queryExtent.getMinY() AND docMaxY >= queryExtent.getMaxY()
+    Query qMinY = this.makeNumericRangeQuery(field_minY, null, bbox.getMinY(), false, true);
+    Query qMaxY = this.makeNumericRangeQuery(field_maxY, bbox.getMaxY(), null, true, false);
+    Query yConditions = this.makeQuery(BooleanClause.Occur.MUST, qMinY, qMaxY);
+
+    // X conditions
+    Query xConditions;
+
+    // queries that do not cross the date line
+    if (!bbox.getCrossesDateLine()) {
+
+      // X Conditions for documents that do not cross the date line,
+      // documents that contain the min X and max X of the query envelope,
+      // docMinX <= queryExtent.getMinX() AND docMaxX >= queryExtent.getMaxX()
+      Query qMinX = this.makeNumericRangeQuery(field_minX, null, bbox.getMinX(), false, true);
+      Query qMaxX = this.makeNumericRangeQuery(field_maxX, bbox.getMaxX(), null, true, false);
+      Query qMinMax = this.makeQuery(BooleanClause.Occur.MUST, qMinX, qMaxX);
+      Query qNonXDL = this.makeXDL(false, qMinMax);
+
+      if (!ctx.isGeo()) {
+        xConditions = qNonXDL;
+      } else {
+        // X Conditions for documents that cross the date line,
+        // the left portion of the document contains the min X of the query
+        // OR the right portion of the document contains the max X of the query,
+        // docMinXLeft <= queryExtent.getMinX() OR docMaxXRight >= queryExtent.getMaxX()
+        Query qXDLLeft = this.makeNumericRangeQuery(field_minX, null, bbox.getMinX(), false, true);
+        Query qXDLRight = this.makeNumericRangeQuery(field_maxX, bbox.getMaxX(), null, true, false);
+        Query qXDLLeftRight = this.makeQuery(BooleanClause.Occur.SHOULD, qXDLLeft, qXDLRight);
+        Query qXDL = this.makeXDL(true, qXDLLeftRight);
+
+        Query qEdgeDL = null;
+        if (bbox.getMinX() == bbox.getMaxX() && Math.abs(bbox.getMinX()) == 180) {
+          double edge = bbox.getMinX() * -1;//opposite dateline edge
+          qEdgeDL = makeQuery(BooleanClause.Occur.SHOULD,
+              makeNumberTermQuery(field_minX, edge), makeNumberTermQuery(field_maxX, edge));
+        }
+
+        // apply the non-XDL and XDL conditions
+        xConditions = this.makeQuery(BooleanClause.Occur.SHOULD, qNonXDL, qXDL, qEdgeDL);
+      }
+    } else {
+      // queries that cross the date line
+
+      // No need to search for documents that do not cross the date line
+
+      // X Conditions for documents that cross the date line,
+      // the left portion of the document contains the min X of the query
+      // AND the right portion of the document contains the max X of the query,
+      // docMinXLeft <= queryExtent.getMinX() AND docMaxXRight >= queryExtent.getMaxX()
+      Query qXDLLeft = this.makeNumericRangeQuery(field_minX, null, bbox.getMinX(), false, true);
+      Query qXDLRight = this.makeNumericRangeQuery(field_maxX, bbox.getMaxX(), null, true, false);
+      Query qXDLLeftRight = this.makeXDL(true, this.makeQuery(BooleanClause.Occur.MUST, qXDLLeft, qXDLRight));
+
+      Query qWorld = makeQuery(BooleanClause.Occur.MUST,
+          makeNumberTermQuery(field_minX, -180), makeNumberTermQuery(field_maxX, 180));
+
+      xConditions = makeQuery(BooleanClause.Occur.SHOULD, qXDLLeftRight, qWorld);
+    }
+
+    // both X and Y conditions must occur
+    return this.makeQuery(BooleanClause.Occur.MUST, xConditions, yConditions);
+  }
+
+  /**
+   * Constructs a query to retrieve documents that are disjoint to the input envelope.
+   *
+   * @return the spatial query
+   */
+  Query makeDisjoint(Rectangle bbox) {
+
+    // general case
+    // docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX() OR docMinY > queryExtent.getMaxY() OR docMaxY < queryExtent.getMinY()
+
+    // Y conditions
+    // docMinY > queryExtent.getMaxY() OR docMaxY < queryExtent.getMinY()
+    Query qMinY = this.makeNumericRangeQuery(field_minY, bbox.getMaxY(), null, false, false);
+    Query qMaxY = this.makeNumericRangeQuery(field_maxY, null, bbox.getMinY(), false, false);
+    Query yConditions = this.makeQuery(BooleanClause.Occur.SHOULD, qMinY, qMaxY);
+
+    // X conditions
+    Query xConditions;
+
+    // queries that do not cross the date line
+    if (!bbox.getCrossesDateLine()) {
+
+      // X Conditions for documents that do not cross the date line,
+      // docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX()
+      Query qMinX = this.makeNumericRangeQuery(field_minX, bbox.getMaxX(), null, false, false);
+      if (bbox.getMinX() == -180.0 && ctx.isGeo()) {//touches dateline; -180 == 180
+        BooleanQuery.Builder bq = new BooleanQuery.Builder();
+        bq.add(qMinX, BooleanClause.Occur.MUST);
+        bq.add(makeNumberTermQuery(field_maxX, 180.0), BooleanClause.Occur.MUST_NOT);
+        qMinX = bq.build();
+      }
+      Query qMaxX = this.makeNumericRangeQuery(field_maxX, null, bbox.getMinX(), false, false);
+
+      if (bbox.getMaxX() == 180.0 && ctx.isGeo()) {//touches dateline; -180 == 180
+        BooleanQuery.Builder bq = new BooleanQuery.Builder();
+        bq.add(qMaxX, BooleanClause.Occur.MUST);
+        bq.add(makeNumberTermQuery(field_minX, -180.0), BooleanClause.Occur.MUST_NOT);
+        qMaxX = bq.build();
+      }
+      Query qMinMax = this.makeQuery(BooleanClause.Occur.SHOULD, qMinX, qMaxX);
+      Query qNonXDL = this.makeXDL(false, qMinMax);
+
+      if (!ctx.isGeo()) {
+        xConditions = qNonXDL;
+      } else {
+        // X Conditions for documents that cross the date line,
+
+        // both the left and right portions of the document must be disjoint to the query
+        // (docMinXLeft > queryExtent.getMaxX() OR docMaxXLeft < queryExtent.getMinX()) AND
+        // (docMinXRight > queryExtent.getMaxX() OR docMaxXRight < queryExtent.getMinX())
+        // where: docMaxXLeft = 180.0, docMinXRight = -180.0
+        // (docMaxXLeft  < queryExtent.getMinX()) equates to (180.0  < queryExtent.getMinX()) and is ignored
+        // (docMinXRight > queryExtent.getMaxX()) equates to (-180.0 > queryExtent.getMaxX()) and is ignored
+        Query qMinXLeft = this.makeNumericRangeQuery(field_minX, bbox.getMaxX(), null, false, false);
+        Query qMaxXRight = this.makeNumericRangeQuery(field_maxX, null, bbox.getMinX(), false, false);
+        Query qLeftRight = this.makeQuery(BooleanClause.Occur.MUST, qMinXLeft, qMaxXRight);
+        Query qXDL = this.makeXDL(true, qLeftRight);
+
+        // apply the non-XDL and XDL conditions
+        xConditions = this.makeQuery(BooleanClause.Occur.SHOULD, qNonXDL, qXDL);
+      }
+      // queries that cross the date line
+    } else {
+
+      // X Conditions for documents that do not cross the date line,
+      // the document must be disjoint to both the left and right query portions
+      // (docMinX > queryExtent.getMaxX()Left OR docMaxX < queryExtent.getMinX()) AND (docMinX > queryExtent.getMaxX() OR docMaxX < queryExtent.getMinX()Left)
+      // where: queryExtent.getMaxX()Left = 180.0, queryExtent.getMinX()Left = -180.0
+      Query qMinXLeft = this.makeNumericRangeQuery(field_minX, 180.0, null, false, false);
+      Query qMaxXLeft = this.makeNumericRangeQuery(field_maxX, null, bbox.getMinX(), false, false);
+      Query qMinXRight = this.makeNumericRangeQuery(field_minX, bbox.getMaxX(), null, false, false);
+      Query qMaxXRight = this.makeNumericRangeQuery(field_maxX, null, -180.0, false, false);
+      Query qLeft = this.makeQuery(BooleanClause.Occur.SHOULD, qMinXLeft, qMaxXLeft);
+      Query qRight = this.makeQuery(BooleanClause.Occur.SHOULD, qMinXRight, qMaxXRight);
+      Query qLeftRight = this.makeQuery(BooleanClause.Occur.MUST, qLeft, qRight);
+
+      // No need to search for documents that do not cross the date line
+
+      xConditions = this.makeXDL(false, qLeftRight);
+    }
+
+    // either X or Y conditions should occur
+    return this.makeQuery(BooleanClause.Occur.SHOULD, xConditions, yConditions);
+  }
+
+  /**
+   * Constructs a query to retrieve documents that equal the input envelope.
+   *
+   * @return the spatial query
+   */
+  Query makeEquals(Rectangle bbox) {
+
+    // docMinX = queryExtent.getMinX() AND docMinY = queryExtent.getMinY() AND docMaxX = queryExtent.getMaxX() AND docMaxY = queryExtent.getMaxY()
+    Query qMinX = makeNumberTermQuery(field_minX, bbox.getMinX());
+    Query qMinY = makeNumberTermQuery(field_minY, bbox.getMinY());
+    Query qMaxX = makeNumberTermQuery(field_maxX, bbox.getMaxX());
+    Query qMaxY = makeNumberTermQuery(field_maxY, bbox.getMaxY());
+    return makeQuery(BooleanClause.Occur.MUST, qMinX, qMinY, qMaxX, qMaxY);
+  }
+
+  /**
+   * Constructs a query to retrieve documents that intersect the input envelope.
+   *
+   * @return the spatial query
+   */
+  Query makeIntersects(Rectangle bbox) {
+
+    // the original intersects query does not work for envelopes that cross the date line,
+    // switch to a NOT Disjoint query
+
+    // MUST_NOT causes a problem when it's the only clause type within a BooleanQuery,
+    // to get around it we add all documents as a SHOULD
+
+    // there must be an envelope, and it must not be disjoint
+    Query qHasEnv;
+    if (ctx.isGeo()) {
+      Query qIsNonXDL = this.makeXDL(false);
+      Query qIsXDL = this.makeXDL(true); // ctx.isGeo() is already known true in this branch
+      qHasEnv = this.makeQuery(BooleanClause.Occur.SHOULD, qIsNonXDL, qIsXDL);
+    } else {
+      qHasEnv = this.makeXDL(false);
+    }
+
+    BooleanQuery.Builder qNotDisjoint = new BooleanQuery.Builder();
+    qNotDisjoint.add(qHasEnv, BooleanClause.Occur.MUST);
+    Query qDisjoint = makeDisjoint(bbox);
+    qNotDisjoint.add(qDisjoint, BooleanClause.Occur.MUST_NOT);
+
+    //Query qDisjoint = makeDisjoint();
+    //BooleanQuery qNotDisjoint = new BooleanQuery();
+    //qNotDisjoint.add(new MatchAllDocsQuery(),BooleanClause.Occur.SHOULD);
+    //qNotDisjoint.add(qDisjoint,BooleanClause.Occur.MUST_NOT);
+    return qNotDisjoint.build();
+  }
+
+  /**
+   * Makes a boolean query based upon a collection of queries and a logical operator.
+   *
+   * @param occur the logical operator
+   * @param queries the query collection
+   * @return the query
+   */
+  BooleanQuery makeQuery(BooleanClause.Occur occur, Query... queries) {
+    BooleanQuery.Builder bq = new BooleanQuery.Builder();
+    for (Query query : queries) {
+      if (query != null)
+        bq.add(query, occur);
+    }
+    return bq.build();
+  }
+
+  /**
+   * Constructs a query to retrieve documents that are fully within the input envelope.
+   *
+   * @return the spatial query
+   */
+  Query makeWithin(Rectangle bbox) {
+
+    // general case
+    // docMinX >= queryExtent.getMinX() AND docMinY >= queryExtent.getMinY() AND docMaxX <= queryExtent.getMaxX() AND docMaxY <= queryExtent.getMaxY()
+
+    // Y conditions
+    // docMinY >= queryExtent.getMinY() AND docMaxY <= queryExtent.getMaxY()
+    Query qMinY = this.makeNumericRangeQuery(field_minY, bbox.getMinY(), null, true, false);
+    Query qMaxY = this.makeNumericRangeQuery(field_maxY, null, bbox.getMaxY(), false, true);
+    Query yConditions = this.makeQuery(BooleanClause.Occur.MUST, qMinY, qMaxY);
+
+    // X conditions
+    Query xConditions;
+
+    if (ctx.isGeo() && bbox.getMinX() == -180.0 && bbox.getMaxX() == 180.0) {
+      //if query world-wraps, only the y condition matters
+      return yConditions;
+
+    } else if (!bbox.getCrossesDateLine()) {
+      // queries that do not cross the date line
+
+      // docMinX >= queryExtent.getMinX() AND docMaxX <= queryExtent.getMaxX()
+      Query qMinX = this.makeNumericRangeQuery(field_minX, bbox.getMinX(), null, true, false);
+      Query qMaxX = this.makeNumericRangeQuery(field_maxX, null, bbox.getMaxX(), false, true);
+      Query qMinMax = this.makeQuery(BooleanClause.Occur.MUST, qMinX, qMaxX);
+
+      double edge = 0;//none, otherwise opposite dateline of query
+      if (bbox.getMinX() == -180.0)
+        edge = 180;
+      else if (bbox.getMaxX() == 180.0)
+        edge = -180;
+      if (edge != 0 && ctx.isGeo()) {
+        Query edgeQ = makeQuery(BooleanClause.Occur.MUST,
+            makeNumberTermQuery(field_minX, edge), makeNumberTermQuery(field_maxX, edge));
+        qMinMax = makeQuery(BooleanClause.Occur.SHOULD, qMinMax, edgeQ);
+      }
+
+      xConditions = this.makeXDL(false, qMinMax);
+
+      // queries that cross the date line
+    } else {
+
+      // X Conditions for documents that do not cross the date line
+
+      // the document should be within the left portion of the query
+      // docMinX >= queryExtent.getMinX() AND docMaxX <= 180.0
+      Query qMinXLeft = this.makeNumericRangeQuery(field_minX, bbox.getMinX(), null, true, false);
+      Query qMaxXLeft = this.makeNumericRangeQuery(field_maxX, null, 180.0, false, true);
+      Query qLeft = this.makeQuery(BooleanClause.Occur.MUST, qMinXLeft, qMaxXLeft);
+
+      // the document should be within the right portion of the query
+      // docMinX >= -180.0 AND docMaxX <= queryExtent.getMaxX()
+      Query qMinXRight = this.makeNumericRangeQuery(field_minX, -180.0, null, true, false);
+      Query qMaxXRight = this.makeNumericRangeQuery(field_maxX, null, bbox.getMaxX(), false, true);
+      Query qRight = this.makeQuery(BooleanClause.Occur.MUST, qMinXRight, qMaxXRight);
+
+      // either left or right conditions should occur,
+      // apply the left and right conditions to documents that do not cross the date line
+      Query qLeftRight = this.makeQuery(BooleanClause.Occur.SHOULD, qLeft, qRight);
+      Query qNonXDL = this.makeXDL(false, qLeftRight);
+
+      // X Conditions for documents that cross the date line,
+      // the left portion of the document must be within the left portion of the query,
+      // AND the right portion of the document must be within the right portion of the query
+      // docMinXLeft >= queryExtent.getMinX() AND docMaxXLeft <= 180.0
+      // AND docMinXRight >= -180.0 AND docMaxXRight <= queryExtent.getMaxX()
+      Query qXDLLeft = this.makeNumericRangeQuery(field_minX, bbox.getMinX(), null, true, false);
+      Query qXDLRight = this.makeNumericRangeQuery(field_maxX, null, bbox.getMaxX(), false, true);
+      Query qXDLLeftRight = this.makeQuery(BooleanClause.Occur.MUST, qXDLLeft, qXDLRight);
+      Query qXDL = this.makeXDL(true, qXDLLeftRight);
+
+      // apply the non-XDL and XDL conditions
+      xConditions = this.makeQuery(BooleanClause.Occur.SHOULD, qNonXDL, qXDL);
+    }
+
+    // both X and Y conditions must occur
+    return this.makeQuery(BooleanClause.Occur.MUST, xConditions, yConditions);
+  }
+
+  /**
+   * Constructs a query to retrieve documents that do or do not cross the date line.
+   *
+   * @param crossedDateLine <code>true</code> for documents that cross the date line
+   * @return the query
+   */
+  private Query makeXDL(boolean crossedDateLine) {
+    // The 'T' and 'F' values match solr fields
+    return new TermQuery(new Term(field_xdl, crossedDateLine ? "T" : "F"));
+  }
+
+  /**
+   * Constructs a query to retrieve documents that do or do not cross the date line
+   * and match the supplied spatial query.
+   *
+   * @param crossedDateLine <code>true</code> for documents that cross the date line
+   * @param query the spatial query
+   * @return the query
+   */
+  private Query makeXDL(boolean crossedDateLine, Query query) {
+    if (!ctx.isGeo()) {
+      assert !crossedDateLine;
+      return query;
+    }
+    BooleanQuery.Builder bq = new BooleanQuery.Builder();
+    bq.add(this.makeXDL(crossedDateLine), BooleanClause.Occur.MUST);
+    bq.add(query, BooleanClause.Occur.MUST);
+    return bq.build();
+  }
+
+  private Query makeNumberTermQuery(String field, double number) {
+    if (hasPointVals) {
+      return DoublePoint.newExactQuery(field, number);
+    } else if (legacyNumericFieldType != null) {
+      BytesRefBuilder bytes = new BytesRefBuilder();
+      LegacyNumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(number), 0, bytes);
+      return new TermQuery(new Term(field, bytes.get()));
+    }
+    throw new UnsupportedOperationException("An index is required for this operation.");
+  }
+
+  /**
+   * Returns a numeric range query based on the field type:
+   * {@link LegacyNumericRangeQuery} is used for indexes created using {@code LegacyNumericType} fields;
+   * {@link DoublePoint#newRangeQuery} is used for indexes created using {@link DoublePoint} fields.
+   *
+   * @param fieldname field name; must not be <code>null</code>.
+   * @param min minimum value of the range.
+   * @param max maximum value of the range.
+   * @param minInclusive include the minimum value if <code>true</code>.
+   * @param maxInclusive include the maximum value if <code>true</code>.
+   */
+  private Query makeNumericRangeQuery(String fieldname, Double min, Double max, boolean minInclusive, boolean maxInclusive) {
+    if (hasPointVals) {
+      if (min == null) {
+        min = Double.NEGATIVE_INFINITY;
+      }
+
+      if (max == null) {
+        max = Double.POSITIVE_INFINITY;
+      }
+
+      if (minInclusive == false) {
+        min = Math.nextUp(min);
+      }
+
+      if (maxInclusive == false) {
+        max = Math.nextDown(max);
+      }
+
+      return DoublePoint.newRangeQuery(fieldname, min, max);
+    } else if (legacyNumericFieldType != null) {// todo remove legacy numeric support in 7.0
+      return LegacyNumericRangeQuery.newDoubleRange(fieldname, legacyNumericFieldType.numericPrecisionStep(), min, max, minInclusive, maxInclusive);
+    }
+    throw new UnsupportedOperationException("An index is required for this operation.");
+  }
+}
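
For illustration (not part of this patch): a minimal usage sketch of the strategy
above. It assumes the newInstance(ctx, fieldNamePrefix) factory carried over from
the spatial-extras original; the "bbox" field name is hypothetical.

    import org.apache.lucene.search.Query;
    import org.apache.lucene.spatial.query.SpatialArgs;
    import org.apache.lucene.spatial.query.SpatialOperation;
    import org.apache.solr.legacy.BBoxStrategy;
    import org.locationtech.spatial4j.context.SpatialContext;
    import org.locationtech.spatial4j.shape.Rectangle;

    public class BBoxQueryExample {
      public static void main(String[] args) {
        SpatialContext ctx = SpatialContext.GEO;
        BBoxStrategy strategy = BBoxStrategy.newInstance(ctx, "bbox");
        // makeRectangle takes (minX, maxX, minY, maxY)
        Rectangle queryBox = ctx.makeRectangle(-10, 10, -10, 10);
        // exercises makeQuery -> makeIntersects above; no index is needed to build it
        Query q = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects, queryBox));
        System.out.println(q);
      }
    }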

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/BBoxValueSource.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/BBoxValueSource.java b/solr/core/src/java/org/apache/solr/legacy/BBoxValueSource.java
new file mode 100644
index 0000000..cd577c7
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/BBoxValueSource.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.queries.function.FunctionValues;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.Explanation;
+import org.locationtech.spatial4j.shape.Rectangle;
+
+/**
+ * A ValueSource in which the indexed Rectangle is returned from
+ * {@link org.apache.lucene.queries.function.FunctionValues#objectVal(int)}.
+ *
+ * @lucene.internal
+ */
+class BBoxValueSource extends ValueSource {
+
+  private final BBoxStrategy strategy;
+
+  public BBoxValueSource(BBoxStrategy strategy) {
+    this.strategy = strategy;
+  }
+
+  @Override
+  public String description() {
+    return "bboxShape(" + strategy.getFieldName() + ")";
+  }
+
+  @Override
+  public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
+    LeafReader reader = readerContext.reader();
+    final NumericDocValues minX = DocValues.getNumeric(reader, strategy.field_minX);
+    final NumericDocValues minY = DocValues.getNumeric(reader, strategy.field_minY);
+    final NumericDocValues maxX = DocValues.getNumeric(reader, strategy.field_maxX);
+    final NumericDocValues maxY = DocValues.getNumeric(reader, strategy.field_maxY);
+
+    //reused
+    final Rectangle rect = strategy.getSpatialContext().makeRectangle(0,0,0,0);
+
+    return new FunctionValues() {
+      private int lastDocID = -1;
+
+      private double getDocValue(NumericDocValues values, int doc) throws IOException {
+        int curDocID = values.docID();
+        if (doc > curDocID) {
+          curDocID = values.advance(doc);
+        }
+        if (doc == curDocID) {
+          return Double.longBitsToDouble(values.longValue());
+        } else {
+          return 0.0;
+        }
+      }
+
+      @Override
+      public Object objectVal(int doc) throws IOException {
+        if (doc < lastDocID) {
+          throw new AssertionError("docs were sent out-of-order: lastDocID=" + lastDocID + " vs doc=" + doc);
+        }
+        lastDocID = doc;
+
+        double minXValue = getDocValue(minX, doc);
+        if (minX.docID() != doc) {
+          return null;
+        } else {
+          double minYValue = getDocValue(minY, doc);
+          double maxXValue = getDocValue(maxX, doc);
+          double maxYValue = getDocValue(maxY, doc);
+          rect.reset(minXValue, maxXValue, minYValue, maxYValue);
+          return rect;
+        }
+      }
+
+      @Override
+      public String strVal(int doc) throws IOException {//TODO support WKT output once Spatial4j does
+        Object v = objectVal(doc);
+        return v == null ? null : v.toString();
+      }
+
+      @Override
+      public boolean exists(int doc) throws IOException {
+        getDocValue(minX, doc);
+        return minX.docID() == doc;
+      }
+
+      @Override
+      public Explanation explain(int doc) throws IOException {
+        return Explanation.match(Float.NaN, toString(doc));
+      }
+
+      @Override
+      public String toString(int doc) throws IOException {
+        return description() + '=' + strVal(doc);
+      }
+    };
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    BBoxValueSource that = (BBoxValueSource) o;
+
+    if (!strategy.equals(that.strategy)) return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    return strategy.hashCode();
+  }
+}
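
For illustration (not part of this patch): a sketch of reading rectangles back
through makeShapeValueSource(), assuming an open IndexSearcher and a BBoxStrategy
are available. Note the lastDocID check above: docIDs must be visited in ascending
order within a segment.

    static void dumpRectangles(IndexSearcher searcher, BBoxStrategy strategy) throws IOException {
      Map context = new HashMap();
      for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
        FunctionValues values = strategy.makeShapeValueSource().getValues(context, leaf);
        for (int doc = 0; doc < leaf.reader().maxDoc(); doc++) { // ascending, per the assertion above
          if (values.exists(doc)) {
            // the Rectangle instance is reused across docs; copy it before caching
            System.out.println(doc + " -> " + values.objectVal(doc));
          }
        }
      }
    }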

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/DistanceValueSource.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/DistanceValueSource.java b/solr/core/src/java/org/apache/solr/legacy/DistanceValueSource.java
new file mode 100644
index 0000000..8685d88
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/DistanceValueSource.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import org.apache.lucene.index.NumericDocValues;
+import org.locationtech.spatial4j.distance.DistanceCalculator;
+import org.locationtech.spatial4j.shape.Point;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.queries.function.FunctionValues;
+import org.apache.lucene.queries.function.ValueSource;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * An implementation of the Lucene ValueSource model that returns the distance
+ * for a {@link PointVectorStrategy}.
+ *
+ * @lucene.internal
+ */
+public class DistanceValueSource extends ValueSource {
+
+  private PointVectorStrategy strategy;
+  private final Point from;
+  private final double multiplier;
+
+  /**
+   * Constructor.
+   */
+  public DistanceValueSource(PointVectorStrategy strategy, Point from, double multiplier) {
+    this.strategy = strategy;
+    this.from = from;
+    this.multiplier = multiplier;
+  }
+
+  /**
+   * Returns the ValueSource description.
+   */
+  @Override
+  public String description() {
+    return "DistanceValueSource("+strategy+", "+from+")";
+  }
+
+  /**
+   * Returns the FunctionValues used by the function query.
+   */
+  @Override
+  public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
+    LeafReader reader = readerContext.reader();
+
+    final NumericDocValues ptX = DocValues.getNumeric(reader, strategy.getFieldNameX());
+    final NumericDocValues ptY = DocValues.getNumeric(reader, strategy.getFieldNameY());
+
+    return new FunctionValues() {
+
+      private int lastDocID = -1;
+
+      private final Point from = DistanceValueSource.this.from;
+      private final DistanceCalculator calculator = strategy.getSpatialContext().getDistCalc();
+      private final double nullValue =
+          (strategy.getSpatialContext().isGeo() ? 180 * multiplier : Double.MAX_VALUE);
+
+      private double getDocValue(NumericDocValues values, int doc) throws IOException {
+        int curDocID = values.docID();
+        if (doc > curDocID) {
+          curDocID = values.advance(doc);
+        }
+        if (doc == curDocID) {
+          return Double.longBitsToDouble(values.longValue());
+        } else {
+          return 0.0;
+        }
+      }
+
+      @Override
+      public float floatVal(int doc) throws IOException {
+        return (float) doubleVal(doc);
+      }
+
+      @Override
+      public double doubleVal(int doc) throws IOException {
+        // only compute a distance if this doc has a point; x present implies y present
+        double x = getDocValue(ptX, doc);
+        if (ptX.docID() == doc) {
+          double y = getDocValue(ptY, doc);
+          assert ptY.docID() == doc;
+          return calculator.distance(from, x, y) * multiplier;
+        }
+        return nullValue;
+      }
+
+      @Override
+      public String toString(int doc) throws IOException {
+        return description() + "=" + floatVal(doc);
+      }
+    };
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    DistanceValueSource that = (DistanceValueSource) o;
+
+    if (!from.equals(that.from)) return false;
+    if (!strategy.equals(that.strategy)) return false;
+    if (multiplier != that.multiplier) return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    return from.hashCode();
+  }
+}
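
For illustration (not part of this patch): a sketch of sorting hits by the distance
values above, assuming a PointVectorStrategy and an open searcher; the (0,0) origin
and the kilometre multiplier are hypothetical choices.

    static TopDocs nearestFirst(IndexSearcher searcher, PointVectorStrategy strategy,
                                SpatialContext ctx) throws IOException {
      ValueSource distance =
          strategy.makeDistanceValueSource(ctx.makePoint(0, 0), DistanceUtils.DEG_TO_KM);
      Sort sort = new Sort(distance.getSortField(false)); // ascending: nearest first
      return searcher.search(new MatchAllDocsQuery(), 10, sort);
    }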

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyDoubleField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyDoubleField.java b/solr/core/src/java/org/apache/solr/legacy/LegacyDoubleField.java
new file mode 100644
index 0000000..b6a2897
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyDoubleField.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoublePoint;
+import org.apache.lucene.index.IndexOptions;
+
+
+/**
+ * <p>
+ * Field that indexes <code>double</code> values
+ * for efficient range filtering and sorting. Here's an example usage:
+ * 
+ * <pre class="prettyprint">
+ * document.add(new LegacyDoubleField(name, 6.0, Field.Store.NO));
+ * </pre>
+ * 
+ * For optimal performance, re-use the <code>LegacyDoubleField</code> and
+ * {@link Document} instance for more than one document:
+ * 
+ * <pre class="prettyprint">
+ *  LegacyDoubleField field = new LegacyDoubleField(name, 0.0, Field.Store.NO);
+ *  Document document = new Document();
+ *  document.add(field);
+ * 
+ *  for(all documents) {
+ *    ...
+ *    field.setDoubleValue(value)
+ *    writer.addDocument(document);
+ *    ...
+ *  }
+ * </pre>
+ *
+ * See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
+ * LegacyFloatField}.
+ *
+ * <p>To perform range querying or filtering against a
+ * <code>LegacyDoubleField</code>, use {@link org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ * To sort according to a
+ * <code>LegacyDoubleField</code>, use the normal numeric sort types, eg
+ * {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>LegacyDoubleField</code>
+ * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
+ *
+ * <p>You may add the same field name as a <code>LegacyDoubleField</code> to
+ * the same document more than once.  Range querying and
+ * filtering will be the logical OR of all values; so a range query
+ * will hit all documents that have at least one value in
+ * the range. However sort behavior is not defined.  If you need to sort,
+ * you should separately index a single-valued <code>LegacyDoubleField</code>.</p>
+ *
+ * <p>A <code>LegacyDoubleField</code> will consume somewhat more disk space
+ * in the index than an ordinary single-valued field.
+ * However, for a typical index that includes substantial
+ * textual content per document, this increase will likely
+ * be in the noise. </p>
+ *
+ * <p>Within Lucene, each numeric value is indexed as a
+ * <em>trie</em> structure, where each term is logically
+ * assigned to larger and larger pre-defined brackets (which
+ * are simply lower-precision representations of the value).
+ * The step size between each successive bracket is called the
+ * <code>precisionStep</code>, measured in bits.  Smaller
+ * <code>precisionStep</code> values result in larger number
+ * of brackets, which consumes more disk space in the index
+ * but may result in faster range search performance.  The
+ * default value, 16, was selected for a reasonable tradeoff
+ * of disk space consumption versus performance.  You can
+ * create a custom {@link LegacyFieldType} and invoke the {@link
+ * LegacyFieldType#setNumericPrecisionStep} method if you'd
+ * like to change the value.  Note that you must also
+ * specify a congruent value when creating {@link
+ * org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ * For low cardinality fields larger precision steps are good.
+ * If the cardinality is &lt; 100, it is fair
+ * to use {@link Integer#MAX_VALUE}, which produces one
+ * term per value.
+ *
+ * <p>For more information on the internals of numeric trie
+ * indexing, including the <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * configuration, see {@link org.apache.solr.legacy.LegacyNumericRangeQuery}. The format of
+ * indexed values is described in {@link org.apache.solr.legacy.LegacyNumericUtils}.
+ *
+ * <p>If you only need to sort by numeric value, and never
+ * run range querying/filtering, you can index using a
+ * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+ * This will minimize disk space consumed. </p>
+ *
+ * <p>More advanced users can instead use {@link
+ * org.apache.solr.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
+ * class is a wrapper around this token stream type for
+ * easier, more intuitive usage.</p>
+ *
+ * @deprecated Please use {@link DoublePoint} instead
+ *
+ * @since 2.9
+ */
+
+@Deprecated
+public final class LegacyDoubleField extends LegacyField {
+  
+  /** 
+   * Type for a LegacyDoubleField that is not stored:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
+  static {
+    TYPE_NOT_STORED.setTokenized(true);
+    TYPE_NOT_STORED.setOmitNorms(true);
+    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_NOT_STORED.setNumericType(LegacyNumericType.DOUBLE);
+    TYPE_NOT_STORED.freeze();
+  }
+
+  /** 
+   * Type for a stored LegacyDoubleField:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
+  static {
+    TYPE_STORED.setTokenized(true);
+    TYPE_STORED.setOmitNorms(true);
+    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_STORED.setNumericType(LegacyNumericType.DOUBLE);
+    TYPE_STORED.setStored(true);
+    TYPE_STORED.freeze();
+  }
+
+  /** Creates a stored or un-stored LegacyDoubleField with the provided value
+   *  and default <code>precisionStep</code> {@link
+   *  org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
+   *  @param name field name
+   *  @param value 64-bit double value
+   *  @param stored Store.YES if the content should also be stored
+   *  @throws IllegalArgumentException if the field name is null. 
+   */
+  public LegacyDoubleField(String name, double value, Store stored) {
+    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
+    fieldsData = Double.valueOf(value);
+  }
+  
+  /** Expert: allows you to customize the {@link
+   *  LegacyFieldType}. 
+   *  @param name field name
+   *  @param value 64-bit double value
+   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
+   *         of {@link LegacyNumericType#DOUBLE}.
+   *  @throws IllegalArgumentException if the field name or type is null, or
+   *          if the field type does not have a DOUBLE numericType()
+   */
+  public LegacyDoubleField(String name, double value, LegacyFieldType type) {
+    super(name, type);
+    if (type.numericType() != LegacyNumericType.DOUBLE) {
+      throw new IllegalArgumentException("type.numericType() must be DOUBLE but got " + type.numericType());
+    }
+    fieldsData = Double.valueOf(value);
+  }
+}
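
For illustration (not part of this patch): the @deprecated note above points to
DoublePoint. Under points, the indexed, stored, and docvalues roles become separate
fields; the "price" field name is hypothetical.

    Document doc = new Document();
    // legacy trie-encoded field, deprecated:
    doc.add(new LegacyDoubleField("price", 6.0, Field.Store.YES));
    // points-based equivalent:
    doc.add(new DoublePoint("price", 6.0));          // indexed for range queries
    doc.add(new StoredField("price", 6.0));          // retrievable value
    doc.add(new DoubleDocValuesField("price", 6.0)); // only if sorting/faceting is needed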

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyField.java b/solr/core/src/java/org/apache/solr/legacy/LegacyField.java
new file mode 100644
index 0000000..7a6bde0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyField.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexOptions;
+
+/**
+ * Field extension with support for legacy numerics
+ * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+ */
+@Deprecated
+public class LegacyField extends Field {
+
+  /**
+   * Expert: creates a field with no initial value.
+   * Intended only for custom LegacyField subclasses.
+   * @param name field name
+   * @param type field type
+   * @throws IllegalArgumentException if either the name or type
+   *         is null.
+   */
+  public LegacyField(String name, LegacyFieldType type) {
+    super(name, type);
+  }
+  
+  @Override
+  public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) {
+    if (fieldType().indexOptions() == IndexOptions.NONE) {
+      // Not indexed
+      return null;
+    }
+    final LegacyFieldType fieldType = (LegacyFieldType) fieldType();
+    final LegacyNumericType numericType = fieldType.numericType();
+    if (numericType != null) {
+      if (!(reuse instanceof LegacyNumericTokenStream && ((LegacyNumericTokenStream)reuse).getPrecisionStep() == fieldType.numericPrecisionStep())) {
+        // lazy init the TokenStream as it is heavy to instantiate
+        // (attributes,...) if not needed (stored field loading)
+        reuse = new LegacyNumericTokenStream(fieldType.numericPrecisionStep());
+      }
+      final LegacyNumericTokenStream nts = (LegacyNumericTokenStream) reuse;
+      // initialize value in TokenStream
+      final Number val = (Number) fieldsData;
+      switch (numericType) {
+      case INT:
+        nts.setIntValue(val.intValue());
+        break;
+      case LONG:
+        nts.setLongValue(val.longValue());
+        break;
+      case FLOAT:
+        nts.setFloatValue(val.floatValue());
+        break;
+      case DOUBLE:
+        nts.setDoubleValue(val.doubleValue());
+        break;
+      default:
+        throw new AssertionError("Should never get here");
+      }
+      return reuse;
+    }
+    return super.tokenStream(analyzer, reuse);
+  }
+  
+  @Override
+  public void setTokenStream(TokenStream tokenStream) {
+    final LegacyFieldType fieldType = (LegacyFieldType) fieldType();
+    if (fieldType.numericType() != null) {
+      throw new IllegalArgumentException("cannot set private TokenStream on numeric fields");
+    }
+    super.setTokenStream(tokenStream);
+  }
+
+}
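
For illustration (not part of this patch): the tokenStream override above is what
makes the re-use pattern in the numeric-field javadocs cheap: the token stream is
created once per field instance and re-initialized per value. A hypothetical sketch
(the analyzer argument is ignored on the numeric path):

    LegacyDoubleField field = new LegacyDoubleField("d", 1.0, Field.Store.NO);
    TokenStream ts = field.tokenStream(null, null); // creates a LegacyNumericTokenStream
    field.setDoubleValue(2.0);
    ts = field.tokenStream(null, ts);               // same stream instance, value re-initialized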

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyFieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyFieldType.java b/solr/core/src/java/org/apache/solr/legacy/LegacyFieldType.java
new file mode 100644
index 0000000..a18a00a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyFieldType.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.IndexOptions;
+
+/**
+ * FieldType extension with support for legacy numerics
+ * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+ */
+@Deprecated
+public final class LegacyFieldType extends FieldType {
+  private LegacyNumericType numericType;
+  private int numericPrecisionStep = LegacyNumericUtils.PRECISION_STEP_DEFAULT;
+
+  /**
+   * Create a new mutable LegacyFieldType with all of the properties from <code>ref</code>
+   */
+  public LegacyFieldType(LegacyFieldType ref) {
+    super(ref);
+    this.numericType = ref.numericType;
+    this.numericPrecisionStep = ref.numericPrecisionStep;
+  }
+  
+  /**
+   * Create a new FieldType with default properties.
+   */
+  public LegacyFieldType() {
+  }
+  
+  /**
+   * Specifies the field's numeric type.
+   * @param type numeric type, or null if the field has no numeric type.
+   * @throws IllegalStateException if this FieldType is frozen against
+   *         future modifications.
+   * @see #numericType()
+   *
+   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+   */
+  @Deprecated
+  public void setNumericType(LegacyNumericType type) {
+    checkIfFrozen();
+    numericType = type;
+  }
+  
+  /** 
+   * LegacyNumericType: if non-null then the field's value will be indexed
+   * numerically so that {@link org.apache.solr.legacy.LegacyNumericRangeQuery} can be used at
+   * search time. 
+   * <p>
+   * The default is <code>null</code> (no numeric type) 
+   * @see #setNumericType(LegacyNumericType)
+   *
+   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+   */
+  @Deprecated
+  public LegacyNumericType numericType() {
+    return numericType;
+  }
+  
+  /**
+   * Sets the numeric precision step for the field.
+   * @param precisionStep numeric precision step for the field
+   * @throws IllegalArgumentException if precisionStep is less than 1. 
+   * @throws IllegalStateException if this FieldType is frozen against
+   *         future modifications.
+   * @see #numericPrecisionStep()
+   *
+   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+   */
+  @Deprecated
+  public void setNumericPrecisionStep(int precisionStep) {
+    checkIfFrozen();
+    if (precisionStep < 1) {
+      throw new IllegalArgumentException("precisionStep must be >= 1 (got " + precisionStep + ")");
+    }
+    this.numericPrecisionStep = precisionStep;
+  }
+  
+  /** 
+   * Precision step for numeric field. 
+   * <p>
+   * This has no effect if {@link #numericType()} returns null.
+   * <p>
+   * The default is {@link org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT}
+   * @see #setNumericPrecisionStep(int)
+   *
+   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+   */
+  @Deprecated
+  public int numericPrecisionStep() {
+    return numericPrecisionStep;
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    result = prime * result + numericPrecisionStep;
+    result = prime * result + ((numericType == null) ? 0 : numericType.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!super.equals(obj)) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) return false;
+    LegacyFieldType other = (LegacyFieldType) obj;
+    if (numericPrecisionStep != other.numericPrecisionStep) return false;
+    if (numericType != other.numericType) return false;
+    return true;
+  }
+
+  /** Prints a Field for human consumption. */
+  @Override
+  public String toString() {
+    StringBuilder result = new StringBuilder();
+    result.append(super.toString());
+    if (indexOptions() != IndexOptions.NONE) {
+      if (result.length() > 0) {
+        result.append(",");
+      }
+      if (numericType != null) {
+        result.append("numericType=");
+        result.append(numericType);
+        result.append(",numericPrecisionStep=");
+        result.append(numericPrecisionStep);
+      }
+    }
+    return result.toString();
+  }
+}
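
For illustration (not part of this patch): a sketch of the precision-step knob
described above, e.g. a sort-only double field with one term per value; the
"sortOnly" field name is hypothetical.

    LegacyFieldType ft = new LegacyFieldType(LegacyDoubleField.TYPE_NOT_STORED); // unfrozen copy
    ft.setNumericPrecisionStep(Integer.MAX_VALUE); // no range queries, minimal index size
    ft.freeze();
    doc.add(new LegacyDoubleField("sortOnly", 42.0, ft));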

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyFloatField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyFloatField.java b/solr/core/src/java/org/apache/solr/legacy/LegacyFloatField.java
new file mode 100644
index 0000000..79ec0bd
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyFloatField.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FloatPoint;
+import org.apache.lucene.index.IndexOptions;
+
+/**
+ * <p>
+ * Field that indexes <code>float</code> values
+ * for efficient range filtering and sorting. Here's an example usage:
+ * 
+ * <pre class="prettyprint">
+ * document.add(new LegacyFloatField(name, 6.0F, Field.Store.NO));
+ * </pre>
+ * 
+ * For optimal performance, re-use the <code>LegacyFloatField</code> and
+ * {@link Document} instance for more than one document:
+ * 
+ * <pre class="prettyprint">
+ *  LegacyFloatField field = new LegacyFloatField(name, 0.0F, Field.Store.NO);
+ *  Document document = new Document();
+ *  document.add(field);
+ * 
+ *  for(all documents) {
+ *    ...
+ *    field.setFloatValue(value)
+ *    writer.addDocument(document);
+ *    ...
+ *  }
+ * </pre>
+ *
+ * See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
+ * LegacyDoubleField}.
+ *
+ * <p>To perform range querying or filtering against a
+ * <code>LegacyFloatField</code>, use {@link org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ * To sort according to a
+ * <code>LegacyFloatField</code>, use the normal numeric sort types, eg
+ * {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>LegacyFloatField</code>
+ * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
+ *
+ * <p>You may add the same field name as a <code>LegacyFloatField</code> to
+ * the same document more than once.  Range querying and
+ * filtering will be the logical OR of all values; so a range query
+ * will hit all documents that have at least one value in
+ * the range. However sort behavior is not defined.  If you need to sort,
+ * you should separately index a single-valued <code>LegacyFloatField</code>.</p>
+ *
+ * <p>A <code>LegacyFloatField</code> will consume somewhat more disk space
+ * in the index than an ordinary single-valued field.
+ * However, for a typical index that includes substantial
+ * textual content per document, this increase will likely
+ * be in the noise. </p>
+ *
+ * <p>Within Lucene, each numeric value is indexed as a
+ * <em>trie</em> structure, where each term is logically
+ * assigned to larger and larger pre-defined brackets (which
+ * are simply lower-precision representations of the value).
+ * The step size between each successive bracket is called the
+ * <code>precisionStep</code>, measured in bits.  Smaller
+ * <code>precisionStep</code> values result in larger number
+ * of brackets, which consumes more disk space in the index
+ * but may result in faster range search performance.  The
+ * default value, 8, was selected for a reasonable tradeoff
+ * of disk space consumption versus performance.  You can
+ * create a custom {@link LegacyFieldType} and invoke the {@link
+ * LegacyFieldType#setNumericPrecisionStep} method if you'd
+ * like to change the value.  Note that you must also
+ * specify a congruent value when creating {@link
+ * org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ * For low cardinality fields larger precision steps are good.
+ * If the cardinality is &lt; 100, it is fair
+ * to use {@link Integer#MAX_VALUE}, which produces one
+ * term per value.
+ *
+ * <p>For more information on the internals of numeric trie
+ * indexing, including the <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * configuration, see {@link org.apache.solr.legacy.LegacyNumericRangeQuery}. The format of
+ * indexed values is described in {@link org.apache.solr.legacy.LegacyNumericUtils}.
+ *
+ * <p>If you only need to sort by numeric value, and never
+ * run range querying/filtering, you can index using a
+ * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+ * This will minimize disk space consumed. </p>
+ *
+ * <p>More advanced users can instead use {@link
+ * org.apache.solr.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
+ * class is a wrapper around this token stream type for
+ * easier, more intuitive usage.</p>
+ *
+ * @deprecated Please use {@link FloatPoint} instead
+ *
+ * @since 2.9
+ */
+
+@Deprecated
+public final class LegacyFloatField extends LegacyField {
+  
+  /** 
+   * Type for a LegacyFloatField that is not stored:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
+  static {
+    TYPE_NOT_STORED.setTokenized(true);
+    TYPE_NOT_STORED.setOmitNorms(true);
+    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_NOT_STORED.setNumericType(LegacyNumericType.FLOAT);
+    TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
+    TYPE_NOT_STORED.freeze();
+  }
+
+  /** 
+   * Type for a stored LegacyFloatField:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
+  static {
+    TYPE_STORED.setTokenized(true);
+    TYPE_STORED.setOmitNorms(true);
+    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_STORED.setNumericType(LegacyNumericType.FLOAT);
+    TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
+    TYPE_STORED.setStored(true);
+    TYPE_STORED.freeze();
+  }
+
+  /** Creates a stored or un-stored LegacyFloatField with the provided value
+   *  and default <code>precisionStep</code> {@link
+   *  org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
+   *  @param name field name
+   *  @param value 32-bit float value
+   *  @param stored Store.YES if the content should also be stored
+   *  @throws IllegalArgumentException if the field name is null.
+   */
+  public LegacyFloatField(String name, float value, Store stored) {
+    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
+    fieldsData = Float.valueOf(value);
+  }
+  
+  /** Expert: allows you to customize the {@link
+   *  LegacyFieldType}. 
+   *  @param name field name
+   *  @param value 32-bit float value
+   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
+   *         of {@link LegacyNumericType#FLOAT}.
+   *  @throws IllegalArgumentException if the field name or type is null, or
+   *          if the field type does not have a FLOAT numericType()
+   */
+  public LegacyFloatField(String name, float value, LegacyFieldType type) {
+    super(name, type);
+    if (type.numericType() != LegacyNumericType.FLOAT) {
+      throw new IllegalArgumentException("type.numericType() must be FLOAT but got " + type.numericType());
+    }
+    fieldsData = Float.valueOf(value);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyIntField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyIntField.java b/solr/core/src/java/org/apache/solr/legacy/LegacyIntField.java
new file mode 100644
index 0000000..838ad4e
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyIntField.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.IntPoint;
+import org.apache.lucene.index.IndexOptions;
+
+/**
+ * <p>
+ * Field that indexes <code>int</code> values
+ * for efficient range filtering and sorting. Here's an example usage:
+ * 
+ * <pre class="prettyprint">
+ * document.add(new LegacyIntField(name, 6, Field.Store.NO));
+ * </pre>
+ * 
+ * For optimal performance, re-use the <code>LegacyIntField</code> and
+ * {@link Document} instance for more than one document:
+ * 
+ * <pre class="prettyprint">
+ *  LegacyIntField field = new LegacyIntField(name, 6, Field.Store.NO);
+ *  Document document = new Document();
+ *  document.add(field);
+ * 
+ *  for(all documents) {
+ *    ...
+ *    field.setIntValue(value)
+ *    writer.addDocument(document);
+ *    ...
+ *  }
+ * </pre>
+ *
+ * See also {@link LegacyLongField}, {@link LegacyFloatField}, {@link
+ * LegacyDoubleField}.
+ *
+ * <p>To perform range querying or filtering against a
+ * <code>LegacyIntField</code>, use {@link org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ * To sort according to a
+ * <code>LegacyIntField</code>, use the normal numeric sort types, eg
+ * {@link org.apache.lucene.search.SortField.Type#INT}. <code>LegacyIntField</code>
+ * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
+ *
+ * <p>You may add the same field name as a <code>LegacyIntField</code> to
+ * the same document more than once.  Range querying and
+ * filtering will be the logical OR of all values; so a range query
+ * will hit all documents that have at least one value in
+ * the range. However, sort behavior is not defined.  If you need to sort,
+ * you should separately index a single-valued <code>LegacyIntField</code>.</p>
+ *
+ * <p>A <code>LegacyIntField</code> will consume somewhat more disk space
+ * in the index than an ordinary single-valued field.
+ * However, for a typical index that includes substantial
+ * textual content per document, this increase will likely
+ * be in the noise. </p>
+ *
+ * <p>Within Lucene, each numeric value is indexed as a
+ * <em>trie</em> structure, where each term is logically
+ * assigned to larger and larger pre-defined brackets (which
+ * are simply lower-precision representations of the value).
+ * The step size between each successive bracket is called the
+ * <code>precisionStep</code>, measured in bits.  Smaller
+ * <code>precisionStep</code> values result in a larger number
+ * of brackets, which consumes more disk space in the index
+ * but may result in faster range search performance.  The
+ * default value, 8, was selected for a reasonable tradeoff
+ * of disk space consumption versus performance.  You can
+ * create a custom {@link LegacyFieldType} and invoke the {@link
+ * LegacyFieldType#setNumericPrecisionStep} method if you'd
+ * like to change the value.  Note that you must also
+ * specify a congruent value when creating {@link
+ * org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ * For low-cardinality fields, larger precision steps are good.
+ * If the cardinality is &lt; 100, it is fair
+ * to use {@link Integer#MAX_VALUE}, which produces one
+ * term per value.
+ *
+ * <p>For more information on the internals of numeric trie
+ * indexing, including the <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * configuration, see {@link org.apache.solr.legacy.LegacyNumericRangeQuery}. The format of
+ * indexed values is described in {@link org.apache.solr.legacy.LegacyNumericUtils}.
+ *
+ * <p>If you only need to sort by numeric value, and never
+ * run range querying/filtering, you can index using a
+ * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+ * This will minimize disk space consumed. </p>
+ *
+ * <p>More advanced users can instead use {@link
+ * org.apache.solr.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
+ * class is a wrapper around this token stream type for
+ * easier, more intuitive usage.</p>
+ *
+ * @deprecated Please use {@link IntPoint} instead
+ *
+ * @since 2.9
+ */
+
+@Deprecated
+public final class LegacyIntField extends LegacyField {
+  
+  /** 
+   * Type for a LegacyIntField that is not stored:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
+  static {
+    TYPE_NOT_STORED.setTokenized(true);
+    TYPE_NOT_STORED.setOmitNorms(true);
+    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_NOT_STORED.setNumericType(LegacyNumericType.INT);
+    TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
+    TYPE_NOT_STORED.freeze();
+  }
+
+  /** 
+   * Type for a stored LegacyIntField:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
+  static {
+    TYPE_STORED.setTokenized(true);
+    TYPE_STORED.setOmitNorms(true);
+    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_STORED.setNumericType(LegacyNumericType.INT);
+    TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
+    TYPE_STORED.setStored(true);
+    TYPE_STORED.freeze();
+  }
+
+  /** Creates a stored or un-stored LegacyIntField with the provided value
+   *  and default <code>precisionStep</code> {@link
+   *  org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
+   *  @param name field name
+   *  @param value 32-bit integer value
+   *  @param stored Store.YES if the content should also be stored
+   *  @throws IllegalArgumentException if the field name is null.
+   */
+  public LegacyIntField(String name, int value, Store stored) {
+    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
+    fieldsData = Integer.valueOf(value);
+  }
+  
+  /** Expert: allows you to customize the {@link
+   *  LegacyFieldType}. 
+   *  @param name field name
+   *  @param value 32-bit integer value
+   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
+   *         of {@link LegacyNumericType#INT}.
+   *  @throws IllegalArgumentException if the field name or type is null, or
+   *          if the field type does not have an INT numericType()
+   */
+  public LegacyIntField(String name, int value, LegacyFieldType type) {
+    super(name, type);
+    if (type.numericType() != LegacyNumericType.INT) {
+      throw new IllegalArgumentException("type.numericType() must be INT but got " + type.numericType());
+    }
+    fieldsData = Integer.valueOf(value);
+  }
+}
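
Since the class above is deprecated in favor of IntPoint, a minimal
migration sketch (not code from this commit; "qty" is a made-up field
name, and a stored value needs a separate StoredField because point
fields are index-only):

  // Deprecated trie-encoded field, stored:
  doc.add(new LegacyIntField("qty", 6, Field.Store.YES));

  // Points-based replacement: index with IntPoint, store separately.
  doc.add(new IntPoint("qty", 6));
  doc.add(new StoredField("qty", 6));

  // Range search against the points index:
  Query q = IntPoint.newRangeQuery("qty", 1, 10);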

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyLongField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyLongField.java b/solr/core/src/java/org/apache/solr/legacy/LegacyLongField.java
new file mode 100644
index 0000000..fb48437
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyLongField.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.index.IndexOptions;
+
+
+/**
+ * <p>
+ * Field that indexes <code>long</code> values
+ * for efficient range filtering and sorting. Here's an example usage:
+ * 
+ * <pre class="prettyprint">
+ * document.add(new LegacyLongField(name, 6L, Field.Store.NO));
+ * </pre>
+ * 
+ * For optimal performance, re-use the <code>LegacyLongField</code> and
+ * {@link Document} instance for more than one document:
+ * 
+ * <pre class="prettyprint">
+ *  LegacyLongField field = new LegacyLongField(name, 0L, Field.Store.NO);
+ *  Document document = new Document();
+ *  document.add(field);
+ * 
+ *  for(all documents) {
+ *    ...
+ *    field.setLongValue(value)
+ *    writer.addDocument(document);
+ *    ...
+ *  }
+ * </pre>
+ *
+ * See also {@link LegacyIntField}, {@link LegacyFloatField}, {@link
+ * LegacyDoubleField}.
+ *
+ * Any type that can be converted to long can also be
+ * indexed.  For example, date/time values represented by a
+ * {@link java.util.Date} can be translated into a long
+ * value using the {@link java.util.Date#getTime} method.  If you
+ * don't need millisecond precision, you can quantize the
+ * value, either by dividing the result of
+ * {@link java.util.Date#getTime} or using the separate getters
+ * (for year, month, etc.) to construct an <code>int</code> or
+ * <code>long</code> value.
+ *
+ * <p>To perform range querying or filtering against a
+ * <code>LegacyLongField</code>, use {@link org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ * To sort according to a
+ * <code>LegacyLongField</code>, use the normal numeric sort types, eg
+ * {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LegacyLongField</code>
+ * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.
+ *
+ * <p>You may add the same field name as a <code>LegacyLongField</code> to
+ * the same document more than once.  Range querying and
+ * filtering will be the logical OR of all values; so a range query
+ * will hit all documents that have at least one value in
+ * the range. However, sort behavior is not defined.  If you need to sort,
+ * you should separately index a single-valued <code>LegacyLongField</code>.
+ *
+ * <p>A <code>LegacyLongField</code> will consume somewhat more disk space
+ * in the index than an ordinary single-valued field.
+ * However, for a typical index that includes substantial
+ * textual content per document, this increase will likely
+ * be in the noise. </p>
+ *
+ * <p>Within Lucene, each numeric value is indexed as a
+ * <em>trie</em> structure, where each term is logically
+ * assigned to larger and larger pre-defined brackets (which
+ * are simply lower-precision representations of the value).
+ * The step size between each successive bracket is called the
+ * <code>precisionStep</code>, measured in bits.  Smaller
+ * <code>precisionStep</code> values result in a larger number
+ * of brackets, which consumes more disk space in the index
+ * but may result in faster range search performance.  The
+ * default value, 16, was selected for a reasonable tradeoff
+ * of disk space consumption versus performance.  You can
+ * create a custom {@link LegacyFieldType} and invoke the {@link
+ * LegacyFieldType#setNumericPrecisionStep} method if you'd
+ * like to change the value.  Note that you must also
+ * specify a congruent value when creating {@link
+ * org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ * For low-cardinality fields, larger precision steps are good.
+ * If the cardinality is &lt; 100, it is fair
+ * to use {@link Integer#MAX_VALUE}, which produces one
+ * term per value.
+ *
+ * <p>For more information on the internals of numeric trie
+ * indexing, including the <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * configuration, see {@link org.apache.solr.legacy.LegacyNumericRangeQuery}. The format of
+ * indexed values is described in {@link org.apache.solr.legacy.LegacyNumericUtils}.
+ *
+ * <p>If you only need to sort by numeric value, and never
+ * run range querying/filtering, you can index using a
+ * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+ * This will minimize disk space consumed.
+ *
+ * <p>More advanced users can instead use {@link
+ * org.apache.solr.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
+ * class is a wrapper around this token stream type for
+ * easier, more intuitive usage.</p>
+ *
+ * @deprecated Please use {@link LongPoint} instead
+ *
+ * @since 2.9
+ */
+
+@Deprecated
+public final class LegacyLongField extends LegacyField {
+  
+  /** 
+   * Type for a LegacyLongField that is not stored:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
+  static {
+    TYPE_NOT_STORED.setTokenized(true);
+    TYPE_NOT_STORED.setOmitNorms(true);
+    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_NOT_STORED.setNumericType(LegacyNumericType.LONG);
+    TYPE_NOT_STORED.freeze();
+  }
+
+  /** 
+   * Type for a stored LegacyLongField:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
+  static {
+    TYPE_STORED.setTokenized(true);
+    TYPE_STORED.setOmitNorms(true);
+    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_STORED.setNumericType(LegacyNumericType.LONG);
+    TYPE_STORED.setStored(true);
+    TYPE_STORED.freeze();
+  }
+
+  /** Creates a stored or un-stored LegacyLongField with the provided value
+   *  and default <code>precisionStep</code> {@link
+   *  org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
+   *  @param name field name
+   *  @param value 64-bit long value
+   *  @param stored Store.YES if the content should also be stored
+   *  @throws IllegalArgumentException if the field name is null.
+   */
+  public LegacyLongField(String name, long value, Store stored) {
+    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
+    fieldsData = Long.valueOf(value);
+  }
+  
+  /** Expert: allows you to customize the {@link
+   *  LegacyFieldType}. 
+   *  @param name field name
+   *  @param value 64-bit long value
+   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
+   *         of {@link LegacyNumericType#LONG}.
+   *  @throws IllegalArgumentException if the field name or type is null, or
+   *          if the field type does not have a LONG numericType()
+   */
+  public LegacyLongField(String name, long value, LegacyFieldType type) {
+    super(name, type);
+    if (type.numericType() != LegacyNumericType.LONG) {
+      throw new IllegalArgumentException("type.numericType() must be LONG but got " + type.numericType());
+    }
+    fieldsData = Long.valueOf(value);
+  }
+}
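
The date/time paragraph in the Javadoc above can be made concrete with a
small sketch (not code from this commit; "date_day" is a made-up field
name, and TimeUnit is java.util.concurrent.TimeUnit):

  // Quantize a java.util.Date to whole days since the epoch,
  // then index the result as a long.
  long days = date.getTime() / TimeUnit.DAYS.toMillis(1);
  doc.add(new LegacyLongField("date_day", days, Field.Store.NO));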


[08/25] lucene-solr:jira/solr-8668: SOLR-10758: fix broken internal link to new HMM Chinese Tokenizer section

Posted by cp...@apache.org.
SOLR-10758: fix broken internal link to new HMM Chinese Tokenizer section


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9fbc9db1
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9fbc9db1
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9fbc9db1

Branch: refs/heads/jira/solr-8668
Commit: 9fbc9db1c17d9fe5a7281f89a4bb18e18f38fceb
Parents: 6bbdfbc
Author: Steve Rowe <sa...@gmail.com>
Authored: Fri May 26 16:57:53 2017 -0400
Committer: Steve Rowe <sa...@gmail.com>
Committed: Fri May 26 16:57:53 2017 -0400

----------------------------------------------------------------------
 solr/solr-ref-guide/src/language-analysis.adoc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9fbc9db1/solr/solr-ref-guide/src/language-analysis.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/language-analysis.adoc b/solr/solr-ref-guide/src/language-analysis.adoc
index c82cd61..11b0b78 100644
--- a/solr/solr-ref-guide/src/language-analysis.adoc
+++ b/solr/solr-ref-guide/src/language-analysis.adoc
@@ -565,7 +565,7 @@ See the example under <<LanguageAnalysis-TraditionalChinese,Traditional Chinese>
 [[LanguageAnalysis-SimplifiedChinese]]
 === Simplified Chinese
 
-For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the <<LanguageAnalysis-HMMChineseTokenizerFactory,HMM Chinese Tokenizer>>. This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model. To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<lib-directives-in-solrconfig.adoc#lib-directives-in-solrconfig,Lib Directives in SolrConfig>>). See the `solr/contrib/analysis-extras/README.txt` for information on which jars you need to add to your `SOLR_HOME/lib`.
+For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the <<LanguageAnalysis-HMMChineseTokenizer,HMM Chinese Tokenizer>>. This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model. To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<lib-directives-in-solrconfig.adoc#lib-directives-in-solrconfig,Lib Directives in SolrConfig>>). See the `solr/contrib/analysis-extras/README.txt` for information on which jars you need to add to your `SOLR_HOME/lib`.
 
 The default configuration of the <<tokenizers.adoc#Tokenizers-ICUTokenizer,ICU Tokenizer>> is also suitable for Simplified Chinese text.  It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.  To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<lib-directives-in-solrconfig.adoc#lib-directives-in-solrconfig,Lib Directives in SolrConfig>>). See the `solr/contrib/analysis-extras/README.txt` for information on which jars you need to add to your `SOLR_HOME/lib`.
 
@@ -598,6 +598,7 @@ Also useful for Chinese analysis:
 </analyzer>
 ----
 
+[[LanguageAnalysis-HMMChineseTokenizer]]
 === HMM Chinese Tokenizer
 
 For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the `solr.HMMChineseTokenizerFactory` in the `analysis-extras` contrib module. This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model. To use this tokenizer, see `solr/contrib/analysis-extras/README.txt` for instructions on which jars you need to add to your `solr_home/lib`.


[25/25] lucene-solr:jira/solr-8668: Merge branch 'master' into jira/solr-8668

Posted by cp...@apache.org.
Merge branch 'master' into jira/solr-8668


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/36faceaf
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/36faceaf
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/36faceaf

Branch: refs/heads/jira/solr-8668
Commit: 36faceaf5bf7c900215aa33c40c2b7b9a3d8a9e6
Parents: 8143752 759fa42
Author: Christine Poerschke <cp...@apache.org>
Authored: Tue May 30 12:13:32 2017 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Tue May 30 12:13:32 2017 +0100

----------------------------------------------------------------------
 dev-tools/scripts/checkJavaDocs.py              |   2 +-
 lucene/CHANGES.txt                              |   6 +
 lucene/MIGRATE.txt                              |   6 +
 .../lucene/analysis/core/KeywordTokenizer.java  |  10 +-
 .../analysis/core/KeywordTokenizerFactory.java  |  19 +-
 .../lucene/analysis/core/LetterTokenizer.java   |  14 +
 .../analysis/core/LetterTokenizerFactory.java   |  19 +-
 .../analysis/core/LowerCaseTokenizer.java       |  13 +
 .../core/LowerCaseTokenizerFactory.java         |  37 +-
 .../core/UnicodeWhitespaceTokenizer.java        |  13 +
 .../analysis/core/WhitespaceTokenizer.java      |  13 +
 .../core/WhitespaceTokenizerFactory.java        |  18 +-
 .../lucene/analysis/util/CharTokenizer.java     |  27 +-
 .../analysis/core/TestKeywordTokenizer.java     |  88 +++
 .../core/TestUnicodeWhitespaceTokenizer.java    |  51 ++
 .../analysis/util/TestCharTokenizers.java       |  95 +++
 .../icu/segmentation/TestICUTokenizerCJK.java   |   9 +-
 .../apache/lucene/legacy/LegacyDoubleField.java | 174 -----
 .../org/apache/lucene/legacy/LegacyField.java   |  90 ---
 .../apache/lucene/legacy/LegacyFieldType.java   | 149 ----
 .../apache/lucene/legacy/LegacyFloatField.java  | 174 -----
 .../apache/lucene/legacy/LegacyIntField.java    | 175 -----
 .../apache/lucene/legacy/LegacyLongField.java   | 184 -----
 .../lucene/legacy/LegacyNumericRangeQuery.java  | 537 --------------
 .../lucene/legacy/LegacyNumericTokenStream.java | 357 ----------
 .../apache/lucene/legacy/LegacyNumericType.java |  34 -
 .../lucene/legacy/LegacyNumericUtils.java       | 510 --------------
 .../lucene/legacy/doc-files/nrq-formula-1.png   | Bin 3171 -> 0 bytes
 .../lucene/legacy/doc-files/nrq-formula-2.png   | Bin 3694 -> 0 bytes
 .../org/apache/lucene/legacy/package-info.java  |  21 -
 .../index/TestBackwardsCompatibility.java       |  52 --
 .../apache/lucene/legacy/TestLegacyField.java   | 186 -----
 .../lucene/legacy/TestLegacyFieldReuse.java     |  81 ---
 .../lucene/legacy/TestLegacyNumericUtils.java   | 571 ---------------
 .../apache/lucene/legacy/TestLegacyTerms.java   | 164 -----
 .../TestMultiValuedNumericRangeQuery.java       |  84 ---
 .../lucene/legacy/TestNumericRangeQuery32.java  | 461 ------------
 .../lucene/legacy/TestNumericRangeQuery64.java  | 490 -------------
 .../lucene/legacy/TestNumericTokenStream.java   | 188 -----
 .../lucene/spatial/bbox/BBoxStrategy.java       |  59 +-
 .../prefix/BytesRefIteratorTokenStream.java     |   2 -
 .../prefix/NumberRangePrefixTreeStrategy.java   |  24 +-
 .../spatial/prefix/PrefixTreeStrategy.java      |  20 +-
 .../prefix/RecursivePrefixTreeStrategy.java     |  41 +-
 .../spatial/vector/PointVectorStrategy.java     |  52 --
 .../lucene/spatial/DistanceStrategyTest.java    |   3 -
 .../lucene/spatial/QueryEqualsHashCodeTest.java |   2 -
 .../lucene/spatial/bbox/TestBBoxStrategy.java   |  33 +-
 .../spatial/prefix/DateNRStrategyTest.java      |  43 +-
 .../spatial/vector/TestPointVectorStrategy.java |   7 +-
 solr/CHANGES.txt                                |  20 +-
 .../solr/analytics/util/AnalyticsParsers.java   |   2 +-
 .../util/valuesource/DateFieldSource.java       |   2 +-
 .../apache/solr/core/CorePropertiesLocator.java |   5 +-
 .../org/apache/solr/handler/StreamHandler.java  |   5 +-
 .../handler/component/HttpShardHandler.java     |  26 +-
 .../solr/highlight/DefaultSolrHighlighter.java  |   8 +-
 .../org/apache/solr/legacy/BBoxStrategy.java    | 706 +++++++++++++++++++
 .../org/apache/solr/legacy/BBoxValueSource.java | 135 ++++
 .../apache/solr/legacy/DistanceValueSource.java | 133 ++++
 .../apache/solr/legacy/LegacyDoubleField.java   | 174 +++++
 .../org/apache/solr/legacy/LegacyField.java     |  90 +++
 .../org/apache/solr/legacy/LegacyFieldType.java | 149 ++++
 .../apache/solr/legacy/LegacyFloatField.java    | 174 +++++
 .../org/apache/solr/legacy/LegacyIntField.java  | 175 +++++
 .../org/apache/solr/legacy/LegacyLongField.java | 184 +++++
 .../solr/legacy/LegacyNumericRangeQuery.java    | 537 ++++++++++++++
 .../solr/legacy/LegacyNumericTokenStream.java   | 357 ++++++++++
 .../apache/solr/legacy/LegacyNumericType.java   |  34 +
 .../apache/solr/legacy/LegacyNumericUtils.java  | 510 ++++++++++++++
 .../apache/solr/legacy/PointVectorStrategy.java | 292 ++++++++
 .../solr/legacy/doc-files/nrq-formula-1.png     | Bin 0 -> 3171 bytes
 .../solr/legacy/doc-files/nrq-formula-2.png     | Bin 0 -> 3694 bytes
 .../org/apache/solr/legacy/package-info.java    |  21 +
 .../java/org/apache/solr/schema/BBoxField.java  |   4 +-
 .../java/org/apache/solr/schema/EnumField.java  |  10 +-
 .../org/apache/solr/schema/IndexSchema.java     |   2 +-
 .../schema/SpatialPointVectorFieldType.java     |   4 +-
 .../org/apache/solr/schema/TrieDoubleField.java |   2 +-
 .../java/org/apache/solr/schema/TrieField.java  |  24 +-
 .../org/apache/solr/schema/TrieFloatField.java  |   2 +-
 .../org/apache/solr/schema/TrieIntField.java    |   2 +-
 .../org/apache/solr/schema/TrieLongField.java   |   2 +-
 .../search/LegacyNumericRangeQueryBuilder.java  |   8 +-
 .../org/apache/solr/search/QueryParsing.java    |   2 +-
 .../apache/solr/search/QueryWrapperFilter.java  |   2 +-
 .../apache/solr/search/mlt/CloudMLTQParser.java |   2 +-
 .../solr/search/mlt/SimpleMLTQParser.java       |   2 +-
 .../org/apache/solr/uninverting/FieldCache.java |  20 +-
 .../solr/uninverting/UninvertingReader.java     |  16 +-
 .../java/org/apache/solr/update/PeerSync.java   |   3 +-
 .../java/org/apache/solr/update/UpdateLog.java  |   3 +-
 .../org/apache/solr/update/VersionInfo.java     |   2 +-
 .../java/org/apache/solr/util/FileUtils.java    |  20 +
 .../src/java/org/apache/solr/util/SolrCLI.java  |   2 +-
 .../collection1/conf/schema-tokenizer-test.xml  | 150 ++++
 .../AbstractCloudBackupRestoreTestCase.java     |   7 +-
 .../apache/solr/cloud/TestConfigSetsAPI.java    |   5 +-
 .../solr/cloud/TestMiniSolrCloudClusterSSL.java |   2 +-
 .../TestSolrCloudWithDelegationTokens.java      |   2 +-
 .../org/apache/solr/legacy/TestLegacyField.java | 186 +++++
 .../solr/legacy/TestLegacyFieldReuse.java       |  81 +++
 .../solr/legacy/TestLegacyNumericUtils.java     | 571 +++++++++++++++
 .../org/apache/solr/legacy/TestLegacyTerms.java | 164 +++++
 .../TestMultiValuedNumericRangeQuery.java       |  84 +++
 .../solr/legacy/TestNumericRangeQuery32.java    | 461 ++++++++++++
 .../solr/legacy/TestNumericRangeQuery64.java    | 490 +++++++++++++
 .../solr/legacy/TestNumericTokenStream.java     | 188 +++++
 .../TestLegacyNumericRangeQueryBuilder.java     |   2 +-
 .../solr/search/TestMaxScoreQueryParser.java    |   2 +-
 .../apache/solr/search/TestSolr4Spatial.java    |   2 +-
 .../solr/search/function/TestOrdValues.java     |   4 +-
 .../hadoop/TestDelegationWithHadoopAuth.java    |   2 +-
 .../solr/uninverting/TestDocTermOrds.java       |   6 +-
 .../solr/uninverting/TestFieldCacheSort.java    |   8 +-
 .../solr/uninverting/TestLegacyFieldCache.java  |   8 +-
 .../solr/uninverting/TestNumericTerms32.java    |   6 +-
 .../solr/uninverting/TestNumericTerms64.java    |   6 +-
 .../solr/uninverting/TestUninvertingReader.java |   8 +-
 .../solr/util/TestMaxTokenLenTokenizer.java     | 135 ++++
 solr/solr-ref-guide/src/language-analysis.adoc  | 121 +++-
 solr/solr-ref-guide/src/tokenizers.adoc         |   2 +-
 .../solr/client/solrj/impl/CloudSolrClient.java | 271 +------
 .../solrj/impl/ConcurrentUpdateSolrClient.java  |  51 +-
 .../impl/DelegationTokenHttpSolrClient.java     |  24 +-
 .../solr/client/solrj/impl/HttpSolrClient.java  |  43 +-
 .../client/solrj/impl/LBHttpSolrClient.java     |  26 +-
 .../impl/SolrHttpClientContextBuilder.java      |   8 -
 .../client/solrj/io/eval/AnovaEvaluator.java    |  76 ++
 .../client/solrj/io/eval/ArrayEvaluator.java    |  63 ++
 .../solrj/io/eval/HistogramEvaluator.java       |  98 +++
 .../solrj/io/graph/ShortestPathStream.java      |  25 -
 .../client/solrj/io/stream/CloudSolrStream.java |  21 -
 .../client/solrj/io/stream/FacetStream.java     |  16 -
 .../client/solrj/io/stream/RandomStream.java    |   4 +-
 .../solr/client/solrj/io/stream/SolrStream.java |  14 -
 .../solr/client/solrj/io/stream/SqlStream.java  |   2 -
 .../client/solrj/io/stream/StatsStream.java     |  10 -
 .../client/solrj/io/stream/TopicStream.java     |  19 -
 .../solrj/io/stream/expr/StreamFactory.java     |   2 +-
 .../solr/common/params/HighlightParams.java     |   2 -
 .../apache/solr/common/util/ExecutorUtil.java   |  41 --
 .../org/apache/solr/common/util/NamedList.java  |  23 +-
 .../solr/common/util/SimpleOrderedMap.java      |   9 +-
 .../embedded/SolrExampleStreamingTest.java      |   2 +-
 .../solrj/impl/BasicHttpSolrClientTest.java     |   2 +-
 .../CloudSolrClientMultiConstructorTest.java    |   4 +-
 .../impl/ConcurrentUpdateSolrClientTest.java    |  23 +-
 .../solrj/io/graph/GraphExpressionTest.java     |   2 +-
 .../solrj/io/stream/StreamExpressionTest.java   | 107 ++-
 .../java/org/apache/solr/SolrTestCaseJ4.java    | 108 +--
 151 files changed, 7429 insertions(+), 5651 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/36faceaf/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
----------------------------------------------------------------------


[16/25] lucene-solr:jira/solr-8668: SOLR-10765: Add anova Stream Evaluator

Posted by cp...@apache.org.
SOLR-10765: Add anova Stream Evaluator


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/963f43f6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/963f43f6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/963f43f6

Branch: refs/heads/jira/solr-8668
Commit: 963f43f6c051c2edb18e75468c1b05d1f4e6edb4
Parents: cb97ad7
Author: Joel Bernstein <jb...@apache.org>
Authored: Mon May 29 20:47:51 2017 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Mon May 29 20:48:25 2017 -0400

----------------------------------------------------------------------
 .../org/apache/solr/handler/StreamHandler.java  |  1 +
 .../client/solrj/io/eval/AnovaEvaluator.java    | 76 ++++++++++++++++++++
 .../solrj/io/stream/StreamExpressionTest.java   | 18 +++++
 3 files changed, 95 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/963f43f6/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index f5ccbc8..2de8b15 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -192,6 +192,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
       .withFunctionName("sequence", SequenceEvaluator.class)
       .withFunctionName("array", ArrayEvaluator.class)
       .withFunctionName("hist", HistogramEvaluator.class)
+      .withFunctionName("anova", AnovaEvaluator.class)
 
       // metrics
          .withFunctionName("min", MinMetric.class)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/963f43f6/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/AnovaEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/AnovaEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/AnovaEvaluator.java
new file mode 100644
index 0000000..b228821
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/AnovaEvaluator.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.math3.stat.inference.OneWayAnova;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
+import org.apache.solr.client.solrj.io.stream.expr.Expressible;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class AnovaEvaluator extends ComplexEvaluator implements Expressible {
+
+  private static final long serialVersionUID = 1;
+
+  public AnovaEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  public Tuple evaluate(Tuple tuple) throws IOException {
+    List<double[]> list = new ArrayList<>();
+    for(StreamEvaluator subEvaluator : subEvaluators) {
+      List<Number> nums = (List<Number>)subEvaluator.evaluate(tuple);
+      double[] darray = new double[nums.size()];
+      for(int i=0; i< nums.size(); i++) {
+        darray[i]=nums.get(i).doubleValue();
+      }
+      list.add(darray);
+    }
+
+    OneWayAnova anova = new OneWayAnova();
+    double p = anova.anovaPValue(list);
+    double f = anova.anovaFValue(list);
+    Map<String, Object> m = new HashMap<>();
+    m.put("p-value", p);
+    m.put("f-ratio", f);
+    return new Tuple(m);
+  }
+
+  @Override
+  public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
+    StreamExpression expression = new StreamExpression(factory.getFunctionName(getClass()));
+    return expression;
+  }
+
+  @Override
+  public Explanation toExplanation(StreamFactory factory) throws IOException {
+    return new Explanation(nodeId.toString())
+        .withExpressionType(ExpressionType.EVALUATOR)
+        .withFunctionName(factory.getFunctionName(getClass()))
+        .withImplementingClass(getClass().getName())
+        .withExpression(toExpression(factory).toString());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/963f43f6/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 6c15197..c5c459e 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -5822,6 +5822,24 @@ public class StreamExpressionTest extends SolrCloudTestCase {
   }
 
   @Test
+  public void testAnova() throws Exception {
+    String cexpr = "anova(array(1,2,3,5,4,6), array(5,2,3,5,4,6), array(1,2,7,5,4,6))";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    Map out = (Map)tuples.get(0).get("return-value");
+    assertEquals((double)out.get("p-value"), 0.788298D, .0001);
+    assertEquals((double)out.get("f-ratio"), 0.24169D, .0001);
+  }
+
+
+  @Test
   public void testScale() throws Exception {
     UpdateRequest updateRequest = new UpdateRequest();
 

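The evaluator delegates to commons-math3's OneWayAnova, so the expected
values in the test above can be reproduced standalone. A minimal sketch,
independent of Solr (the class name is made up):

  import java.util.Arrays;
  import java.util.List;
  import org.apache.commons.math3.stat.inference.OneWayAnova;

  public class AnovaSketch {
    public static void main(String[] args) {
      // Same three groups as the anova(...) test expression above.
      List<double[]> groups = Arrays.asList(
          new double[]{1, 2, 3, 5, 4, 6},
          new double[]{5, 2, 3, 5, 4, 6},
          new double[]{1, 2, 7, 5, 4, 6});
      OneWayAnova anova = new OneWayAnova();
      System.out.println("f-ratio: " + anova.anovaFValue(groups)); // ~0.24169
      System.out.println("p-value: " + anova.anovaPValue(groups)); // ~0.788298
    }
  }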

[05/25] lucene-solr:jira/solr-8668: SOLR-10758: Modernize the Solr ref guide's Chinese language analysis coverage

Posted by cp...@apache.org.
SOLR-10758: Modernize the Solr ref guide's Chinese language analysis coverage


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b23aab54
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b23aab54
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b23aab54

Branch: refs/heads/jira/solr-8668
Commit: b23aab5482f109d6c70470e1902d9e61474aeb1c
Parents: d1436c4
Author: Steve Rowe <sa...@gmail.com>
Authored: Fri May 26 14:47:24 2017 -0400
Committer: Steve Rowe <sa...@gmail.com>
Committed: Fri May 26 14:47:32 2017 -0400

----------------------------------------------------------------------
 .../icu/segmentation/TestICUTokenizerCJK.java   |   9 +-
 solr/CHANGES.txt                                |   4 +
 solr/solr-ref-guide/src/language-analysis.adoc  | 120 ++++++++++++++-----
 solr/solr-ref-guide/src/tokenizers.adoc         |   2 +-
 4 files changed, 101 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b23aab54/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
index 96f44d6..75481f1 100644
--- a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
+++ b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerCJK.java
@@ -53,7 +53,14 @@ public class TestICUTokenizerCJK extends BaseTokenStreamTestCase {
         new String[] { "我", "购买", "了", "道具", "和", "服装" }
     );
   }
-  
+
+  public void testTraditionalChinese() throws Exception {
+    assertAnalyzesTo(a, "我購買了道具和服裝。",
+        new String[] { "我", "購買", "了", "道具", "和", "服裝"});
+    assertAnalyzesTo(a, "定義切分字串的基本單位是訂定分詞標準的首要工作", // From http://godel.iis.sinica.edu.tw/CKIP/paper/wordsegment_standard.pdf
+        new String[] { "定義", "切", "分", "字串", "的", "基本", "單位", "是", "訂定", "分詞", "標準", "的", "首要", "工作" });
+  }
+
   public void testChineseNumerics() throws Exception {
     assertAnalyzesTo(a, "9483", new String[] { "9483" });
     assertAnalyzesTo(a, "院內分機9483。",

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b23aab54/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index d2f42e6..cd383d1 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -247,6 +247,10 @@ Optimizations
   so that the second phase which would normally involve calculating the domain for the bucket
   can be skipped entirely, leading to large performance improvements. (yonik)
 
+Ref Guide
+----------------------
+
+* SOLR-10758: Modernize the Solr ref guide's Chinese language analysis coverage. (Steve Rowe)
 
 Other Changes
 ----------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b23aab54/solr/solr-ref-guide/src/language-analysis.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/language-analysis.adoc b/solr/solr-ref-guide/src/language-analysis.adoc
index 0cf8e13..c55a0cd 100644
--- a/solr/solr-ref-guide/src/language-analysis.adoc
+++ b/solr/solr-ref-guide/src/language-analysis.adoc
@@ -378,9 +378,8 @@ These factories are each designed to work with specific languages. The languages
 * <<Brazilian Portuguese>>
 * <<Bulgarian>>
 * <<Catalan>>
-* <<Chinese>>
+* <<Traditional Chinese>>
 * <<Simplified Chinese>>
-* <<CJK>>
 * <<LanguageAnalysis-Czech,Czech>>
 * <<LanguageAnalysis-Danish,Danish>>
 
@@ -508,55 +507,112 @@ Solr can stem Catalan using the Snowball Porter Stemmer with an argument of `lan
 
 *Out:* "llengu"(1), "llengu"(2)
 
-[[LanguageAnalysis-Chinese]]
-=== Chinese
+[[LanguageAnalysis-TraditionalChinese]]
+=== Traditional Chinese
 
-<<tokenizers.adoc#Tokenizers-StandardTokenizer,`solr.StandardTokenizerFactory`>> is suitable for Traditional Chinese text.  Following the Word Break rules from the Unicode Text Segmentation algorithm, it produces one token per Chinese character.
+The default configuration of the <<tokenizers.adoc#Tokenizers-ICUTokenizer,ICU Tokenizer>> is suitable for Traditional Chinese text.  It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.  To use this tokenizer, see `solr/contrib/analysis-extras/README.txt` for instructions on which jars you need to add to your `solr_home/lib`.
 
-[[LanguageAnalysis-SimplifiedChinese]]
-=== Simplified Chinese
+<<tokenizers.adoc#Tokenizers-StandardTokenizer,Standard Tokenizer>> can also be used to tokenize Traditional Chinese text.  Following the Word Break rules from the Unicode Text Segmentation algorithm, it produces one token per Chinese character.  When combined with <<LanguageAnalysis-CJKBigramFilter,CJK Bigram Filter>>, overlapping bigrams of Chinese characters are formed.
+ 
+<<LanguageAnalysis-CJKWidthFilter,CJK Width Filter>> folds fullwidth ASCII variants into the equivalent Basic Latin forms.
 
-For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the `solr.HMMChineseTokenizerFactory` in the `analysis-extras` contrib module. This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model. To use this filter, see `solr/contrib/analysis-extras/README.txt` for instructions on which jars you need to add to your `solr_home/lib`.
+*Examples:*
 
-*Factory class:* `solr.HMMChineseTokenizerFactory`
+[source,xml]
+----
+<analyzer>
+  <tokenizer class="solr.ICUTokenizerFactory"/>
+  <filter class="solr.CJKWidthFilterFactory"/>
+  <filter class="solr.LowerCaseFilterFactory"/>
+</analyzer>
+----
 
-*Arguments:* None
+[source,xml]
+----
+<analyzer>
+  <tokenizer class="solr.StandardTokenizerFactory"/>
+  <filter class="solr.CJKBigramFilterFactory"/>
+  <filter class="solr.CJKWidthFilterFactory"/>
+  <filter class="solr.LowerCaseFilterFactory"/>
+</analyzer>
+----
 
-*Examples:*
+[[LanguageAnalysis-CJKBigramFilter]]
+=== CJK Bigram Filter
 
-To use the default setup with fallback to English Porter stemmer for English words, use:
+Forms bigrams (overlapping 2-character sequences) of CJK characters that are generated from <<tokenizers.adoc#Tokenizers-StandardTokenizer,Standard Tokenizer>> or <<tokenizers.adoc#Tokenizers-ICUTokenizer,ICU Tokenizer>>.
 
-`<analyzer class="org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer"/>`
+By default, all CJK characters produce bigrams, but finer-grained control is available by specifying orthographic type arguments `han`, `hiragana`, `katakana`, and `hangul`.  When set to `false`, characters of the corresponding type will be passed through as unigrams, and will not be included in any bigrams.
+
+When a CJK character has no adjacent characters to form a bigram, it is output in unigram form. If you want to always output both unigrams and bigrams, set the `outputUnigrams` argument to `true`.
 
-Or to configure your own analysis setup, use the `solr.HMMChineseTokenizerFactory` along with your custom filter setup.
+In all cases, all non-CJK input is passed through unmodified.
+
+*Arguments:*
+
+`han`:: (true/false) If false, Han (Chinese) characters will not form bigrams. Default is true.
+
+`hiragana`:: (true/false) If false, Hiragana (Japanese) characters will not form bigrams. Default is true.
+
+`katakana`:: (true/false) If false, Katakana (Japanese) characters will not form bigrams. Default is true.
+
+`hangul`:: (true/false) If false, Hangul (Korean) characters will not form bigrams. Default is true.
+
+`outputUnigrams`:: (true/false) If true, in addition to forming bigrams, all characters are also passed through as unigrams. Default is false.
+
+See the example under <<LanguageAnalysis-TraditionalChinese,Traditional Chinese>>.
+
+[[LanguageAnalysis-SimplifiedChinese]]
+=== Simplified Chinese
+
+For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the <<LanguageAnalysis-HMMChineseTokenizerFactory,HMM Chinese Tokenizer>>. This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model. To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<lib-directives-in-solrconfig.adoc#lib-directives-in-solrconfig,Lib Directives in SolrConfig>>). See the `solr/contrib/analysis-extras/README.txt` for information on which jars you need to add to your `SOLR_HOME/lib`.
+
+The default configuration of the <<tokenizers.adoc#Tokenizers-ICUTokenizer,ICU Tokenizer>> is also suitable for Simplified Chinese text.  It follows the Word Break rules from the Unicode Text Segmentation algorithm for non-Chinese text, and uses a dictionary to segment Chinese words.  To use this tokenizer, you must add additional .jars to Solr's classpath (as described in the section <<lib-directives-in-solrconfig.adoc#lib-directives-in-solrconfig,Lib Directives in SolrConfig>>). See the `solr/contrib/analysis-extras/README.txt` for information on which jars you need to add to your `SOLR_HOME/lib`.
+
+Also useful for Chinese analysis:
+
+<<LanguageAnalysis-CJKWidthFilter,CJK Width Filter>> folds fullwidth ASCII variants into the equivalent Basic Latin forms, and folds halfwidth Katakana variants into their equivalent fullwidth forms.
+
+*Examples:*
 
 [source,xml]
 ----
 <analyzer>
   <tokenizer class="solr.HMMChineseTokenizerFactory"/>
+  <filter class="solr.CJKWidthFilterFactory"/>
   <filter class="solr.StopFilterFactory"
           words="org/apache/lucene/analysis/cn/smart/stopwords.txt"/>
   <filter class="solr.PorterStemFilterFactory"/>
+  <filter class="solr.LowerCaseFilterFactory"/>
+</analyzer>
+----
+
+[source,xml]
+----
+<analyzer>
+  <tokenizer class="solr.ICUTokenizerFactory"/>
+  <filter class="solr.CJKWidthFilterFactory"/>
+  <filter class="solr.StopFilterFactory"
+          words="org/apache/lucene/analysis/cn/smart/stopwords.txt"/>
+  <filter class="solr.LowerCaseFilterFactory"/>
 </analyzer>
 ----
 
-[[LanguageAnalysis-CJK]]
-=== CJK
+=== HMM Chinese Tokenizer
 
-This tokenizer breaks Chinese, Japanese and Korean language text into tokens. These are not whitespace delimited languages. The tokens generated by this tokenizer are "doubles", overlapping pairs of CJK characters found in the field text.
+For Simplified Chinese, Solr provides support for Chinese sentence and word segmentation with the `solr.HMMChineseTokenizerFactory` in the `analysis-extras` contrib module. This component includes a large dictionary and segments Chinese text into words with the Hidden Markov Model. To use this tokenizer, see `solr/contrib/analysis-extras/README.txt` for instructions on which jars you need to add to your `solr_home/lib`.
 
-*Factory class:* `solr.CJKTokenizerFactory`
+*Factory class:* `solr.HMMChineseTokenizerFactory`
 
 *Arguments:* None
 
-*Example:*
+*Examples:*
 
-[source,xml]
-----
-<analyzer type="index">
-  <tokenizer class="solr.CJKTokenizerFactory"/>
-</analyzer>
-----
+To use the default setup with fallback to English Porter stemmer for English words, use:
+
+`<analyzer class="org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer"/>`
+
+Or to configure your own analysis setup, use the `solr.HMMChineseTokenizerFactory` along with your custom filter setup.  See an example of this in the <<LanguageAnalysis-SimplifiedChinese,Simplified Chinese>> section. 
 
 [[LanguageAnalysis-Czech]]
 === Czech
@@ -947,15 +1003,15 @@ Solr can stem Irish using the Snowball Porter Stemmer with an argument of `langu
 
 Solr includes support for analyzing Japanese, via the Lucene Kuromoji morphological analyzer, which includes several analysis components - more details on each below:
 
-* `JapaneseIterationMarkCharFilter` normalizes Japanese horizontal iteration marks (odoriji) to their expanded form.
-* `JapaneseTokenizer` tokenizes Japanese using morphological analysis, and annotates each term with part-of-speech, base form (a.k.a. lemma), reading and pronunciation.
-* `JapaneseBaseFormFilter` replaces original terms with their base forms (a.k.a. lemmas).
-* `JapanesePartOfSpeechStopFilter` removes terms that have one of the configured parts-of-speech.
-* `JapaneseKatakanaStemFilter` normalizes common katakana spelling variations ending in a long sound character (U+30FC) by removing the long sound character.
+* <<LanguageAnalysis-JapaneseIterationMarkCharFilter,`JapaneseIterationMarkCharFilter`>> normalizes Japanese horizontal iteration marks (odoriji) to their expanded form.
+* <<LanguageAnalysis-JapaneseTokenizer,`JapaneseTokenizer`>> tokenizes Japanese using morphological analysis, and annotates each term with part-of-speech, base form (a.k.a. lemma), reading and pronunciation.
+* <<LanguageAnalysis-JapaneseBaseFormFilter,`JapaneseBaseFormFilter`>> replaces original terms with their base forms (a.k.a. lemmas).
+* <<LanguageAnalysis-JapanesePartOfSpeechStopFilter,`JapanesePartOfSpeechStopFilter`>> removes terms that have one of the configured parts-of-speech.
+* <<LanguageAnalysis-JapaneseKatakanaStemFilter,`JapaneseKatakanaStemFilter`>> normalizes common katakana spelling variations ending in a long sound character (U+30FC) by removing the long sound character.
 
 Also useful for Japanese analysis, from lucene-analyzers-common:
 
-* `CJKWidthFilter` folds fullwidth ASCII variants into the equivalent Basic Latin forms, and folds halfwidth Katakana variants into their equivalent fullwidth forms.
+* <<LanguageAnalysis-CJKWidthFilter,`CJKWidthFilter`>> folds fullwidth ASCII variants into the equivalent Basic Latin forms, and folds halfwidth Katakana variants into their equivalent fullwidth forms.
 
 [[LanguageAnalysis-JapaneseIterationMarkCharFilter]]
 ==== Japanese Iteration Mark CharFilter
@@ -1022,7 +1078,7 @@ Removes terms with one of the configured parts-of-speech. `JapaneseTokenizer` an
 
 Normalizes common katakana spelling variations ending in a long sound character (U+30FC) by removing the long sound character.
 
-`CJKWidthFilterFactory` should be specified prior to this filter to normalize half-width katakana to full-width.
+<<LanguageAnalysis-CJKWidthFilter,`solr.CJKWidthFilterFactory`>> should be specified prior to this filter to normalize half-width katakana to full-width.
 
 *Factory class:* `JapaneseKatakanaStemFilterFactory`
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b23aab54/solr/solr-ref-guide/src/tokenizers.adoc
----------------------------------------------------------------------
diff --git a/solr/solr-ref-guide/src/tokenizers.adoc b/solr/solr-ref-guide/src/tokenizers.adoc
index 5c7a819..7a8bdeb 100644
--- a/solr/solr-ref-guide/src/tokenizers.adoc
+++ b/solr/solr-ref-guide/src/tokenizers.adoc
@@ -286,7 +286,7 @@ This tokenizer processes multilingual text and tokenizes it appropriately based
 
 You can customize this tokenizer's behavior by specifying http://userguide.icu-project.org/boundaryanalysis#TOC-RBBI-Rules[per-script rule files]. To add per-script rules, add a `rulefiles` argument, which should contain a comma-separated list of `code:rulefile` pairs in the following format: four-letter ISO 15924 script code, followed by a colon, then a resource path. For example, to specify rules for Latin (script code "Latn") and Cyrillic (script code "Cyrl"), you would enter `Latn:my.Latin.rules.rbbi,Cyrl:my.Cyrillic.rules.rbbi`.
 
-The default `solr.ICUTokenizerFactory` provides UAX#29 word break rules tokenization (like `solr.StandardTokenizer`), but also includes custom tailorings for Hebrew (specializing handling of double and single quotation marks), and for syllable tokenization for Khmer, Lao, and Myanmar.
+The default configuration for `solr.ICUTokenizerFactory` provides UAX#29 word break rules tokenization (like `solr.StandardTokenizer`), but also includes custom tailorings for Hebrew (specializing handling of double and single quotation marks), for syllable tokenization for Khmer, Lao, and Myanmar, and dictionary-based word segmentation for CJK characters.
 
 *Factory class:* `solr.ICUTokenizerFactory`
 

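As a companion to the ref-guide text above, a minimal sketch of smartcn
segmentation from plain Java (not part of this commit; it assumes the
analysis-extras/smartcn jars are on the classpath, and the class name is
made up):

  import java.io.IOException;
  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
  import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

  public class SmartcnSketch {
    public static void main(String[] args) throws IOException {
      Analyzer analyzer = new SmartChineseAnalyzer();
      try (TokenStream ts = analyzer.tokenStream("text", "我购买了道具和服装")) {
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term.toString()); // one segmented word per line
        }
        ts.end();
      }
    }
  }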

[23/25] lucene-solr:jira/solr-8668: LUCENE-7850: Move support for legacy numerics to solr/.

Posted by cp...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericTokenStream.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericTokenStream.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericTokenStream.java
deleted file mode 100644
index a2aba19..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericTokenStream.java
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import java.util.Objects;
-
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Attribute;
-import org.apache.lucene.util.AttributeFactory;
-import org.apache.lucene.util.AttributeImpl;
-import org.apache.lucene.util.AttributeReflector;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * <b>Expert:</b> This class provides a {@link TokenStream}
- * for indexing numeric values that can be used by {@link
- * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
- *
- * <p>Note that for simple usage, {@link org.apache.lucene.legacy.LegacyIntField}, {@link
- * org.apache.lucene.legacy.LegacyLongField}, {@link org.apache.lucene.legacy.LegacyFloatField} or {@link org.apache.lucene.legacy.LegacyDoubleField} is
- * recommended.  These fields disable norms and
- * term freqs, as they are not usually needed during
- * searching.  If you need to change these settings, you
- * should use this class.
- *
- * <p>Here's an example usage, for an <code>int</code> field:
- *
- * <pre class="prettyprint">
- *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
- *  fieldType.setOmitNorms(true);
- *  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
- *  Field field = new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value), fieldType);
- *  document.add(field);
- * </pre>
- *
- * <p>For optimal performance, re-use the TokenStream and Field instance
- * for more than one document:
- *
- * <pre class="prettyprint">
- *  LegacyNumericTokenStream stream = new LegacyNumericTokenStream(precisionStep);
- *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
- *  fieldType.setOmitNorms(true);
- *  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
- *  Field field = new Field(name, stream, fieldType);
- *  Document document = new Document();
- *  document.add(field);
- *
- *  for(all documents) {
- *    stream.setIntValue(value)
- *    writer.addDocument(document);
- *  }
- * </pre>
- *
- * <p>This stream is not intended to be used in analyzers;
- * it's more for iterating the different precisions during
- * indexing a specific numeric value.</p>
-
- * <p><b>NOTE</b>: as token streams are only consumed once
- * the document is added to the index, if you index more
- * than one numeric field, use a separate <code>LegacyNumericTokenStream</code>
- * instance for each.</p>
- *
- * <p>See {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} for more details on the
- * <a
- * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * parameter as well as how numeric fields work under the hood.</p>
- *
- * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
- *
- * @since 2.9
- */
-@Deprecated
-public final class LegacyNumericTokenStream extends TokenStream {
-
-  /** The full precision token gets this token type assigned. */
-  public static final String TOKEN_TYPE_FULL_PREC  = "fullPrecNumeric";
-
-  /** The lower precision tokens gets this token type assigned. */
-  public static final String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
-  
-  /** <b>Expert:</b> Use this attribute to get the details of the currently generated token.
-   * @lucene.experimental
-   * @since 4.0
-   */
-  public interface LegacyNumericTermAttribute extends Attribute {
-    /** Returns current shift value, undefined before first token */
-    int getShift();
-    /** Returns current token's raw value as {@code long} with all {@link #getShift} applied, undefined before first token */
-    long getRawValue();
-    /** Returns value size in bits (32 for {@code float}, {@code int}; 64 for {@code double}, {@code long}) */
-    int getValueSize();
-    
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    void init(long value, int valSize, int precisionStep, int shift);
-
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    void setShift(int shift);
-
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    int incShift();
-  }
-  
-  // just a wrapper to prevent adding CTA
-  private static final class NumericAttributeFactory extends AttributeFactory {
-    private final AttributeFactory delegate;
-
-    NumericAttributeFactory(AttributeFactory delegate) {
-      this.delegate = delegate;
-    }
-  
-    @Override
-    public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
-      if (CharTermAttribute.class.isAssignableFrom(attClass))
-        throw new IllegalArgumentException("LegacyNumericTokenStream does not support CharTermAttribute.");
-      return delegate.createAttributeInstance(attClass);
-    }
-  }
-
-  /** Implementation of {@link org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute}.
-   * @lucene.internal
-   * @since 4.0
-   */
-  public static final class LegacyNumericTermAttributeImpl extends AttributeImpl implements LegacyNumericTermAttribute,TermToBytesRefAttribute {
-    private long value = 0L;
-    private int valueSize = 0, shift = 0, precisionStep = 0;
-    private BytesRefBuilder bytes = new BytesRefBuilder();
-    
-    /** 
-     * Creates, but does not yet initialize this attribute instance
-     * @see #init(long, int, int, int)
-     */
-    public LegacyNumericTermAttributeImpl() {}
-
-    @Override
-    public BytesRef getBytesRef() {
-      assert valueSize == 64 || valueSize == 32;
-      if (shift >= valueSize) {
-        bytes.clear();
-      } else if (valueSize == 64) {
-        LegacyNumericUtils.longToPrefixCoded(value, shift, bytes);
-      } else {
-        LegacyNumericUtils.intToPrefixCoded((int) value, shift, bytes);
-      }
-      return bytes.get();
-    }
-
-    @Override
-    public int getShift() { return shift; }
-    @Override
-    public void setShift(int shift) { this.shift = shift; }
-    @Override
-    public int incShift() {
-      return (shift += precisionStep);
-    }
-
-    @Override
-    public long getRawValue() { return value  & ~((1L << shift) - 1L); }
-    @Override
-    public int getValueSize() { return valueSize; }
-
-    @Override
-    public void init(long value, int valueSize, int precisionStep, int shift) {
-      this.value = value;
-      this.valueSize = valueSize;
-      this.precisionStep = precisionStep;
-      this.shift = shift;
-    }
-
-    @Override
-    public void clear() {
-      // this attribute has no contents to clear!
-      // we keep it untouched as it's fully controlled by outer class.
-    }
-    
-    @Override
-    public void reflectWith(AttributeReflector reflector) {
-      reflector.reflect(TermToBytesRefAttribute.class, "bytes", getBytesRef());
-      reflector.reflect(LegacyNumericTermAttribute.class, "shift", shift);
-      reflector.reflect(LegacyNumericTermAttribute.class, "rawValue", getRawValue());
-      reflector.reflect(LegacyNumericTermAttribute.class, "valueSize", valueSize);
-    }
-  
-    @Override
-    public void copyTo(AttributeImpl target) {
-      final LegacyNumericTermAttribute a = (LegacyNumericTermAttribute) target;
-      a.init(value, valueSize, precisionStep, shift);
-    }
-    
-    @Override
-    public LegacyNumericTermAttributeImpl clone() {
-      LegacyNumericTermAttributeImpl t = (LegacyNumericTermAttributeImpl)super.clone();
-      // Do a deep clone
-      t.bytes = new BytesRefBuilder();
-      t.bytes.copyBytes(getBytesRef());
-      return t;
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(precisionStep, shift, value, valueSize);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (this == obj) return true;
-      if (obj == null) return false;
-      if (getClass() != obj.getClass()) return false;
-      LegacyNumericTermAttributeImpl other = (LegacyNumericTermAttributeImpl) obj;
-      if (precisionStep != other.precisionStep) return false;
-      if (shift != other.shift) return false;
-      if (value != other.value) return false;
-      if (valueSize != other.valueSize) return false;
-      return true;
-    }
-  }
-  
-  /**
-   * Creates a token stream for numeric values using the default <code>precisionStep</code>
 -   * {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16). The stream is not yet initialized;
 -   * before use, set a value using one of the set<em>???</em>Value() methods.
-   */
-  public LegacyNumericTokenStream() {
-    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, LegacyNumericUtils.PRECISION_STEP_DEFAULT);
-  }
-  
-  /**
-   * Creates a token stream for numeric values with the specified
 -   * <code>precisionStep</code>. The stream is not yet initialized;
 -   * before use, set a value using one of the set<em>???</em>Value() methods.
-   */
-  public LegacyNumericTokenStream(final int precisionStep) {
-    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, precisionStep);
-  }
-
-  /**
-   * Expert: Creates a token stream for numeric values with the specified
-   * <code>precisionStep</code> using the given
-   * {@link org.apache.lucene.util.AttributeFactory}.
 -   * The stream is not yet initialized;
 -   * before use, set a value using one of the set<em>???</em>Value() methods.
-   */
-  public LegacyNumericTokenStream(AttributeFactory factory, final int precisionStep) {
-    super(new NumericAttributeFactory(factory));
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    this.precisionStep = precisionStep;
-    numericAtt.setShift(-precisionStep);
-  }
-
-  /**
-   * Initializes the token stream with the supplied <code>long</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
 -   * @return this instance, so that you can use it as follows:
-   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setLongValue(value))</code>
-   */
-  public LegacyNumericTokenStream setLongValue(final long value) {
-    numericAtt.init(value, valSize = 64, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>int</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
 -   * @return this instance, so that you can use it as follows:
-   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value))</code>
-   */
-  public LegacyNumericTokenStream setIntValue(final int value) {
-    numericAtt.init(value, valSize = 32, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>double</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
 -   * @return this instance, so that you can use it as follows:
-   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setDoubleValue(value))</code>
-   */
-  public LegacyNumericTokenStream setDoubleValue(final double value) {
-    numericAtt.init(NumericUtils.doubleToSortableLong(value), valSize = 64, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>float</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
 -   * @return this instance, so that you can use it as follows:
-   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setFloatValue(value))</code>
-   */
-  public LegacyNumericTokenStream setFloatValue(final float value) {
-    numericAtt.init(NumericUtils.floatToSortableInt(value), valSize = 32, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  @Override
-  public void reset() {
-    if (valSize == 0)
-      throw new IllegalStateException("call set???Value() before usage");
-    numericAtt.setShift(-precisionStep);
-  }
-
-  @Override
-  public boolean incrementToken() {
-    if (valSize == 0)
-      throw new IllegalStateException("call set???Value() before usage");
-    
-    // this will only clear all other attributes in this TokenStream
-    clearAttributes();
-
-    final int shift = numericAtt.incShift();
-    typeAtt.setType((shift == 0) ? TOKEN_TYPE_FULL_PREC : TOKEN_TYPE_LOWER_PREC);
-    posIncrAtt.setPositionIncrement((shift == 0) ? 1 : 0);
-    return (shift < valSize);
-  }
-
-  /** Returns the precision step. */
-  public int getPrecisionStep() {
-    return precisionStep;
-  }
-
-  @Override
-  public String toString() {
-    // We override default because it can throw cryptic "illegal shift value":
-    return getClass().getSimpleName() + "(precisionStep=" + precisionStep + " valueSize=" + numericAtt.getValueSize() + " shift=" + numericAtt.getShift() + ")";
-  }
-  
-  // members
-  private final LegacyNumericTermAttribute numericAtt = addAttribute(LegacyNumericTermAttribute.class);
-  private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
-  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
-  
-  private int valSize = 0; // valSize==0 means not initialized
-  private final int precisionStep;
-}
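
For context on the trie encoding being removed here, a minimal sketch (not
part of this commit) of how LegacyNumericTokenStream enumerates one value at
decreasing precision. The class name TrieTokenDemo is invented for
illustration, and the code assumes the deleted legacy classes are still on
the classpath:

import org.apache.lucene.legacy.LegacyNumericTokenStream;
import org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute;

public class TrieTokenDemo {
  public static void main(String[] args) throws Exception {
    // precisionStep=8 means each successive token shifts away 8 more low bits
    try (LegacyNumericTokenStream stream = new LegacyNumericTokenStream(8)) {
      stream.setIntValue(42);
      LegacyNumericTermAttribute numericAtt =
          stream.getAttribute(LegacyNumericTermAttribute.class);
      stream.reset();
      while (stream.incrementToken()) {
        // for a 32-bit value this prints shift=0, 8, 16, 24
        System.out.println("shift=" + numericAtt.getShift()
            + " rawValue=" + numericAtt.getRawValue());
      }
      stream.end();
    }
  }
}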

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericType.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericType.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericType.java
deleted file mode 100644
index 345b497..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericType.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-/** Data type of the numeric value
- * @since 3.2
- *
- * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
- */
-@Deprecated
-public enum LegacyNumericType {
-  /** 32-bit integer numeric type */
-  INT, 
-  /** 64-bit long numeric type */
-  LONG, 
-  /** 32-bit float numeric type */
-  FLOAT, 
-  /** 64-bit double numeric type */
-  DOUBLE
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
deleted file mode 100644
index e6659d7..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
+++ /dev/null
@@ -1,510 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.FilteredTermsEnum;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-
-/**
- * This is a helper class to generate prefix-encoded representations for numerical values
- * and supplies converters to represent float/double values as sortable integers/longs.
- *
- * <p>To quickly execute range queries in Apache Lucene, a range is divided recursively
- * into multiple intervals for searching: The center of the range is searched only with
- * the lowest possible precision in the trie, while the boundaries are matched
- * more exactly. This reduces the number of terms dramatically.
- *
- * <p>This class generates terms to achieve this: First the numerical integer values need to
 - * be converted to bytes. For that, integer values (32 bit or 64 bit) are made unsigned
 - * and the bits are encoded into ASCII chars, 7 bits at a time. The resulting byte[] is
- * sortable like the original integer value (even using UTF-8 sort order). Each value is also
- * prefixed (in the first char) by the <code>shift</code> value (number of bits removed) used
- * during encoding.
- *
- * <p>For easy usage, the trie algorithm is implemented for indexing inside
- * {@link org.apache.lucene.legacy.LegacyNumericTokenStream} that can index <code>int</code>, <code>long</code>,
- * <code>float</code>, and <code>double</code>. For querying,
- * {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} implements the query part
- * for the same data types.
- *
- * @lucene.internal
- *
- * @deprecated Please use {@link org.apache.lucene.index.PointValues} instead.
- *
- * @since 2.9, API changed non backwards-compliant in 4.0
- */
-
-@Deprecated
-public final class LegacyNumericUtils {
-
-  private LegacyNumericUtils() {} // no instance!
-  
-  /**
-   * The default precision step used by {@link org.apache.lucene.legacy.LegacyLongField},
-   * {@link org.apache.lucene.legacy.LegacyDoubleField}, {@link org.apache.lucene.legacy.LegacyNumericTokenStream}, {@link
-   * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
-   */
-  public static final int PRECISION_STEP_DEFAULT = 16;
-  
-  /**
-   * The default precision step used by {@link org.apache.lucene.legacy.LegacyIntField} and
-   * {@link org.apache.lucene.legacy.LegacyFloatField}.
-   */
-  public static final int PRECISION_STEP_DEFAULT_32 = 8;
-  
-  /**
-   * Longs are stored at lower precision by shifting off lower bits. The shift count is
-   * stored as <code>SHIFT_START_LONG+shift</code> in the first byte
-   */
-  public static final byte SHIFT_START_LONG = 0x20;
-
-  /**
-   * The maximum term length (used for <code>byte[]</code> buffer size)
-   * for encoding <code>long</code> values.
-   * @see #longToPrefixCoded
-   */
-  public static final int BUF_SIZE_LONG = 63/7 + 2;
-
-  /**
-   * Integers are stored at lower precision by shifting off lower bits. The shift count is
-   * stored as <code>SHIFT_START_INT+shift</code> in the first byte
-   */
-  public static final byte SHIFT_START_INT  = 0x60;
-
-  /**
-   * The maximum term length (used for <code>byte[]</code> buffer size)
-   * for encoding <code>int</code> values.
-   * @see #intToPrefixCoded
-   */
-  public static final int BUF_SIZE_INT = 31/7 + 2;
-
-  /**
-   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
 -   * This method is used by {@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
-   * After encoding, {@code bytes.offset} will always be 0. 
-   * @param val the numeric value
-   * @param shift how many bits to strip from the right
-   * @param bytes will contain the encoded value
-   */
-  public static void longToPrefixCoded(final long val, final int shift, final BytesRefBuilder bytes) {
-    // ensure shift is 0..63
-    if ((shift & ~0x3f) != 0) {
-      throw new IllegalArgumentException("Illegal shift value, must be 0..63; got shift=" + shift);
-    }
-    int nChars = (((63-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
-    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
-    bytes.grow(BUF_SIZE_LONG);
-    bytes.setByteAt(0, (byte)(SHIFT_START_LONG + shift));
-    long sortableBits = val ^ 0x8000000000000000L;
-    sortableBits >>>= shift;
-    while (nChars > 0) {
-      // Store 7 bits per byte for compatibility
-      // with UTF-8 encoding of terms
-      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
-      sortableBits >>>= 7;
-    }
-  }
-
-  /**
-   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
 -   * This method is used by {@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
-   * After encoding, {@code bytes.offset} will always be 0.
-   * @param val the numeric value
-   * @param shift how many bits to strip from the right
-   * @param bytes will contain the encoded value
-   */
-  public static void intToPrefixCoded(final int val, final int shift, final BytesRefBuilder bytes) {
-    // ensure shift is 0..31
-    if ((shift & ~0x1f) != 0) {
-      throw new IllegalArgumentException("Illegal shift value, must be 0..31; got shift=" + shift);
-    }
-    int nChars = (((31-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
-    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
-    bytes.grow(LegacyNumericUtils.BUF_SIZE_LONG);  // use the max
-    bytes.setByteAt(0, (byte)(SHIFT_START_INT + shift));
-    int sortableBits = val ^ 0x80000000;
-    sortableBits >>>= shift;
-    while (nChars > 0) {
-      // Store 7 bits per byte for compatibility
-      // with UTF-8 encoding of terms
-      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
-      sortableBits >>>= 7;
-    }
-  }
-
-
-  /**
-   * Returns the shift value from a prefix encoded {@code long}.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   */
-  public static int getPrefixCodedLongShift(final BytesRef val) {
-    final int shift = val.bytes[val.offset] - SHIFT_START_LONG;
-    if (shift > 63 || shift < 0)
-      throw new NumberFormatException("Invalid shift value (" + shift + ") in prefixCoded bytes (is encoded value really an INT?)");
-    return shift;
-  }
-
-  /**
-   * Returns the shift value from a prefix encoded {@code int}.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   */
-  public static int getPrefixCodedIntShift(final BytesRef val) {
-    final int shift = val.bytes[val.offset] - SHIFT_START_INT;
-    if (shift > 31 || shift < 0)
-      throw new NumberFormatException("Invalid shift value in prefixCoded bytes (is encoded value really an INT?)");
-    return shift;
-  }
-
-  /**
-   * Returns a long from prefixCoded bytes.
-   * Rightmost bits will be zero for lower precision codes.
-   * This method can be used to decode a term's value.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   * @see #longToPrefixCoded
-   */
-  public static long prefixCodedToLong(final BytesRef val) {
-    long sortableBits = 0L;
-    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
-      sortableBits <<= 7;
-      final byte b = val.bytes[i];
-      if (b < 0) {
-        throw new NumberFormatException(
-          "Invalid prefixCoded numerical value representation (byte "+
-          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
-        );
-      }
-      sortableBits |= b;
-    }
-    return (sortableBits << getPrefixCodedLongShift(val)) ^ 0x8000000000000000L;
-  }
-
-  /**
-   * Returns an int from prefixCoded bytes.
-   * Rightmost bits will be zero for lower precision codes.
-   * This method can be used to decode a term's value.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   * @see #intToPrefixCoded
-   */
-  public static int prefixCodedToInt(final BytesRef val) {
-    int sortableBits = 0;
-    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
-      sortableBits <<= 7;
-      final byte b = val.bytes[i];
-      if (b < 0) {
-        throw new NumberFormatException(
-          "Invalid prefixCoded numerical value representation (byte "+
-          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
-        );
-      }
-      sortableBits |= b;
-    }
-    return (sortableBits << getPrefixCodedIntShift(val)) ^ 0x80000000;
-  }
-
-  /**
-   * Splits a long range recursively.
-   * You may implement a builder that adds clauses to a
-   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
-   * {@link LongRangeBuilder#addRange(BytesRef,BytesRef)}
-   * method.
-   * <p>This method is used by {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
-   */
-  public static void splitLongRange(final LongRangeBuilder builder,
-    final int precisionStep,  final long minBound, final long maxBound
-  ) {
-    splitRange(builder, 64, precisionStep, minBound, maxBound);
-  }
-  
-  /**
-   * Splits an int range recursively.
-   * You may implement a builder that adds clauses to a
-   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
-   * {@link IntRangeBuilder#addRange(BytesRef,BytesRef)}
-   * method.
-   * <p>This method is used by {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
-   */
-  public static void splitIntRange(final IntRangeBuilder builder,
-    final int precisionStep,  final int minBound, final int maxBound
-  ) {
-    splitRange(builder, 32, precisionStep, minBound, maxBound);
-  }
-  
-  /** This helper does the splitting for both 32 and 64 bit. */
-  private static void splitRange(
-    final Object builder, final int valSize,
-    final int precisionStep, long minBound, long maxBound
-  ) {
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    if (minBound > maxBound) return;
-    for (int shift=0; ; shift += precisionStep) {
-      // calculate new bounds for inner precision
-      final long diff = 1L << (shift+precisionStep),
-        mask = ((1L<<precisionStep) - 1L) << shift;
-      final boolean
-        hasLower = (minBound & mask) != 0L,
-        hasUpper = (maxBound & mask) != mask;
-      final long
-        nextMinBound = (hasLower ? (minBound + diff) : minBound) & ~mask,
-        nextMaxBound = (hasUpper ? (maxBound - diff) : maxBound) & ~mask;
-      final boolean
-        lowerWrapped = nextMinBound < minBound,
-        upperWrapped = nextMaxBound > maxBound;
-      
-      if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) {
-        // We are in the lowest precision or the next precision is not available.
-        addRange(builder, valSize, minBound, maxBound, shift);
-        // exit the split recursion loop
-        break;
-      }
-      
-      if (hasLower)
-        addRange(builder, valSize, minBound, minBound | mask, shift);
-      if (hasUpper)
-        addRange(builder, valSize, maxBound & ~mask, maxBound, shift);
-      
-      // recurse to next precision
-      minBound = nextMinBound;
-      maxBound = nextMaxBound;
-    }
-  }
-  
-  /** Helper that delegates to correct range builder */
-  private static void addRange(
-    final Object builder, final int valSize,
-    long minBound, long maxBound,
-    final int shift
-  ) {
-    // for the max bound set all lower bits (that were shifted away):
 -    // this is important for testing or other usages of the split range
-    // (e.g. to reconstruct the full range). The prefixEncoding will remove
-    // the bits anyway, so they do not hurt!
-    maxBound |= (1L << shift) - 1L;
-    // delegate to correct range builder
-    switch(valSize) {
-      case 64:
-        ((LongRangeBuilder)builder).addRange(minBound, maxBound, shift);
-        break;
-      case 32:
-        ((IntRangeBuilder)builder).addRange((int)minBound, (int)maxBound, shift);
-        break;
-      default:
-        // Should not happen!
-        throw new IllegalArgumentException("valSize must be 32 or 64.");
-    }
-  }
-
-  /**
-   * Callback for {@link #splitLongRange}.
 -   * You need to override only one of the methods.
-   * @lucene.internal
-   * @since 2.9, API changed non backwards-compliant in 4.0
-   */
-  public static abstract class LongRangeBuilder {
-    
-    /**
 -     * Override this method if you want to receive the already prefix-encoded range bounds.
-     * You can directly build classical (inclusive) range queries from them.
-     */
-    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-      throw new UnsupportedOperationException();
-    }
-    
-    /**
 -     * Override this method if you want to receive the raw long range bounds.
 -     * You can use this e.g. for debugging purposes (printing out range bounds).
-     */
-    public void addRange(final long min, final long max, final int shift) {
-      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
-      longToPrefixCoded(min, shift, minBytes);
-      longToPrefixCoded(max, shift, maxBytes);
-      addRange(minBytes.get(), maxBytes.get());
-    }
-  
-  }
-  
-  /**
-   * Callback for {@link #splitIntRange}.
 -   * You need to override only one of the methods.
-   * @lucene.internal
-   * @since 2.9, API changed non backwards-compliant in 4.0
-   */
-  public static abstract class IntRangeBuilder {
-    
-    /**
 -     * Override this method if you want to receive the already prefix-encoded range bounds.
 -     * You can directly build classical (inclusive) range queries from them.
-     */
-    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-      throw new UnsupportedOperationException();
-    }
-    
-    /**
 -     * Override this method if you want to receive the raw int range bounds.
 -     * You can use this e.g. for debugging purposes (printing out range bounds).
-     */
-    public void addRange(final int min, final int max, final int shift) {
-      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
-      intToPrefixCoded(min, shift, minBytes);
-      intToPrefixCoded(max, shift, maxBytes);
-      addRange(minBytes.get(), maxBytes.get());
-    }
-  
-  }
-  
-  /**
-   * Filters the given {@link TermsEnum} by accepting only prefix coded 64 bit
-   * terms with a shift value of <tt>0</tt>.
-   * 
-   * @param termsEnum
-   *          the terms enum to filter
-   * @return a filtered {@link TermsEnum} that only returns prefix coded 64 bit
-   *         terms with a shift value of <tt>0</tt>.
-   */
-  public static TermsEnum filterPrefixCodedLongs(TermsEnum termsEnum) {
-    return new SeekingNumericFilteredTermsEnum(termsEnum) {
-
-      @Override
-      protected AcceptStatus accept(BytesRef term) {
-        return LegacyNumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
-      }
-    };
-  }
-
-  /**
-   * Filters the given {@link TermsEnum} by accepting only prefix coded 32 bit
-   * terms with a shift value of <tt>0</tt>.
-   * 
-   * @param termsEnum
-   *          the terms enum to filter
-   * @return a filtered {@link TermsEnum} that only returns prefix coded 32 bit
-   *         terms with a shift value of <tt>0</tt>.
-   */
-  public static TermsEnum filterPrefixCodedInts(TermsEnum termsEnum) {
-    return new SeekingNumericFilteredTermsEnum(termsEnum) {
-      
-      @Override
-      protected AcceptStatus accept(BytesRef term) {
-        return LegacyNumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
-      }
-    };
-  }
-
-  /** Just like FilteredTermsEnum, except it adds a limited
-   *  seekCeil implementation that only works with {@link
-   *  #filterPrefixCodedInts} and {@link
-   *  #filterPrefixCodedLongs}. */
-  private static abstract class SeekingNumericFilteredTermsEnum extends FilteredTermsEnum {
-    public SeekingNumericFilteredTermsEnum(final TermsEnum tenum) {
-      super(tenum, false);
-    }
-
-    @Override
-    @SuppressWarnings("fallthrough")
-    public SeekStatus seekCeil(BytesRef term) throws IOException {
-
-      // NOTE: This is not general!!  It only handles YES
-      // and END, because that's all we need for the numeric
-      // case here
-
-      SeekStatus status = tenum.seekCeil(term);
-      if (status == SeekStatus.END) {
-        return SeekStatus.END;
-      }
-
-      actualTerm = tenum.term();
-
-      if (accept(actualTerm) == AcceptStatus.YES) {
-        return status;
-      } else {
-        return SeekStatus.END;
-      }
-    }
-  }
-
-  private static Terms intTerms(Terms terms) {
-    return new FilterLeafReader.FilterTerms(terms) {
-        @Override
-        public TermsEnum iterator() throws IOException {
-          return filterPrefixCodedInts(in.iterator());
-        }
-      };
-  }
-
-  private static Terms longTerms(Terms terms) {
-    return new FilterLeafReader.FilterTerms(terms) {
-        @Override
-        public TermsEnum iterator() throws IOException {
-          return filterPrefixCodedLongs(in.iterator());
-        }
-      };
-  }
-    
-  /**
-   * Returns the minimum int value indexed into this
-   * numeric field or null if no terms exist.
-   */
-  public static Integer getMinInt(Terms terms) throws IOException {
-    // All shift=0 terms are sorted first, so we don't need
-    // to filter the incoming terms; we can just get the
-    // min:
-    BytesRef min = terms.getMin();
-    return (min != null) ? LegacyNumericUtils.prefixCodedToInt(min) : null;
-  }
-
-  /**
-   * Returns the maximum int value indexed into this
-   * numeric field or null if no terms exist.
-   */
-  public static Integer getMaxInt(Terms terms) throws IOException {
-    BytesRef max = intTerms(terms).getMax();
-    return (max != null) ? LegacyNumericUtils.prefixCodedToInt(max) : null;
-  }
-
-  /**
-   * Returns the minimum long value indexed into this
-   * numeric field or null if no terms exist.
-   */
-  public static Long getMinLong(Terms terms) throws IOException {
-    // All shift=0 terms are sorted first, so we don't need
-    // to filter the incoming terms; we can just get the
-    // min:
-    BytesRef min = terms.getMin();
-    return (min != null) ? LegacyNumericUtils.prefixCodedToLong(min) : null;
-  }
-
-  /**
-   * Returns the maximum long value indexed into this
-   * numeric field or null if no terms exist.
-   */
-  public static Long getMaxLong(Terms terms) throws IOException {
-    BytesRef max = longTerms(terms).getMax();
-    return (max != null) ? LegacyNumericUtils.prefixCodedToLong(max) : null;
-  }
-  
-}
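
For context, a hedged sketch (illustration only, not part of this commit)
exercising the removed helpers: a prefix-code round trip plus a recursive
range split. The class name LegacyNumericUtilsDemo is invented, and the code
assumes the deleted legacy classes are still on the classpath:

import org.apache.lucene.legacy.LegacyNumericUtils;
import org.apache.lucene.util.BytesRefBuilder;

public class LegacyNumericUtilsDemo {
  public static void main(String[] args) {
    // Round trip: encode a long at shift=0, then decode it back.
    BytesRefBuilder bytes = new BytesRefBuilder();
    LegacyNumericUtils.longToPrefixCoded(1234L, 0, bytes);
    System.out.println(LegacyNumericUtils.prefixCodedToLong(bytes.get())); // 1234

    // Range split: the builder receives one sub-range per precision level,
    // the same way LegacyNumericRangeQuery consumes them.
    LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
      @Override
      public void addRange(long min, long max, int shift) {
        System.out.println("shift=" + shift + " [" + min + ".." + max + "]");
      }
    }, 16, 17L, 4000L);
  }
}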

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/doc-files/nrq-formula-1.png
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/doc-files/nrq-formula-1.png b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/doc-files/nrq-formula-1.png
deleted file mode 100644
index fd7d936..0000000
Binary files a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/doc-files/nrq-formula-1.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/doc-files/nrq-formula-2.png
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/doc-files/nrq-formula-2.png b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/doc-files/nrq-formula-2.png
deleted file mode 100644
index 93cb308..0000000
Binary files a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/doc-files/nrq-formula-2.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/package-info.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/package-info.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/package-info.java
deleted file mode 100644
index d0167f8..0000000
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-/** 
- * Deprecated stuff!
- */
-package org.apache.lucene.legacy;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index b4f5047..a876b7d 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -60,10 +60,6 @@ import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyLongField;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
-import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
@@ -1114,9 +1110,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", customType2));
     doc.add(new Field("content2", "here is more content with aaa aaa aaa", customType2));
     doc.add(new Field("fie\u2C77ld", "field with non-ascii name", customType2));
-    // add numeric fields, to test if flex preserves encoding
-    doc.add(new LegacyIntField("trieInt", id, Field.Store.NO));
-    doc.add(new LegacyLongField("trieLong", (long) id, Field.Store.NO));
 
     // add docvalues fields
     doc.add(new NumericDocValuesField("dvByte", (byte) id));
@@ -1294,51 +1287,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     }
   }
   
-  public void testNumericFields() throws Exception {
-    for (String name : oldNames) {
-      
-      Directory dir = oldIndexDirs.get(name);
-      IndexReader reader = DirectoryReader.open(dir);
-      IndexSearcher searcher = newSearcher(reader);
-      
-      for (int id=10; id<15; id++) {
-        ScoreDoc[] hits = searcher.search(LegacyNumericRangeQuery.newIntRange("trieInt", LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
-        assertEquals("wrong number of hits", 1, hits.length);
-        Document d = searcher.doc(hits[0].doc);
-        assertEquals(String.valueOf(id), d.get("id"));
-        
-        hits = searcher.search(LegacyNumericRangeQuery.newLongRange("trieLong", LegacyNumericUtils.PRECISION_STEP_DEFAULT, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs;
-        assertEquals("wrong number of hits", 1, hits.length);
-        d = searcher.doc(hits[0].doc);
-        assertEquals(String.valueOf(id), d.get("id"));
-      }
-      
-      // check that also lower-precision fields are ok
-      ScoreDoc[] hits = searcher.search(LegacyNumericRangeQuery.newIntRange("trieInt", LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, Integer.MIN_VALUE, Integer.MAX_VALUE, false, false), 100).scoreDocs;
-      assertEquals("wrong number of hits", 34, hits.length);
-      
-      hits = searcher.search(LegacyNumericRangeQuery.newLongRange("trieLong", LegacyNumericUtils.PRECISION_STEP_DEFAULT, Long.MIN_VALUE, Long.MAX_VALUE, false, false), 100).scoreDocs;
-      assertEquals("wrong number of hits", 34, hits.length);
-      
-      // check decoding of terms
-      Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "trieInt");
-      TermsEnum termsEnum = LegacyNumericUtils.filterPrefixCodedInts(terms.iterator());
-      while (termsEnum.next() != null) {
-        int val = LegacyNumericUtils.prefixCodedToInt(termsEnum.term());
-        assertTrue("value in id bounds", val >= 0 && val < 35);
-      }
-      
-      terms = MultiFields.getTerms(searcher.getIndexReader(), "trieLong");
-      termsEnum = LegacyNumericUtils.filterPrefixCodedLongs(terms.iterator());
-      while (termsEnum.next() != null) {
-        long val = LegacyNumericUtils.prefixCodedToLong(termsEnum.term());
-        assertTrue("value in id bounds", val >= 0L && val < 35L);
-      }
-      
-      reader.close();
-    }
-  }
-  
   private int checkAllSegmentsUpgraded(Directory dir, int indexCreatedVersion) throws IOException {
     final SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
     if (VERBOSE) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyField.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyField.java
deleted file mode 100644
index 92d1dd6..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyField.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-import java.io.StringReader;
-
-import org.apache.lucene.analysis.CannedTokenStream;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
-
-public class TestLegacyField extends LuceneTestCase {
-  
-  public void testLegacyDoubleField() throws Exception {
-    Field fields[] = new Field[] {
-        new LegacyDoubleField("foo", 5d, Field.Store.NO),
-        new LegacyDoubleField("foo", 5d, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      field.setDoubleValue(6d); // ok
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-    
-      assertEquals(6d, field.numericValue().doubleValue(), 0.0d);
-    }
-  }
-  
-  public void testLegacyFloatField() throws Exception {
-    Field fields[] = new Field[] {
-        new LegacyFloatField("foo", 5f, Field.Store.NO),
-        new LegacyFloatField("foo", 5f, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      field.setFloatValue(6f); // ok
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6f, field.numericValue().floatValue(), 0.0f);
-    }
-  }
-  
-  public void testLegacyIntField() throws Exception {
-    Field fields[] = new Field[] {
-        new LegacyIntField("foo", 5, Field.Store.NO),
-        new LegacyIntField("foo", 5, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      field.setIntValue(6); // ok
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6, field.numericValue().intValue());
-    }
-  }
-  
-  public void testLegacyLongField() throws Exception {
-    Field fields[] = new Field[] {
-        new LegacyLongField("foo", 5L, Field.Store.NO),
-        new LegacyLongField("foo", 5L, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      field.setLongValue(6); // ok
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6L, field.numericValue().longValue());
-    }
-  }
-  
-  private void trySetByteValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setByteValue((byte) 10);
-    });
-  }
-
-  private void trySetBytesValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setBytesValue(new byte[] { 5, 5 });
-    });
-  }
-  
-  private void trySetBytesRefValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setBytesValue(new BytesRef("bogus"));
-    });
-  }
-  
-  private void trySetDoubleValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setDoubleValue(Double.MAX_VALUE);
-    });
-  }
-  
-  private void trySetIntValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setIntValue(Integer.MAX_VALUE);
-    });
-  }
-  
-  private void trySetLongValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setLongValue(Long.MAX_VALUE);
-    });
-  }
-  
-  private void trySetFloatValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setFloatValue(Float.MAX_VALUE);
-    });
-  }
-  
-  private void trySetReaderValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setReaderValue(new StringReader("BOO!"));
-    });
-  }
-  
-  private void trySetShortValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setShortValue(Short.MAX_VALUE);
-    });
-  }
-  
-  private void trySetStringValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setStringValue("BOO!");
-    });
-  }
-  
-  private void trySetTokenStreamValue(Field f) {
-    expectThrows(IllegalArgumentException.class, () -> {
-      f.setTokenStream(new CannedTokenStream(new Token("foo", 0, 3)));
-    });
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyFieldReuse.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyFieldReuse.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyFieldReuse.java
deleted file mode 100644
index 9335290..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyFieldReuse.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.CannedTokenStream;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyNumericTokenStream;
-import org.apache.lucene.legacy.LegacyNumericUtils;
-import org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute;
-
-/** test tokenstream reuse by DefaultIndexingChain */
-public class TestLegacyFieldReuse extends BaseTokenStreamTestCase {
-  
-  public void testNumericReuse() throws IOException {
-    LegacyIntField legacyIntField = new LegacyIntField("foo", 5, Field.Store.NO);
-    
-    // passing null
-    TokenStream ts = legacyIntField.tokenStream(null, null);
-    assertTrue(ts instanceof LegacyNumericTokenStream);
-    assertEquals(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, ((LegacyNumericTokenStream)ts).getPrecisionStep());
-    assertNumericContents(5, ts);
-
-    // now reuse previous stream
-    legacyIntField = new LegacyIntField("foo", 20, Field.Store.NO);
-    TokenStream ts2 = legacyIntField.tokenStream(null, ts);
-    assertSame(ts, ts2);
-    assertNumericContents(20, ts);
-    
-    // pass a bogus stream and ensure it's still ok
-    legacyIntField = new LegacyIntField("foo", 2343, Field.Store.NO);
-    TokenStream bogus = new CannedTokenStream(new Token("bogus", 0, 5));
-    ts = legacyIntField.tokenStream(null, bogus);
-    assertNotSame(bogus, ts);
-    assertNumericContents(2343, ts);
-    
-    // pass another bogus stream (numeric, but different precision step!)
-    legacyIntField = new LegacyIntField("foo", 42, Field.Store.NO);
-    assert 3 != LegacyNumericUtils.PRECISION_STEP_DEFAULT;
-    bogus = new LegacyNumericTokenStream(3);
-    ts = legacyIntField.tokenStream(null, bogus);
-    assertNotSame(bogus, ts);
-    assertNumericContents(42, ts);
-  }
-   
-  private void assertNumericContents(int value, TokenStream ts) throws IOException {
-    assertTrue(ts instanceof LegacyNumericTokenStream);
-    LegacyNumericTermAttribute numericAtt = ts.getAttribute(LegacyNumericTermAttribute.class);
-    ts.reset();
-    boolean seen = false;
-    while (ts.incrementToken()) {
-      if (numericAtt.getShift() == 0) {
-        assertEquals(value, numericAtt.getRawValue());
-        seen = true;
-      }
-    }
-    ts.end();
-    ts.close();
-    assertTrue(seen);
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyNumericUtils.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyNumericUtils.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyNumericUtils.java
deleted file mode 100644
index 8607efd..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyNumericUtils.java
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Random;
-
-import org.apache.lucene.legacy.LegacyNumericUtils;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.FixedBitSet;
-import org.apache.lucene.util.LongBitSet;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-
-public class TestLegacyNumericUtils extends LuceneTestCase {
-
-  public void testLongConversionAndOrdering() throws Exception {
-    // generate a series of encoded longs, each numerical one bigger than the one before
-    BytesRefBuilder last = new BytesRefBuilder();
-    BytesRefBuilder act = new BytesRefBuilder();
-    for (long l=-100000L; l<100000L; l++) {
-      LegacyNumericUtils.longToPrefixCoded(l, 0, act);
-      if (last!=null) {
-        // test if smaller
-        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
-        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
-      }
 -      // test if back and forward conversion works
-      assertEquals("forward and back conversion should generate same long", l, LegacyNumericUtils.prefixCodedToLong(act.get()));
-      // next step
-      last.copyBytes(act);
-    }
-  }
-
-  public void testIntConversionAndOrdering() throws Exception {
-    // generate a series of encoded ints, each numerical one bigger than the one before
-    BytesRefBuilder act = new BytesRefBuilder();
-    BytesRefBuilder last = new BytesRefBuilder();
-    for (int i=-100000; i<100000; i++) {
-      LegacyNumericUtils.intToPrefixCoded(i, 0, act);
-      if (last!=null) {
-        // test if smaller
-        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
-        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
-      }
 -      // test if back and forward conversion works
-      assertEquals("forward and back conversion should generate same int", i, LegacyNumericUtils.prefixCodedToInt(act.get()));
-      // next step
-      last.copyBytes(act.get());
-    }
-  }
-
-  public void testLongSpecialValues() throws Exception {
-    long[] vals=new long[]{
-      Long.MIN_VALUE, Long.MIN_VALUE+1, Long.MIN_VALUE+2, -5003400000000L,
-      -4000L, -3000L, -2000L, -1000L, -1L, 0L, 1L, 10L, 300L, 50006789999999999L, Long.MAX_VALUE-2, Long.MAX_VALUE-1, Long.MAX_VALUE
-    };
-    BytesRefBuilder[] prefixVals = new BytesRefBuilder[vals.length];
-    
-    for (int i=0; i<vals.length; i++) {
-      prefixVals[i] = new BytesRefBuilder();
-      LegacyNumericUtils.longToPrefixCoded(vals[i], 0, prefixVals[i]);
-      
-      // check forward and back conversion
-      assertEquals( "forward and back conversion should generate same long", vals[i], LegacyNumericUtils.prefixCodedToLong(prefixVals[i].get()) );
-
-      // test if decoding values as int fails correctly
-      final int index = i;
-      expectThrows(NumberFormatException.class, () -> {
-        LegacyNumericUtils.prefixCodedToInt(prefixVals[index].get());
-      });
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<prefixVals.length; i++) {
-      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
-    }
-        
 -    // check the prefix encoding: a lower-precision value should differ from the original value by exactly the removed lower bits
-    final BytesRefBuilder ref = new BytesRefBuilder();
-    for (int i=0; i<vals.length; i++) {
-      for (int j=0; j<64; j++) {
-        LegacyNumericUtils.longToPrefixCoded(vals[i], j, ref);
-        long prefixVal= LegacyNumericUtils.prefixCodedToLong(ref.get());
-        long mask=(1L << j) - 1L;
-        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
-      }
-    }
-  }
-
-  public void testIntSpecialValues() throws Exception {
-    int[] vals=new int[]{
-      Integer.MIN_VALUE, Integer.MIN_VALUE+1, Integer.MIN_VALUE+2, -64765767,
-      -4000, -3000, -2000, -1000, -1, 0, 1, 10, 300, 765878989, Integer.MAX_VALUE-2, Integer.MAX_VALUE-1, Integer.MAX_VALUE
-    };
-    BytesRefBuilder[] prefixVals=new BytesRefBuilder[vals.length];
-    
-    for (int i=0; i<vals.length; i++) {
-      prefixVals[i] = new BytesRefBuilder();
-      LegacyNumericUtils.intToPrefixCoded(vals[i], 0, prefixVals[i]);
-      
-      // check forward and back conversion
-      assertEquals( "forward and back conversion should generate same int", vals[i], LegacyNumericUtils.prefixCodedToInt(prefixVals[i].get()) );
-      
-      // test if decoding values as long fails correctly
-      final int index = i;
-      expectThrows(NumberFormatException.class, () -> {
-        LegacyNumericUtils.prefixCodedToLong(prefixVals[index].get());
-      });
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<prefixVals.length; i++) {
-      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
-    }
-    
 -    // check the prefix encoding: a lower-precision value should differ from the original value by exactly the removed lower bits
-    final BytesRefBuilder ref = new BytesRefBuilder();
-    for (int i=0; i<vals.length; i++) {
-      for (int j=0; j<32; j++) {
-        LegacyNumericUtils.intToPrefixCoded(vals[i], j, ref);
-        int prefixVal= LegacyNumericUtils.prefixCodedToInt(ref.get());
-        int mask=(1 << j) - 1;
-        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
-      }
-    }
-  }
-
-  public void testDoubles() throws Exception {
-    double[] vals=new double[]{
-      Double.NEGATIVE_INFINITY, -2.3E25, -1.0E15, -1.0, -1.0E-1, -1.0E-2, -0.0, 
-      +0.0, 1.0E-2, 1.0E-1, 1.0, 1.0E15, 2.3E25, Double.POSITIVE_INFINITY, Double.NaN
-    };
-    long[] longVals=new long[vals.length];
-    
-    // check forward and back conversion
-    for (int i=0; i<vals.length; i++) {
-      longVals[i]= NumericUtils.doubleToSortableLong(vals[i]);
-      assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], NumericUtils.sortableLongToDouble(longVals[i]))==0 );
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<longVals.length; i++) {
-      assertTrue( "check sort order", longVals[i-1] < longVals[i] );
-    }
-  }
-
-  public static final double[] DOUBLE_NANs = {
-    Double.NaN,
-    Double.longBitsToDouble(0x7ff0000000000001L),
-    Double.longBitsToDouble(0x7fffffffffffffffL),
-    Double.longBitsToDouble(0xfff0000000000001L),
-    Double.longBitsToDouble(0xffffffffffffffffL)
-  };
-
-  public void testSortableDoubleNaN() {
-    final long plusInf = NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
-    for (double nan : DOUBLE_NANs) {
-      assertTrue(Double.isNaN(nan));
-      final long sortable = NumericUtils.doubleToSortableLong(nan);
-      assertTrue("Double not sorted correctly: " + nan + ", long repr: " 
-          + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
-    }
-  }
-  
-  public void testFloats() throws Exception {
-    float[] vals=new float[]{
-      Float.NEGATIVE_INFINITY, -2.3E25f, -1.0E15f, -1.0f, -1.0E-1f, -1.0E-2f, -0.0f, 
-      +0.0f, 1.0E-2f, 1.0E-1f, 1.0f, 1.0E15f, 2.3E25f, Float.POSITIVE_INFINITY, Float.NaN
-    };
-    int[] intVals=new int[vals.length];
-    
-    // check forward and back conversion
-    for (int i=0; i<vals.length; i++) {
-      intVals[i]= NumericUtils.floatToSortableInt(vals[i]);
-      assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], NumericUtils.sortableIntToFloat(intVals[i]))==0 );
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<intVals.length; i++) {
-      assertTrue( "check sort order", intVals[i-1] < intVals[i] );
-    }
-  }
-
-  public static final float[] FLOAT_NANs = {
-    Float.NaN,
-    Float.intBitsToFloat(0x7f800001),
-    Float.intBitsToFloat(0x7fffffff),
-    Float.intBitsToFloat(0xff800001),
-    Float.intBitsToFloat(0xffffffff)
-  };
-
-  public void testSortableFloatNaN() {
-    final int plusInf = NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
-    for (float nan : FLOAT_NANs) {
-      assertTrue(Float.isNaN(nan));
-      final int sortable = NumericUtils.floatToSortableInt(nan);
-      assertTrue("Float not sorted correctly: " + nan + ", int repr: " 
-          + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
-    }
-  }
-
-  // INFO: Tests for trieCodeLong()/trieCodeInt() not needed because they are implicitly tested by range filter tests
-  
-  /** Note: The neededBounds Iterable must contain unsigned values (this makes it easier to follow what's happening) */
-  private void assertLongRangeSplit(final long lower, final long upper, int precisionStep,
-    final boolean useBitSet, final Iterable<Long> expectedBounds, final Iterable<Integer> expectedShifts
-  ) {
-    // Cannot use FixedBitSet since the range could be long:
-    final LongBitSet bits=useBitSet ? new LongBitSet(upper-lower+1) : null;
-    final Iterator<Long> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
-    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
-
-    LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
-      @Override
-      public void addRange(long min, long max, int shift) {
-        assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
-        if (useBitSet) for (long l = min; l <= max; l++) {
-          assertFalse("ranges should not overlap", bits.getAndSet(l - lower));
-          // extra exit condition to prevent overflow on MAX_VALUE
-          if (l == max) break;
-        }
-        if (neededBounds == null || neededShifts == null)
-          return;
-        // make unsigned longs for easier display and understanding
-        min ^= 0x8000000000000000L;
-        max ^= 0x8000000000000000L;
-        //System.out.println("0x"+Long.toHexString(min>>>shift)+"L,0x"+Long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
-        assertEquals("shift", neededShifts.next().intValue(), shift);
-        assertEquals("inner min bound", neededBounds.next().longValue(), min >>> shift);
-        assertEquals("inner max bound", neededBounds.next().longValue(), max >>> shift);
-      }
-    }, precisionStep, lower, upper);
-    
-    if (useBitSet) {
-      // after flipping all bits in the range, the cardinality should be zero
-      bits.flip(0,upper-lower+1);
-      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
-    }
-  }
-  
-  /** LUCENE-2541: LegacyNumericRangeQuery errors with endpoints near long min and max values */
-  public void testLongExtremeValues() throws Exception {
-    // upper end extremes
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 1, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 2, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 6, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 8, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 64, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-
-    assertLongRangeSplit(Long.MAX_VALUE-0xfL, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xfffffffffffffffL,0xfffffffffffffffL
-    ), Arrays.asList(
-      4
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE-0x10L, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xffffffffffffffefL,0xffffffffffffffefL,
-      0xfffffffffffffffL,0xfffffffffffffffL
-    ), Arrays.asList(
-      0, 4
-    ));
-
-    // lower end extremes
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 1, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 2, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 4, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 6, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 8, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 64, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0xfL, 4, true, Arrays.asList(
-      0x000000000000000L,0x000000000000000L
-    ), Arrays.asList(
-      4
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0x10L, 4, true, Arrays.asList(
-      0x0000000000000010L,0x0000000000000010L,
-      0x000000000000000L,0x000000000000000L
-    ), Arrays.asList(
-      0, 4
-    ));
-  }
-  
-  public void testRandomSplit() throws Exception {
-    long num = (long) atLeast(10);
-    for (long i=0; i < num; i++) {
-      executeOneRandomSplit(random());
-    }
-  }
-  
-  private void executeOneRandomSplit(final Random random) throws Exception {
-    long lower = randomLong(random);
-    long len = random.nextInt(16384*1024); // not too large bitsets, else OOME!
-    while (lower + len < lower) { // overflow
-      lower >>= 1;
-    }
-    assertLongRangeSplit(lower, lower + len, random.nextInt(64) + 1, true, null, null);
-  }
-  
-  private long randomLong(final Random random) {
-    long val;
-    switch(random.nextInt(4)) {
-      case 0:
-        val = 1L << (random.nextInt(63)); //  patterns like 0x000000100000 (-1 yields patterns like 0x0000fff)
-        break;
-      case 1:
-        val = -1L << (random.nextInt(63)); // patterns like 0xfffff00000
-        break;
-      default:
-        val = random.nextLong();
-    }
-
-    val += random.nextInt(5)-2;
-
-    if (random.nextBoolean()) {
-      if (random.nextBoolean()) val += random.nextInt(100)-50;
-      if (random.nextBoolean()) val = ~val;
-      if (random.nextBoolean()) val = val<<1;
-      if (random.nextBoolean()) val = val>>>1;
-    }
-
-    return val;
-  }
-  
-  public void testSplitLongRange() throws Exception {
-    // a hard-coded "standard" range
-    assertLongRangeSplit(-5000L, 9500L, 4, true, Arrays.asList(
-      0x7fffffffffffec78L,0x7fffffffffffec7fL,
-      0x8000000000002510L,0x800000000000251cL,
-      0x7fffffffffffec8L, 0x7fffffffffffecfL,
-      0x800000000000250L, 0x800000000000250L,
-      0x7fffffffffffedL,  0x7fffffffffffefL,
-      0x80000000000020L,  0x80000000000024L,
-      0x7ffffffffffffL,   0x8000000000001L
-    ), Arrays.asList(
-      0, 0,
-      4, 4,
-      8, 8,
-      12
-    ));
-    
-    // the same with no range splitting
-    assertLongRangeSplit(-5000L, 9500L, 64, true, Arrays.asList(
-      0x7fffffffffffec78L,0x800000000000251cL
-    ), Arrays.asList(
-      0
-    ));
-    
-    // this tests optimized range splitting: if one of the inner bounds
-    // is also the bound of the next lower precision, it should be used completely
-    assertLongRangeSplit(0L, 1024L+63L, 4, true, Arrays.asList(
-      0x800000000000040L, 0x800000000000043L,
-      0x80000000000000L,  0x80000000000003L
-    ), Arrays.asList(
-      4, 8
-    ));
-    
-    // the full long range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 8, false, Arrays.asList(
-      0x00L,0xffL
-    ), Arrays.asList(
-      56
-    ));
-
-    // the same with precisionStep=4
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 4, false, Arrays.asList(
-      0x0L,0xfL
-    ), Arrays.asList(
-      60
-    ));
-
-    // the same with precisionStep=2
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 2, false, Arrays.asList(
-      0x0L,0x3L
-    ), Arrays.asList(
-      62
-    ));
-
-    // the same with precisionStep=1
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 1, false, Arrays.asList(
-      0x0L,0x1L
-    ), Arrays.asList(
-      63
-    ));
-
-    // an inverse range should produce no sub-ranges
-    assertLongRangeSplit(9500L, -5000L, 4, false, Collections.<Long>emptyList(), Collections.<Integer>emptyList());    
-
-    // a 0-length range should reproduce the range itself
-    assertLongRangeSplit(9500L, 9500L, 4, false, Arrays.asList(
-      0x800000000000251cL,0x800000000000251cL
-    ), Arrays.asList(
-      0
-    ));
-  }
-
-  /** Note: The neededBounds Iterable must contain unsigned values (this makes it easier to follow what's happening) */
-  private void assertIntRangeSplit(final int lower, final int upper, int precisionStep,
-    final boolean useBitSet, final Iterable<Integer> expectedBounds, final Iterable<Integer> expectedShifts
-  ) {
-    final FixedBitSet bits=useBitSet ? new FixedBitSet(upper-lower+1) : null;
-    final Iterator<Integer> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
-    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
-    
-    LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
-      @Override
-      public void addRange(int min, int max, int shift) {
-        assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
-        if (useBitSet) for (int i = min; i <= max; i++) {
-          assertFalse("ranges should not overlap", bits.getAndSet(i - lower));
-          // extra exit condition to prevent overflow on MAX_VALUE
-          if (i == max) break;
-        }
-        if (neededBounds == null)
-          return;
-        // make unsigned ints for easier display and understanding
-        min ^= 0x80000000;
-        max ^= 0x80000000;
-        //System.out.println("0x"+Integer.toHexString(min>>>shift)+",0x"+Integer.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
-        assertEquals("shift", neededShifts.next().intValue(), shift);
-        assertEquals("inner min bound", neededBounds.next().intValue(), min >>> shift);
-        assertEquals("inner max bound", neededBounds.next().intValue(), max >>> shift);
-      }
-    }, precisionStep, lower, upper);
-    
-    if (useBitSet) {
-      // after flipping all bits in the range, the cardinality should be zero
-      bits.flip(0, upper-lower+1);
-      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
-    }
-  }
-  
-  public void testSplitIntRange() throws Exception {
-    // a hard-coded "standard" range
-    assertIntRangeSplit(-5000, 9500, 4, true, Arrays.asList(
-      0x7fffec78,0x7fffec7f,
-      0x80002510,0x8000251c,
-      0x7fffec8, 0x7fffecf,
-      0x8000250, 0x8000250,
-      0x7fffed,  0x7fffef,
-      0x800020,  0x800024,
-      0x7ffff,   0x80001
-    ), Arrays.asList(
-      0, 0,
-      4, 4,
-      8, 8,
-      12
-    ));
-    
-    // the same with no range splitting
-    assertIntRangeSplit(-5000, 9500, 32, true, Arrays.asList(
-      0x7fffec78,0x8000251c
-    ), Arrays.asList(
-      0
-    ));
-    
-    // this tests optimized range splitting: if one of the inner bounds
-    // is also the bound of the next lower precision, it should be used completely
-    assertIntRangeSplit(0, 1024+63, 4, true, Arrays.asList(
-      0x8000040, 0x8000043,
-      0x800000,  0x800003
-    ), Arrays.asList(
-      4, 8
-    ));
-    
-    // the full int range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 8, false, Arrays.asList(
-      0x00,0xff
-    ), Arrays.asList(
-      24
-    ));
-
-    // the same with precisionStep=4
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 4, false, Arrays.asList(
-      0x0,0xf
-    ), Arrays.asList(
-      28
-    ));
-
-    // the same with precisionStep=2
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 2, false, Arrays.asList(
-      0x0,0x3
-    ), Arrays.asList(
-      30
-    ));
-
-    // the same with precisionStep=1
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 1, false, Arrays.asList(
-      0x0,0x1
-    ), Arrays.asList(
-      31
-    ));
-
-    // an inverse range should produce no sub-ranges
-    assertIntRangeSplit(9500, -5000, 4, false, Collections.<Integer>emptyList(), Collections.<Integer>emptyList());    
-
-    // a 0-length range should reproduce the range itself
-    assertIntRangeSplit(9500, 9500, 4, false, Arrays.asList(
-      0x8000251c,0x8000251c
-    ), Arrays.asList(
-      0
-    ));
-  }
-
-}
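The core invariant exercised by the removed tests above is that NumericUtils maps floating-point
values onto integral values whose natural order matches the floating-point order. A minimal
standalone sketch of that property (the NumericUtils calls are the API used in the diff; the
driver class and sample values are illustrative only):

    import org.apache.lucene.util.NumericUtils;

    public class SortableOrderDemo {
      public static void main(String[] args) {
        double[] vals = { Double.NEGATIVE_INFINITY, -1.0, -0.0, +0.0, 1.0, Double.POSITIVE_INFINITY };
        long prev = Long.MIN_VALUE;
        for (double d : vals) {
          long sortable = NumericUtils.doubleToSortableLong(d);
          // the encoding round-trips exactly ...
          if (Double.compare(d, NumericUtils.sortableLongToDouble(sortable)) != 0) {
            throw new AssertionError("round trip failed for " + d);
          }
          // ... and preserves order under plain long comparison
          if (sortable < prev) {
            throw new AssertionError("order not preserved at " + d);
          }
          prev = sortable;
        }
      }
    }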

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
deleted file mode 100644
index 27fae15..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.legacy.LegacyDoubleField;
-import org.apache.lucene.legacy.LegacyFloatField;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyLongField;
-import org.apache.lucene.legacy.LegacyNumericUtils;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-
-public class TestLegacyTerms extends LuceneTestCase {
-
-  public void testEmptyIntFieldMinMax() throws Exception {
-    assertNull(LegacyNumericUtils.getMinInt(EMPTY_TERMS));
-    assertNull(LegacyNumericUtils.getMaxInt(EMPTY_TERMS));
-  }
-  
-  public void testIntFieldMinMax() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    int numDocs = atLeast(100);
-    int minValue = Integer.MAX_VALUE;
-    int maxValue = Integer.MIN_VALUE;
-    for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      int num = random().nextInt();
-      minValue = Math.min(num, minValue);
-      maxValue = Math.max(num, maxValue);
-      doc.add(new LegacyIntField("field", num, Field.Store.NO));
-      w.addDocument(doc);
-    }
-    
-    IndexReader r = w.getReader();
-    Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(new Integer(minValue), LegacyNumericUtils.getMinInt(terms));
-    assertEquals(new Integer(maxValue), LegacyNumericUtils.getMaxInt(terms));
-
-    r.close();
-    w.close();
-    dir.close();
-  }
-
-  public void testEmptyLongFieldMinMax() throws Exception {
-    assertNull(LegacyNumericUtils.getMinLong(EMPTY_TERMS));
-    assertNull(LegacyNumericUtils.getMaxLong(EMPTY_TERMS));
-  }
-  
-  public void testLongFieldMinMax() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    int numDocs = atLeast(100);
-    long minValue = Long.MAX_VALUE;
-    long maxValue = Long.MIN_VALUE;
-    for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      long num = random().nextLong();
-      minValue = Math.min(num, minValue);
-      maxValue = Math.max(num, maxValue);
-      doc.add(new LegacyLongField("field", num, Field.Store.NO));
-      w.addDocument(doc);
-    }
-    
-    IndexReader r = w.getReader();
-
-    Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(new Long(minValue), LegacyNumericUtils.getMinLong(terms));
-    assertEquals(new Long(maxValue), LegacyNumericUtils.getMaxLong(terms));
-
-    r.close();
-    w.close();
-    dir.close();
-  }
-
-  public void testFloatFieldMinMax() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    int numDocs = atLeast(100);
-    float minValue = Float.POSITIVE_INFINITY;
-    float maxValue = Float.NEGATIVE_INFINITY;
-    for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      float num = random().nextFloat();
-      minValue = Math.min(num, minValue);
-      maxValue = Math.max(num, maxValue);
-      doc.add(new LegacyFloatField("field", num, Field.Store.NO));
-      w.addDocument(doc);
-    }
-    
-    IndexReader r = w.getReader();
-    Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(minValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms)), 0.0f);
-    assertEquals(maxValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms)), 0.0f);
-
-    r.close();
-    w.close();
-    dir.close();
-  }
-
-  public void testDoubleFieldMinMax() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    int numDocs = atLeast(100);
-    double minValue = Double.POSITIVE_INFINITY;
-    double maxValue = Double.NEGATIVE_INFINITY;
-    for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      double num = random().nextDouble();
-      minValue = Math.min(num, minValue);
-      maxValue = Math.max(num, maxValue);
-      doc.add(new LegacyDoubleField("field", num, Field.Store.NO));
-      w.addDocument(doc);
-    }
-    
-    IndexReader r = w.getReader();
-
-    Terms terms = MultiFields.getTerms(r, "field");
-
-    assertEquals(minValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)), 0.0);
-    assertEquals(maxValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)), 0.0);
-
-    r.close();
-    w.close();
-    dir.close();
-  }
-
-  /**
-   * A complete empty Terms instance that has no terms in it and supports no optional statistics
-   */
-  private static Terms EMPTY_TERMS = new Terms() {
-    public TermsEnum iterator() { return TermsEnum.EMPTY; }
-    public long size() { return -1; }
-    public long getSumTotalTermFreq() { return -1; }
-    public long getSumDocFreq() { return -1; }
-    public int getDocCount() { return -1; }
-    public boolean hasFreqs() { return false; }
-    public boolean hasOffsets() { return false; }
-    public boolean hasPositions() { return false; }
-    public boolean hasPayloads() { return false; }
-  };
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestMultiValuedNumericRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestMultiValuedNumericRangeQuery.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestMultiValuedNumericRangeQuery.java
deleted file mode 100644
index 386ec17..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestMultiValuedNumericRangeQuery.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import java.util.Locale;
-import java.text.DecimalFormat;
-import java.text.DecimalFormatSymbols;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermRangeQuery;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
-
-public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
-
-  /** Tests LegacyNumericRangeQuery on a multi-valued field (multiple numeric values per document).
-   * This test ensures that a classical TermRangeQuery returns exactly the same document numbers as
-   * LegacyNumericRangeQuery (see SOLR-1322 for discussion) and that the multiple precision terms per
-   * numeric value do not interfere with multiple numeric values.
-   */
-  public void testMultiValuedNRQ() throws Exception {
-    Directory directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
-    
-    DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.ROOT));
-    
-    int num = atLeast(500);
-    for (int l = 0; l < num; l++) {
-      Document doc = new Document();
-      for (int m=0, c=random().nextInt(10); m<=c; m++) {
-        int value = random().nextInt(Integer.MAX_VALUE);
-        doc.add(newStringField("asc", format.format(value), Field.Store.NO));
-        doc.add(new LegacyIntField("trie", value, Field.Store.NO));
-      }
-      writer.addDocument(doc);
-    }
-    IndexReader reader = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher=newSearcher(reader);
-    num = atLeast(50);
-    for (int i = 0; i < num; i++) {
-      int lower=random().nextInt(Integer.MAX_VALUE);
-      int upper=random().nextInt(Integer.MAX_VALUE);
-      if (lower>upper) {
-        int a=lower; lower=upper; upper=a;
-      }
-      TermRangeQuery cq=TermRangeQuery.newStringRange("asc", format.format(lower), format.format(upper), true, true);
-      LegacyNumericRangeQuery<Integer> tq= LegacyNumericRangeQuery.newIntRange("trie", lower, upper, true, true);
-      TopDocs trTopDocs = searcher.search(cq, 1);
-      TopDocs nrTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
-    }
-    reader.close();
-    directory.close();
-  }
-  
-}


[14/25] lucene-solr:jira/solr-8668: LUCENE-7705: Allow CharTokenizer-derived tokenizers and KeywordTokenizer to configure the max token length

Posted by cp...@apache.org.
LUCENE-7705: Allow CharTokenizer-derived tokenizers and KeywordTokenizer to configure the max token length


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/906679ad
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/906679ad
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/906679ad

Branch: refs/heads/jira/solr-8668
Commit: 906679adc80f0fad1e5c311b03023c7bd95633d7
Parents: bc973ec
Author: Erick Erickson <er...@apache.org>
Authored: Sun May 28 15:18:48 2017 -0700
Committer: Erick Erickson <er...@apache.org>
Committed: Sun May 28 15:18:48 2017 -0700

----------------------------------------------------------------------
 .../lucene/analysis/core/KeywordTokenizer.java  |  10 +-
 .../analysis/core/KeywordTokenizerFactory.java  |  19 ++-
 .../lucene/analysis/core/LetterTokenizer.java   |  14 ++
 .../analysis/core/LetterTokenizerFactory.java   |  19 ++-
 .../analysis/core/LowerCaseTokenizer.java       |  13 ++
 .../core/LowerCaseTokenizerFactory.java         |  37 +++--
 .../core/UnicodeWhitespaceTokenizer.java        |  13 ++
 .../analysis/core/WhitespaceTokenizer.java      |  13 ++
 .../core/WhitespaceTokenizerFactory.java        |  18 ++-
 .../lucene/analysis/util/CharTokenizer.java     |  27 +++-
 .../analysis/core/TestKeywordTokenizer.java     |  88 +++++++++++
 .../core/TestUnicodeWhitespaceTokenizer.java    |  51 +++++++
 .../analysis/util/TestCharTokenizers.java       |  95 ++++++++++++
 solr/CHANGES.txt                                |   3 +
 .../collection1/conf/schema-tokenizer-test.xml  | 150 +++++++++++++++++++
 .../solr/util/TestMaxTokenLenTokenizer.java     | 135 +++++++++++++++++
 16 files changed, 680 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
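To make the new option concrete before the individual diffs, here is a minimal usage sketch
(the factory, tokenizer, and attribute classes are those touched by this commit; the driver
class itself is illustrative and assumes the post-commit API):

    import java.io.StringReader;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.AttributeFactory;

    public class MaxTokenLenDemo {
      public static void main(String[] args) throws Exception {
        Map<String, String> params = new HashMap<>();
        params.put("maxTokenLen", "5");            // the option introduced by this commit
        Tokenizer tok = new WhitespaceTokenizerFactory(params)
            .create(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
        tok.setReader(new StringReader("Tokenizer test"));
        CharTermAttribute term = tok.addAttribute(CharTermAttribute.class);
        tok.reset();
        while (tok.incrementToken()) {
          System.out.println(term.toString());     // prints Token, izer, test
        }
        tok.end();
        tok.close();
      }
    }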


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java
index 209ecee..eb08eea 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizer.java
@@ -24,6 +24,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.util.AttributeFactory;
 
+import static org.apache.lucene.analysis.standard.StandardTokenizer.MAX_TOKEN_LENGTH_LIMIT;
+
 /**
  * Emits the entire input as a single token.
  */
@@ -41,16 +43,16 @@ public final class KeywordTokenizer extends Tokenizer {
   }
 
   public KeywordTokenizer(int bufferSize) {
-    if (bufferSize <= 0) {
-      throw new IllegalArgumentException("bufferSize must be > 0");
+    if (bufferSize > MAX_TOKEN_LENGTH_LIMIT || bufferSize <= 0) {
+      throw new IllegalArgumentException("maxTokenLen must be greater than 0 and less than " + MAX_TOKEN_LENGTH_LIMIT + " passed: " + bufferSize);
     }
     termAtt.resizeBuffer(bufferSize);
   }
 
   public KeywordTokenizer(AttributeFactory factory, int bufferSize) {
     super(factory);
-    if (bufferSize <= 0) {
-      throw new IllegalArgumentException("bufferSize must be > 0");
+    if (bufferSize > MAX_TOKEN_LENGTH_LIMIT || bufferSize <= 0) {
+      throw new IllegalArgumentException("maxTokenLen must be greater than 0 and less than " + MAX_TOKEN_LENGTH_LIMIT + " passed: " + bufferSize);
     }
     termAtt.resizeBuffer(bufferSize);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java
index 3654f67..86f65d6 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java
@@ -16,26 +16,39 @@
  */
 package org.apache.lucene.analysis.core;
 
-
 import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.util.AttributeFactory;
 
 import java.util.Map;
 
+import static org.apache.lucene.analysis.standard.StandardTokenizer.MAX_TOKEN_LENGTH_LIMIT;
+
 /**
  * Factory for {@link KeywordTokenizer}. 
  * <pre class="prettyprint">
  * &lt;fieldType name="text_keyword" class="solr.TextField" positionIncrementGap="100"&gt;
  *   &lt;analyzer&gt;
- *     &lt;tokenizer class="solr.KeywordTokenizerFactory"/&gt;
+ *     &lt;tokenizer class="solr.KeywordTokenizerFactory" maxTokenLen="256"/&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre> 
+ *
+ * Options:
+ * <ul>
+ *   <li>maxTokenLen: max token length, should be greater than 0 and less than 
+ *        MAX_TOKEN_LENGTH_LIMIT (1024*1024). It is rare to need to change this
+ *      else {@link KeywordTokenizer}::DEFAULT_BUFFER_SIZE</li>
+ * </ul>
  */
 public class KeywordTokenizerFactory extends TokenizerFactory {
+  private final int maxTokenLen;
   
   /** Creates a new KeywordTokenizerFactory */
   public KeywordTokenizerFactory(Map<String,String> args) {
     super(args);
+    maxTokenLen = getInt(args, "maxTokenLen", KeywordTokenizer.DEFAULT_BUFFER_SIZE);
+    if (maxTokenLen > MAX_TOKEN_LENGTH_LIMIT || maxTokenLen <= 0) {
+      throw new IllegalArgumentException("maxTokenLen must be greater than 0 and less than " + MAX_TOKEN_LENGTH_LIMIT + " passed: " + maxTokenLen);
+    }
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -43,6 +56,6 @@ public class KeywordTokenizerFactory extends TokenizerFactory {
   
   @Override
   public KeywordTokenizer create(AttributeFactory factory) {
-    return new KeywordTokenizer(factory, KeywordTokenizer.DEFAULT_BUFFER_SIZE);
+    return new KeywordTokenizer(factory, maxTokenLen);
   }
 }
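Note that the semantics of maxTokenLen differ per tokenizer. For KeywordTokenizer the value
only seeds the initial term buffer, which still grows as needed, so the single emitted token
is never truncated; this matches the TestKeywordTokenizer expectations further down. A short
sketch, reusing the illustrative driver setup from above:

    Map<String, String> kwArgs = new HashMap<>();
    kwArgs.put("maxTokenLen", "5");
    Tokenizer kw = new KeywordTokenizerFactory(kwArgs)
        .create(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
    // feeding "Tokenizertest" still yields the single token "Tokenizertest";
    // the buffer simply starts at 5 chars and is resized as input arrives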

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java
index df41b37..8fb7d0e 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java
@@ -50,6 +50,20 @@ public class LetterTokenizer extends CharTokenizer {
     super(factory);
   }
   
+  /**
+   * Construct a new LetterTokenizer using a given
+   * {@link org.apache.lucene.util.AttributeFactory}.
+   *
+   * @param factory the attribute factory to use for this {@link Tokenizer}
+   * @param maxTokenLen maximum token length the tokenizer will emit. 
+   *        Must be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024)
+   * @throws IllegalArgumentException if maxTokenLen is invalid.
+   */
+  public LetterTokenizer(AttributeFactory factory, int maxTokenLen) {
+    super(factory, maxTokenLen);
+  }
+
   /** Collects only characters which satisfy
    * {@link Character#isLetter(int)}.*/
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java
index 828d6cf..41ada68 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java
@@ -17,25 +17,40 @@
 package org.apache.lucene.analysis.core;
 
 
+import org.apache.lucene.analysis.util.CharTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.util.AttributeFactory;
 
 import java.util.Map;
 
+import static org.apache.lucene.analysis.standard.StandardTokenizer.MAX_TOKEN_LENGTH_LIMIT;
+
 /**
  * Factory for {@link LetterTokenizer}. 
  * <pre class="prettyprint">
  * &lt;fieldType name="text_letter" class="solr.TextField" positionIncrementGap="100"&gt;
  *   &lt;analyzer&gt;
- *     &lt;tokenizer class="solr.LetterTokenizerFactory"/&gt;
+ *     &lt;tokenizer class="solr.LetterTokenizerFactory" maxTokenLen="256"/&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
+ *
+ * Options:
+ * <ul>
+ *   <li>maxTokenLen: max token length, must be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024).
+ *       It is rare to need to change this
+ *      else {@link CharTokenizer}::DEFAULT_MAX_TOKEN_LEN</li>
+ * </ul>
  */
 public class LetterTokenizerFactory extends TokenizerFactory {
+  private final int maxTokenLen;
 
   /** Creates a new LetterTokenizerFactory */
   public LetterTokenizerFactory(Map<String,String> args) {
     super(args);
+    maxTokenLen = getInt(args, "maxTokenLen", CharTokenizer.DEFAULT_MAX_WORD_LEN);
+    if (maxTokenLen > MAX_TOKEN_LENGTH_LIMIT || maxTokenLen <= 0) {
+      throw new IllegalArgumentException("maxTokenLen must be greater than 0 and less than " + MAX_TOKEN_LENGTH_LIMIT + " passed: " + maxTokenLen);
+    }
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -43,6 +58,6 @@ public class LetterTokenizerFactory extends TokenizerFactory {
 
   @Override
   public LetterTokenizer create(AttributeFactory factory) {
-    return new LetterTokenizer(factory);
+    return new LetterTokenizer(factory, maxTokenLen);
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizer.java
index 982d356..26b8747 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizer.java
@@ -50,6 +50,19 @@ public final class LowerCaseTokenizer extends LetterTokenizer {
     super(factory);
   }
   
+  /**
+   * Construct a new LowerCaseTokenizer using a given
+   * {@link org.apache.lucene.util.AttributeFactory}.
+   *
+   * @param factory the attribute factory to use for this {@link Tokenizer}
+   * @param maxTokenLen maximum token length the tokenizer will emit. 
+   *        Must be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024)
+   * @throws IllegalArgumentException if maxTokenLen is invalid.
+   */
+  public LowerCaseTokenizer(AttributeFactory factory, int maxTokenLen) {
+    super(factory, maxTokenLen);
+  }
+  
   /** Converts char to lower case
    * {@link Character#toLowerCase(int)}.*/
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java
index 3e29161..a3e06c7 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java
@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.core;
 
 
 import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharTokenizer;
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.util.AttributeFactory;
@@ -25,20 +26,36 @@ import org.apache.lucene.util.AttributeFactory;
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.apache.lucene.analysis.standard.StandardTokenizer.MAX_TOKEN_LENGTH_LIMIT;
+
 /**
- * Factory for {@link LowerCaseTokenizer}. 
+ * Factory for {@link LowerCaseTokenizer}.
  * <pre class="prettyprint">
  * &lt;fieldType name="text_lwrcase" class="solr.TextField" positionIncrementGap="100"&gt;
- *   &lt;analyzer&gt;
- *     &lt;tokenizer class="solr.LowerCaseTokenizerFactory"/&gt;
- *   &lt;/analyzer&gt;
+ * &lt;analyzer&gt;
+ * &lt;tokenizer class="solr.LowerCaseTokenizerFactory" maxTokenLen="256"/&gt;
+ * &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
+ * <p>
+ * Options:
+ * <ul>
+ * <li>maxTokenLen: max token length, should be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024).
+ *     It is rare to need to change this
+ * else {@link CharTokenizer}::DEFAULT_MAX_WORD_LEN</li>
+ * </ul>
  */
 public class LowerCaseTokenizerFactory extends TokenizerFactory implements MultiTermAwareComponent {
-  
-  /** Creates a new LowerCaseTokenizerFactory */
-  public LowerCaseTokenizerFactory(Map<String,String> args) {
+  private final int maxTokenLen;
+
+  /**
+   * Creates a new LowerCaseTokenizerFactory
+   */
+  public LowerCaseTokenizerFactory(Map<String, String> args) {
     super(args);
+    maxTokenLen = getInt(args, "maxTokenLen", CharTokenizer.DEFAULT_MAX_WORD_LEN);
+    if (maxTokenLen > MAX_TOKEN_LENGTH_LIMIT || maxTokenLen <= 0) {
+      throw new IllegalArgumentException("maxTokenLen must be greater than 0 and less than " + MAX_TOKEN_LENGTH_LIMIT + " passed: " + maxTokenLen);
+    }
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -46,11 +63,13 @@ public class LowerCaseTokenizerFactory extends TokenizerFactory implements Multi
 
   @Override
   public LowerCaseTokenizer create(AttributeFactory factory) {
-    return new LowerCaseTokenizer(factory);
+    return new LowerCaseTokenizer(factory, maxTokenLen);
   }
 
   @Override
   public AbstractAnalysisFactory getMultiTermComponent() {
-    return new LowerCaseFilterFactory(new HashMap<>(getOriginalArgs()));
+    Map<String, String> map = new HashMap<>(getOriginalArgs());
+    map.remove("maxTokenLen"); // drop the "maxTokenLen" argument before delegating to LowerCaseFilterFactory
+    return new LowerCaseFilterFactory(map);
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UnicodeWhitespaceTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UnicodeWhitespaceTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UnicodeWhitespaceTokenizer.java
index 5e4313f..00c181f 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UnicodeWhitespaceTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UnicodeWhitespaceTokenizer.java
@@ -47,6 +47,19 @@ public final class UnicodeWhitespaceTokenizer extends CharTokenizer {
   public UnicodeWhitespaceTokenizer(AttributeFactory factory) {
     super(factory);
   }
+
+  /**
+   * Construct a new UnicodeWhitespaceTokenizer using a given
+   * {@link org.apache.lucene.util.AttributeFactory}.
+   *
+   * @param factory the attribute factory to use for this {@link Tokenizer}
+   * @param maxTokenLen maximum token length the tokenizer will emit. 
+   *        Must be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024)
+   * @throws IllegalArgumentException if maxTokenLen is invalid.
+   */
+  public UnicodeWhitespaceTokenizer(AttributeFactory factory, int maxTokenLen) {
+    super(factory, maxTokenLen);
+  }
   
   /** Collects only characters which do not satisfy Unicode's WHITESPACE property. */
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizer.java
index 70f2d62..0655227 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizer.java
@@ -46,6 +46,19 @@ public final class WhitespaceTokenizer extends CharTokenizer {
   public WhitespaceTokenizer(AttributeFactory factory) {
     super(factory);
   }
+
+  /**
+   * Construct a new WhitespaceTokenizer using a given
+   * {@link org.apache.lucene.util.AttributeFactory}.
+   *
+   * @param factory the attribute factory to use for this {@link Tokenizer}
+   * @param maxTokenLen maximum token length the tokenizer will emit. 
+   *        Must be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024)
+   * @throws IllegalArgumentException if maxTokenLen is invalid.
+   */
+  public WhitespaceTokenizer(AttributeFactory factory, int maxTokenLen) {
+    super(factory, maxTokenLen);
+  }
   
   /** Collects only characters which do not satisfy
    * {@link Character#isWhitespace(int)}.*/

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java
index fd38b63..29e9ed5 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java
@@ -22,15 +22,18 @@ import java.util.Collection;
 import java.util.Map;
 
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.util.CharTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.util.AttributeFactory;
 
+import static org.apache.lucene.analysis.standard.StandardTokenizer.MAX_TOKEN_LENGTH_LIMIT;
+
 /**
  * Factory for {@link WhitespaceTokenizer}. 
  * <pre class="prettyprint">
  * &lt;fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100"&gt;
  *   &lt;analyzer&gt;
- *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory" rule="unicode"/&gt;
+ *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory" rule="unicode"  maxTokenLen="256"/&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
  *
@@ -38,6 +41,9 @@ import org.apache.lucene.util.AttributeFactory;
  * <ul>
  *   <li>rule: either "java" for {@link WhitespaceTokenizer}
  *      or "unicode" for {@link UnicodeWhitespaceTokenizer}</li>
+ *   <li>maxTokenLen: max token length, should be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024).
+ *       It is rare to need to change this
+ *      else {@link CharTokenizer}::DEFAULT_MAX_TOKEN_LEN</li>
  * </ul>
  */
 public class WhitespaceTokenizerFactory extends TokenizerFactory {
@@ -46,13 +52,17 @@ public class WhitespaceTokenizerFactory extends TokenizerFactory {
   private static final Collection<String> RULE_NAMES = Arrays.asList(RULE_JAVA, RULE_UNICODE);
 
   private final String rule;
+  private final int maxTokenLen;
 
   /** Creates a new WhitespaceTokenizerFactory */
   public WhitespaceTokenizerFactory(Map<String,String> args) {
     super(args);
 
     rule = get(args, "rule", RULE_NAMES, RULE_JAVA);
-
+    maxTokenLen = getInt(args, "maxTokenLen", CharTokenizer.DEFAULT_MAX_WORD_LEN);
+    if (maxTokenLen > MAX_TOKEN_LENGTH_LIMIT || maxTokenLen <= 0) {
+      throw new IllegalArgumentException("maxTokenLen must be greater than 0 and less than " + MAX_TOKEN_LENGTH_LIMIT + " passed: " + maxTokenLen);
+    }
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -62,9 +72,9 @@ public class WhitespaceTokenizerFactory extends TokenizerFactory {
   public Tokenizer create(AttributeFactory factory) {
     switch (rule) {
       case RULE_JAVA:
-        return new WhitespaceTokenizer(factory);
+        return new WhitespaceTokenizer(factory, maxTokenLen);
       case RULE_UNICODE:
-        return new UnicodeWhitespaceTokenizer(factory);
+        return new UnicodeWhitespaceTokenizer(factory, maxTokenLen);
       default:
         throw new AssertionError();
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
index 13289be..ff9d6ff 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
@@ -33,6 +33,8 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.util.AttributeFactory;
 
+import static org.apache.lucene.analysis.standard.StandardTokenizer.MAX_TOKEN_LENGTH_LIMIT;
+
 /**
  * An abstract base class for simple, character-oriented tokenizers.
  * <p>
@@ -50,6 +52,7 @@ public abstract class CharTokenizer extends Tokenizer {
    * Creates a new {@link CharTokenizer} instance
    */
   public CharTokenizer() {
+    this.maxTokenLen = DEFAULT_MAX_WORD_LEN;
   }
   
   /**
@@ -60,6 +63,23 @@ public abstract class CharTokenizer extends Tokenizer {
    */
   public CharTokenizer(AttributeFactory factory) {
     super(factory);
+    this.maxTokenLen = DEFAULT_MAX_WORD_LEN;
+  }
+  
+  /**
+   * Creates a new {@link CharTokenizer} instance
+   *
+   * @param factory the attribute factory to use for this {@link Tokenizer}
+   * @param maxTokenLen maximum token length the tokenizer will emit. 
+   *        Must be greater than 0 and less than MAX_TOKEN_LENGTH_LIMIT (1024*1024)
+   * @throws IllegalArgumentException if maxTokenLen is invalid.
+   */
+  public CharTokenizer(AttributeFactory factory, int maxTokenLen) {
+    super(factory);
+    if (maxTokenLen > MAX_TOKEN_LENGTH_LIMIT || maxTokenLen <= 0) {
+      throw new IllegalArgumentException("maxTokenLen must be greater than 0 and less than " + MAX_TOKEN_LENGTH_LIMIT + " passed: " + maxTokenLen);
+    }
+    this.maxTokenLen = maxTokenLen;
   }
   
   /**
@@ -193,9 +213,10 @@ public abstract class CharTokenizer extends Tokenizer {
   }
   
   private int offset = 0, bufferIndex = 0, dataLen = 0, finalOffset = 0;
-  private static final int MAX_WORD_LEN = 255;
+  public static final int DEFAULT_MAX_WORD_LEN = 255;
   private static final int IO_BUFFER_SIZE = 4096;
-  
+  private final int maxTokenLen;
+
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   
@@ -256,7 +277,7 @@ public abstract class CharTokenizer extends Tokenizer {
         }
         end += charCount;
         length += Character.toChars(normalize(c), buffer, length); // buffer it, normalized
-        if (length >= MAX_WORD_LEN) { // buffer overflow! make sure to check for >= surrogate pair could break == test
+        if (length >= maxTokenLen) { // buffer overflow! make sure to check for >= surrogate pair could break == test
           break;
         }
       } else if (length > 0) {           // at non-Letter w/ chars
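The ">= surrogate pair could break == test" comment above is terse; the point is that a
supplementary code point appends two chars to the buffer in one step, so the length can jump
over an exact boundary. A tiny illustrative check (plain JDK, not part of the patch):

    // U+1041C is the \ud801\udc1c character used in the tests below
    char[] units = Character.toChars(0x1041C);
    System.out.println(units.length);  // 2: one code point occupies two buffer slots
    // with maxTokenLen = 5 and length = 4, appending this pair makes length 6,
    // so an == comparison would miss the overflow while >= still catches it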

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordTokenizer.java
new file mode 100644
index 0000000..3f03a00
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordTokenizer.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.core;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.util.AttributeFactory;
+
+public class TestKeywordTokenizer extends BaseTokenStreamTestCase {
+
+  public void testSimple() throws IOException {
+    StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
+    KeywordTokenizer tokenizer = new KeywordTokenizer();
+    tokenizer.setReader(reader);
+    assertTokenStreamContents(tokenizer, new String[]{"Tokenizer \ud801\udc1ctest"});
+  }
+
+  public void testFactory() {
+    Map<String, String> args = new HashMap<>();
+    KeywordTokenizerFactory factory = new KeywordTokenizerFactory(args);
+    AttributeFactory attributeFactory = newAttributeFactory();
+    Tokenizer tokenizer = factory.create(attributeFactory);
+    assertEquals(KeywordTokenizer.class, tokenizer.getClass());
+  }
+
+  private Map<String, String> makeArgs(String... args) {
+    Map<String, String> ret = new HashMap<>();
+    for (int idx = 0; idx < args.length; idx += 2) {
+      ret.put(args[idx], args[idx + 1]);
+    }
+    return ret;
+  }
+
+  public void testParamsFactory() throws IOException {
+    // negative maxTokenLen
+    IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
+        new KeywordTokenizerFactory(makeArgs("maxTokenLen", "-1")));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: -1", iae.getMessage());
+
+    // zero maxTokenLen
+    iae = expectThrows(IllegalArgumentException.class, () ->
+        new KeywordTokenizerFactory(makeArgs("maxTokenLen", "0")));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 0", iae.getMessage());
+
+    // Added random param, should throw illegal error
+    iae = expectThrows(IllegalArgumentException.class, () ->
+        new KeywordTokenizerFactory(makeArgs("maxTokenLen", "255", "randomParam", "rValue")));
+    assertEquals("Unknown parameters: {randomParam=rValue}", iae.getMessage());
+
+    // the tokenizer will never split, no matter what value is passed;
+    // the buffer will simply be no larger than the length of the token
+
+    KeywordTokenizerFactory factory = new KeywordTokenizerFactory(makeArgs("maxTokenLen", "5"));
+    AttributeFactory attributeFactory = newAttributeFactory();
+    Tokenizer tokenizer = factory.create(attributeFactory);
+    StringReader reader = new StringReader("Tokenizertest");
+    tokenizer.setReader(reader);
+    assertTokenStreamContents(tokenizer, new String[]{"Tokenizertest"});
+
+    // the tokenizer will never split, no matter what value is passed;
+    // the buffer will simply be no larger than the length of the token
+    factory = new KeywordTokenizerFactory(makeArgs("maxTokenLen", "2"));
+    attributeFactory = newAttributeFactory();
+    tokenizer = factory.create(attributeFactory);
+    reader = new StringReader("Tokenizer\u00A0test");
+    tokenizer.setReader(reader);
+    assertTokenStreamContents(tokenizer, new String[]{"Tokenizer\u00A0test"});
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestUnicodeWhitespaceTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestUnicodeWhitespaceTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestUnicodeWhitespaceTokenizer.java
index acdb670..16089e9 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestUnicodeWhitespaceTokenizer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestUnicodeWhitespaceTokenizer.java
@@ -54,4 +54,55 @@ public class TestUnicodeWhitespaceTokenizer extends BaseTokenStreamTestCase {
     assertEquals(UnicodeWhitespaceTokenizer.class, tokenizer.getClass());
   }
 
+  private Map<String, String> makeArgs(String... args) {
+    Map<String, String> ret = new HashMap<>();
+    for (int idx = 0; idx < args.length; idx += 2) {
+      ret.put(args[idx], args[idx + 1]);
+    }
+    return ret;
+  }
+
+  public void testParamsFactory() throws IOException {
+    // negative maxTokenLen
+    IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
+        new WhitespaceTokenizerFactory(makeArgs("rule", "unicode", "maxTokenLen", "-1")));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: -1", iae.getMessage());
+
+    // zero maxTokenLen
+    iae = expectThrows(IllegalArgumentException.class, () ->
+        new WhitespaceTokenizerFactory(makeArgs("rule", "unicode", "maxTokenLen", "0")));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 0", iae.getMessage());
+
+    // an unknown parameter should throw an IllegalArgumentException
+    iae = expectThrows(IllegalArgumentException.class, () ->
+        new WhitespaceTokenizerFactory(makeArgs("rule", "unicode", "maxTokenLen", "255", "randomParam", "rValue")));
+    assertEquals("Unknown parameters: {randomParam=rValue}", iae.getMessage());
+
+    // the tokenizer will split at 5 chars: Token | izer, regardless of content
+    WhitespaceTokenizerFactory factory = new WhitespaceTokenizerFactory(makeArgs("rule", "unicode", "maxTokenLen", "5"));
+    AttributeFactory attributeFactory = newAttributeFactory();
+    Tokenizer tokenizer = factory.create(attributeFactory);
+    StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
+    tokenizer.setReader(reader);
+    assertTokenStreamContents(tokenizer, new String[]{"Token", "izer", "\ud801\udc1ctes", "t"});
+
+    // the tokenizer will split at 2 chars: To | ke | ni | ze | r, regardless of content
+    factory = new WhitespaceTokenizerFactory(makeArgs("rule", "unicode", "maxTokenLen", "2"));
+    attributeFactory = newAttributeFactory();
+    tokenizer = factory.create(attributeFactory);
+    reader = new StringReader("Tokenizer\u00A0test");
+    tokenizer.setReader(reader);
+    assertTokenStreamContents(tokenizer, new String[]{"To", "ke", "ni", "ze", "r", "te", "st"});
+
+    // the tokenizer would split at 10 chars, regardless of content,
+    // but both tokens here are shorter than that
+    factory = new WhitespaceTokenizerFactory(makeArgs("rule", "unicode", "maxTokenLen", "10"));
+    attributeFactory = newAttributeFactory();
+    tokenizer = factory.create(attributeFactory);
+    reader = new StringReader("Tokenizer\u00A0test");
+    tokenizer.setReader(reader);
+    assertTokenStreamContents(tokenizer, new String[]{"Tokenizer", "test"});
+  }
 }
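
A rough mental model for the chunking these tests rely on: a tokenizer
created with maxTokenLen=n never truncates an over-long token, it emits it
as consecutive n-char chunks. The sketch below is a hypothetical helper,
not part of the patch, and it sidesteps surrogate pairs for simplicity
(plain ASCII input only):

  import java.util.ArrayList;
  import java.util.List;

  public class ChunkingModel {
    // Split one over-long token into maxTokenLen-sized pieces, the way
    // the maxTokenLen-limited whitespace tokenizer does for ASCII input.
    static List<String> chunks(String token, int maxTokenLen) {
      List<String> out = new ArrayList<>();
      for (int i = 0; i < token.length(); i += maxTokenLen) {
        out.add(token.substring(i, Math.min(token.length(), i + maxTokenLen)));
      }
      return out;
    }

    public static void main(String[] args) {
      System.out.println(chunks("Tokenizer", 5));   // [Token, izer]
      System.out.println(chunks("Tokenizer", 2));   // [To, ke, ni, ze, r]
      System.out.println(chunks("Tokenizer", 10));  // [Tokenizer]
    }
  }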

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
index 783fc3e..4596608 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
@@ -25,8 +25,10 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.LetterTokenizer;
 import org.apache.lucene.analysis.core.LowerCaseTokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.util.TestUtil;
 
@@ -89,6 +91,99 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
     tokenizer.setReader(new StringReader(builder.toString() + builder.toString()));
     assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(Locale.ROOT), builder.toString().toLowerCase(Locale.ROOT)});
   }
+
+  /*
+   * tests the max token length passed as a parameter - the tokenizer splits after that many chars, regardless of content
+   */
+  public void testCustomMaxTokenLength() throws IOException {
+
+    StringBuilder builder = new StringBuilder();
+    for (int i = 0; i < 100; i++) {
+      builder.append("A");
+    }
+    Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory(), 100);
+    // Tricky, passing two copies of the string to the reader....
+    tokenizer.setReader(new StringReader(builder.toString() + builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[]{builder.toString().toLowerCase(Locale.ROOT), 
+        builder.toString().toLowerCase(Locale.ROOT) });
+
+    Exception e = expectThrows(IllegalArgumentException.class, () ->
+        new LowerCaseTokenizer(newAttributeFactory(), -1));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: -1", e.getMessage());
+
+    tokenizer = new LetterTokenizer(newAttributeFactory(), 100);
+    tokenizer.setReader(new StringReader(builder.toString() + builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[]{builder.toString(), builder.toString()});
+
+
+    // Let's test that we can get a token longer than 255 through.
+    builder.setLength(0);
+    for (int i = 0; i < 500; i++) {
+      builder.append("Z");
+    }
+    tokenizer = new LetterTokenizer(newAttributeFactory(), 500);
+    tokenizer.setReader(new StringReader(builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[]{builder.toString()});
+
+
+    // A maxTokenLen of zero makes no sense and must be rejected.
+    // Also try the edge case of a token longer than the I/O buffer (4096 chars).
+    builder.setLength(0);
+    for (int i = 0; i < 600; i++) {
+      builder.append("aUrOkIjq"); // 600 * 8 = 4800 chars.
+    }
+
+    e = expectThrows(IllegalArgumentException.class, () ->
+        new LowerCaseTokenizer(newAttributeFactory(), 0));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 0", e.getMessage());
+
+    e = expectThrows(IllegalArgumentException.class, () ->
+        new LowerCaseTokenizer(newAttributeFactory(), 10_000_000));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 10000000", e.getMessage());
+
+    tokenizer = new LowerCaseTokenizer(newAttributeFactory(), 4800);
+    tokenizer.setReader(new StringReader(builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[]{builder.toString().toLowerCase(Locale.ROOT)});
+
+
+    e = expectThrows(IllegalArgumentException.class, () ->
+        new KeywordTokenizer(newAttributeFactory(), 0));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 0", e.getMessage());
+
+    e = expectThrows(IllegalArgumentException.class, () ->
+        new KeywordTokenizer(newAttributeFactory(), 10_000_000));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 10000000", e.getMessage());
+
+
+    tokenizer = new KeywordTokenizer(newAttributeFactory(), 4800);
+    tokenizer.setReader(new StringReader(builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[]{builder.toString()});
+
+    e = expectThrows(IllegalArgumentException.class, () ->
+        new LetterTokenizer(newAttributeFactory(), 0));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 0", e.getMessage());
+
+    e = expectThrows(IllegalArgumentException.class, () ->
+        new LetterTokenizer(newAttributeFactory(), 2_000_000));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 2000000", e.getMessage());
+
+    tokenizer = new LetterTokenizer(newAttributeFactory(), 4800);
+    tokenizer.setReader(new StringReader(builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[]{builder.toString()});
+
+    e = expectThrows(IllegalArgumentException.class, () ->
+        new WhitespaceTokenizer(newAttributeFactory(), 0));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 0", e.getMessage());
+
+    e = expectThrows(IllegalArgumentException.class, () ->
+        new WhitespaceTokenizer(newAttributeFactory(), 3_000_000));
+    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 3000000", e.getMessage());
+
+    tokenizer = new WhitespaceTokenizer(newAttributeFactory(), 4800);
+    tokenizer.setReader(new StringReader(builder.toString()));
+    assertTokenStreamContents(tokenizer, new String[]{builder.toString()});
+
+  }
   
   /*
    * tests the max word length of 255 with a surrogate pair at position 255

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index d4e6eac..c413cf8 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -260,6 +260,9 @@ Other Changes
   
 * SOLR-10438: Assign explicit useDocValuesAsStored values to all points field types in 
   schema-point.xml/TestPointFields. (hossman, Steve Rowe)
+  
+* LUCENE-7705: Allow CharTokenizer-derived tokenizers and KeywordTokenizer to configure the max token length.
+  (Amrit Sarkar via Erick Erickson)
 
 * SOLR-10659: Remove ResponseBuilder.getSortSpec use in SearchGroupShardResponseProcessor.
   (Judith Silverman via Christine Poerschke)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/solr/core/src/test-files/solr/collection1/conf/schema-tokenizer-test.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-tokenizer-test.xml b/solr/core/src/test-files/solr/collection1/conf/schema-tokenizer-test.xml
new file mode 100644
index 0000000..f3d3196
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-tokenizer-test.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0" ?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- A Solr schema used for testing the maxTokenLen attribute of the
+letter, lower-case, whitespace and keyword tokenizer factories.
+
+Unlike the full example schema (see example/solr/conf/schema.xml),
+this one is deliberately minimal.
+-->
+
+<schema name="test" version="1.0">
+
+  <!-- field type definitions... note that the "name" attribute is
+  just a label to be used by field definitions.  The "class"
+  attribute and any other attributes determine the real type and
+  behavior of the fieldType.
+  -->
+
+  <!--
+  Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
+  -->
+  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
+
+
+  <!-- Separate analyzers for index and query time -->
+
+  <fieldType name="letterfieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer type="index">
+      <tokenizer class="solr.LetterTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.StandardTokenizerFactory"/>
+    </analyzer>
+  </fieldType>
+
+  <fieldType name="lowerCasefieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer type="index">
+      <tokenizer class="solr.LowerCaseTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.StandardTokenizerFactory"/>
+    </analyzer>
+  </fieldType>
+
+  <fieldType name="whiteSpfieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer type="index">
+      <tokenizer class="solr.WhitespaceTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.StandardTokenizerFactory"/>
+    </analyzer>
+  </fieldType>
+
+  <fieldType name="uniWhiteSpfieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer type="index">
+      <tokenizer class="solr.WhitespaceTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.StandardTokenizerFactory"/>
+    </analyzer>
+  </fieldType>
+
+  <fieldType name="keywordfieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer type="index">
+      <tokenizer class="solr.KeywordTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.StandardTokenizerFactory"/>
+    </analyzer>
+  </fieldType>
+
+  <!-- Same analyzers for both index and query time -->
+
+  <fieldType name="letter0fieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer>
+      <tokenizer class="solr.LetterTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+  </fieldType>
+
+  <fieldType name="lowerCase0fieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer>
+      <tokenizer class="solr.LowerCaseTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+  </fieldType>
+
+  <fieldType name="whiteSp0fieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer>
+      <tokenizer class="solr.WhitespaceTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+  </fieldType>
+
+  <fieldType name="uniWhiteSp0fieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer>
+      <tokenizer class="solr.WhitespaceTokenizerFactory" maxTokenLen="3" />
+    </analyzer>
+  </fieldType>
+
+  <fieldType name="keyword0fieldType" class="solr.TextField" positionIncrementGap="100">
+    <analyzer>
+      <tokenizer class="solr.KeywordTokenizerFactory"  maxTokenLen="3" />
+    </analyzer>
+  </fieldType>
+
+  <field name="id" type="int" indexed="true" stored="true" multiValued="false" required="true"/>
+
+  <field name="letter" type="letterfieldType" indexed="true" stored="true"/>
+  <field name="lowerCase" type="lowerCasefieldType" indexed="true" stored="true"/>
+  <field name="whiteSpace" type="whiteSpfieldType" indexed="true" stored="true"/>
+  <field name="unicodeWhiteSpace" type="uniWhiteSpfieldType" indexed="true" stored="true"/>
+  <field name="keyword" type="keywordfieldType" indexed="true" stored="true"/>
+
+  <field name="letter0" type="letter0fieldType" indexed="true" stored="true"/>
+  <field name="lowerCase0" type="lowerCase0fieldType" indexed="true" stored="true"/>
+  <field name="whiteSpace0" type="whiteSp0fieldType" indexed="true" stored="true"/>
+  <field name="unicodeWhiteSpace0" type="uniWhiteSp0fieldType" indexed="true" stored="true"/>
+  <field name="keyword0" type="keyword0fieldType" indexed="true" stored="true"/>
+
+  <field name="_version_" type="long" indexed="true" stored="true" multiValued="false"/>
+
+
+  <uniqueKey>id</uniqueKey>
+
+
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/906679ad/solr/core/src/test/org/apache/solr/util/TestMaxTokenLenTokenizer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/util/TestMaxTokenLenTokenizer.java b/solr/core/src/test/org/apache/solr/util/TestMaxTokenLenTokenizer.java
new file mode 100644
index 0000000..c7e0dc3
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/util/TestMaxTokenLenTokenizer.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.util;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.junit.BeforeClass;
+
+/**
+ * Tests for:
+ * {@link org.apache.lucene.analysis.core.LowerCaseTokenizerFactory}
+ * {@link org.apache.lucene.analysis.core.LetterTokenizerFactory}
+ * {@link org.apache.lucene.analysis.core.KeywordTokenizerFactory}
+ * {@link org.apache.lucene.analysis.core.WhitespaceTokenizerFactory}
+ */
+public class TestMaxTokenLenTokenizer extends SolrTestCaseJ4 {
+  /* field names are used in accordance with the solrconfig and schema supplied */
+  private static final String ID = "id";
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig-update-processor-chains.xml", "schema-tokenizer-test.xml");
+  }
+
+  public void testSingleFieldDiffAnalyzers() throws Exception {
+
+    clearIndex();
+
+    // these fields use the maxTokenLen-limited tokenizer factories at index time and StandardTokenizer at query time.
+
+    updateJ("{\"add\":{\"doc\": {\"id\":1,\"letter\":\"letter\"}},\"commit\":{}}",null);
+    updateJ("{\"add\":{\"doc\": {\"id\":2,\"lowerCase\":\"lowerCase\"}},\"commit\":{}}",null);
+    updateJ("{\"add\":{\"doc\": {\"id\":3,\"whiteSpace\":\"whiteSpace in\"}},\"commit\":{}}",null);
+    updateJ("{\"add\":{\"doc\": {\"id\":4,\"unicodeWhiteSpace\":\"unicode in\"}},\"commit\":{}}",null);
+    updateJ("{\"add\":{\"doc\": {\"id\":5,\"keyword\":\"keyword\"}},\"commit\":{}}",null);
+
+    assertU(commit());
+
+    assertQ("Check the total number of docs", req("q","*:*"), "//result[@numFound=5]");
+
+    // Tokens generated for "letter": "let" "ter" "letter", maxTokenLen=3
+    assertQ("letter:let should match an indexed chunk", req("q","letter:let"), "//result[@numFound=1]");
+    assertQ("letter:lett should not match; the query side is not chunked", req("q","letter:lett"), "//result[@numFound=0]");
+
+    // Tokens generated for "lowerCase": "low" "erC" "ase" "lowerCase", maxTokenLen=3
+    assertQ("lowerCase:low should match", req("q","lowerCase:low"), "//result[@numFound=1]");
+    assertQ("lowerCase:l should not match", req("q","lowerCase:l"), "//result[@numFound=0]");
+    assertQ("lowerCase:lo should not match", req("q","lowerCase:lo"), "//result[@numFound=0]");
+    assertQ("lowerCase:lower should not match", req("q","lowerCase:lower"), "//result[@numFound=0]");
+
+    // Tokens generated for "whiteSpace in": "whi" "teS" "pac" "e" "in" "whiteSpace", maxTokenLen=3
+    assertQ("whiteSpace:whi should match", req("q","whiteSpace:whi"), "//result[@numFound=1]");
+    assertQ("whiteSpace:teS should match", req("q","whiteSpace:teS"), "//result[@numFound=1]");
+    assertQ("whiteSpace:in should match", req("q","whiteSpace:in"), "//result[@numFound=1]");
+    assertQ("whiteSpace:white should not match", req("q","whiteSpace:white"), "//result[@numFound=0]");
+
+    // Tokens generated for "unicode in": "uni" "cod" "e" "in" "unicode", maxTokenLen=3
+    assertQ("unicodeWhiteSpace:uni should match", req("q","unicodeWhiteSpace:uni"), "//result[@numFound=1]");
+    assertQ("unicodeWhiteSpace:cod should match", req("q","unicodeWhiteSpace:cod"), "//result[@numFound=1]");
+    assertQ("unicodeWhiteSpace:e should match", req("q","unicodeWhiteSpace:e"), "//result[@numFound=1]");
+    assertQ("unicodeWhiteSpace:unico should not match", req("q","unicodeWhiteSpace:unico"), "//result[@numFound=0]");
+
+    // Tokens generated for "keyword": "keyword", maxTokenLen=3
+    assertQ("keyword:keyword should match", req("q","keyword:keyword"), "//result[@numFound=1]");
+    assertQ("keyword:key should not match", req("q","keyword:key"), "//result[@numFound=0]");
+
+  }
+
+  public void testSingleFieldSameAnalyzers() throws Exception {
+
+    clearIndex();
+
+    // these fields use the same maxTokenLen-limited tokenizers at both index and query time.
+
+    updateJ("{\"add\":{\"doc\": {\"id\":1,\"letter0\":\"letter\"}},\"commit\":{}}",null);
+    updateJ("{\"add\":{\"doc\": {\"id\":2,\"lowerCase0\":\"lowerCase\"}},\"commit\":{}}",null);
+    updateJ("{\"add\":{\"doc\": {\"id\":3,\"whiteSpace0\":\"whiteSpace in\"}},\"commit\":{}}",null);
+    updateJ("{\"add\":{\"doc\": {\"id\":4,\"unicodeWhiteSpace0\":\"unicode in\"}},\"commit\":{}}",null);
+    updateJ("{\"add\":{\"doc\": {\"id\":5,\"keyword0\":\"keyword\"}},\"commit\":{}}",null);
+
+    assertU(commit());
+
+    assertQ("Check the total number of docs", req("q","*:*"), "//result[@numFound=5]");
+
+    // Tokens generated for "letter": "let" "ter" "letter", maxTokenLen=3
+    // any query term whose first 3-char chunk matches an indexed chunk is found
+    assertQ("letter0:l should not match", req("q","letter0:l"), "//result[@numFound=0]");
+    assertQ("letter0:let should match", req("q","letter0:let"), "//result[@numFound=1]");
+    assertQ("letter0:lett should match via its first chunk", req("q","letter0:lett"), "//result[@numFound=1]");
+    assertQ("letter0:letXYZ should match via its first chunk", req("q","letter0:letXYZ"), "//result[@numFound=1]");
+
+    // Tokens generated for "lowerCase": "low" "erC" "ase" "lowerCase", maxTokenLen=3
+    // any query term whose first 3-char chunk matches an indexed chunk is found
+    assertQ("lowerCase0:low should match", req("q","lowerCase0:low"), "//result[@numFound=1]");
+    assertQ("lowerCase0:l should not match", req("q","lowerCase0:l"), "//result[@numFound=0]");
+    assertQ("lowerCase0:lo should not match", req("q","lowerCase0:lo"), "//result[@numFound=0]");
+    assertQ("lowerCase0:lowerXYZ should match via its first chunk", req("q","lowerCase0:lowerXYZ"), "//result[@numFound=1]");
+
+    // Tokens generated for "whiteSpace in": "whi" "teS" "pac" "e" "in" "whiteSpace", maxTokenLen=3
+    // any query term whose first 3-char chunk matches an indexed chunk is found
+    assertQ("whiteSpace0:h should not match", req("q","whiteSpace0:h"), "//result[@numFound=0]");
+    assertQ("whiteSpace0:whi should match", req("q","whiteSpace0:whi"), "//result[@numFound=1]");
+    assertQ("whiteSpace0:teS should match", req("q","whiteSpace0:teS"), "//result[@numFound=1]");
+    assertQ("whiteSpace0:in should match", req("q","whiteSpace0:in"), "//result[@numFound=1]");
+    assertQ("whiteSpace0:whiteZKY should match via its first chunk", req("q","whiteSpace0:whiteZKY"), "//result[@numFound=1]");
+
+    // Tokens generated for "unicode in": "uni" "cod" "e" "in" "unicode", maxTokenLen=3
+    // any query term whose first 3-char chunk matches an indexed chunk is found
+    assertQ("unicodeWhiteSpace0:u should not match", req("q","unicodeWhiteSpace0:u"), "//result[@numFound=0]");
+    assertQ("unicodeWhiteSpace0:uni should match", req("q","unicodeWhiteSpace0:uni"), "//result[@numFound=1]");
+    assertQ("unicodeWhiteSpace0:cod should match", req("q","unicodeWhiteSpace0:cod"), "//result[@numFound=1]");
+    assertQ("unicodeWhiteSpace0:e should match", req("q","unicodeWhiteSpace0:e"), "//result[@numFound=1]");
+    assertQ("unicodeWhiteSpace0:unicoVBRT should match via its first chunk", req("q","unicodeWhiteSpace0:unicoVBRT"), "//result[@numFound=1]");
+
+    // Tokens generated for "keyword": "keyword", maxTokenLen=3
+    assertQ("keyword0:keyword should match", req("q","keyword0:keyword"), "//result[@numFound=1]");
+    assertQ("keyword0:key should not match", req("q","keyword0:key"), "//result[@numFound=0]");
+
+  }
+}
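
The asymmetry between the two tests boils down to whether the query term is
chunked as well. A hedged illustration (hypothetical class, not part of the
patch) of why letter0:lett matches while letter:lett does not:

  import java.util.ArrayList;
  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  public class MatchSketch {
    static List<String> chunks(String token, int maxTokenLen) {
      List<String> out = new ArrayList<>();
      for (int i = 0; i < token.length(); i += maxTokenLen) {
        out.add(token.substring(i, Math.min(token.length(), i + maxTokenLen)));
      }
      return out;
    }

    public static void main(String[] args) {
      // index side: "letter" is chunked to [let, ter] with maxTokenLen=3
      Set<String> indexed = new HashSet<>(chunks("letter", 3));

      // letter0 field: the query term is chunked too, so "lett" becomes
      // [let, t] and its first chunk "let" is present in the index
      System.out.println(
          chunks("lett", 3).stream().anyMatch(indexed::contains)); // true

      // letter field: StandardTokenizer at query time keeps "lett" whole,
      // and no such token was ever indexed
      System.out.println(indexed.contains("lett"));                // false
    }
  }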


[20/25] lucene-solr:jira/solr-8668: LUCENE-7850: Move support for legacy numerics to solr/.

Posted by cp...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericRangeQuery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyNumericRangeQuery.java b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericRangeQuery.java
new file mode 100644
index 0000000..d07e497
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericRangeQuery.java
@@ -0,0 +1,537 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.Objects;
+
+import org.apache.lucene.document.DoublePoint;
+import org.apache.lucene.document.FloatPoint;
+import org.apache.lucene.document.IntPoint;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.index.FilteredTermsEnum;
+import org.apache.lucene.index.PointValues;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.index.Term; // for javadocs
+
+/**
+ * <p>A {@link Query} that matches numeric values within a
+ * specified range.  To use this, you must first index the
+ * numeric values using {@link org.apache.solr.legacy.LegacyIntField}, {@link
+ * org.apache.solr.legacy.LegacyFloatField}, {@link org.apache.solr.legacy.LegacyLongField} or {@link org.apache.solr.legacy.LegacyDoubleField} (expert: {@link
+ * org.apache.solr.legacy.LegacyNumericTokenStream}).  If your terms are instead textual,
+ * you should use {@link TermRangeQuery}.</p>
+ *
+ * <p>You create a new LegacyNumericRangeQuery with the static
+ * factory methods, e.g.:
+ *
+ * <pre class="prettyprint">
+ * Query q = LegacyNumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
+ * </pre>
+ *
+ * matches all documents whose float valued "weight" field
+ * ranges from 0.03 to 0.10, inclusive.
+ *
+ * <p>The performance of LegacyNumericRangeQuery is much better
+ * than the corresponding {@link TermRangeQuery} because the
+ * number of terms that must be searched is usually much
+ * smaller, thanks to the trie indexing described below.</p>
+ *
+ * <p>You can optionally specify a <a
+ * href="#precisionStepDesc"><code>precisionStep</code></a>
+ * when creating this query.  This is necessary if you've
+ * changed this configuration from its default (4) during
+ * indexing.  Lower values consume more disk space but speed
+ * up searching.  Suitable values are between <b>1</b> and
+ * <b>8</b>. A good starting point to test is <b>4</b>,
+ * which is the default value for all <code>Numeric*</code>
+ * classes.  See <a href="#precisionStepDesc">below</a> for
+ * details.
+ *
+ * <p>This query defaults to {@linkplain
+ * MultiTermQuery#CONSTANT_SCORE_REWRITE}.
+ * With precision steps of &le;4, this query can be run with
+ * one of the BooleanQuery rewrite methods without changing
+ * BooleanQuery's default max clause count.
+ *
+ * <br><h3>How it works</h3>
+ *
+ * <p>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
+ * where this algorithm was described (referred to as <code>TrieRangeQuery</code>):
+ *
+ * <blockquote><strong>Schindler, U, Diepenbroek, M</strong>, 2008.
+ * <em>Generic XML-based Framework for Metadata Portals.</em>
+ * Computers &amp; Geosciences 34 (12), 1947-1955.
+ * <a href="http://dx.doi.org/10.1016/j.cageo.2008.02.023"
+ * target="_blank">doi:10.1016/j.cageo.2008.02.023</a></blockquote>
+ *
+ * <p><em>A quote from this paper:</em> Because Apache Lucene is a full-text
+ * search engine and not a conventional database, it cannot handle numerical ranges
+ * (e.g., field value is inside user defined bounds, even dates are numerical values).
+ * We have developed an extension to Apache Lucene that stores
+ * the numerical values in a special string-encoded format with variable precision
+ * (all numerical values like doubles, longs, floats, and ints are converted to
+ * lexicographic sortable string representations and stored with different precisions
+ * (for a more detailed description of how the values are stored,
+ * see {@link org.apache.solr.legacy.LegacyNumericUtils})). A range is then divided recursively into multiple intervals for searching:
+ * The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
+ * while the boundaries are matched more exactly. This reduces the number of terms dramatically.</p>
+ *
+ * <p>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
+ * uses a lowest precision of 1 byte, the index contains only a maximum of 256 distinct values in the
+ * lowest precision. Overall, a range could consist of a theoretical maximum of
+ * <code>7*255*2 + 255 = 3825</code> distinct terms (when there is a term for every distinct value of an
+ * 8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
+ * because it would always be possible to reduce the full 256 values to one term with degraded precision).
+ * In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
+ * and a uniform value distribution).</p>
+ *
+ * <h3><a name="precisionStepDesc">Precision Step</a></h3>
+ * <p>You can choose any <code>precisionStep</code> when encoding values.
+ * Lower step values mean more precisions and so more terms in index (and index gets larger). The number
+ * of indexed terms per value is (those are generated by {@link org.apache.solr.legacy.LegacyNumericTokenStream}):
+ * <p style="font-family:serif">
+ * &nbsp;&nbsp;indexedTermsPerValue = <b>ceil</b><big>(</big>bitsPerValue / precisionStep<big>)</big>
+ * </p>
+ * As the lower precision terms are shared by many values, the additional terms only
+ * slightly grow the term dictionary (approx. 7% for <code>precisionStep=4</code>), but have a larger
+ * impact on the postings (the postings file will have more entries, as every document is linked to
+ * <code>indexedTermsPerValue</code> terms instead of one). The formula to estimate the growth
+ * of the term dictionary in comparison to one term per value:
+ * <p>
+ * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
+ * &nbsp;&nbsp;<img src="doc-files/nrq-formula-1.png" alt="\mathrm{termDictOverhead} = \sum\limits_{i=0}^{\mathrm{indexedTermsPerValue}-1} \frac{1}{2^{\mathrm{precisionStep}\cdot i}}">
+ * </p>
+ * <p>On the other hand, if the <code>precisionStep</code> is smaller, the maximum number of terms to match reduces,
+ * which optimizes query speed. The formula to calculate the maximum number of terms that will be visited while
+ * executing the query is:
+ * <p>
+ * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
+ * &nbsp;&nbsp;<img src="doc-files/nrq-formula-2.png" alt="\mathrm{maxQueryTerms} = \left[ \left( \mathrm{indexedTermsPerValue} - 1 \right) \cdot \left(2^\mathrm{precisionStep} - 1 \right) \cdot 2 \right] + \left( 2^\mathrm{precisionStep} - 1 \right)">
+ * </p>
+ * <p>For longs stored using a precision step of 4, <code>maxQueryTerms = 15*15*2 + 15 = 465</code>, and for a precision
+ * step of 2, <code>maxQueryTerms = 31*3*2 + 3 = 189</code>. But the gain in search speed is offset by more seeking
+ * in the term enum of the index. Because of this, the ideal <code>precisionStep</code> value can only
+ * be found by testing. <b>Important:</b> You can index with a lower precision step value and test search speed
+ * using a multiple of the original step value.</p>
+ *
+ * <p>Good values for <code>precisionStep</code> depend on usage and data type:
+ * <ul>
+ *  <li>The default for all data types is <b>4</b>, which is used when no <code>precisionStep</code> is given.
+ *  <li>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.
+ *  <li>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.
+ *  <li>For low cardinality fields larger precision steps are good. If the cardinality is &lt; 100, it is
+ *  fair to use {@link Integer#MAX_VALUE} (see below).
+ *  <li>Steps <b>&ge;64</b> for <em>long/double</em> and <b>&ge;32</b> for <em>int/float</em> produce one token
+ *  per value in the index, and querying is as slow as a conventional {@link TermRangeQuery}. But they can be used
+ *  to produce fields that are solely used for sorting (in this case simply use {@link Integer#MAX_VALUE} as
+ *  <code>precisionStep</code>). Using {@link org.apache.solr.legacy.LegacyIntField},
+ *  {@link org.apache.solr.legacy.LegacyLongField}, {@link org.apache.solr.legacy.LegacyFloatField} or {@link org.apache.solr.legacy.LegacyDoubleField} for sorting
+ *  is ideal, because building the field cache is much faster than with text-only numbers.
+ *  These fields have one term per value and therefore also work with term enumeration for building distinct lists
+ *  (e.g. facets / preselected values to search for).
+ *  Sorting is also possible with range query optimized fields using one of the above <code>precisionSteps</code>.
+ * </ul>
+ *
+ * <p>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
+ * that {@link TermRangeQuery} in boolean rewrite mode (with raised {@link BooleanQuery} clause count)
+ * took about 30-40 secs to complete, {@link TermRangeQuery} in constant score filter rewrite mode took 5 secs
+ * and executing this class took &lt;100ms to complete (on an Opteron64 machine, Java 1.5, 8 bit
+ * precision step). This query type was developed for a geographic portal, where the performance for
+ * e.g. bounding boxes or exact date/time stamps is important.</p>
+ *
+ * @deprecated Instead index with {@link IntPoint}, {@link LongPoint}, {@link FloatPoint}, {@link DoublePoint}, and
+ *             create range queries with {@link IntPoint#newRangeQuery(String, int, int) IntPoint.newRangeQuery()},
+ *             {@link LongPoint#newRangeQuery(String, long, long) LongPoint.newRangeQuery()},
+ *             {@link FloatPoint#newRangeQuery(String, float, float) FloatPoint.newRangeQuery()},
+ *             {@link DoublePoint#newRangeQuery(String, double, double) DoublePoint.newRangeQuery()} respectively.
+ *             See {@link PointValues} for background information on Points.
+ *
+ * @since 2.9
+ **/
+
+@Deprecated
+public final class LegacyNumericRangeQuery<T extends Number> extends MultiTermQuery {
+
+  private LegacyNumericRangeQuery(final String field, final int precisionStep, final LegacyNumericType dataType,
+                                  T min, T max, final boolean minInclusive, final boolean maxInclusive) {
+    super(field);
+    if (precisionStep < 1)
+      throw new IllegalArgumentException("precisionStep must be >=1");
+    this.precisionStep = precisionStep;
+    this.dataType = Objects.requireNonNull(dataType, "LegacyNumericType must not be null");
+    this.min = min;
+    this.max = max;
+    this.minInclusive = minInclusive;
+    this.maxInclusive = maxInclusive;
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>long</code>
+   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>. With inclusive set to false the query
+   * matches all documents excluding the bounds; with inclusive set to true the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Long> newLongRange(final String field, final int precisionStep,
+    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>long</code>
+   * range using the default <code>precisionStep</code> {@link org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>. With inclusive set to false the query
+   * matches all documents excluding the bounds; with inclusive set to true the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Long> newLongRange(final String field,
+    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>int</code>
+   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>. With inclusive set to false the query
+   * matches all documents excluding the bounds; with inclusive set to true the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Integer> newIntRange(final String field, final int precisionStep,
+    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>int</code>
+   * range using the default <code>precisionStep</code> {@link org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>. With inclusive set to false the query
+   * matches all documents excluding the bounds; with inclusive set to true the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Integer> newIntRange(final String field,
+    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>double</code>
+   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>.
+   * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
+   * with {@code min == max == Double.NaN}. With inclusive set to false the query matches all
+   * documents excluding the bounds; with inclusive set to true the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field, final int precisionStep,
+    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>double</code>
+   * range using the default <code>precisionStep</code> {@link org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>.
+   * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
+   * with {@code min == max == Double.NaN}. With inclusive set to false the query matches all
+   * documents excluding the bounds; with inclusive set to true the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field,
+    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>float</code>
+   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>.
+   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
+   * with {@code min == max == Float.NaN}. With inclusive set to false the query matches all
+   * documents excluding the bounds; with inclusive set to true the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Float> newFloatRange(final String field, final int precisionStep,
+    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>float</code>
+   * range using the default <code>precisionStep</code> {@link org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>.
+   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
+   * with {@code min == max == Float.NaN}. With inclusive set to false the query matches all
+   * documents excluding the bounds; with inclusive set to true the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Float> newFloatRange(final String field,
+    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
+  }
+
+  @Override @SuppressWarnings("unchecked")
+  protected TermsEnum getTermsEnum(final Terms terms, AttributeSource atts) throws IOException {
+    // very strange: java.lang.Number itself is not Comparable, but all subclasses used here are
+    if (min != null && max != null && ((Comparable<T>) min).compareTo(max) > 0) {
+      return TermsEnum.EMPTY;
+    }
+    return new NumericRangeTermsEnum(terms.iterator());
+  }
+
+  /** Returns <code>true</code> if the lower endpoint is inclusive */
+  public boolean includesMin() { return minInclusive; }
+  
+  /** Returns <code>true</code> if the upper endpoint is inclusive */
+  public boolean includesMax() { return maxInclusive; }
+
+  /** Returns the lower value of this range query */
+  public T getMin() { return min; }
+
+  /** Returns the upper value of this range query */
+  public T getMax() { return max; }
+  
+  /** Returns the precision step. */
+  public int getPrecisionStep() { return precisionStep; }
+  
+  @Override
+  public String toString(final String field) {
+    final StringBuilder sb = new StringBuilder();
+    if (!getField().equals(field)) sb.append(getField()).append(':');
+    return sb.append(minInclusive ? '[' : '{')
+      .append((min == null) ? "*" : min.toString())
+      .append(" TO ")
+      .append((max == null) ? "*" : max.toString())
+      .append(maxInclusive ? ']' : '}')
+      .toString();
+  }
+
+  @Override
+  @SuppressWarnings({"unchecked","rawtypes"})
+  public final boolean equals(final Object o) {
+    if (o==this) return true;
+    if (!super.equals(o))
+      return false;
+    if (o instanceof LegacyNumericRangeQuery) {
+      final LegacyNumericRangeQuery q=(LegacyNumericRangeQuery)o;
+      return (
+        (q.min == null ? min == null : q.min.equals(min)) &&
+        (q.max == null ? max == null : q.max.equals(max)) &&
+        minInclusive == q.minInclusive &&
+        maxInclusive == q.maxInclusive &&
+        precisionStep == q.precisionStep
+      );
+    }
+    return false;
+  }
+
+  @Override
+  public final int hashCode() {
+    int hash = super.hashCode();
+    hash = 31 * hash + precisionStep;
+    hash = 31 * hash + Objects.hashCode(min);
+    hash = 31 * hash + Objects.hashCode(max);
+    hash = 31 * hash + Objects.hashCode(minInclusive);
+    hash = 31 * hash + Objects.hashCode(maxInclusive);
+    return hash;
+  }
+
+  // members (package private, so they are also quickly accessible from NumericRangeTermsEnum)
+  final int precisionStep;
+  final LegacyNumericType dataType;
+  final T min, max;
+  final boolean minInclusive,maxInclusive;
+
+  // used to handle float/double infinity correctly
+  static final long LONG_NEGATIVE_INFINITY =
+    NumericUtils.doubleToSortableLong(Double.NEGATIVE_INFINITY);
+  static final long LONG_POSITIVE_INFINITY =
+    NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
+  static final int INT_NEGATIVE_INFINITY =
+    NumericUtils.floatToSortableInt(Float.NEGATIVE_INFINITY);
+  static final int INT_POSITIVE_INFINITY =
+    NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
+
+  /**
+   * Subclass of FilteredTermsEnum for enumerating all terms that match the
+   * sub-ranges for trie range queries, using flex API.
+   * <p>
+   * WARNING: This term enumeration is not guaranteed to be always ordered by
+   * {@link Term#compareTo}.
+   * The ordering depends on how {@link org.apache.solr.legacy.LegacyNumericUtils#splitLongRange} and
+   * {@link org.apache.solr.legacy.LegacyNumericUtils#splitIntRange} generate the sub-ranges. For
+   * {@link MultiTermQuery} ordering is not relevant.
+   */
+  private final class NumericRangeTermsEnum extends FilteredTermsEnum {
+
+    private BytesRef currentLowerBound, currentUpperBound;
+
+    private final LinkedList<BytesRef> rangeBounds = new LinkedList<>();
+
+    NumericRangeTermsEnum(final TermsEnum tenum) {
+      super(tenum);
+      switch (dataType) {
+        case LONG:
+        case DOUBLE: {
+          // lower
+          long minBound;
+          if (dataType == LegacyNumericType.LONG) {
+            minBound = (min == null) ? Long.MIN_VALUE : min.longValue();
+          } else {
+            assert dataType == LegacyNumericType.DOUBLE;
+            minBound = (min == null) ? LONG_NEGATIVE_INFINITY
+              : NumericUtils.doubleToSortableLong(min.doubleValue());
+          }
+          if (!minInclusive && min != null) {
+            if (minBound == Long.MAX_VALUE) break;
+            minBound++;
+          }
+          
+          // upper
+          long maxBound;
+          if (dataType == LegacyNumericType.LONG) {
+            maxBound = (max == null) ? Long.MAX_VALUE : max.longValue();
+          } else {
+            assert dataType == LegacyNumericType.DOUBLE;
+            maxBound = (max == null) ? LONG_POSITIVE_INFINITY
+              : NumericUtils.doubleToSortableLong(max.doubleValue());
+          }
+          if (!maxInclusive && max != null) {
+            if (maxBound == Long.MIN_VALUE) break;
+            maxBound--;
+          }
+          
+          LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
+            @Override
+            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
+              rangeBounds.add(minPrefixCoded);
+              rangeBounds.add(maxPrefixCoded);
+            }
+          }, precisionStep, minBound, maxBound);
+          break;
+        }
+          
+        case INT:
+        case FLOAT: {
+          // lower
+          int minBound;
+          if (dataType == LegacyNumericType.INT) {
+            minBound = (min == null) ? Integer.MIN_VALUE : min.intValue();
+          } else {
+            assert dataType == LegacyNumericType.FLOAT;
+            minBound = (min == null) ? INT_NEGATIVE_INFINITY
+              : NumericUtils.floatToSortableInt(min.floatValue());
+          }
+          if (!minInclusive && min != null) {
+            if (minBound == Integer.MAX_VALUE) break;
+            minBound++;
+          }
+          
+          // upper
+          int maxBound;
+          if (dataType == LegacyNumericType.INT) {
+            maxBound = (max == null) ? Integer.MAX_VALUE : max.intValue();
+          } else {
+            assert dataType == LegacyNumericType.FLOAT;
+            maxBound = (max == null) ? INT_POSITIVE_INFINITY
+              : NumericUtils.floatToSortableInt(max.floatValue());
+          }
+          if (!maxInclusive && max != null) {
+            if (maxBound == Integer.MIN_VALUE) break;
+            maxBound--;
+          }
+          
+          LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
+            @Override
+            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
+              rangeBounds.add(minPrefixCoded);
+              rangeBounds.add(maxPrefixCoded);
+            }
+          }, precisionStep, minBound, maxBound);
+          break;
+        }
+          
+        default:
+          // should never happen
+          throw new IllegalArgumentException("Invalid LegacyNumericType");
+      }
+    }
+    
+    private void nextRange() {
+      assert rangeBounds.size() % 2 == 0;
+
+      currentLowerBound = rangeBounds.removeFirst();
+      assert currentUpperBound == null || currentUpperBound.compareTo(currentLowerBound) <= 0 :
+        "The current upper bound must be <= the new lower bound";
+      
+      currentUpperBound = rangeBounds.removeFirst();
+    }
+    
+    @Override
+    protected final BytesRef nextSeekTerm(BytesRef term) {
+      while (rangeBounds.size() >= 2) {
+        nextRange();
+        
+        // if the new upper bound is before the term parameter, the sub-range is never a hit
+        if (term != null && term.compareTo(currentUpperBound) > 0)
+          continue;
+        // never seek backwards, so use current term if lower bound is smaller
+        return (term != null && term.compareTo(currentLowerBound) > 0) ?
+          term : currentLowerBound;
+      }
+      
+      // no more sub-range enums available
+      assert rangeBounds.isEmpty();
+      currentLowerBound = currentUpperBound = null;
+      return null;
+    }
+    
+    @Override
+    protected final AcceptStatus accept(BytesRef term) {
+      while (currentUpperBound == null || term.compareTo(currentUpperBound) > 0) {
+        if (rangeBounds.isEmpty())
+          return AcceptStatus.END;
+        // peek next sub-range, only seek if the current term is smaller than next lower bound
+        if (term.compareTo(rangeBounds.getFirst()) < 0)
+          return AcceptStatus.NO_AND_SEEK;
+        // step forward to the next range without seeking, as the next lower range bound is less than or equal to the current term
+        nextRange();
+      }
+      return AcceptStatus.YES;
+    }
+
+  }
+  
+}
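
To make the precisionStep arithmetic in the javadoc above concrete, here is
a small standalone sketch (not part of the patch; the class and method names
are illustrative) that evaluates the two formulas and reproduces the numbers
quoted there:

  public class PrecisionStepMath {
    // indexedTermsPerValue = ceil(bitsPerValue / precisionStep)
    static int indexedTermsPerValue(int bitsPerValue, int precisionStep) {
      return (bitsPerValue + precisionStep - 1) / precisionStep;
    }

    // maxQueryTerms = (indexedTermsPerValue - 1) * (2^precisionStep - 1) * 2
    //               + (2^precisionStep - 1)
    static int maxQueryTerms(int bitsPerValue, int precisionStep) {
      int perValue = indexedTermsPerValue(bitsPerValue, precisionStep);
      int termsPerBoundary = (1 << precisionStep) - 1;
      return (perValue - 1) * termsPerBoundary * 2 + termsPerBoundary;
    }

    public static void main(String[] args) {
      System.out.println(maxQueryTerms(64, 4)); // 465  = 15*15*2 + 15
      System.out.println(maxQueryTerms(64, 2)); // 189  = 31*3*2 + 3
      System.out.println(maxQueryTerms(64, 8)); // 3825 = 7*255*2 + 255
    }
  }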

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericTokenStream.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyNumericTokenStream.java b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericTokenStream.java
new file mode 100644
index 0000000..c18cd59
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericTokenStream.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import java.util.Objects;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeFactory;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.AttributeReflector;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.NumericUtils;
+
+/**
+ * <b>Expert:</b> This class provides a {@link TokenStream}
+ * for indexing numeric values that can be used by {@link
+ * org.apache.solr.legacy.LegacyNumericRangeQuery}.
+ *
+ * <p>Note that for simple usage, {@link org.apache.solr.legacy.LegacyIntField}, {@link
+ * org.apache.solr.legacy.LegacyLongField}, {@link org.apache.solr.legacy.LegacyFloatField} or {@link org.apache.solr.legacy.LegacyDoubleField} is
+ * recommended.  These fields disable norms and
+ * term freqs, as they are not usually needed during
+ * searching.  If you need to change these settings, you
+ * should use this class.
+ *
+ * <p>Here's an example usage, for an <code>int</code> field:
+ *
+ * <pre class="prettyprint">
+ *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
+ *  fieldType.setOmitNorms(true);
+ *  fieldType.setIndexOptions(IndexOptions.DOCS);
+ *  Field field = new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value), fieldType);
+ *  document.add(field);
+ * </pre>
+ *
+ * <p>For optimal performance, re-use the TokenStream and Field instance
+ * for more than one document:
+ *
+ * <pre class="prettyprint">
+ *  LegacyNumericTokenStream stream = new LegacyNumericTokenStream(precisionStep);
+ *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
+ *  fieldType.setOmitNorms(true);
+ *  fieldType.setIndexOptions(IndexOptions.DOCS);
+ *  Field field = new Field(name, stream, fieldType);
+ *  Document document = new Document();
+ *  document.add(field);
+ *
+ *  for(all documents) {
+ *    stream.setIntValue(value)
+ *    writer.addDocument(document);
+ *  }
+ * </pre>
+ *
+ * <p>This stream is not intended to be used in analyzers;
+ * it's more for iterating over the different precisions while
+ * indexing a specific numeric value.</p>
+ *
+ * <p><b>NOTE</b>: as token streams are only consumed once
+ * the document is added to the index, if you index more
+ * than one numeric field, use a separate <code>LegacyNumericTokenStream</code>
+ * instance for each.</p>
+ *
+ * <p>See {@link org.apache.solr.legacy.LegacyNumericRangeQuery} for more details on the
+ * <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * parameter as well as how numeric fields work under the hood.</p>
+ *
+ * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+ *
+ * @since 2.9
+ */
+@Deprecated
+public final class LegacyNumericTokenStream extends TokenStream {
+
+  /** The full precision token gets this token type assigned. */
+  public static final String TOKEN_TYPE_FULL_PREC  = "fullPrecNumeric";
+
+  /** The lower precision tokens get this token type assigned. */
+  public static final String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
+  
+  /** <b>Expert:</b> Use this attribute to get the details of the currently generated token.
+   * @lucene.experimental
+   * @since 4.0
+   */
+  public interface LegacyNumericTermAttribute extends Attribute {
+    /** Returns current shift value, undefined before first token */
+    int getShift();
+    /** Returns current token's raw value as {@code long} with all {@link #getShift} applied, undefined before first token */
+    long getRawValue();
+    /** Returns value size in bits (32 for {@code float}, {@code int}; 64 for {@code double}, {@code long}) */
+    int getValueSize();
+    
+    /** <em>Don't call this method!</em>
+      * @lucene.internal */
+    void init(long value, int valSize, int precisionStep, int shift);
+
+    /** <em>Don't call this method!</em>
+      * @lucene.internal */
+    void setShift(int shift);
+
+    /** <em>Don't call this method!</em>
+      * @lucene.internal */
+    int incShift();
+  }
+  
+  // just a wrapper to prevent adding CTA
+  private static final class NumericAttributeFactory extends AttributeFactory {
+    private final AttributeFactory delegate;
+
+    NumericAttributeFactory(AttributeFactory delegate) {
+      this.delegate = delegate;
+    }
+  
+    @Override
+    public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
+      if (CharTermAttribute.class.isAssignableFrom(attClass))
+        throw new IllegalArgumentException("LegacyNumericTokenStream does not support CharTermAttribute.");
+      return delegate.createAttributeInstance(attClass);
+    }
+  }
+
+  /** Implementation of {@link org.apache.solr.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute}.
+   * @lucene.internal
+   * @since 4.0
+   */
+  public static final class LegacyNumericTermAttributeImpl extends AttributeImpl implements LegacyNumericTermAttribute,TermToBytesRefAttribute {
+    private long value = 0L;
+    private int valueSize = 0, shift = 0, precisionStep = 0;
+    private BytesRefBuilder bytes = new BytesRefBuilder();
+    
+    /** 
+     * Creates, but does not yet initialize this attribute instance
+     * @see #init(long, int, int, int)
+     */
+    public LegacyNumericTermAttributeImpl() {}
+
+    @Override
+    public BytesRef getBytesRef() {
+      assert valueSize == 64 || valueSize == 32;
+      if (shift >= valueSize) {
+        bytes.clear();
+      } else if (valueSize == 64) {
+        LegacyNumericUtils.longToPrefixCoded(value, shift, bytes);
+      } else {
+        LegacyNumericUtils.intToPrefixCoded((int) value, shift, bytes);
+      }
+      return bytes.get();
+    }
+
+    @Override
+    public int getShift() { return shift; }
+    @Override
+    public void setShift(int shift) { this.shift = shift; }
+    @Override
+    public int incShift() {
+      return (shift += precisionStep);
+    }
+
+    @Override
+    public long getRawValue() { return value  & ~((1L << shift) - 1L); }
+    @Override
+    public int getValueSize() { return valueSize; }
+
+    @Override
+    public void init(long value, int valueSize, int precisionStep, int shift) {
+      this.value = value;
+      this.valueSize = valueSize;
+      this.precisionStep = precisionStep;
+      this.shift = shift;
+    }
+
+    @Override
+    public void clear() {
+      // this attribute has no contents to clear!
+      // we keep it untouched as it's fully controlled by outer class.
+    }
+    
+    @Override
+    public void reflectWith(AttributeReflector reflector) {
+      reflector.reflect(TermToBytesRefAttribute.class, "bytes", getBytesRef());
+      reflector.reflect(LegacyNumericTermAttribute.class, "shift", shift);
+      reflector.reflect(LegacyNumericTermAttribute.class, "rawValue", getRawValue());
+      reflector.reflect(LegacyNumericTermAttribute.class, "valueSize", valueSize);
+    }
+  
+    @Override
+    public void copyTo(AttributeImpl target) {
+      final LegacyNumericTermAttribute a = (LegacyNumericTermAttribute) target;
+      a.init(value, valueSize, precisionStep, shift);
+    }
+    
+    @Override
+    public LegacyNumericTermAttributeImpl clone() {
+      LegacyNumericTermAttributeImpl t = (LegacyNumericTermAttributeImpl)super.clone();
+      // Do a deep clone
+      t.bytes = new BytesRefBuilder();
+      t.bytes.copyBytes(getBytesRef());
+      return t;
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(precisionStep, shift, value, valueSize);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) return true;
+      if (obj == null) return false;
+      if (getClass() != obj.getClass()) return false;
+      LegacyNumericTermAttributeImpl other = (LegacyNumericTermAttributeImpl) obj;
+      if (precisionStep != other.precisionStep) return false;
+      if (shift != other.shift) return false;
+      if (value != other.value) return false;
+      if (valueSize != other.valueSize) return false;
+      return true;
+    }
+  }
+  
+  /**
+   * Creates a token stream for numeric values using the default <code>precisionStep</code>
+   * {@link org.apache.solr.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16). The stream is not yet initialized;
+   * before using it, set a value via one of the set<em>???</em>Value() methods.
+   */
+  public LegacyNumericTokenStream() {
+    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, LegacyNumericUtils.PRECISION_STEP_DEFAULT);
+  }
+  
+  /**
+   * Creates a token stream for numeric values with the specified
+   * <code>precisionStep</code>. The stream is not yet initialized;
+   * before using it, set a value via one of the set<em>???</em>Value() methods.
+   */
+  public LegacyNumericTokenStream(final int precisionStep) {
+    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, precisionStep);
+  }
+
+  /**
+   * Expert: Creates a token stream for numeric values with the specified
+   * <code>precisionStep</code> using the given
+   * {@link org.apache.lucene.util.AttributeFactory}.
+   * The stream is not yet initialized;
+   * before using it, set a value via one of the set<em>???</em>Value() methods.
+   */
+  public LegacyNumericTokenStream(AttributeFactory factory, final int precisionStep) {
+    super(new NumericAttributeFactory(factory));
+    if (precisionStep < 1)
+      throw new IllegalArgumentException("precisionStep must be >=1");
+    this.precisionStep = precisionStep;
+    numericAtt.setShift(-precisionStep);
+  }
+
+  /**
+   * Initializes the token stream with the supplied <code>long</code> value.
+   * @param value the value, for which this TokenStream should enumerate tokens.
+   * @return this instance, because of this you can use it the following way:
+   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setLongValue(value))</code>
+   */
+  public LegacyNumericTokenStream setLongValue(final long value) {
+    numericAtt.init(value, valSize = 64, precisionStep, -precisionStep);
+    return this;
+  }
+  
+  /**
+   * Initializes the token stream with the supplied <code>int</code> value.
+   * @param value the value, for which this TokenStream should enumerate tokens.
+   * @return this instance, because of this you can use it the following way:
+   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value))</code>
+   */
+  public LegacyNumericTokenStream setIntValue(final int value) {
+    numericAtt.init(value, valSize = 32, precisionStep, -precisionStep);
+    return this;
+  }
+  
+  /**
+   * Initializes the token stream with the supplied <code>double</code> value.
+   * @param value the value, for which this TokenStream should enumerate tokens.
+   * @return this instance, because of this you can use it the following way:
+   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setDoubleValue(value))</code>
+   */
+  public LegacyNumericTokenStream setDoubleValue(final double value) {
+    numericAtt.init(NumericUtils.doubleToSortableLong(value), valSize = 64, precisionStep, -precisionStep);
+    return this;
+  }
+  
+  /**
+   * Initializes the token stream with the supplied <code>float</code> value.
+   * @param value the value, for which this TokenStream should enumerate tokens.
+   * @return this instance, because of this you can use it the following way:
+   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setFloatValue(value))</code>
+   */
+  public LegacyNumericTokenStream setFloatValue(final float value) {
+    numericAtt.init(NumericUtils.floatToSortableInt(value), valSize = 32, precisionStep, -precisionStep);
+    return this;
+  }
+  
+  @Override
+  public void reset() {
+    if (valSize == 0)
+      throw new IllegalStateException("call set???Value() before usage");
+    numericAtt.setShift(-precisionStep);
+  }
+
+  @Override
+  public boolean incrementToken() {
+    if (valSize == 0)
+      throw new IllegalStateException("call set???Value() before usage");
+    
+    // this will only clear all other attributes in this TokenStream
+    clearAttributes();
+
+    final int shift = numericAtt.incShift();
+    typeAtt.setType((shift == 0) ? TOKEN_TYPE_FULL_PREC : TOKEN_TYPE_LOWER_PREC);
+    posIncrAtt.setPositionIncrement((shift == 0) ? 1 : 0);
+    return (shift < valSize);
+  }
+
+  /** Returns the precision step. */
+  public int getPrecisionStep() {
+    return precisionStep;
+  }
+
+  @Override
+  public String toString() {
+    // We override default because it can throw cryptic "illegal shift value":
+    return getClass().getSimpleName() + "(precisionStep=" + precisionStep + " valueSize=" + numericAtt.getValueSize() + " shift=" + numericAtt.getShift() + ")";
+  }
+  
+  // members
+  private final LegacyNumericTermAttribute numericAtt = addAttribute(LegacyNumericTermAttribute.class);
+  private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+  
+  private int valSize = 0; // valSize==0 means not initialized
+  private final int precisionStep;
+}
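
The javadoc examples above are pseudocode; the following is a runnable sketch of the recommended reuse pattern. The field name "qty", the precisionStep of 8, the in-memory directory, and the analyzer choice are illustrative assumptions:

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.solr.legacy.LegacyNumericTokenStream;

    public class NumericStreamExample {
      public static void main(String[] args) throws Exception {
        try (RAMDirectory dir = new RAMDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer()))) {
          // Re-use one stream and one Field across documents, as recommended above.
          LegacyNumericTokenStream stream = new LegacyNumericTokenStream(8);
          FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
          fieldType.setOmitNorms(true);
          fieldType.setIndexOptions(IndexOptions.DOCS);
          Field field = new Field("qty", stream, fieldType);
          Document document = new Document();
          document.add(field);
          for (int value : new int[] {3, 14, 159}) {
            stream.setIntValue(value); // the stream emits one token per precision step
            writer.addDocument(document);
          }
        }
      }
    }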

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyNumericType.java b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericType.java
new file mode 100644
index 0000000..8cc3fcc
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericType.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+/** Data type of the numeric value
+ * @since 3.2
+ *
+ * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+ */
+@Deprecated
+public enum LegacyNumericType {
+  /** 32-bit integer numeric type */
+  INT, 
+  /** 64-bit long numeric type */
+  LONG, 
+  /** 32-bit float numeric type */
+  FLOAT, 
+  /** 64-bit double numeric type */
+  DOUBLE
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/LegacyNumericUtils.java b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericUtils.java
new file mode 100644
index 0000000..52fae9c
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/LegacyNumericUtils.java
@@ -0,0 +1,510 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+
+import java.io.IOException;
+
+import org.apache.lucene.index.FilterLeafReader;
+import org.apache.lucene.index.FilteredTermsEnum;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+
+/**
+ * This is a helper class to generate prefix-encoded representations for numerical values
+ * and supplies converters to represent float/double values as sortable integers/longs.
+ *
+ * <p>To quickly execute range queries in Apache Lucene, a range is divided recursively
+ * into multiple intervals for searching: The center of the range is searched only with
+ * the lowest possible precision in the trie, while the boundaries are matched
+ * more exactly. This reduces the number of terms dramatically.
+ *
+ * <p>This class generates terms to achieve this: First the numerical integer values need to
+ * be converted to bytes. For that, integer values (32 bit or 64 bit) are made unsigned
+ * and the bits are encoded as ASCII chars, 7 bits per char. The resulting byte[] is
+ * sortable like the original integer value (even using UTF-8 sort order). Each value is also
+ * prefixed (in the first char) by the <code>shift</code> value (number of bits removed) used
+ * during encoding.
+ *
+ * <p>For easy usage, the trie algorithm is implemented for indexing inside
+ * {@link org.apache.solr.legacy.LegacyNumericTokenStream} that can index <code>int</code>, <code>long</code>,
+ * <code>float</code>, and <code>double</code>. For querying,
+ * {@link org.apache.solr.legacy.LegacyNumericRangeQuery} implements the query part
+ * for the same data types.
+ *
+ * @lucene.internal
+ *
+ * @deprecated Please use {@link org.apache.lucene.index.PointValues} instead.
+ *
+ * @since 2.9, API changed non backwards-compliant in 4.0
+ */
+
+@Deprecated
+public final class LegacyNumericUtils {
+
+  private LegacyNumericUtils() {} // no instance!
+  
+  /**
+   * The default precision step used by {@link org.apache.solr.legacy.LegacyLongField},
+   * {@link org.apache.solr.legacy.LegacyDoubleField}, {@link org.apache.solr.legacy.LegacyNumericTokenStream}, {@link
+   * org.apache.solr.legacy.LegacyNumericRangeQuery}.
+   */
+  public static final int PRECISION_STEP_DEFAULT = 16;
+  
+  /**
+   * The default precision step used by {@link org.apache.solr.legacy.LegacyIntField} and
+   * {@link org.apache.solr.legacy.LegacyFloatField}.
+   */
+  public static final int PRECISION_STEP_DEFAULT_32 = 8;
+  
+  /**
+   * Longs are stored at lower precision by shifting off lower bits. The shift count is
+   * stored as <code>SHIFT_START_LONG+shift</code> in the first byte
+   */
+  public static final byte SHIFT_START_LONG = 0x20;
+
+  /**
+   * The maximum term length (used for <code>byte[]</code> buffer size)
+   * for encoding <code>long</code> values.
+   * @see #longToPrefixCoded
+   */
+  public static final int BUF_SIZE_LONG = 63/7 + 2;
+
+  /**
+   * Integers are stored at lower precision by shifting off lower bits. The shift count is
+   * stored as <code>SHIFT_START_INT+shift</code> in the first byte
+   */
+  public static final byte SHIFT_START_INT  = 0x60;
+
+  /**
+   * The maximum term length (used for <code>byte[]</code> buffer size)
+   * for encoding <code>int</code> values.
+   * @see #intToPrefixCoded
+   */
+  public static final int BUF_SIZE_INT = 31/7 + 2;
+
+  /**
+   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
+   * This method is used by {@link org.apache.solr.legacy.LegacyNumericTokenStream}.
+   * After encoding, {@code bytes.offset} will always be 0. 
+   * @param val the numeric value
+   * @param shift how many bits to strip from the right
+   * @param bytes will contain the encoded value
+   */
+  public static void longToPrefixCoded(final long val, final int shift, final BytesRefBuilder bytes) {
+    // ensure shift is 0..63
+    if ((shift & ~0x3f) != 0) {
+      throw new IllegalArgumentException("Illegal shift value, must be 0..63; got shift=" + shift);
+    }
+    int nChars = (((63-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
+    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
+    bytes.grow(BUF_SIZE_LONG);
+    bytes.setByteAt(0, (byte)(SHIFT_START_LONG + shift));
+    long sortableBits = val ^ 0x8000000000000000L;
+    sortableBits >>>= shift;
+    while (nChars > 0) {
+      // Store 7 bits per byte for compatibility
+      // with UTF-8 encoding of terms
+      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
+      sortableBits >>>= 7;
+    }
+  }
+
+  /**
+   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
+   * This method is used by {@link org.apache.solr.legacy.LegacyNumericTokenStream}.
+   * After encoding, {@code bytes.offset} will always be 0.
+   * @param val the numeric value
+   * @param shift how many bits to strip from the right
+   * @param bytes will contain the encoded value
+   */
+  public static void intToPrefixCoded(final int val, final int shift, final BytesRefBuilder bytes) {
+    // ensure shift is 0..31
+    if ((shift & ~0x1f) != 0) {
+      throw new IllegalArgumentException("Illegal shift value, must be 0..31; got shift=" + shift);
+    }
+    int nChars = (((31-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
+    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
+    bytes.grow(LegacyNumericUtils.BUF_SIZE_LONG);  // use the max
+    bytes.setByteAt(0, (byte)(SHIFT_START_INT + shift));
+    int sortableBits = val ^ 0x80000000;
+    sortableBits >>>= shift;
+    while (nChars > 0) {
+      // Store 7 bits per byte for compatibility
+      // with UTF-8 encoding of terms
+      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
+      sortableBits >>>= 7;
+    }
+  }
+
+
+  /**
+   * Returns the shift value from a prefix encoded {@code long}.
+   * @throws NumberFormatException if the supplied {@link BytesRef} is
+   * not correctly prefix encoded.
+   */
+  public static int getPrefixCodedLongShift(final BytesRef val) {
+    final int shift = val.bytes[val.offset] - SHIFT_START_LONG;
+    if (shift > 63 || shift < 0)
+      throw new NumberFormatException("Invalid shift value (" + shift + ") in prefixCoded bytes (is encoded value really an INT?)");
+    return shift;
+  }
+
+  /**
+   * Returns the shift value from a prefix encoded {@code int}.
+   * @throws NumberFormatException if the supplied {@link BytesRef} is
+   * not correctly prefix encoded.
+   */
+  public static int getPrefixCodedIntShift(final BytesRef val) {
+    final int shift = val.bytes[val.offset] - SHIFT_START_INT;
+    if (shift > 31 || shift < 0)
+      throw new NumberFormatException("Invalid shift value in prefixCoded bytes (is encoded value really an INT?)");
+    return shift;
+  }
+
+  /**
+   * Returns a long from prefixCoded bytes.
+   * Rightmost bits will be zero for lower precision codes.
+   * This method can be used to decode a term's value.
+   * @throws NumberFormatException if the supplied {@link BytesRef} is
+   * not correctly prefix encoded.
+   * @see #longToPrefixCoded
+   */
+  public static long prefixCodedToLong(final BytesRef val) {
+    long sortableBits = 0L;
+    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
+      sortableBits <<= 7;
+      final byte b = val.bytes[i];
+      if (b < 0) {
+        throw new NumberFormatException(
+          "Invalid prefixCoded numerical value representation (byte "+
+          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
+        );
+      }
+      sortableBits |= b;
+    }
+    return (sortableBits << getPrefixCodedLongShift(val)) ^ 0x8000000000000000L;
+  }
+
+  /**
+   * Returns an int from prefixCoded bytes.
+   * Rightmost bits will be zero for lower precision codes.
+   * This method can be used to decode a term's value.
+   * @throws NumberFormatException if the supplied {@link BytesRef} is
+   * not correctly prefix encoded.
+   * @see #intToPrefixCoded
+   */
+  public static int prefixCodedToInt(final BytesRef val) {
+    int sortableBits = 0;
+    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
+      sortableBits <<= 7;
+      final byte b = val.bytes[i];
+      if (b < 0) {
+        throw new NumberFormatException(
+          "Invalid prefixCoded numerical value representation (byte "+
+          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
+        );
+      }
+      sortableBits |= b;
+    }
+    return (sortableBits << getPrefixCodedIntShift(val)) ^ 0x80000000;
+  }
+
+  /**
+   * Splits a long range recursively.
+   * You may implement a builder that adds clauses to a
+   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
+   * {@link LongRangeBuilder#addRange(BytesRef,BytesRef)}
+   * method.
+   * <p>This method is used by {@link org.apache.solr.legacy.LegacyNumericRangeQuery}.
+   */
+  public static void splitLongRange(final LongRangeBuilder builder,
+    final int precisionStep,  final long minBound, final long maxBound
+  ) {
+    splitRange(builder, 64, precisionStep, minBound, maxBound);
+  }
+  
+  /**
+   * Splits an int range recursively.
+   * You may implement a builder that adds clauses to a
+   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
+   * {@link IntRangeBuilder#addRange(BytesRef,BytesRef)}
+   * method.
+   * <p>This method is used by {@link org.apache.solr.legacy.LegacyNumericRangeQuery}.
+   */
+  public static void splitIntRange(final IntRangeBuilder builder,
+    final int precisionStep,  final int minBound, final int maxBound
+  ) {
+    splitRange(builder, 32, precisionStep, minBound, maxBound);
+  }
+  
+  /** This helper does the splitting for both 32 and 64 bit. */
+  private static void splitRange(
+    final Object builder, final int valSize,
+    final int precisionStep, long minBound, long maxBound
+  ) {
+    if (precisionStep < 1)
+      throw new IllegalArgumentException("precisionStep must be >=1");
+    if (minBound > maxBound) return;
+    for (int shift=0; ; shift += precisionStep) {
+      // calculate new bounds for inner precision
+      final long diff = 1L << (shift+precisionStep),
+        mask = ((1L<<precisionStep) - 1L) << shift;
+      final boolean
+        hasLower = (minBound & mask) != 0L,
+        hasUpper = (maxBound & mask) != mask;
+      final long
+        nextMinBound = (hasLower ? (minBound + diff) : minBound) & ~mask,
+        nextMaxBound = (hasUpper ? (maxBound - diff) : maxBound) & ~mask;
+      final boolean
+        lowerWrapped = nextMinBound < minBound,
+        upperWrapped = nextMaxBound > maxBound;
+      
+      if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) {
+        // We are in the lowest precision or the next precision is not available.
+        addRange(builder, valSize, minBound, maxBound, shift);
+        // exit the split recursion loop
+        break;
+      }
+      
+      if (hasLower)
+        addRange(builder, valSize, minBound, minBound | mask, shift);
+      if (hasUpper)
+        addRange(builder, valSize, maxBound & ~mask, maxBound, shift);
+      
+      // recurse to next precision
+      minBound = nextMinBound;
+      maxBound = nextMaxBound;
+    }
+  }
+  
+  /** Helper that delegates to correct range builder */
+  private static void addRange(
+    final Object builder, final int valSize,
+    long minBound, long maxBound,
+    final int shift
+  ) {
+    // for the max bound set all lower bits (that were shifted away):
+    // this is important for testing or other usages of the split range
+    // (e.g. to reconstruct the full range). The prefixEncoding will remove
+    // the bits anyway, so they do not hurt!
+    maxBound |= (1L << shift) - 1L;
+    // delegate to correct range builder
+    switch(valSize) {
+      case 64:
+        ((LongRangeBuilder)builder).addRange(minBound, maxBound, shift);
+        break;
+      case 32:
+        ((IntRangeBuilder)builder).addRange((int)minBound, (int)maxBound, shift);
+        break;
+      default:
+        // Should not happen!
+        throw new IllegalArgumentException("valSize must be 32 or 64.");
+    }
+  }
+
+  /**
+   * Callback for {@link #splitLongRange}.
+   * You need to override only one of the methods.
+   * @lucene.internal
+   * @since 2.9, API changed non backwards-compliant in 4.0
+   */
+  public static abstract class LongRangeBuilder {
+    
+    /**
+     * Override this method if you want to receive the already prefix-encoded range bounds.
+     * You can directly build classical (inclusive) range queries from them.
+     */
+    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
+      throw new UnsupportedOperationException();
+    }
+    
+    /**
+     * Override this method if you want to receive the raw long range bounds.
+     * You can use this, for example, for debugging purposes (printing out range bounds).
+     */
+    public void addRange(final long min, final long max, final int shift) {
+      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
+      longToPrefixCoded(min, shift, minBytes);
+      longToPrefixCoded(max, shift, maxBytes);
+      addRange(minBytes.get(), maxBytes.get());
+    }
+  
+  }
+  
+  /**
+   * Callback for {@link #splitIntRange}.
+   * You need to override only one of the methods.
+   * @lucene.internal
+   * @since 2.9, API changed non backwards-compliant in 4.0
+   */
+  public static abstract class IntRangeBuilder {
+    
+    /**
+     * Override this method if you want to receive the already prefix-encoded range bounds.
+     * You can directly build classical (inclusive) range queries from them.
+     */
+    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
+      throw new UnsupportedOperationException();
+    }
+    
+    /**
+     * Override this method if you want to receive the raw int range bounds.
+     * You can use this, for example, for debugging purposes (printing out range bounds).
+     */
+    public void addRange(final int min, final int max, final int shift) {
+      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
+      intToPrefixCoded(min, shift, minBytes);
+      intToPrefixCoded(max, shift, maxBytes);
+      addRange(minBytes.get(), maxBytes.get());
+    }
+  
+  }
+  
+  /**
+   * Filters the given {@link TermsEnum} by accepting only prefix coded 64 bit
+   * terms with a shift value of <tt>0</tt>.
+   * 
+   * @param termsEnum
+   *          the terms enum to filter
+   * @return a filtered {@link TermsEnum} that only returns prefix coded 64 bit
+   *         terms with a shift value of <tt>0</tt>.
+   */
+  public static TermsEnum filterPrefixCodedLongs(TermsEnum termsEnum) {
+    return new SeekingNumericFilteredTermsEnum(termsEnum) {
+
+      @Override
+      protected AcceptStatus accept(BytesRef term) {
+        return LegacyNumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
+      }
+    };
+  }
+
+  /**
+   * Filters the given {@link TermsEnum} by accepting only prefix coded 32 bit
+   * terms with a shift value of <tt>0</tt>.
+   * 
+   * @param termsEnum
+   *          the terms enum to filter
+   * @return a filtered {@link TermsEnum} that only returns prefix coded 32 bit
+   *         terms with a shift value of <tt>0</tt>.
+   */
+  public static TermsEnum filterPrefixCodedInts(TermsEnum termsEnum) {
+    return new SeekingNumericFilteredTermsEnum(termsEnum) {
+      
+      @Override
+      protected AcceptStatus accept(BytesRef term) {
+        return LegacyNumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
+      }
+    };
+  }
+
+  /** Just like FilteredTermsEnum, except it adds a limited
+   *  seekCeil implementation that only works with {@link
+   *  #filterPrefixCodedInts} and {@link
+   *  #filterPrefixCodedLongs}. */
+  private static abstract class SeekingNumericFilteredTermsEnum extends FilteredTermsEnum {
+    public SeekingNumericFilteredTermsEnum(final TermsEnum tenum) {
+      super(tenum, false);
+    }
+
+    @Override
+    @SuppressWarnings("fallthrough")
+    public SeekStatus seekCeil(BytesRef term) throws IOException {
+
+      // NOTE: This is not general!!  It only handles YES
+      // and END, because that's all we need for the numeric
+      // case here
+
+      SeekStatus status = tenum.seekCeil(term);
+      if (status == SeekStatus.END) {
+        return SeekStatus.END;
+      }
+
+      actualTerm = tenum.term();
+
+      if (accept(actualTerm) == AcceptStatus.YES) {
+        return status;
+      } else {
+        return SeekStatus.END;
+      }
+    }
+  }
+
+  private static Terms intTerms(Terms terms) {
+    return new FilterLeafReader.FilterTerms(terms) {
+        @Override
+        public TermsEnum iterator() throws IOException {
+          return filterPrefixCodedInts(in.iterator());
+        }
+      };
+  }
+
+  private static Terms longTerms(Terms terms) {
+    return new FilterLeafReader.FilterTerms(terms) {
+        @Override
+        public TermsEnum iterator() throws IOException {
+          return filterPrefixCodedLongs(in.iterator());
+        }
+      };
+  }
+    
+  /**
+   * Returns the minimum int value indexed into this
+   * numeric field or null if no terms exist.
+   */
+  public static Integer getMinInt(Terms terms) throws IOException {
+    // All shift=0 terms are sorted first, so we don't need
+    // to filter the incoming terms; we can just get the
+    // min:
+    BytesRef min = terms.getMin();
+    return (min != null) ? LegacyNumericUtils.prefixCodedToInt(min) : null;
+  }
+
+  /**
+   * Returns the maximum int value indexed into this
+   * numeric field or null if no terms exist.
+   */
+  public static Integer getMaxInt(Terms terms) throws IOException {
+    BytesRef max = intTerms(terms).getMax();
+    return (max != null) ? LegacyNumericUtils.prefixCodedToInt(max) : null;
+  }
+
+  /**
+   * Returns the minimum long value indexed into this
+   * numeric field or null if no terms exist.
+   */
+  public static Long getMinLong(Terms terms) throws IOException {
+    // All shift=0 terms are sorted first, so we don't need
+    // to filter the incoming terms; we can just get the
+    // min:
+    BytesRef min = terms.getMin();
+    return (min != null) ? LegacyNumericUtils.prefixCodedToLong(min) : null;
+  }
+
+  /**
+   * Returns the maximum long value indexed into this
+   * numeric field or null if no terms exist.
+   */
+  public static Long getMaxLong(Terms terms) throws IOException {
+    BytesRef max = longTerms(terms).getMax();
+    return (max != null) ? LegacyNumericUtils.prefixCodedToLong(max) : null;
+  }
+  
+}
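
As a quick illustration of the prefix coding and the range split described above, the sketch below round-trips a long through the encoding and prints the sub-ranges a query range would enumerate. The precisionStep of 16 and the bounds are illustrative assumptions:

    import org.apache.lucene.util.BytesRefBuilder;
    import org.apache.solr.legacy.LegacyNumericUtils;

    public class SplitRangeExample {
      public static void main(String[] args) {
        // Round-trip: prefix-encode a long at full precision (shift 0), then decode it.
        BytesRefBuilder bytes = new BytesRefBuilder();
        LegacyNumericUtils.longToPrefixCoded(423L, 0, bytes);
        System.out.println(LegacyNumericUtils.prefixCodedToLong(bytes.get())); // prints 423

        // Print the trie sub-ranges that a query on [10, 20000] decomposes into.
        LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
          @Override
          public void addRange(long min, long max, int shift) {
            System.out.println("shift=" + shift + " [" + min + " .. " + max + "]");
          }
        }, 16, 10L, 20000L);
      }
    }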

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java b/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java
new file mode 100644
index 0000000..3b29a61
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/PointVectorStrategy.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.legacy;
+
+import org.apache.lucene.document.DoubleDocValuesField;
+import org.apache.lucene.document.DoublePoint;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.solr.legacy.LegacyDoubleField;
+import org.apache.solr.legacy.LegacyFieldType;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyNumericType;
+import org.apache.lucene.queries.function.FunctionRangeQuery;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.spatial.SpatialStrategy;
+import org.apache.lucene.spatial.query.SpatialArgs;
+import org.apache.lucene.spatial.query.SpatialOperation;
+import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.shape.Circle;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Rectangle;
+import org.locationtech.spatial4j.shape.Shape;
+
+/**
+ * Simple {@link SpatialStrategy} which represents Points in two numeric fields.
+ * The Strategy's best feature is decent distance sort.
+ *
+ * <p>
+ * <b>Characteristics:</b>
+ * <br>
+ * <ul>
+ * <li>Only indexes points; just one per field value.</li>
+ * <li>Can query by a rectangle or circle.</li>
+ * <li>{@link
+ * org.apache.lucene.spatial.query.SpatialOperation#Intersects} and {@link
+ * SpatialOperation#IsWithin} are supported.</li>
+ * <li>Requires DocValues for
+ * {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point)} and for
+ * searching with a Circle.</li>
+ * </ul>
+ *
+ * <p>
+ * <b>Implementation:</b>
+ * <p>
+ * This is a simple Strategy.  Search works with a pair of range queries on two {@link DoublePoint}s representing
+ * x &amp; y fields.  A Circle query does the same bbox query but adds a
+ * ValueSource filter on
+ * {@link #makeDistanceValueSource(org.locationtech.spatial4j.shape.Point)}.
+ * <p>
+ * One performance shortcoming with this strategy is that a scenario involving
+ * both a search using a Circle and sort will result in calculations for the
+ * spatial distance being done twice -- once for the filter and second for the
+ * sort.
+ *
+ * @lucene.experimental
+ */
+public class PointVectorStrategy extends SpatialStrategy {
+
+  // note: we use a FieldType to articulate the options we want on the field.  We don't use it as-is with a Field, we
+  //  create more than one Field.
+
+  /**
+   * pointValues, docValues, and nothing else.
+   */
+  public static FieldType DEFAULT_FIELDTYPE;
+
+  @Deprecated
+  public static LegacyFieldType LEGACY_FIELDTYPE;
+  static {
+    // Default: pointValues + docValues
+    FieldType type = new FieldType();
+    type.setDimensions(1, Double.BYTES);//pointValues (assume Double)
+    type.setDocValuesType(DocValuesType.NUMERIC);//docValues
+    type.setStored(false);
+    type.freeze();
+    DEFAULT_FIELDTYPE = type;
+    // Legacy default: legacyNumerics
+    LegacyFieldType legacyType = new LegacyFieldType();
+    legacyType.setIndexOptions(IndexOptions.DOCS);
+    legacyType.setNumericType(LegacyNumericType.DOUBLE);
+    legacyType.setNumericPrecisionStep(8);// same as solr default
+    legacyType.setDocValuesType(DocValuesType.NONE);//no docValues!
+    legacyType.setStored(false);
+    legacyType.freeze();
+    LEGACY_FIELDTYPE = legacyType;
+  }
+
+  public static final String SUFFIX_X = "__x";
+  public static final String SUFFIX_Y = "__y";
+
+  private final String fieldNameX;
+  private final String fieldNameY;
+
+  private final int fieldsLen;
+  private final boolean hasStored;
+  private final boolean hasDocVals;
+  private final boolean hasPointVals;
+  // equiv to "hasLegacyNumerics":
+  private final LegacyFieldType legacyNumericFieldType; // not stored; holds precision step.
+
+  /**
+   * Create a new {@link PointVectorStrategy} instance that uses {@link DoublePoint} and {@link DoublePoint#newRangeQuery}
+   */
+  public static PointVectorStrategy newInstance(SpatialContext ctx, String fieldNamePrefix) {
+    return new PointVectorStrategy(ctx, fieldNamePrefix, DEFAULT_FIELDTYPE);
+  }
+
+  /**
+   * Create a new {@link PointVectorStrategy} instance that uses {@link LegacyDoubleField} for backwards compatibility.
+   * However, back-compat is limited; we don't support circle queries or {@link #makeDistanceValueSource(Point, double)}
+   * since that requires docValues (the legacy config didn't have that).
+   *
+   * @deprecated LegacyNumerics will be removed
+   */
+  @Deprecated
+  public static PointVectorStrategy newLegacyInstance(SpatialContext ctx, String fieldNamePrefix) {
+    return new PointVectorStrategy(ctx, fieldNamePrefix, LEGACY_FIELDTYPE);
+  }
+
+  /**
+   * Create a new instance configured with the provided FieldType options. See {@link #DEFAULT_FIELDTYPE}.
+   * A field type is used to articulate the desired options (namely pointValues, docValues, stored). Legacy numerics
+   * can be configured this way too.
+   */
+  public PointVectorStrategy(SpatialContext ctx, String fieldNamePrefix, FieldType fieldType) {
+    super(ctx, fieldNamePrefix);
+    this.fieldNameX = fieldNamePrefix+SUFFIX_X;
+    this.fieldNameY = fieldNamePrefix+SUFFIX_Y;
+
+    int numPairs = 0;
+    if ((this.hasStored = fieldType.stored())) {
+      numPairs++;
+    }
+    if ((this.hasDocVals = fieldType.docValuesType() != DocValuesType.NONE)) {
+      numPairs++;
+    }
+    if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
+      numPairs++;
+    }
+    if (fieldType.indexOptions() != IndexOptions.NONE && fieldType instanceof LegacyFieldType && ((LegacyFieldType)fieldType).numericType() != null) {
+      if (hasPointVals) {
+        throw new IllegalArgumentException("pointValues and LegacyNumericType are mutually exclusive");
+      }
+      final LegacyFieldType legacyType = (LegacyFieldType) fieldType;
+      if (legacyType.numericType() != LegacyNumericType.DOUBLE) {
+        throw new IllegalArgumentException(getClass() + " does not support " + legacyType.numericType());
+      }
+      numPairs++;
+      legacyNumericFieldType = new LegacyFieldType(LegacyDoubleField.TYPE_NOT_STORED);
+      legacyNumericFieldType.setNumericPrecisionStep(legacyType.numericPrecisionStep());
+      legacyNumericFieldType.freeze();
+    } else {
+      legacyNumericFieldType = null;
+    }
+    this.fieldsLen = numPairs * 2;
+  }
+
+
+  String getFieldNameX() {
+    return fieldNameX;
+  }
+
+  String getFieldNameY() {
+    return fieldNameY;
+  }
+
+  @Override
+  public Field[] createIndexableFields(Shape shape) {
+    if (shape instanceof Point)
+      return createIndexableFields((Point) shape);
+    throw new UnsupportedOperationException("Can only index Point, not " + shape);
+  }
+
+  /** @see #createIndexableFields(org.locationtech.spatial4j.shape.Shape) */
+  public Field[] createIndexableFields(Point point) {
+    Field[] fields = new Field[fieldsLen];
+    int idx = -1;
+    if (hasStored) {
+      fields[++idx] = new StoredField(fieldNameX, point.getX());
+      fields[++idx] = new StoredField(fieldNameY, point.getY());
+    }
+    if (hasDocVals) {
+      fields[++idx] = new DoubleDocValuesField(fieldNameX, point.getX());
+      fields[++idx] = new DoubleDocValuesField(fieldNameY, point.getY());
+    }
+    if (hasPointVals) {
+      fields[++idx] = new DoublePoint(fieldNameX, point.getX());
+      fields[++idx] = new DoublePoint(fieldNameY, point.getY());
+    }
+    if (legacyNumericFieldType != null) {
+      fields[++idx] = new LegacyDoubleField(fieldNameX, point.getX(), legacyNumericFieldType);
+      fields[++idx] = new LegacyDoubleField(fieldNameY, point.getY(), legacyNumericFieldType);
+    }
+    assert idx == fields.length - 1;
+    return fields;
+  }
+
+  @Override
+  public ValueSource makeDistanceValueSource(Point queryPoint, double multiplier) {
+    return new DistanceValueSource(this, queryPoint, multiplier);
+  }
+
+  @Override
+  public ConstantScoreQuery makeQuery(SpatialArgs args) {
+    if(! SpatialOperation.is( args.getOperation(),
+        SpatialOperation.Intersects,
+        SpatialOperation.IsWithin ))
+      throw new UnsupportedSpatialOperation(args.getOperation());
+    Shape shape = args.getShape();
+    if (shape instanceof Rectangle) {
+      Rectangle bbox = (Rectangle) shape;
+      return new ConstantScoreQuery(makeWithin(bbox));
+    } else if (shape instanceof Circle) {
+      Circle circle = (Circle)shape;
+      Rectangle bbox = circle.getBoundingBox();
+      Query approxQuery = makeWithin(bbox);
+      BooleanQuery.Builder bqBuilder = new BooleanQuery.Builder();
+      FunctionRangeQuery vsRangeQuery =
+          new FunctionRangeQuery(makeDistanceValueSource(circle.getCenter()), 0.0, circle.getRadius(), true, true);
+      bqBuilder.add(approxQuery, BooleanClause.Occur.FILTER);//should have lowest "cost" value; will drive iteration
+      bqBuilder.add(vsRangeQuery, BooleanClause.Occur.FILTER);
+      return new ConstantScoreQuery(bqBuilder.build());
+    } else {
+      throw new UnsupportedOperationException("Only Rectangles and Circles are currently supported, " +
+          "found [" + shape.getClass() + "]");//TODO
+    }
+  }
+
+  /**
+   * Constructs a query to retrieve documents whose indexed point lies within the input envelope.
+   */
+  private Query makeWithin(Rectangle bbox) {
+    BooleanQuery.Builder bq = new BooleanQuery.Builder();
+    BooleanClause.Occur MUST = BooleanClause.Occur.MUST;
+    if (bbox.getCrossesDateLine()) {
+      //use null as performance trick since no data will be beyond the world bounds
+      bq.add(rangeQuery(fieldNameX, null/*-180*/, bbox.getMaxX()), BooleanClause.Occur.SHOULD );
+      bq.add(rangeQuery(fieldNameX, bbox.getMinX(), null/*+180*/), BooleanClause.Occur.SHOULD );
+      bq.setMinimumNumberShouldMatch(1);//must match at least one of the SHOULD
+    } else {
+      bq.add(rangeQuery(fieldNameX, bbox.getMinX(), bbox.getMaxX()), MUST);
+    }
+    bq.add(rangeQuery(fieldNameY, bbox.getMinY(), bbox.getMaxY()), MUST);
+    return bq.build();
+  }
+
+  /**
+   * Returns a numeric range query based on the FieldType:
+   * {@link LegacyNumericRangeQuery} is used for indexes created using a {@code LegacyFieldType};
+   * {@link DoublePoint#newRangeQuery} is used for indexes created using {@link DoublePoint} fields.
+   */
+  private Query rangeQuery(String fieldName, Double min, Double max) {
+    if (hasPointVals) {
+      if (min == null) {
+        min = Double.NEGATIVE_INFINITY;
+      }
+
+      if (max == null) {
+        max = Double.POSITIVE_INFINITY;
+      }
+
+      return DoublePoint.newRangeQuery(fieldName, min, max);
+
+    } else if (legacyNumericFieldType != null) {// todo remove legacy numeric support in 7.0
+      return LegacyNumericRangeQuery.newDoubleRange(fieldName, legacyNumericFieldType.numericPrecisionStep(), min, max, true, true);//inclusive
+    }
+    //TODO try doc-value range query?
+    throw new UnsupportedOperationException("An index is required for this operation.");
+  }
+}
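
To see the strategy end to end, here is a small sketch; the field prefix "loc" and the coordinates are illustrative assumptions, and spatial4j's long-standing makePoint/makeRectangle factory methods are used for brevity:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.spatial.query.SpatialArgs;
    import org.apache.lucene.spatial.query.SpatialOperation;
    import org.apache.solr.legacy.PointVectorStrategy;
    import org.locationtech.spatial4j.context.SpatialContext;
    import org.locationtech.spatial4j.shape.Rectangle;

    public class PointVectorExample {
      public static void main(String[] args) {
        SpatialContext ctx = SpatialContext.GEO;
        PointVectorStrategy strategy = PointVectorStrategy.newInstance(ctx, "loc");

        // Indexing a point produces loc__x / loc__y fields per SUFFIX_X / SUFFIX_Y.
        Document doc = new Document();
        for (Field f : strategy.createIndexableFields(ctx.makePoint(-73.98, 40.75))) {
          doc.add(f);
        }

        // Build an Intersects query against a bounding box around the point.
        Rectangle bbox = ctx.makeRectangle(-74.1, -73.9, 40.6, 40.9);
        Query q = strategy.makeQuery(new SpatialArgs(SpatialOperation.Intersects, bbox));
        System.out.println(q);
      }
    }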

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/doc-files/nrq-formula-1.png
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/doc-files/nrq-formula-1.png b/solr/core/src/java/org/apache/solr/legacy/doc-files/nrq-formula-1.png
new file mode 100644
index 0000000..fd7d936
Binary files /dev/null and b/solr/core/src/java/org/apache/solr/legacy/doc-files/nrq-formula-1.png differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/doc-files/nrq-formula-2.png
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/doc-files/nrq-formula-2.png b/solr/core/src/java/org/apache/solr/legacy/doc-files/nrq-formula-2.png
new file mode 100644
index 0000000..93cb308
Binary files /dev/null and b/solr/core/src/java/org/apache/solr/legacy/doc-files/nrq-formula-2.png differ

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/legacy/package-info.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/legacy/package-info.java b/solr/core/src/java/org/apache/solr/legacy/package-info.java
new file mode 100644
index 0000000..df981d0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/legacy/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+/** 
+ * Deprecated numeric encoding, field, and query support kept for index backwards compatibility.
+ */
+package org.apache.solr.legacy;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/schema/BBoxField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/BBoxField.java b/solr/core/src/java/org/apache/solr/schema/BBoxField.java
index d69255b..4d773c9 100644
--- a/solr/core/src/java/org/apache/solr/schema/BBoxField.java
+++ b/solr/core/src/java/org/apache/solr/schema/BBoxField.java
@@ -23,10 +23,10 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.solr.legacy.LegacyFieldType;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.spatial.bbox.BBoxOverlapRatioValueSource;
-import org.apache.lucene.spatial.bbox.BBoxStrategy;
+import org.apache.solr.legacy.BBoxStrategy;
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.util.ShapeAreaValueSource;
 import org.apache.solr.common.SolrException;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/schema/EnumField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/EnumField.java b/solr/core/src/java/org/apache/solr/schema/EnumField.java
index 3127262..f023805 100644
--- a/solr/core/src/java/org/apache/solr/schema/EnumField.java
+++ b/solr/core/src/java/org/apache/solr/schema/EnumField.java
@@ -35,11 +35,11 @@ import javax.xml.xpath.XPathFactory;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.legacy.LegacyFieldType;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
-import org.apache.lucene.legacy.LegacyNumericType;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyFieldType;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyNumericType;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.EnumFieldSource;
 import org.apache.lucene.search.ConstantScoreQuery;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java b/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
index ef05f18..64e42ef 100644
--- a/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
@@ -20,8 +20,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.lucene.legacy.LegacyFieldType;
-import org.apache.lucene.spatial.vector.PointVectorStrategy;
+import org.apache.solr.legacy.LegacyFieldType;
+import org.apache.solr.legacy.PointVectorStrategy;
 
 /**
  * @see PointVectorStrategy

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
index b610e6e..e9e7779 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
@@ -23,7 +23,7 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/schema/TrieField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieField.java b/solr/core/src/java/org/apache/solr/schema/TrieField.java
index e7a33bd..f90877c 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieField.java
@@ -30,14 +30,14 @@ import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.legacy.LegacyDoubleField;
-import org.apache.lucene.legacy.LegacyFieldType;
-import org.apache.lucene.legacy.LegacyFloatField;
-import org.apache.lucene.legacy.LegacyIntField;
-import org.apache.lucene.legacy.LegacyLongField;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
-import org.apache.lucene.legacy.LegacyNumericType;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyDoubleField;
+import org.apache.solr.legacy.LegacyFieldType;
+import org.apache.solr.legacy.LegacyFloatField;
+import org.apache.solr.legacy.LegacyIntField;
+import org.apache.solr.legacy.LegacyLongField;
+import org.apache.solr.legacy.LegacyNumericRangeQuery;
+import org.apache.solr.legacy.LegacyNumericType;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
@@ -63,9 +63,9 @@ import org.slf4j.LoggerFactory;
 
 /**
 * Provides field types to support Lucene's {@link
- * org.apache.lucene.legacy.LegacyIntField}, {@link org.apache.lucene.legacy.LegacyLongField}, {@link org.apache.lucene.legacy.LegacyFloatField} and
- * {@link org.apache.lucene.legacy.LegacyDoubleField}.
- * See {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} for more details.
+ * org.apache.solr.legacy.LegacyIntField}, {@link org.apache.solr.legacy.LegacyLongField}, {@link org.apache.solr.legacy.LegacyFloatField} and
+ * {@link org.apache.solr.legacy.LegacyDoubleField}.
+ * See {@link org.apache.solr.legacy.LegacyNumericRangeQuery} for more details.
  * It supports integer, float, long, double and date types.
  * <p>
  * For each number being added to this field, multiple terms are generated as per the algorithm described in the above
@@ -78,7 +78,7 @@ import org.slf4j.LoggerFactory;
  * generated, range search will be no faster than any other number field, but sorting will still be possible.
  *
  *
- * @see org.apache.lucene.legacy.LegacyNumericRangeQuery
+ * @see org.apache.solr.legacy.LegacyNumericRangeQuery
  * @since solr 1.4
  */
 public class TrieField extends NumericFieldType {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
index b069810..57efa75 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
@@ -23,7 +23,7 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieIntField.java b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
index 6d4d7cd..1a9f486 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
@@ -23,7 +23,7 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;


[02/25] lucene-solr:jira/solr-8668: SOLR-10753: Add array Stream Evaluator

Posted by cp...@apache.org.
SOLR-10753: Add array Stream Evaluator


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/81821b29
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/81821b29
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/81821b29

Branch: refs/heads/jira/solr-8668
Commit: 81821b29ae4c507f02d09b29fcd0196db8647865
Parents: e7099e4
Author: Joel Bernstein <jb...@apache.org>
Authored: Fri May 26 10:23:21 2017 -0400
Committer: Joel Bernstein <jb...@apache.org>
Committed: Fri May 26 10:23:46 2017 -0400

----------------------------------------------------------------------
 .../org/apache/solr/handler/StreamHandler.java  |  1 +
 .../client/solrj/io/eval/ArrayEvaluator.java    | 63 ++++++++++++++++++++
 .../solrj/io/stream/expr/StreamFactory.java     |  2 +-
 .../solrj/io/stream/StreamExpressionTest.java   | 40 +++++++++++++
 4 files changed, 105 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/81821b29/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index a35f0ec..dc097be 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -190,6 +190,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
       .withFunctionName("describe", DescribeEvaluator.class)
       .withFunctionName("finddelay", FindDelayEvaluator.class)
       .withFunctionName("sequence", SequenceEvaluator.class)
+      .withFunctionName("array", ArrayEvaluator.class)
 
       // metrics
          .withFunctionName("min", MinMetric.class)

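For context, the registration above is what makes the new function name resolvable by the expression parser. A minimal SolrJ sketch of invoking it through the /stream handler follows; it is not part of the patch, the base URL and collection name are placeholders, and the result field name "out" matches what the test further down reads.

import java.util.List;

import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.SolrStream;
import org.apache.solr.client.solrj.io.stream.StreamContext;
import org.apache.solr.common.params.ModifiableSolrParams;

public class ArrayEvaluatorUsageSketch {
  public static void main(String[] args) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("expr", "array(1, 2, 3)");  // the newly registered evaluator
    params.set("qt", "/stream");

    // placeholder URL and collection; point this at a real SolrCloud node
    SolrStream stream = new SolrStream("http://localhost:8983/solr/collection1", params);
    stream.setStreamContext(new StreamContext());
    stream.open();
    try {
      Tuple tuple = stream.read();                // single result tuple
      List<?> out = (List<?>) tuple.get("out");   // [1, 2, 3]
      System.out.println(out);
    } finally {
      stream.close();
    }
  }
}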
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/81821b29/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArrayEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArrayEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArrayEvaluator.java
new file mode 100644
index 0000000..31d89a2
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArrayEvaluator.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
+import org.apache.solr.client.solrj.io.stream.expr.Expressible;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class ArrayEvaluator extends ComplexEvaluator implements Expressible {
+
+  private static final long serialVersionUID = 1;
+
+  public ArrayEvaluator(StreamExpression expression, StreamFactory factory) throws IOException {
+    super(expression, factory);
+  }
+
+  public List<Number> evaluate(Tuple tuple) throws IOException {
+    List<Number> list = new ArrayList<>();
+    for(StreamEvaluator subEvaluator : subEvaluators) {
+      Number num = (Number)subEvaluator.evaluate(tuple);
+      list.add(num);
+    }
+
+    return list;
+  }
+
+  @Override
+  public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
+    StreamExpression expression = new StreamExpression(factory.getFunctionName(getClass()));
+    return expression;
+  }
+
+  @Override
+  public Explanation toExplanation(StreamFactory factory) throws IOException {
+    return new Explanation(nodeId.toString())
+        .withExpressionType(ExpressionType.EVALUATOR)
+        .withFunctionName(factory.getFunctionName(getClass()))
+        .withImplementingClass(getClass().getName())
+        .withExpression(toExpression(factory).toString());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/81821b29/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
index 703acf4..74e1de8 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/expr/StreamFactory.java
@@ -433,7 +433,7 @@ public class StreamFactory implements Serializable {
     if("null".equals(lower)){ return null; }
     if("true".equals(lower) || "false".equals(lower)){ return Boolean.parseBoolean(lower); }
     try{ return Long.valueOf(original); } catch(Exception ignored){};
-    try{ if (original.matches(".{1,8}")){ return Float.valueOf(original); }} catch(Exception ignored){};
+    try{ if (original.matches(".{1,8}")){ return Double.valueOf(original); }} catch(Exception ignored){};
     try{ if (original.matches(".{1,17}")){ return Double.valueOf(original); }} catch(Exception ignored){};
     
     // is a string

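The one-line change above is subtle: short numeric literals (up to 8 characters) used to come back as Float while longer ones came back as Double, so the runtime type of a literal depended on its length. Parsing everything as Double (the ".{1,8}" branch is now subsumed by the ".{1,17}" branch) avoids float-precision surprises. A self-contained illustration, not from the patch:

public class FloatVsDoubleLiteral {
  public static void main(String[] args) {
    // a float parse widened back to double keeps the float's rounding error
    double viaFloat  = Float.valueOf("500.23");   // 500.23001098632812...
    double viaDouble = Double.valueOf("500.23");  // 500.23
    System.out.println(viaFloat == 500.23D);      // false
    System.out.println(viaDouble == 500.23D);     // true
  }
}

This is exactly the comparison the array test below relies on when it asserts out.get(5).doubleValue() == 500.23D.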
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/81821b29/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index a2e8ca3..cbd57b8 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -5734,6 +5734,46 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     assertTrue(ranked.get(3).doubleValue() == 3D);
   }
 
+
+  @Test
+  public void testArray() throws Exception {
+    String cexpr = "array(1, 2, 3, 300, 2, 500)";
+    ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+    TupleStream solrStream = new SolrStream(url, paramsLoc);
+    StreamContext context = new StreamContext();
+    solrStream.setStreamContext(context);
+    List<Tuple> tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    List<Number> out = (List<Number>)tuples.get(0).get("out");
+    assertTrue(out.size() == 6);
+    assertTrue(out.get(0).intValue() == 1);
+    assertTrue(out.get(1).intValue() == 2);
+    assertTrue(out.get(2).intValue() == 3);
+    assertTrue(out.get(3).intValue() == 300);
+    assertTrue(out.get(4).intValue() == 2);
+    assertTrue(out.get(5).intValue() == 500);
+
+    cexpr = "array(1.122, 2.222, 3.333, 300.1, 2.13, 500.23)";
+    paramsLoc = new ModifiableSolrParams();
+    paramsLoc.set("expr", cexpr);
+    paramsLoc.set("qt", "/stream");
+    solrStream = new SolrStream(url, paramsLoc);
+    solrStream.setStreamContext(context);
+    tuples = getTuples(solrStream);
+    assertTrue(tuples.size() == 1);
+    out = (List<Number>)tuples.get(0).get("out");
+    assertTrue(out.size() == 6);
+    assertTrue(out.get(0).doubleValue() == 1.122D);
+    assertTrue(out.get(1).doubleValue() == 2.222D);
+    assertTrue(out.get(2).doubleValue() == 3.333D);
+    assertTrue(out.get(3).doubleValue() == 300.1D);
+    assertTrue(out.get(4).doubleValue() == 2.13D);
+    assertTrue(out.get(5).doubleValue() == 500.23D);
+  }
+
   @Test
   public void testScale() throws Exception {
     UpdateRequest updateRequest = new UpdateRequest();


[22/25] lucene-solr:jira/solr-8668: LUCENE-7850: Move support for legacy numerics to solr/.

Posted by cp...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery32.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery32.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery32.java
deleted file mode 100644
index acd0c04..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery32.java
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MultiTermQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryUtils;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.TestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestNumericRangeQuery32 extends LuceneTestCase {
-  // distance of entries
-  private static int distance;
-  // shift the starting of the values to the left, to also have negative values:
-  private static final int startOffset = - 1 << 15;
-  // number of docs to generate for testing
-  private static int noDocs;
-  
-  private static Directory directory = null;
-  private static IndexReader reader = null;
-  private static IndexSearcher searcher = null;
-  
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    noDocs = atLeast(4096);
-    distance = (1 << 30) / noDocs;
-    directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
-        .setMergePolicy(newLogMergePolicy()));
-    
-    final LegacyFieldType storedInt = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
-    storedInt.setStored(true);
-    storedInt.freeze();
-
-    final LegacyFieldType storedInt8 = new LegacyFieldType(storedInt);
-    storedInt8.setNumericPrecisionStep(8);
-
-    final LegacyFieldType storedInt4 = new LegacyFieldType(storedInt);
-    storedInt4.setNumericPrecisionStep(4);
-
-    final LegacyFieldType storedInt2 = new LegacyFieldType(storedInt);
-    storedInt2.setNumericPrecisionStep(2);
-
-    final LegacyFieldType storedIntNone = new LegacyFieldType(storedInt);
-    storedIntNone.setNumericPrecisionStep(Integer.MAX_VALUE);
-
-    final LegacyFieldType unstoredInt = LegacyIntField.TYPE_NOT_STORED;
-
-    final LegacyFieldType unstoredInt8 = new LegacyFieldType(unstoredInt);
-    unstoredInt8.setNumericPrecisionStep(8);
-
-    final LegacyFieldType unstoredInt4 = new LegacyFieldType(unstoredInt);
-    unstoredInt4.setNumericPrecisionStep(4);
-
-    final LegacyFieldType unstoredInt2 = new LegacyFieldType(unstoredInt);
-    unstoredInt2.setNumericPrecisionStep(2);
-
-    LegacyIntField
-      field8 = new LegacyIntField("field8", 0, storedInt8),
-      field4 = new LegacyIntField("field4", 0, storedInt4),
-      field2 = new LegacyIntField("field2", 0, storedInt2),
-      fieldNoTrie = new LegacyIntField("field"+Integer.MAX_VALUE, 0, storedIntNone),
-      ascfield8 = new LegacyIntField("ascfield8", 0, unstoredInt8),
-      ascfield4 = new LegacyIntField("ascfield4", 0, unstoredInt4),
-      ascfield2 = new LegacyIntField("ascfield2", 0, unstoredInt2);
-    
-    Document doc = new Document();
-    // add fields, that have a distance to test general functionality
-    doc.add(field8); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
-    // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
-    doc.add(ascfield8); doc.add(ascfield4); doc.add(ascfield2);
-    
-    // Add a series of noDocs docs with increasing int values
-    for (int l=0; l<noDocs; l++) {
-      int val=distance*l+startOffset;
-      field8.setIntValue(val);
-      field4.setIntValue(val);
-      field2.setIntValue(val);
-      fieldNoTrie.setIntValue(val);
-
-      val=l-(noDocs/2);
-      ascfield8.setIntValue(val);
-      ascfield4.setIntValue(val);
-      ascfield2.setIntValue(val);
-      writer.addDocument(doc);
-    }
-  
-    reader = writer.getReader();
-    searcher=newSearcher(reader);
-    writer.close();
-  }
-  
-  @AfterClass
-  public static void afterClass() throws Exception {
-    searcher = null;
-    reader.close();
-    reader = null;
-    directory.close();
-    directory = null;
-  }
-  
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // set the theoretical maximum term count for 8bit (see docs for the number)
-    // super.tearDown will restore the default
-    BooleanQuery.setMaxClauseCount(3*255*2 + 255);
-  }
-  
-  /** test for both constant score and boolean query, the other tests only use the constant score mode */
-  private void testRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-    for (byte i=0; i<2; i++) {
-      TopDocs topDocs;
-      String type;
-      switch (i) {
-        case 0:
-          type = " (constant score filter rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
-          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-          break;
-        case 1:
-          type = " (constant score boolean rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
-          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-          break;
-        default:
-          return;
-      }
-      ScoreDoc[] sd = topDocs.scoreDocs;
-      assertNotNull(sd);
-      assertEquals("Score doc count"+type, count, sd.length );
-      Document doc=searcher.doc(sd[0].doc);
-      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().intValue());
-      doc=searcher.doc(sd[sd.length-1].doc);
-      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().intValue());
-    }
-  }
-
-  @Test
-  public void testRange_8bit() throws Exception {
-    testRange(8);
-  }
-  
-  @Test
-  public void testRange_4bit() throws Exception {
-    testRange(4);
-  }
-  
-  @Test
-  public void testRange_2bit() throws Exception {
-    testRange(2);
-  }
-  
-  @Test
-  public void testOneMatchQuery() throws Exception {
-    LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", 1, sd.length );
-  }
-  
-  private void testLeftOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int upper=(count-1)*distance + (distance/3) + startOffset;
-    LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    Document doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-    
-    q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
-    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-  }
-  
-  @Test
-  public void testLeftOpenRange_8bit() throws Exception {
-    testLeftOpenRange(8);
-  }
-  
-  @Test
-  public void testLeftOpenRange_4bit() throws Exception {
-    testLeftOpenRange(4);
-  }
-  
-  @Test
-  public void testLeftOpenRange_2bit() throws Exception {
-    testLeftOpenRange(2);
-  }
-  
-  private void testRightOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int lower=(count-1)*distance + (distance/3) +startOffset;
-    LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    Document doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-
-    q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
-    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue() );
-  }
-  
-  @Test
-  public void testRightOpenRange_8bit() throws Exception {
-    testRightOpenRange(8);
-  }
-  
-  @Test
-  public void testRightOpenRange_4bit() throws Exception {
-    testRightOpenRange(4);
-  }
-  
-  @Test
-  public void testRightOpenRange_2bit() throws Exception {
-    testRightOpenRange(2);
-  }
-  
-  @Test
-  public void testInfiniteValues() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-      newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new LegacyFloatField("float", Float.NEGATIVE_INFINITY, Field.Store.NO));
-    doc.add(new LegacyIntField("int", Integer.MIN_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new LegacyFloatField("float", Float.POSITIVE_INFINITY, Field.Store.NO));
-    doc.add(new LegacyIntField("int", Integer.MAX_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new LegacyFloatField("float", 0.0f, Field.Store.NO));
-    doc.add(new LegacyIntField("int", 0, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    for (float f : TestLegacyNumericUtils.FLOAT_NANs) {
-      doc = new Document();
-      doc.add(new LegacyFloatField("float", f, Field.Store.NO));
-      writer.addDocument(doc);
-    }
-    
-    writer.close();
-    
-    IndexReader r = DirectoryReader.open(dir);
-    IndexSearcher s = newSearcher(r);
-    
-    Query q= LegacyNumericRangeQuery.newIntRange("int", null, null, true, true);
-    TopDocs topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q= LegacyNumericRangeQuery.newIntRange("int", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", null, null, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NaN, Float.NaN, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", TestLegacyNumericUtils.FLOAT_NANs.length,  topDocs.scoreDocs.length );
-
-    r.close();
-    dir.close();
-  }
-  
-  private void testRangeSplit(int precisionStep) throws Exception {
-    String field="ascfield"+precisionStep;
-    // 10 random tests
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int  i =0;  i< num; i++) {
-      int lower=(int)(random().nextDouble()*noDocs - noDocs/2);
-      int upper=(int)(random().nextDouble()*noDocs - noDocs/2);
-      if (lower>upper) {
-        int a=lower; lower=upper; upper=a;
-      }
-      // test inclusive range
-      Query tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-      // test exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
-      // test left exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-      // test right exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-    }
-  }
-
-  @Test
-  public void testRangeSplit_8bit() throws Exception {
-    testRangeSplit(8);
-  }
-  
-  @Test
-  public void testRangeSplit_4bit() throws Exception {
-    testRangeSplit(4);
-  }
-  
-  @Test
-  public void testRangeSplit_2bit() throws Exception {
-    testRangeSplit(2);
-  }
-  
-  /** we fake a float test using int2float conversion of LegacyNumericUtils */
-  private void testFloatRange(int precisionStep) throws Exception {
-    final String field="ascfield"+precisionStep;
-    final int lower=-1000, upper=+2000;
-    
-    Query tq= LegacyNumericRangeQuery.newFloatRange(field, precisionStep,
-        NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
-    TopDocs tTopDocs = searcher.search(tq, 1);
-    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-  }
-
-  @Test
-  public void testFloatRange_8bit() throws Exception {
-    testFloatRange(8);
-  }
-  
-  @Test
-  public void testFloatRange_4bit() throws Exception {
-    testFloatRange(4);
-  }
-  
-  @Test
-  public void testFloatRange_2bit() throws Exception {
-    testFloatRange(2);
-  }
-  
-  @Test
-  public void testEqualsAndHash() throws Exception {
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test2", 4, 10, 20, false, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test3", 4, 10, 20, true, false));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test4", 4, 10, 20, false, false));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test5", 4, 10, null, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test6", 4, null, 20, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test7", 4, null, null, true, true));
-    QueryUtils.checkEqual(
-      LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test9", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test9", 8, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test10a", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test10b", 4, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test11", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test11", 4, 20, 10, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, false, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test13", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
-    );
-    // the following produces a hash collision, because Long and Integer have the same hashcode, so only test equality:
-    Query q1 = LegacyNumericRangeQuery.newIntRange("test14", 4, 10, 20, true, true);
-    Query q2 = LegacyNumericRangeQuery.newLongRange("test14", 4, 10L, 20L, true, true);
-    assertFalse(q1.equals(q2));
-    assertFalse(q2.equals(q1));
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery64.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery64.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery64.java
deleted file mode 100644
index b3ce55a..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery64.java
+++ /dev/null
@@ -1,490 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MultiTermQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.QueryUtils;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.TestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestNumericRangeQuery64 extends LuceneTestCase {
-  // distance of entries
-  private static long distance;
-  // shift the starting of the values to the left, to also have negative values:
-  private static final long startOffset = - 1L << 31;
-  // number of docs to generate for testing
-  private static int noDocs;
-  
-  private static Directory directory = null;
-  private static IndexReader reader = null;
-  private static IndexSearcher searcher = null;
-  
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    noDocs = atLeast(4096);
-    distance = (1L << 60) / noDocs;
-    directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
-        .setMergePolicy(newLogMergePolicy()));
-
-    final LegacyFieldType storedLong = new LegacyFieldType(LegacyLongField.TYPE_NOT_STORED);
-    storedLong.setStored(true);
-    storedLong.freeze();
-
-    final LegacyFieldType storedLong8 = new LegacyFieldType(storedLong);
-    storedLong8.setNumericPrecisionStep(8);
-
-    final LegacyFieldType storedLong4 = new LegacyFieldType(storedLong);
-    storedLong4.setNumericPrecisionStep(4);
-
-    final LegacyFieldType storedLong6 = new LegacyFieldType(storedLong);
-    storedLong6.setNumericPrecisionStep(6);
-
-    final LegacyFieldType storedLong2 = new LegacyFieldType(storedLong);
-    storedLong2.setNumericPrecisionStep(2);
-
-    final LegacyFieldType storedLongNone = new LegacyFieldType(storedLong);
-    storedLongNone.setNumericPrecisionStep(Integer.MAX_VALUE);
-
-    final LegacyFieldType unstoredLong = LegacyLongField.TYPE_NOT_STORED;
-
-    final LegacyFieldType unstoredLong8 = new LegacyFieldType(unstoredLong);
-    unstoredLong8.setNumericPrecisionStep(8);
-
-    final LegacyFieldType unstoredLong6 = new LegacyFieldType(unstoredLong);
-    unstoredLong6.setNumericPrecisionStep(6);
-
-    final LegacyFieldType unstoredLong4 = new LegacyFieldType(unstoredLong);
-    unstoredLong4.setNumericPrecisionStep(4);
-
-    final LegacyFieldType unstoredLong2 = new LegacyFieldType(unstoredLong);
-    unstoredLong2.setNumericPrecisionStep(2);
-
-    LegacyLongField
-      field8 = new LegacyLongField("field8", 0L, storedLong8),
-      field6 = new LegacyLongField("field6", 0L, storedLong6),
-      field4 = new LegacyLongField("field4", 0L, storedLong4),
-      field2 = new LegacyLongField("field2", 0L, storedLong2),
-      fieldNoTrie = new LegacyLongField("field"+Integer.MAX_VALUE, 0L, storedLongNone),
-      ascfield8 = new LegacyLongField("ascfield8", 0L, unstoredLong8),
-      ascfield6 = new LegacyLongField("ascfield6", 0L, unstoredLong6),
-      ascfield4 = new LegacyLongField("ascfield4", 0L, unstoredLong4),
-      ascfield2 = new LegacyLongField("ascfield2", 0L, unstoredLong2);
-
-    Document doc = new Document();
-    // add fields, that have a distance to test general functionality
-    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
-    // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
-    doc.add(ascfield8); doc.add(ascfield6); doc.add(ascfield4); doc.add(ascfield2);
-    
-    // Add a series of noDocs docs with increasing long values, by updating the fields
-    for (int l=0; l<noDocs; l++) {
-      long val=distance*l+startOffset;
-      field8.setLongValue(val);
-      field6.setLongValue(val);
-      field4.setLongValue(val);
-      field2.setLongValue(val);
-      fieldNoTrie.setLongValue(val);
-
-      val=l-(noDocs/2);
-      ascfield8.setLongValue(val);
-      ascfield6.setLongValue(val);
-      ascfield4.setLongValue(val);
-      ascfield2.setLongValue(val);
-      writer.addDocument(doc);
-    }
-    reader = writer.getReader();
-    searcher=newSearcher(reader);
-    writer.close();
-  }
-  
-  @AfterClass
-  public static void afterClass() throws Exception {
-    searcher = null;
-    reader.close();
-    reader = null;
-    directory.close();
-    directory = null;
-  }
-  
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // set the theoretical maximum term count for 8bit (see docs for the number)
-    // super.tearDown will restore the default
-    BooleanQuery.setMaxClauseCount(7*255*2 + 255);
-  }
-  
-  /** test for constant score + boolean query + filter, the other tests only use the constant score mode */
-  private void testRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-    for (byte i=0; i<2; i++) {
-      TopDocs topDocs;
-      String type;
-      switch (i) {
-        case 0:
-          type = " (constant score filter rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
-          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-          break;
-        case 1:
-          type = " (constant score boolean rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
-          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-          break;
-        default:
-          return;
-      }
-      ScoreDoc[] sd = topDocs.scoreDocs;
-      assertNotNull(sd);
-      assertEquals("Score doc count"+type, count, sd.length );
-      Document doc=searcher.doc(sd[0].doc);
-      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().longValue() );
-      doc=searcher.doc(sd[sd.length-1].doc);
-      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    }
-  }
-
-  @Test
-  public void testRange_8bit() throws Exception {
-    testRange(8);
-  }
-  
-  @Test
-  public void testRange_6bit() throws Exception {
-    testRange(6);
-  }
-  
-  @Test
-  public void testRange_4bit() throws Exception {
-    testRange(4);
-  }
-  
-  @Test
-  public void testRange_2bit() throws Exception {
-    testRange(2);
-  }
-  
-  @Test
-  public void testOneMatchQuery() throws Exception {
-    LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", 1, sd.length );
-  }
-  
-  private void testLeftOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long upper=(count-1)*distance + (distance/3) + startOffset;
-    LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    Document doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-
-    q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
-    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-  }
-  
-  @Test
-  public void testLeftOpenRange_8bit() throws Exception {
-    testLeftOpenRange(8);
-  }
-  
-  @Test
-  public void testLeftOpenRange_6bit() throws Exception {
-    testLeftOpenRange(6);
-  }
-  
-  @Test
-  public void testLeftOpenRange_4bit() throws Exception {
-    testLeftOpenRange(4);
-  }
-  
-  @Test
-  public void testLeftOpenRange_2bit() throws Exception {
-    testLeftOpenRange(2);
-  }
-  
-  private void testRightOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long lower=(count-1)*distance + (distance/3) +startOffset;
-    LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    Document doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-
-    q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
-    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-  }
-  
-  @Test
-  public void testRightOpenRange_8bit() throws Exception {
-    testRightOpenRange(8);
-  }
-  
-  @Test
-  public void testRightOpenRange_6bit() throws Exception {
-    testRightOpenRange(6);
-  }
-  
-  @Test
-  public void testRightOpenRange_4bit() throws Exception {
-    testRightOpenRange(4);
-  }
-  
-  @Test
-  public void testRightOpenRange_2bit() throws Exception {
-    testRightOpenRange(2);
-  }
-  
-  @Test
-  public void testInfiniteValues() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-      newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new LegacyDoubleField("double", Double.NEGATIVE_INFINITY, Field.Store.NO));
-    doc.add(new LegacyLongField("long", Long.MIN_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new LegacyDoubleField("double", Double.POSITIVE_INFINITY, Field.Store.NO));
-    doc.add(new LegacyLongField("long", Long.MAX_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new LegacyDoubleField("double", 0.0, Field.Store.NO));
-    doc.add(new LegacyLongField("long", 0L, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    for (double d : TestLegacyNumericUtils.DOUBLE_NANs) {
-      doc = new Document();
-      doc.add(new LegacyDoubleField("double", d, Field.Store.NO));
-      writer.addDocument(doc);
-    }
-    
-    writer.close();
-    
-    IndexReader r = DirectoryReader.open(dir);
-    IndexSearcher s = newSearcher(r);
-    
-    Query q= LegacyNumericRangeQuery.newLongRange("long", null, null, true, true);
-    TopDocs topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q= LegacyNumericRangeQuery.newLongRange("long", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NaN, Double.NaN, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", TestLegacyNumericUtils.DOUBLE_NANs.length,  topDocs.scoreDocs.length );
-
-    r.close();
-    dir.close();
-  }
-  
-  private void testRangeSplit(int precisionStep) throws Exception {
-    String field="ascfield"+precisionStep;
-    // 10 random tests
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      long lower=(long)(random().nextDouble()*noDocs - noDocs/2);
-      long upper=(long)(random().nextDouble()*noDocs - noDocs/2);
-      if (lower>upper) {
-        long a=lower; lower=upper; upper=a;
-      }
-      // test inclusive range
-      Query tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-      // test exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
-      // test left exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-      // test right exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-    }
-  }
-
-  @Test
-  public void testRangeSplit_8bit() throws Exception {
-    testRangeSplit(8);
-  }
-  
-  @Test
-  public void testRangeSplit_6bit() throws Exception {
-    testRangeSplit(6);
-  }
-  
-  @Test
-  public void testRangeSplit_4bit() throws Exception {
-    testRangeSplit(4);
-  }
-  
-  @Test
-  public void testRangeSplit_2bit() throws Exception {
-    testRangeSplit(2);
-  }
-  
-  /** we fake a double test using long2double conversion of LegacyNumericUtils */
-  private void testDoubleRange(int precisionStep) throws Exception {
-    final String field="ascfield"+precisionStep;
-    final long lower=-1000L, upper=+2000L;
-    
-    Query tq= LegacyNumericRangeQuery.newDoubleRange(field, precisionStep,
-        NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
-    TopDocs tTopDocs = searcher.search(tq, 1);
-    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-  }
-
-  @Test
-  public void testDoubleRange_8bit() throws Exception {
-    testDoubleRange(8);
-  }
-  
-  @Test
-  public void testDoubleRange_6bit() throws Exception {
-    testDoubleRange(6);
-  }
-  
-  @Test
-  public void testDoubleRange_4bit() throws Exception {
-    testDoubleRange(4);
-  }
-  
-  @Test
-  public void testDoubleRange_2bit() throws Exception {
-    testDoubleRange(2);
-  }
-  
-  @Test
-  public void testEqualsAndHash() throws Exception {
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test2", 4, 10L, 20L, false, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test3", 4, 10L, 20L, true, false));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test4", 4, 10L, 20L, false, false));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test5", 4, 10L, null, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test6", 4, null, 20L, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test7", 4, null, null, true, true));
-    QueryUtils.checkEqual(
-      LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test9", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test9", 8, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test10a", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test10b", 4, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test11", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test11", 4, 20L, 10L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, false, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test13", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
-    );
-     // difference to int range is tested in TestNumericRangeQuery32
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericTokenStream.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericTokenStream.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericTokenStream.java
deleted file mode 100644
index a507af0..0000000
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericTokenStream.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.legacy;
-
-
-import org.apache.lucene.util.AttributeImpl;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.legacy.LegacyNumericTokenStream;
-import org.apache.lucene.legacy.LegacyNumericUtils;
-import org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttributeImpl;
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl;
-
-@Deprecated
-public class TestNumericTokenStream extends BaseTokenStreamTestCase {
-
-  final long lvalue = random().nextLong();
-  final int ivalue = random().nextInt();
-
-  public void testLongStream() throws Exception {
-    @SuppressWarnings("resource")
-    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setLongValue(lvalue);
-    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
-    assertNotNull(bytesAtt);
-    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
-    assertNotNull(typeAtt);
-    final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
-    assertNotNull(numericAtt);
-    stream.reset();
-    assertEquals(64, numericAtt.getValueSize());
-    for (int shift=0; shift<64; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
-      assertTrue("New token is available", stream.incrementToken());
-      assertEquals("Shift value wrong", shift, numericAtt.getShift());
-      assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), LegacyNumericUtils.prefixCodedToLong(bytesAtt.getBytesRef()));
-      assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
-      assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
-    }
-    assertFalse("More tokens available", stream.incrementToken());
-    stream.end();
-    stream.close();
-  }
-
-  public void testIntStream() throws Exception {
-    @SuppressWarnings("resource")
-    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setIntValue(ivalue);
-    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
-    assertNotNull(bytesAtt);
-    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
-    assertNotNull(typeAtt);
-    final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
-    assertNotNull(numericAtt);
-    stream.reset();
-    assertEquals(32, numericAtt.getValueSize());
-    for (int shift=0; shift<32; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
-      assertTrue("New token is available", stream.incrementToken());
-      assertEquals("Shift value wrong", shift, numericAtt.getShift());
-      assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), LegacyNumericUtils.prefixCodedToInt(bytesAtt.getBytesRef()));
-      assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
-      assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
-    }
-    assertFalse("More tokens available", stream.incrementToken());
-    stream.end();
-    stream.close();
-  }
-  
-  public void testNotInitialized() throws Exception {
-    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
-    
-    expectThrows(IllegalStateException.class, () -> {
-      stream.reset();
-    });
-
-    expectThrows(IllegalStateException.class, () -> {
-      stream.incrementToken();
-    });
-    
-    stream.close();
-  }
-  
-  public static interface TestAttribute extends CharTermAttribute {}
-  public static class TestAttributeImpl extends CharTermAttributeImpl implements TestAttribute {}
-  
-  public void testCTA() throws Exception {
-    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
-    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
-      stream.addAttribute(CharTermAttribute.class);
-    });
-    assertTrue(e.getMessage().startsWith("LegacyNumericTokenStream does not support"));
-
-    e = expectThrows(IllegalArgumentException.class, () -> {
-      stream.addAttribute(TestAttribute.class);
-    });
-    assertTrue(e.getMessage().startsWith("LegacyNumericTokenStream does not support"));
-    stream.close();
-  }
-  
-  /** LUCENE-7027 */
-  public void testCaptureStateAfterExhausted() throws Exception {
-    // default precstep
-    try (LegacyNumericTokenStream stream=new LegacyNumericTokenStream()) {
-      // int
-      stream.setIntValue(ivalue);
-      stream.reset();
-      while (stream.incrementToken());
-      stream.captureState();
-      stream.end();
-      stream.captureState();
-      // long
-      stream.setLongValue(lvalue);
-      stream.reset();
-      while (stream.incrementToken());
-      stream.captureState();
-      stream.end();
-      stream.captureState();
-    }
-    // huge precstep
-    try (LegacyNumericTokenStream stream=new LegacyNumericTokenStream(Integer.MAX_VALUE)) {
-      // int
-      stream.setIntValue(ivalue);
-      stream.reset();
-      while (stream.incrementToken());
-      stream.captureState();
-      stream.end();
-      stream.captureState();
-      // long
-      stream.setLongValue(lvalue);
-      stream.reset();
-      while (stream.incrementToken());
-      stream.captureState();
-      stream.end();
-      stream.captureState();
-    }
-  }
-  
-  public void testAttributeClone() throws Exception {
-    LegacyNumericTermAttributeImpl att = new LegacyNumericTermAttributeImpl();
-    att.init(lvalue, 64, 8, 0); // set some value, to make getBytesRef() work
-    LegacyNumericTermAttributeImpl copy = assertCloneIsEqual(att);
-    assertNotSame(att.getBytesRef(), copy.getBytesRef());
-    LegacyNumericTermAttributeImpl copy2 = assertCopyIsEqual(att);
-    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
-    
-    // LUCENE-7027 test
-    att.init(lvalue, 64, 8, 64); // Exhausted TokenStream -> should return empty BytesRef
-    assertEquals(new BytesRef(), att.getBytesRef());
-    copy = assertCloneIsEqual(att);
-    assertEquals(new BytesRef(), copy.getBytesRef());
-    assertNotSame(att.getBytesRef(), copy.getBytesRef());
-    copy2 = assertCopyIsEqual(att);
-    assertEquals(new BytesRef(), copy2.getBytesRef());
-    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
-  }
-  
-  public static <T extends AttributeImpl> T assertCloneIsEqual(T att) {
-    @SuppressWarnings("unchecked")
-    T clone = (T) att.clone();
-    assertEquals("Clone must be equal", att, clone);
-    assertEquals("Clone's hashcode must be equal", att.hashCode(), clone.hashCode());
-    return clone;
-  }
-
-  public static <T extends AttributeImpl> T assertCopyIsEqual(T att) throws Exception {
-    @SuppressWarnings("unchecked")
-    T copy = (T) att.getClass().newInstance();
-    att.copyTo(copy);
-    assertEquals("Copied instance must be equal", att, copy);
-    assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
-    return copy;
-  }
-  
-}
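
For context on the deletion above: the removed test exercised LegacyNumericTokenStream's
trie encoding, asserting one prefix-coded term per precision step. A minimal sketch (not
part of this commit) of the masking arithmetic the test verified; the value and the step
of 16 below are illustrative, the test used random values and
LegacyNumericUtils.PRECISION_STEP_DEFAULT:

    final long value = 0x123456789ABCDEFL; // illustrative; the test used random().nextLong()
    final int precisionStep = 16;          // assumed step
    for (int shift = 0; shift < 64; shift += precisionStep) {
      // each emitted term keeps the high bits and zeroes out the low `shift` bits
      final long prefix = value & ~((1L << shift) - 1L);
      System.out.printf("shift=%d prefix=0x%X%n", shift, prefix);
    }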

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
index 90e36d8..7536b60 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
@@ -25,11 +25,6 @@ import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.legacy.LegacyDoubleField;
-import org.apache.lucene.legacy.LegacyFieldType;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
-import org.apache.lucene.legacy.LegacyNumericType;
-import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -41,8 +36,6 @@ import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
 import org.apache.lucene.spatial.util.DistanceToShapeValueSource;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
 import org.locationtech.spatial4j.context.SpatialContext;
 import org.locationtech.spatial4j.shape.Point;
 import org.locationtech.spatial4j.shape.Rectangle;
@@ -88,8 +81,6 @@ public class BBoxStrategy extends SpatialStrategy {
    */
   public static FieldType DEFAULT_FIELDTYPE;
 
-  @Deprecated
-  public static LegacyFieldType LEGACY_FIELDTYPE;
   static {
     // Default: pointValues + docValues
     FieldType type = new FieldType();
@@ -98,15 +89,6 @@ public class BBoxStrategy extends SpatialStrategy {
     type.setStored(false);
     type.freeze();
     DEFAULT_FIELDTYPE = type;
-    // Legacy default: legacyNumerics + docValues
-    LegacyFieldType legacyType = new LegacyFieldType();
-    legacyType.setIndexOptions(IndexOptions.DOCS);
-    legacyType.setNumericType(LegacyNumericType.DOUBLE);
-    legacyType.setNumericPrecisionStep(8);// same as solr default
-    legacyType.setDocValuesType(DocValuesType.NUMERIC);//docValues
-    legacyType.setStored(false);
-    legacyType.freeze();
-    LEGACY_FIELDTYPE = legacyType;
   }
 
   public static final String SUFFIX_MINX = "__minX";
@@ -131,8 +113,6 @@ public class BBoxStrategy extends SpatialStrategy {
   private final boolean hasStored;
   private final boolean hasDocVals;
   private final boolean hasPointVals;
-  // equiv to "hasLegacyNumerics":
-  private final LegacyFieldType legacyNumericFieldType; // not stored; holds precision step.
   private final FieldType xdlFieldType;
 
   /**
@@ -143,15 +123,6 @@ public class BBoxStrategy extends SpatialStrategy {
   }
 
   /**
-   * Creates a new {@link BBoxStrategy} instance that uses {@link LegacyDoubleField} for backwards compatibility
-   * @deprecated LegacyNumerics will be removed
-   */
-  @Deprecated
-  public static BBoxStrategy newLegacyInstance(SpatialContext ctx, String fieldNamePrefix) {
-    return new BBoxStrategy(ctx, fieldNamePrefix, LEGACY_FIELDTYPE);
-  }
-
-  /**
    * Creates this strategy.
    * {@code fieldType} is used to customize the indexing options of the 4 number fields, and to a lesser degree the XDL
    * field too. Search requires pointValues (or legacy numerics), and relevancy requires docValues. If these features
@@ -179,23 +150,8 @@ public class BBoxStrategy extends SpatialStrategy {
     if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
       numQuads++;
     }
-    if (fieldType.indexOptions() != IndexOptions.NONE && fieldType instanceof LegacyFieldType && ((LegacyFieldType)fieldType).numericType() != null) {
-      if (hasPointVals) {
-        throw new IllegalArgumentException("pointValues and LegacyNumericType are mutually exclusive");
-      }
-      final LegacyFieldType legacyType = (LegacyFieldType) fieldType;
-      if (legacyType.numericType() != LegacyNumericType.DOUBLE) {
-        throw new IllegalArgumentException(getClass() + " does not support " + legacyType.numericType());
-      }
-      numQuads++;
-      legacyNumericFieldType = new LegacyFieldType(LegacyDoubleField.TYPE_NOT_STORED);
-      legacyNumericFieldType.setNumericPrecisionStep(legacyType.numericPrecisionStep());
-      legacyNumericFieldType.freeze();
-    } else {
-      legacyNumericFieldType = null;
-    }
 
-    if (hasPointVals || legacyNumericFieldType != null) { // if we have an index...
+    if (hasPointVals) { // if we have an index...
       xdlFieldType = new FieldType(StringField.TYPE_NOT_STORED);
       xdlFieldType.setIndexOptions(IndexOptions.DOCS);
       xdlFieldType.freeze();
@@ -242,12 +198,6 @@ public class BBoxStrategy extends SpatialStrategy {
       fields[++idx] = new DoublePoint(field_maxX, bbox.getMaxX());
       fields[++idx] = new DoublePoint(field_maxY, bbox.getMaxY());
     }
-    if (legacyNumericFieldType != null) {
-      fields[++idx] = new LegacyDoubleField(field_minX, bbox.getMinX(), legacyNumericFieldType);
-      fields[++idx] = new LegacyDoubleField(field_minY, bbox.getMinY(), legacyNumericFieldType);
-      fields[++idx] = new LegacyDoubleField(field_maxX, bbox.getMaxX(), legacyNumericFieldType);
-      fields[++idx] = new LegacyDoubleField(field_maxY, bbox.getMaxY(), legacyNumericFieldType);
-    }
     if (xdlFieldType != null) {
       fields[++idx] = new Field(field_xdl, bbox.getCrossesDateLine()?"T":"F", xdlFieldType);
     }
@@ -664,17 +614,12 @@ public class BBoxStrategy extends SpatialStrategy {
   private Query makeNumberTermQuery(String field, double number) {
     if (hasPointVals) {
       return DoublePoint.newExactQuery(field, number);
-    } else if (legacyNumericFieldType != null) {
-      BytesRefBuilder bytes = new BytesRefBuilder();
-      LegacyNumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(number), 0, bytes);
-      return new TermQuery(new Term(field, bytes.get()));
     }
     throw new UnsupportedOperationException("An index is required for this operation.");
   }
 
   /**
    * Returns a numeric range query based on FieldType
-   * {@link LegacyNumericRangeQuery} is used for indexes created using {@code FieldType.LegacyNumericType}
    * {@link DoublePoint#newRangeQuery} is used for indexes created using {@link DoublePoint} fields
    *
    * @param fieldname field name. must not be <code>null</code>.
@@ -702,8 +647,6 @@ public class BBoxStrategy extends SpatialStrategy {
       }
 
       return DoublePoint.newRangeQuery(fieldname, min, max);
-    } else if (legacyNumericFieldType != null) {// todo remove legacy numeric support in 7.0
-      return LegacyNumericRangeQuery.newDoubleRange(fieldname, legacyNumericFieldType.numericPrecisionStep(), min, max, minInclusive, maxInclusive);
     }
     throw new UnsupportedOperationException("An index is required for this operation.");
   }
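
With newLegacyInstance and LEGACY_FIELDTYPE removed, the point-based factory is the only
remaining entry point for this strategy. A minimal usage sketch (the field name is
illustrative; ctx is a spatial4j SpatialContext as elsewhere in this patch):

    SpatialContext ctx = SpatialContext.GEO;
    BBoxStrategy strategy = BBoxStrategy.newInstance(ctx, "bbox"); // pointValues + docValues defaults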

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java
index 757e2bd..ca38abf 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java
@@ -26,8 +26,6 @@ import org.apache.lucene.util.BytesRefIterator;
 /**
  * A TokenStream used internally by {@link org.apache.lucene.spatial.prefix.PrefixTreeStrategy}.
  *
- * This is modelled after {@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
- *
  * @lucene.internal
  */
 class BytesRefIteratorTokenStream extends TokenStream {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
index 59aff49..ef3eaa4 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
@@ -22,11 +22,6 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.legacy.LegacyDoubleField;
-import org.apache.lucene.legacy.LegacyFieldType;
-import org.apache.lucene.legacy.LegacyNumericRangeQuery;
-import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.queries.function.FunctionRangeQuery;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
@@ -86,8 +81,6 @@ public class PointVectorStrategy extends SpatialStrategy {
    */
   public static FieldType DEFAULT_FIELDTYPE;
 
-  @Deprecated
-  public static LegacyFieldType LEGACY_FIELDTYPE;
   static {
     // Default: pointValues + docValues
     FieldType type = new FieldType();
@@ -96,15 +89,6 @@ public class PointVectorStrategy extends SpatialStrategy {
     type.setStored(false);
     type.freeze();
     DEFAULT_FIELDTYPE = type;
-    // Legacy default: legacyNumerics
-    LegacyFieldType legacyType = new LegacyFieldType();
-    legacyType.setIndexOptions(IndexOptions.DOCS);
-    legacyType.setNumericType(LegacyNumericType.DOUBLE);
-    legacyType.setNumericPrecisionStep(8);// same as solr default
-    legacyType.setDocValuesType(DocValuesType.NONE);//no docValues!
-    legacyType.setStored(false);
-    legacyType.freeze();
-    LEGACY_FIELDTYPE = legacyType;
   }
 
   public static final String SUFFIX_X = "__x";
@@ -117,8 +101,6 @@ public class PointVectorStrategy extends SpatialStrategy {
   private final boolean hasStored;
   private final boolean hasDocVals;
   private final boolean hasPointVals;
-  // equiv to "hasLegacyNumerics":
-  private final LegacyFieldType legacyNumericFieldType; // not stored; holds precision step.
 
   /**
    * Create a new {@link PointVectorStrategy} instance that uses {@link DoublePoint} and {@link DoublePoint#newRangeQuery}
@@ -128,18 +110,6 @@ public class PointVectorStrategy extends SpatialStrategy {
   }
 
   /**
-   * Create a new {@link PointVectorStrategy} instance that uses {@link LegacyDoubleField} for backwards compatibility.
-   * However, back-compat is limited; we don't support circle queries or {@link #makeDistanceValueSource(Point, double)}
-   * since that requires docValues (the legacy config didn't have that).
-   *
-   * @deprecated LegacyNumerics will be removed
-   */
-  @Deprecated
-  public static PointVectorStrategy newLegacyInstance(SpatialContext ctx, String fieldNamePrefix) {
-    return new PointVectorStrategy(ctx, fieldNamePrefix, LEGACY_FIELDTYPE);
-  }
-
-  /**
    * Create a new instance configured with the provided FieldType options. See {@link #DEFAULT_FIELDTYPE}.
    * a field type is used to articulate the desired options (namely pointValues, docValues, stored).  Legacy numerics
    * is configurable this way too.
@@ -159,21 +129,6 @@ public class PointVectorStrategy extends SpatialStrategy {
     if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
       numPairs++;
     }
-    if (fieldType.indexOptions() != IndexOptions.NONE && fieldType instanceof LegacyFieldType && ((LegacyFieldType)fieldType).numericType() != null) {
-      if (hasPointVals) {
-        throw new IllegalArgumentException("pointValues and LegacyNumericType are mutually exclusive");
-      }
-      final LegacyFieldType legacyType = (LegacyFieldType) fieldType;
-      if (legacyType.numericType() != LegacyNumericType.DOUBLE) {
-        throw new IllegalArgumentException(getClass() + " does not support " + legacyType.numericType());
-      }
-      numPairs++;
-      legacyNumericFieldType = new LegacyFieldType(LegacyDoubleField.TYPE_NOT_STORED);
-      legacyNumericFieldType.setNumericPrecisionStep(legacyType.numericPrecisionStep());
-      legacyNumericFieldType.freeze();
-    } else {
-      legacyNumericFieldType = null;
-    }
     this.fieldsLen = numPairs * 2;
   }
 
@@ -209,10 +164,6 @@ public class PointVectorStrategy extends SpatialStrategy {
       fields[++idx] = new DoublePoint(fieldNameX, point.getX());
       fields[++idx] = new DoublePoint(fieldNameY, point.getY());
     }
-    if (legacyNumericFieldType != null) {
-      fields[++idx] = new LegacyDoubleField(fieldNameX, point.getX(), legacyNumericFieldType);
-      fields[++idx] = new LegacyDoubleField(fieldNameY, point.getY(), legacyNumericFieldType);
-    }
     assert idx == fields.length - 1;
     return fields;
   }
@@ -268,7 +219,6 @@ public class PointVectorStrategy extends SpatialStrategy {
 
   /**
    * Returns a numeric range query based on FieldType
-   * {@link LegacyNumericRangeQuery} is used for indexes created using {@code FieldType.LegacyNumericType}
    * {@link DoublePoint#newRangeQuery} is used for indexes created using {@link DoublePoint} fields
    */
   private Query rangeQuery(String fieldName, Double min, Double max) {
@@ -283,8 +233,6 @@ public class PointVectorStrategy extends SpatialStrategy {
 
       return DoublePoint.newRangeQuery(fieldName, min, max);
 
-    } else if (legacyNumericFieldType != null) {// todo remove legacy numeric support in 7.0
-      return LegacyNumericRangeQuery.newDoubleRange(fieldName, legacyNumericFieldType.numericPrecisionStep(), min, max, true, true);//inclusive
     }
     //TODO try doc-value range query?
     throw new UnsupportedOperationException("An index is required for this operation.");
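
After this change only the DoublePoint path remains in rangeQuery. Since
DoublePoint.newRangeQuery takes concrete endpoints, open-ended bounds have to be widened
to infinities; a sketch of that pattern (field name and bounds are illustrative, and the
null-handling shown is an assumption about the elided context lines above):

    Double min = null, max = 10.0; // null means unbounded on that side
    Query q = DoublePoint.newRangeQuery("pointvector__x",
        min == null ? Double.NEGATIVE_INFINITY : min,
        max == null ? Double.POSITIVE_INFINITY : max);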

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
index d54e1c9..536436b 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java
@@ -68,9 +68,6 @@ public class DistanceStrategyTest extends StrategyTestCase {
     strategy = BBoxStrategy.newInstance(ctx, "bbox");
     ctorArgs.add(new Object[]{strategy.getFieldName(), strategy});
 
-    strategy = BBoxStrategy.newLegacyInstance(ctx, "bbox_legacy");
-    ctorArgs.add(new Object[]{strategy.getFieldName(), strategy});
-
     strategy = new SerializedDVStrategy(ctx, "serialized");
     ctorArgs.add(new Object[]{strategy.getFieldName(), strategy});
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java
index c14fe54..f52ef2b 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java
@@ -58,9 +58,7 @@ public class QueryEqualsHashCodeTest extends LuceneTestCase {
     strategies.add(recursive_geohash);
     strategies.add(new TermQueryPrefixTreeStrategy(gridQuad, "termquery_quad"));
     strategies.add(PointVectorStrategy.newInstance(ctx, "pointvector"));
-    strategies.add(PointVectorStrategy.newLegacyInstance(ctx, "pointvector_legacy"));
     strategies.add(BBoxStrategy.newInstance(ctx, "bbox"));
-    strategies.add(BBoxStrategy.newLegacyInstance(ctx, "bbox_legacy"));
     final SerializedDVStrategy serialized = new SerializedDVStrategy(ctx, "serialized");
     strategies.add(serialized);
     strategies.add(new CompositeSpatialStrategy("composite", recursive_geohash, serialized));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
index 20df730..210ab38 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
@@ -21,8 +21,6 @@ import java.io.IOException;
 import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.legacy.LegacyFieldType;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.spatial.SpatialMatchConcern;
 import org.apache.lucene.spatial.prefix.RandomSpatialOpStrategyTestCase;
@@ -93,20 +91,10 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
       factory.worldBounds = new RectangleImpl(-300, 300, -100, 100, null);
       this.ctx = factory.newSpatialContext();
     }
-    // randomly test legacy (numeric) and point based bbox strategy
-    if (random().nextBoolean()) {
-      this.strategy = BBoxStrategy.newInstance(ctx, "bbox");
-    } else {
-      this.strategy = BBoxStrategy.newLegacyInstance(ctx, "bbox");
-    }
+    this.strategy = BBoxStrategy.newInstance(ctx, "bbox");
     //test we can disable docValues for predicate tests
     if (random().nextBoolean()) {
-      FieldType fieldType = ((BBoxStrategy)strategy).getFieldType();
-      if (fieldType instanceof LegacyFieldType) {
-        fieldType = new LegacyFieldType((LegacyFieldType)fieldType);
-      } else {
-        fieldType = new FieldType(fieldType);
-      }
+      FieldType fieldType = new FieldType(((BBoxStrategy)strategy).getFieldType());
       fieldType.setDocValuesType(DocValuesType.NONE);
       strategy = new BBoxStrategy(ctx, strategy.getFieldName(), fieldType);
     }
@@ -194,11 +182,7 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
 
   private void setupGeo() {
     this.ctx = SpatialContext.GEO;
-    if (random().nextBoolean()) {
-      this.strategy = BBoxStrategy.newInstance(ctx, "bbox");
-    } else {
-      this.strategy = BBoxStrategy.newLegacyInstance(ctx, "bbox");
-    }
+    this.strategy = BBoxStrategy.newInstance(ctx, "bbox");
   }
 
   // OLD STATIC TESTS (worthless?)
@@ -239,16 +223,9 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
     FieldType fieldType;
     // random  legacy or not legacy
     String FIELD_PREFIX = "bbox";
+    fieldType = new FieldType(BBoxStrategy.DEFAULT_FIELDTYPE);
     if (random().nextBoolean()) {
-      fieldType = new FieldType(BBoxStrategy.DEFAULT_FIELDTYPE);
-      if (random().nextBoolean()) {
-        fieldType.setDimensions(0, 0);
-      }
-    } else {
-      fieldType = new FieldType(BBoxStrategy.LEGACY_FIELDTYPE);
-      if (random().nextBoolean()) {
-        fieldType.setIndexOptions(IndexOptions.NONE);
-      }
+      fieldType.setDimensions(0, 0);
     }
 
     strategy = new BBoxStrategy(ctx, FIELD_PREFIX, fieldType);
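
The test now disables docValues by copying the plain FieldType, as the hunks above show;
the LegacyFieldType copy branch is gone. For reference, the remaining pattern (field
name illustrative):

    FieldType fieldType = new FieldType(BBoxStrategy.DEFAULT_FIELDTYPE); // mutable copy of the frozen default
    fieldType.setDocValuesType(DocValuesType.NONE); // predicate tests don't need docValues
    BBoxStrategy strategy = new BBoxStrategy(ctx, "bbox", fieldType);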

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java
index ac5ab95..901594e 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/vector/TestPointVectorStrategy.java
@@ -63,12 +63,7 @@ public class TestPointVectorStrategy extends StrategyTestCase {
   @Test
   public void testCitiesIntersectsBBox() throws IOException {
     // note: does not require docValues
-    if (random().nextBoolean()) {
-      this.strategy = PointVectorStrategy.newInstance(ctx, getClass().getSimpleName());
-    } else {
-      // switch to legacy instance sometimes, which has no docValues
-      this.strategy = PointVectorStrategy.newLegacyInstance(ctx, getClass().getSimpleName());
-    }
+    this.strategy = PointVectorStrategy.newInstance(ctx, getClass().getSimpleName());
     getAddAndVerifyIndexedDocuments(DATA_WORLD_CITIES_POINTS);
     executeQueries(SpatialMatchConcern.FILTER, QTEST_Cities_Intersects_BBox);
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
----------------------------------------------------------------------
diff --git a/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
index aadb9e2..dd64c3f 100644
--- a/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
+++ b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
@@ -20,7 +20,7 @@ import java.io.IOException;
 import java.time.Instant;
 import java.util.Arrays;
 
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.solr.schema.FieldType;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/759fa42b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
----------------------------------------------------------------------
diff --git a/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
index d13795d..803d8e0 100644
--- a/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
+++ b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
@@ -24,7 +24,7 @@ import java.util.Map;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.solr.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.LongDocValues;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;