Posted to commits@lucene.apache.org by rm...@apache.org on 2016/08/17 13:29:52 UTC

[1/7] lucene-solr:master: LUCENE-7413: move legacy numeric support to backwards module

Repository: lucene-solr
Updated Branches:
  refs/heads/master ba09fa7a4 -> 105c7eae8


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/TrieField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieField.java b/solr/core/src/java/org/apache/solr/schema/TrieField.java
index b7bac1c..b23a8b6 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieField.java
@@ -26,24 +26,25 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 
-import org.apache.lucene.document.FieldType.LegacyNumericType;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyDoubleField;
-import org.apache.lucene.document.LegacyFloatField;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.legacy.LegacyDoubleField;
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyFloatField;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.legacy.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.search.DocValuesRangeQuery;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedSetSelector;
@@ -51,7 +52,6 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.mutable.MutableValueDate;
 import org.apache.lucene.util.mutable.MutableValueLong;
@@ -67,9 +67,9 @@ import org.slf4j.LoggerFactory;
 
 /**
  * Provides field types to support for Lucene's {@link
- * org.apache.lucene.document.LegacyIntField}, {@link org.apache.lucene.document.LegacyLongField}, {@link org.apache.lucene.document.LegacyFloatField} and
- * {@link org.apache.lucene.document.LegacyDoubleField}.
- * See {@link org.apache.lucene.search.LegacyNumericRangeQuery} for more details.
+ * org.apache.lucene.legacy.LegacyIntField}, {@link org.apache.lucene.legacy.LegacyLongField}, {@link org.apache.lucene.legacy.LegacyFloatField} and
+ * {@link org.apache.lucene.legacy.LegacyDoubleField}.
+ * See {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} for more details.
  * It supports integer, float, long, double and date types.
  * <p>
  * For each number being added to this field, multiple terms are generated as per the algorithm described in the above
@@ -82,7 +82,7 @@ import org.slf4j.LoggerFactory;
  * generated, range search will be no faster than any other number field, but sorting will still be possible.
  *
  *
- * @see org.apache.lucene.search.LegacyNumericRangeQuery
+ * @see org.apache.lucene.legacy.LegacyNumericRangeQuery
  * @since solr 1.4
  */
 public class TrieField extends PrimitiveFieldType {
@@ -349,17 +349,17 @@ public class TrieField extends PrimitiveFieldType {
   }
 
   @Override
-  public FieldType.LegacyNumericType getNumericType() {
+  public LegacyNumericType getNumericType() {
     switch (type) {
       case INTEGER:
-        return FieldType.LegacyNumericType.INT;
+        return LegacyNumericType.INT;
       case LONG:
       case DATE:
-        return FieldType.LegacyNumericType.LONG;
+        return LegacyNumericType.LONG;
       case FLOAT:
-        return FieldType.LegacyNumericType.FLOAT;
+        return LegacyNumericType.FLOAT;
       case DOUBLE:
-        return FieldType.LegacyNumericType.DOUBLE;
+        return LegacyNumericType.DOUBLE;
       default:
         throw new AssertionError();
     }
@@ -666,7 +666,7 @@ public class TrieField extends PrimitiveFieldType {
       return null;
     }
     
-    FieldType ft = new FieldType();
+    LegacyFieldType ft = new LegacyFieldType();
     ft.setStored(stored);
     ft.setTokenized(true);
     ft.setOmitNorms(field.omitNorms());
@@ -677,16 +677,16 @@ public class TrieField extends PrimitiveFieldType {
         ft.setNumericType(LegacyNumericType.INT);
         break;
       case FLOAT:
-        ft.setNumericType(FieldType.LegacyNumericType.FLOAT);
+        ft.setNumericType(LegacyNumericType.FLOAT);
         break;
       case LONG:
-        ft.setNumericType(FieldType.LegacyNumericType.LONG);
+        ft.setNumericType(LegacyNumericType.LONG);
         break;
       case DOUBLE:
-        ft.setNumericType(FieldType.LegacyNumericType.DOUBLE);
+        ft.setNumericType(LegacyNumericType.DOUBLE);
         break;
       case DATE:
-        ft.setNumericType(FieldType.LegacyNumericType.LONG);
+        ft.setNumericType(LegacyNumericType.LONG);
         break;
       default:
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + type);
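
For code outside Lucene/Solr that still relies on the trie encoding, LUCENE-7413 is essentially a package move: the Legacy* classes keep their names and signatures but now live in org.apache.lucene.legacy (provided by the backward-codecs module) instead of org.apache.lucene.document, org.apache.lucene.search and org.apache.lucene.util. A minimal sketch of the adjusted usage follows; the "price" field and the bounds are illustrative only, not part of this patch, and the backward-codecs jar is assumed to be on the classpath.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.legacy.LegacyIntField;          // was org.apache.lucene.document.LegacyIntField
    import org.apache.lucene.legacy.LegacyNumericRangeQuery; // was org.apache.lucene.search.LegacyNumericRangeQuery
    import org.apache.lucene.search.Query;

    public class LegacyNumericImportSketch {
      public static void main(String[] args) {
        Document doc = new Document();
        // same constructor as before the move; only the import changed
        doc.add(new LegacyIntField("price", 42, Field.Store.NO));
        Query q = LegacyNumericRangeQuery.newIntRange("price", 10, 100, true, true);
        System.out.println(q);
      }
    }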

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
index 8d1739e..bcc4d4f 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
@@ -23,13 +23,13 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;
 import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueFloat;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieIntField.java b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
index 07d9c74..6c8622b 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
@@ -23,13 +23,13 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
 import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueInt;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieLongField.java b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
index 28345af..371d5bb 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
@@ -23,13 +23,13 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.LongDocValues;
 import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueLong;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/search/QueryParsing.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/QueryParsing.java b/solr/core/src/java/org/apache/solr/search/QueryParsing.java
index e7a6c36..fb32c6e 100644
--- a/solr/core/src/java/org/apache/solr/search/QueryParsing.java
+++ b/solr/core/src/java/org/apache/solr/search/QueryParsing.java
@@ -17,12 +17,12 @@
 package org.apache.solr.search;
 
 import org.apache.lucene.index.Term;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.FuzzyQuery;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java b/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
index 6bba1de..d526cf3 100644
--- a/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
+++ b/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
@@ -34,7 +34,7 @@ import org.apache.lucene.util.Bits;
  * Constrains search results to only match those which also match a provided
  * query.  
  *
- * <p> This could be used, for example, with a {@link org.apache.lucene.search.LegacyNumericRangeQuery} on a suitably
+ * <p> This could be used, for example, with a {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} on a suitably
  * formatted date field to implement date filtering.  One could re-use a single
  * CachingWrapperFilter(QueryWrapperFilter) that matches, e.g., only documents modified 
  * within the last week.  This would only need to be reconstructed once per day.
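
The javadoc above keeps its date-filtering example; under the new package layout a rough sketch of that pattern looks like the following. The "mod_date" field name, the millisecond encoding and the one-week window are assumptions for illustration only, and the field is assumed to have been indexed as a legacy long trie field.

    import org.apache.lucene.legacy.LegacyNumericRangeQuery;
    import org.apache.lucene.search.Query;
    import org.apache.solr.search.QueryWrapperFilter;

    public class DateFilterSketch {
      /** Builds a filter matching documents modified within the last week. */
      public static QueryWrapperFilter lastWeekFilter() {
        long now = System.currentTimeMillis();
        long weekAgo = now - 7L * 24 * 60 * 60 * 1000;
        Query range = LegacyNumericRangeQuery.newLongRange("mod_date", weekAgo, now, true, true);
        return new QueryWrapperFilter(range);
      }
    }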

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetField.java b/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
index c06e182..92c64e7 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetField.java
@@ -19,6 +19,7 @@ package org.apache.solr.search.facet;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.schema.FieldType;
 import org.apache.solr.schema.SchemaField;
@@ -94,7 +95,7 @@ public class FacetField extends FacetRequest {
       return new FacetFieldProcessorByEnumTermsStream(fcontext, this, sf);
     }
 
-    org.apache.lucene.document.FieldType.LegacyNumericType ntype = ft.getNumericType();
+    LegacyNumericType ntype = ft.getNumericType();
 
     if (!multiToken) {
       if (ntype != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java b/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
index 46b927e..0f85feb 100644
--- a/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/mlt/CloudMLTQParser.java
@@ -23,6 +23,7 @@ import java.util.Map;
 import java.util.regex.Pattern;
 
 import org.apache.lucene.index.Term;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.mlt.MoreLikeThis;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -30,7 +31,6 @@ import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.StringUtils;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java b/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
index 962f452..da3a487 100644
--- a/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java
@@ -16,6 +16,7 @@
  */
 package org.apache.solr.search.mlt;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.mlt.MoreLikeThis;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -25,7 +26,6 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.StringUtils;
 import org.apache.solr.common.params.SolrParams;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java b/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java
index 7ef4956..be08a60 100644
--- a/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java
+++ b/solr/core/src/java/org/apache/solr/uninverting/FieldCache.java
@@ -28,10 +28,10 @@ import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.RamUsageEstimator;
 
@@ -161,8 +161,8 @@ interface FieldCache {
   };
   
   /**
-   * A parser instance for int values encoded by {@link org.apache.lucene.util.LegacyNumericUtils}, e.g. when indexed
-   * via {@link org.apache.lucene.document.LegacyIntField}/{@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
+   * A parser instance for int values encoded by {@link org.apache.lucene.legacy.LegacyNumericUtils}, e.g. when indexed
+   * via {@link org.apache.lucene.legacy.LegacyIntField}/{@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
    * @deprecated Index with points and use {@link #INT_POINT_PARSER} instead.
    */
   @Deprecated
@@ -184,8 +184,8 @@ interface FieldCache {
   };
 
   /**
-   * A parser instance for float values encoded with {@link org.apache.lucene.util.LegacyNumericUtils}, e.g. when indexed
-   * via {@link org.apache.lucene.document.LegacyFloatField}/{@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
+   * A parser instance for float values encoded with {@link org.apache.lucene.legacy.LegacyNumericUtils}, e.g. when indexed
+   * via {@link org.apache.lucene.legacy.LegacyFloatField}/{@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
    * @deprecated Index with points and use {@link #FLOAT_POINT_PARSER} instead.
    */
   @Deprecated
@@ -209,8 +209,8 @@ interface FieldCache {
   };
 
   /**
-   * A parser instance for long values encoded by {@link org.apache.lucene.util.LegacyNumericUtils}, e.g. when indexed
-   * via {@link org.apache.lucene.document.LegacyLongField}/{@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
+   * A parser instance for long values encoded by {@link org.apache.lucene.legacy.LegacyNumericUtils}, e.g. when indexed
+   * via {@link org.apache.lucene.legacy.LegacyLongField}/{@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
    * @deprecated Index with points and use {@link #LONG_POINT_PARSER} instead.
    */
   @Deprecated
@@ -231,8 +231,8 @@ interface FieldCache {
   };
 
   /**
-   * A parser instance for double values encoded with {@link org.apache.lucene.util.LegacyNumericUtils}, e.g. when indexed
-   * via {@link org.apache.lucene.document.LegacyDoubleField}/{@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
+   * A parser instance for double values encoded with {@link org.apache.lucene.legacy.LegacyNumericUtils}, e.g. when indexed
+   * via {@link org.apache.lucene.legacy.LegacyDoubleField}/{@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
    * @deprecated Index with points and use {@link #DOUBLE_POINT_PARSER} instead.
    */
   @Deprecated
@@ -279,7 +279,7 @@ interface FieldCache {
    * @param parser
    *          Computes long for string values. May be {@code null} if the
    *          requested field was indexed as {@link NumericDocValuesField} or
-   *          {@link org.apache.lucene.document.LegacyLongField}.
+   *          {@link org.apache.lucene.legacy.LegacyLongField}.
    * @param setDocsWithField
    *          If true then {@link #getDocsWithField} will also be computed and
    *          stored in the FieldCache.
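
The @deprecated notes above point to the point-based parsers; for new indexes the recommended replacement is to index the value as a point (plus doc values if sorting or faceting is needed) rather than as a legacy trie field. A minimal sketch, with an illustrative "price" field, might look like:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.search.Query;

    public class PointsReplacementSketch {
      public static Document priceDoc(int price) {
        Document doc = new Document();
        doc.add(new IntPoint("price", price));              // indexed for exact/range queries
        doc.add(new NumericDocValuesField("price", price)); // doc values for sorting/faceting
        return doc;
      }

      public static Query priceBetween(int lower, int upper) {
        return IntPoint.newRangeQuery("price", lower, upper); // bounds are inclusive
      }
    }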

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
index 4450cbb..42b2f76 100644
--- a/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
+++ b/solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java
@@ -86,7 +86,7 @@ public class UninvertingReader extends FilterLeafReader {
      */
     DOUBLE_POINT,
     /** 
-     * Single-valued Integer, (e.g. indexed with {@link org.apache.lucene.document.LegacyIntField})
+     * Single-valued Integer, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyIntField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link NumericDocValuesField}.
@@ -95,7 +95,7 @@ public class UninvertingReader extends FilterLeafReader {
     @Deprecated
     LEGACY_INTEGER,
     /** 
-     * Single-valued Long, (e.g. indexed with {@link org.apache.lucene.document.LegacyLongField})
+     * Single-valued Long, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyLongField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link NumericDocValuesField}.
@@ -104,7 +104,7 @@ public class UninvertingReader extends FilterLeafReader {
     @Deprecated
     LEGACY_LONG,
     /** 
-     * Single-valued Float, (e.g. indexed with {@link org.apache.lucene.document.LegacyFloatField})
+     * Single-valued Float, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyFloatField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link NumericDocValuesField}.
@@ -113,7 +113,7 @@ public class UninvertingReader extends FilterLeafReader {
     @Deprecated
     LEGACY_FLOAT,
     /** 
-     * Single-valued Double, (e.g. indexed with {@link org.apache.lucene.document.LegacyDoubleField})
+     * Single-valued Double, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyDoubleField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link NumericDocValuesField}.
@@ -143,28 +143,28 @@ public class UninvertingReader extends FilterLeafReader {
      */
     SORTED_SET_BINARY,
     /** 
-     * Multi-valued Integer, (e.g. indexed with {@link org.apache.lucene.document.LegacyIntField})
+     * Multi-valued Integer, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyIntField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link SortedSetDocValuesField}.
      */
     SORTED_SET_INTEGER,
     /** 
-     * Multi-valued Float, (e.g. indexed with {@link org.apache.lucene.document.LegacyFloatField})
+     * Multi-valued Float, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyFloatField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link SortedSetDocValuesField}.
      */
     SORTED_SET_FLOAT,
     /** 
-     * Multi-valued Long, (e.g. indexed with {@link org.apache.lucene.document.LegacyLongField})
+     * Multi-valued Long, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyLongField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link SortedSetDocValuesField}.
      */
     SORTED_SET_LONG,
     /** 
-     * Multi-valued Double, (e.g. indexed with {@link org.apache.lucene.document.LegacyDoubleField})
+     * Multi-valued Double, (e.g. indexed with {@link org.apache.lucene.legacy.LegacyDoubleField})
      * <p>
      * Fields with this type act as if they were indexed with
      * {@link SortedSetDocValuesField}.
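
These Type constants describe how a field was originally indexed so that UninvertingReader can expose synthetic doc values for it. A hedged sketch of wiring them up is below; the field names are illustrative assumptions, and the mapping must match the actual legacy field types used at index time.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.solr.uninverting.UninvertingReader;
    import org.apache.solr.uninverting.UninvertingReader.Type;

    public class UninvertSketch {
      public static DirectoryReader uninvert(DirectoryReader in) throws IOException {
        Map<String, Type> mapping = new HashMap<>();
        mapping.put("price", Type.LEGACY_INTEGER);     // single-valued LegacyIntField
        mapping.put("tags", Type.SORTED_SET_INTEGER);  // multi-valued LegacyIntField
        return UninvertingReader.wrap(in, mapping);
      }
    }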

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/update/VersionInfo.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/VersionInfo.java b/solr/core/src/java/org/apache/solr/update/VersionInfo.java
index bee30f5..0003c24 100644
--- a/solr/core/src/java/org/apache/solr/update/VersionInfo.java
+++ b/solr/core/src/java/org/apache/solr/update/VersionInfo.java
@@ -25,12 +25,12 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Terms;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.BitUtil;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.SuppressForbidden;
 import org.apache.solr.index.SlowCompositeReaderWrapper;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
index 32e3eb2..6059528 100644
--- a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
@@ -17,6 +17,7 @@
 package org.apache.solr.search;
 
 import org.apache.lucene.index.Term;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.search.*;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.util.AbstractSolrTestCase;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java b/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
index 785ee55..b3a70ae 100644
--- a/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
+++ b/solr/core/src/test/org/apache/solr/search/function/TestOrdValues.java
@@ -21,8 +21,6 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyFloatField;
-import org.apache.lucene.document.LegacyIntField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.TextField;
@@ -31,6 +29,8 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.legacy.LegacyFloatField;
+import org.apache.lucene.legacy.LegacyIntField;
 import org.apache.lucene.queries.function.FunctionQuery;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
index f1627a6..46339a7 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestDocTermOrds.java
@@ -28,8 +28,6 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValues;
@@ -45,10 +43,12 @@ import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.TestUtil;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSanityChecker.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSanityChecker.java b/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSanityChecker.java
index d54d579..a5958f9 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSanityChecker.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSanityChecker.java
@@ -21,14 +21,14 @@ import java.io.IOException;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LegacyDoubleField;
-import org.apache.lucene.document.LegacyFloatField;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.legacy.LegacyDoubleField;
+import org.apache.lucene.legacy.LegacyFloatField;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.index.SlowCompositeReaderWrapper;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java b/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java
index 34d9237..d53f610 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestFieldCacheSort.java
@@ -24,13 +24,9 @@ import java.util.Map;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DoublePoint;
-import org.apache.lucene.document.LegacyDoubleField;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FloatPoint;
 import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.LegacyFloatField;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
@@ -41,6 +37,10 @@ import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.legacy.LegacyDoubleField;
+import org.apache.lucene.legacy.LegacyFloatField;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.IndexSearcher;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java b/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java
index 1192f4b..5220460 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestLegacyFieldCache.java
@@ -28,10 +28,6 @@ import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LegacyDoubleField;
-import org.apache.lucene.document.LegacyFloatField;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
@@ -45,11 +41,15 @@ import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.legacy.LegacyDoubleField;
+import org.apache.lucene.legacy.LegacyFloatField;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.index.SlowCompositeReaderWrapper;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java
index 2b861ad..6fed73b 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms32.java
@@ -21,12 +21,12 @@ import java.util.Map;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyIntField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
@@ -62,17 +62,17 @@ public class TestNumericTerms32 extends LuceneTestCase {
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
         .setMergePolicy(newLogMergePolicy()));
     
-    final FieldType storedInt = new FieldType(LegacyIntField.TYPE_NOT_STORED);
+    final LegacyFieldType storedInt = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
     storedInt.setStored(true);
     storedInt.freeze();
 
-    final FieldType storedInt8 = new FieldType(storedInt);
+    final LegacyFieldType storedInt8 = new LegacyFieldType(storedInt);
     storedInt8.setNumericPrecisionStep(8);
 
-    final FieldType storedInt4 = new FieldType(storedInt);
+    final LegacyFieldType storedInt4 = new LegacyFieldType(storedInt);
     storedInt4.setNumericPrecisionStep(4);
 
-    final FieldType storedInt2 = new FieldType(storedInt);
+    final LegacyFieldType storedInt2 = new LegacyFieldType(storedInt);
     storedInt2.setNumericPrecisionStep(2);
 
     LegacyIntField
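
The test changes above show the pattern for configuring a custom precision step after the move: the setNumericPrecisionStep call now lives on LegacyFieldType rather than FieldType. A short sketch of that pattern, in the same style as the test code (field name and step value illustrative):

    import org.apache.lucene.document.Field;
    import org.apache.lucene.legacy.LegacyFieldType;
    import org.apache.lucene.legacy.LegacyIntField;

    public class PrecisionStepSketch {
      public static LegacyIntField intFieldWithStep4() {
        // copy the default not-stored int type and narrow its precision step
        LegacyFieldType int4 = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
        int4.setNumericPrecisionStep(4);
        int4.freeze();
        return new LegacyIntField("field4", 0, int4);
      }
    }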

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java
index 4da8be9..2f341b7 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestNumericTerms64.java
@@ -21,12 +21,12 @@ import java.util.Map;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
@@ -62,20 +62,20 @@ public class TestNumericTerms64 extends LuceneTestCase {
         .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
         .setMergePolicy(newLogMergePolicy()));
 
-    final FieldType storedLong = new FieldType(LegacyLongField.TYPE_NOT_STORED);
+    final LegacyFieldType storedLong = new LegacyFieldType(LegacyLongField.TYPE_NOT_STORED);
     storedLong.setStored(true);
     storedLong.freeze();
 
-    final FieldType storedLong8 = new FieldType(storedLong);
+    final LegacyFieldType storedLong8 = new LegacyFieldType(storedLong);
     storedLong8.setNumericPrecisionStep(8);
 
-    final FieldType storedLong4 = new FieldType(storedLong);
+    final LegacyFieldType storedLong4 = new LegacyFieldType(storedLong);
     storedLong4.setNumericPrecisionStep(4);
 
-    final FieldType storedLong6 = new FieldType(storedLong);
+    final LegacyFieldType storedLong6 = new LegacyFieldType(storedLong);
     storedLong6.setNumericPrecisionStep(6);
 
-    final FieldType storedLong2 = new FieldType(storedLong);
+    final LegacyFieldType storedLong2 = new LegacyFieldType(storedLong);
     storedLong2.setNumericPrecisionStep(2);
 
     LegacyLongField

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java b/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
index 2ecc63e..c0188e4 100644
--- a/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
+++ b/solr/core/src/test/org/apache/solr/uninverting/TestUninvertingReader.java
@@ -28,10 +28,7 @@ import java.util.Set;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
@@ -43,9 +40,12 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.index.SlowCompositeReaderWrapper;
@@ -224,7 +224,7 @@ public class TestUninvertingReader extends LuceneTestCase {
     final Directory dir = newDirectory();
     final IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
     
-    final FieldType NO_TRIE_TYPE = new FieldType(LegacyIntField.TYPE_NOT_STORED);
+    final LegacyFieldType NO_TRIE_TYPE = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
     NO_TRIE_TYPE.setNumericPrecisionStep(Integer.MAX_VALUE);
 
     final Map<String,Type> UNINVERT_MAP = new LinkedHashMap<String,Type>();


[3/7] lucene-solr:master: LUCENE-7413: move legacy numeric support to backwards module

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/index/TestFieldReuse.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldReuse.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldReuse.java
index b36cfef..977df3d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFieldReuse.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFieldReuse.java
@@ -24,16 +24,12 @@ import java.util.Collections;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.CannedTokenStream;
-import org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttribute;
-import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LegacyIntField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 
 /** test tokenstream reuse by DefaultIndexingChain */
 public class TestFieldReuse extends BaseTokenStreamTestCase {
@@ -61,7 +57,7 @@ public class TestFieldReuse extends BaseTokenStreamTestCase {
     
     // pass a bogus stream and ensure it's still ok
     stringField = new StringField("foo", "beer", Field.Store.NO);
-    TokenStream bogus = new LegacyNumericTokenStream();
+    TokenStream bogus = new CannedTokenStream();
     ts = stringField.tokenStream(null, bogus);
     assertNotSame(ts, bogus);
     assertTokenStreamContents(ts, 
@@ -71,37 +67,6 @@ public class TestFieldReuse extends BaseTokenStreamTestCase {
     );
   }
   
-  public void testNumericReuse() throws IOException {
-    LegacyIntField legacyIntField = new LegacyIntField("foo", 5, Field.Store.NO);
-    
-    // passing null
-    TokenStream ts = legacyIntField.tokenStream(null, null);
-    assertTrue(ts instanceof LegacyNumericTokenStream);
-    assertEquals(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, ((LegacyNumericTokenStream)ts).getPrecisionStep());
-    assertNumericContents(5, ts);
-
-    // now reuse previous stream
-    legacyIntField = new LegacyIntField("foo", 20, Field.Store.NO);
-    TokenStream ts2 = legacyIntField.tokenStream(null, ts);
-    assertSame(ts, ts2);
-    assertNumericContents(20, ts);
-    
-    // pass a bogus stream and ensure it's still ok
-    legacyIntField = new LegacyIntField("foo", 2343, Field.Store.NO);
-    TokenStream bogus = new CannedTokenStream(new Token("bogus", 0, 5));
-    ts = legacyIntField.tokenStream(null, bogus);
-    assertNotSame(bogus, ts);
-    assertNumericContents(2343, ts);
-    
-    // pass another bogus stream (numeric, but different precision step!)
-    legacyIntField = new LegacyIntField("foo", 42, Field.Store.NO);
-    assert 3 != LegacyNumericUtils.PRECISION_STEP_DEFAULT;
-    bogus = new LegacyNumericTokenStream(3);
-    ts = legacyIntField.tokenStream(null, bogus);
-    assertNotSame(bogus, ts);
-    assertNumericContents(42, ts);
-  }
-  
   static class MyField implements IndexableField {
     TokenStream lastSeen;
     TokenStream lastReturned;
@@ -163,20 +128,4 @@ public class TestFieldReuse extends BaseTokenStreamTestCase {
     iw.close();
     dir.close();
   }
-  
-  private void assertNumericContents(int value, TokenStream ts) throws IOException {
-    assertTrue(ts instanceof LegacyNumericTokenStream);
-    LegacyNumericTermAttribute numericAtt = ts.getAttribute(LegacyNumericTermAttribute.class);
-    ts.reset();
-    boolean seen = false;
-    while (ts.incrementToken()) {
-      if (numericAtt.getShift() == 0) {
-        assertEquals(value, numericAtt.getRawValue());
-        seen = true;
-      }
-    }
-    ts.end();
-    ts.close();
-    assertTrue(seen);
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTerms.java b/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
index 0ee3447..e8871dc 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
@@ -18,17 +18,11 @@ package org.apache.lucene.index;
 
 import org.apache.lucene.analysis.CannedBinaryTokenStream;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.LegacyDoubleField;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LegacyFloatField;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.TestUtil;
 
 public class TestTerms extends LuceneTestCase {
@@ -88,132 +82,4 @@ public class TestTerms extends LuceneTestCase {
     w.close();
     dir.close();
   }
-
-  public void testEmptyIntFieldMinMax() throws Exception {
-    assertNull(LegacyNumericUtils.getMinInt(EMPTY_TERMS));
-    assertNull(LegacyNumericUtils.getMaxInt(EMPTY_TERMS));
-  }
-  
-  public void testIntFieldMinMax() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    int numDocs = atLeast(100);
-    int minValue = Integer.MAX_VALUE;
-    int maxValue = Integer.MIN_VALUE;
-    for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      int num = random().nextInt();
-      minValue = Math.min(num, minValue);
-      maxValue = Math.max(num, maxValue);
-      doc.add(new LegacyIntField("field", num, Field.Store.NO));
-      w.addDocument(doc);
-    }
-    
-    IndexReader r = w.getReader();
-    Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(new Integer(minValue), LegacyNumericUtils.getMinInt(terms));
-    assertEquals(new Integer(maxValue), LegacyNumericUtils.getMaxInt(terms));
-
-    r.close();
-    w.close();
-    dir.close();
-  }
-
-  public void testEmptyLongFieldMinMax() throws Exception {
-    assertNull(LegacyNumericUtils.getMinLong(EMPTY_TERMS));
-    assertNull(LegacyNumericUtils.getMaxLong(EMPTY_TERMS));
-  }
-  
-  public void testLongFieldMinMax() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    int numDocs = atLeast(100);
-    long minValue = Long.MAX_VALUE;
-    long maxValue = Long.MIN_VALUE;
-    for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      long num = random().nextLong();
-      minValue = Math.min(num, minValue);
-      maxValue = Math.max(num, maxValue);
-      doc.add(new LegacyLongField("field", num, Field.Store.NO));
-      w.addDocument(doc);
-    }
-    
-    IndexReader r = w.getReader();
-
-    Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(new Long(minValue), LegacyNumericUtils.getMinLong(terms));
-    assertEquals(new Long(maxValue), LegacyNumericUtils.getMaxLong(terms));
-
-    r.close();
-    w.close();
-    dir.close();
-  }
-
-  public void testFloatFieldMinMax() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    int numDocs = atLeast(100);
-    float minValue = Float.POSITIVE_INFINITY;
-    float maxValue = Float.NEGATIVE_INFINITY;
-    for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      float num = random().nextFloat();
-      minValue = Math.min(num, minValue);
-      maxValue = Math.max(num, maxValue);
-      doc.add(new LegacyFloatField("field", num, Field.Store.NO));
-      w.addDocument(doc);
-    }
-    
-    IndexReader r = w.getReader();
-    Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(minValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms)), 0.0f);
-    assertEquals(maxValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms)), 0.0f);
-
-    r.close();
-    w.close();
-    dir.close();
-  }
-
-  public void testDoubleFieldMinMax() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    int numDocs = atLeast(100);
-    double minValue = Double.POSITIVE_INFINITY;
-    double maxValue = Double.NEGATIVE_INFINITY;
-    for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      double num = random().nextDouble();
-      minValue = Math.min(num, minValue);
-      maxValue = Math.max(num, maxValue);
-      doc.add(new LegacyDoubleField("field", num, Field.Store.NO));
-      w.addDocument(doc);
-    }
-    
-    IndexReader r = w.getReader();
-
-    Terms terms = MultiFields.getTerms(r, "field");
-
-    assertEquals(minValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)), 0.0);
-    assertEquals(maxValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)), 0.0);
-
-    r.close();
-    w.close();
-    dir.close();
-  }
-
-  /**
-   * A complete empty Terms instance that has no terms in it and supports no optional statistics
-   */
-  private static Terms EMPTY_TERMS = new Terms() {
-    public TermsEnum iterator() { return TermsEnum.EMPTY; }
-    public long size() { return -1; }
-    public long getSumTotalTermFreq() { return -1; }
-    public long getSumDocFreq() { return -1; }
-    public int getDocCount() { return -1; }
-    public boolean hasFreqs() { return false; }
-    public boolean hasOffsets() { return false; }
-    public boolean hasPositions() { return false; }
-    public boolean hasPayloads() { return false; }
-  };
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
deleted file mode 100644
index eba37ce..0000000
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-
-import java.util.Locale;
-import java.text.DecimalFormat;
-import java.text.DecimalFormatSymbols;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
-
-public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
-
-  /** Tests LegacyNumericRangeQuery on a multi-valued field (multiple numeric values per document).
-   * This test ensures, that a classical TermRangeQuery returns exactly the same document numbers as
-   * LegacyNumericRangeQuery (see SOLR-1322 for discussion) and the multiple precision terms per numeric value
-   * do not interfere with multiple numeric values.
-   */
-  public void testMultiValuedNRQ() throws Exception {
-    Directory directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
-    
-    DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.ROOT));
-    
-    int num = atLeast(500);
-    for (int l = 0; l < num; l++) {
-      Document doc = new Document();
-      for (int m=0, c=random().nextInt(10); m<=c; m++) {
-        int value = random().nextInt(Integer.MAX_VALUE);
-        doc.add(newStringField("asc", format.format(value), Field.Store.NO));
-        doc.add(new LegacyIntField("trie", value, Field.Store.NO));
-      }
-      writer.addDocument(doc);
-    }
-    IndexReader reader = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher=newSearcher(reader);
-    num = atLeast(50);
-    for (int i = 0; i < num; i++) {
-      int lower=random().nextInt(Integer.MAX_VALUE);
-      int upper=random().nextInt(Integer.MAX_VALUE);
-      if (lower>upper) {
-        int a=lower; lower=upper; upper=a;
-      }
-      TermRangeQuery cq=TermRangeQuery.newStringRange("asc", format.format(lower), format.format(upper), true, true);
-      LegacyNumericRangeQuery<Integer> tq= LegacyNumericRangeQuery.newIntRange("trie", lower, upper, true, true);
-      TopDocs trTopDocs = searcher.search(cq, 1);
-      TopDocs nrTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
-    }
-    reader.close();
-    directory.close();
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
deleted file mode 100644
index 3a82ff3..0000000
--- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
+++ /dev/null
@@ -1,589 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyFloatField;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.TestLegacyNumericUtils; // NaN arrays
-import org.apache.lucene.util.TestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestNumericRangeQuery32 extends LuceneTestCase {
-  // distance of entries
-  private static int distance;
-  // shift the start of the values to the left, so that there are also negative values:
-  private static final int startOffset = - 1 << 15;
-  // number of docs to generate for testing
-  private static int noDocs;
-  
-  private static Directory directory = null;
-  private static IndexReader reader = null;
-  private static IndexSearcher searcher = null;
-  
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    noDocs = atLeast(4096);
-    distance = (1 << 30) / noDocs;
-    directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
-        .setMergePolicy(newLogMergePolicy()));
-    
-    final FieldType storedInt = new FieldType(LegacyIntField.TYPE_NOT_STORED);
-    storedInt.setStored(true);
-    storedInt.freeze();
-
-    final FieldType storedInt8 = new FieldType(storedInt);
-    storedInt8.setNumericPrecisionStep(8);
-
-    final FieldType storedInt4 = new FieldType(storedInt);
-    storedInt4.setNumericPrecisionStep(4);
-
-    final FieldType storedInt2 = new FieldType(storedInt);
-    storedInt2.setNumericPrecisionStep(2);
-
-    final FieldType storedIntNone = new FieldType(storedInt);
-    storedIntNone.setNumericPrecisionStep(Integer.MAX_VALUE);
-
-    final FieldType unstoredInt = LegacyIntField.TYPE_NOT_STORED;
-
-    final FieldType unstoredInt8 = new FieldType(unstoredInt);
-    unstoredInt8.setNumericPrecisionStep(8);
-
-    final FieldType unstoredInt4 = new FieldType(unstoredInt);
-    unstoredInt4.setNumericPrecisionStep(4);
-
-    final FieldType unstoredInt2 = new FieldType(unstoredInt);
-    unstoredInt2.setNumericPrecisionStep(2);
-
-    LegacyIntField
-      field8 = new LegacyIntField("field8", 0, storedInt8),
-      field4 = new LegacyIntField("field4", 0, storedInt4),
-      field2 = new LegacyIntField("field2", 0, storedInt2),
-      fieldNoTrie = new LegacyIntField("field"+Integer.MAX_VALUE, 0, storedIntNone),
-      ascfield8 = new LegacyIntField("ascfield8", 0, unstoredInt8),
-      ascfield4 = new LegacyIntField("ascfield4", 0, unstoredInt4),
-      ascfield2 = new LegacyIntField("ascfield2", 0, unstoredInt2);
-    
-    Document doc = new Document();
-    // add fields that have a distance, to test general functionality
-    doc.add(field8); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
-    // add ascending fields with a distance of 1, beginning at -noDocs/2, to test the correct splitting of the range and inclusive/exclusive bounds
-    doc.add(ascfield8); doc.add(ascfield4); doc.add(ascfield2);
-    
-    // Add a series of noDocs docs with increasing int values
-    for (int l=0; l<noDocs; l++) {
-      int val=distance*l+startOffset;
-      field8.setIntValue(val);
-      field4.setIntValue(val);
-      field2.setIntValue(val);
-      fieldNoTrie.setIntValue(val);
-
-      val=l-(noDocs/2);
-      ascfield8.setIntValue(val);
-      ascfield4.setIntValue(val);
-      ascfield2.setIntValue(val);
-      writer.addDocument(doc);
-    }
-  
-    reader = writer.getReader();
-    searcher=newSearcher(reader);
-    writer.close();
-  }
-  
-  @AfterClass
-  public static void afterClass() throws Exception {
-    searcher = null;
-    reader.close();
-    reader = null;
-    directory.close();
-    directory = null;
-  }
-  
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // set the theoretical maximum term count for 8bit (see docs for the number)
-    // super.tearDown will restore the default
-    BooleanQuery.setMaxClauseCount(3*255*2 + 255);
-  }
-  
-  /** test for both constant score and boolean query rewrites; the other tests only use the constant score mode */
-  private void testRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-    for (byte i=0; i<2; i++) {
-      TopDocs topDocs;
-      String type;
-      switch (i) {
-        case 0:
-          type = " (constant score filter rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
-          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-          break;
-        case 1:
-          type = " (constant score boolean rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
-          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-          break;
-        default:
-          return;
-      }
-      ScoreDoc[] sd = topDocs.scoreDocs;
-      assertNotNull(sd);
-      assertEquals("Score doc count"+type, count, sd.length );
-      Document doc=searcher.doc(sd[0].doc);
-      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().intValue());
-      doc=searcher.doc(sd[sd.length-1].doc);
-      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().intValue());
-    }
-  }
-
-  @Test
-  public void testRange_8bit() throws Exception {
-    testRange(8);
-  }
-  
-  @Test
-  public void testRange_4bit() throws Exception {
-    testRange(4);
-  }
-  
-  @Test
-  public void testRange_2bit() throws Exception {
-    testRange(2);
-  }
-  
-  @Test
-  public void testOneMatchQuery() throws Exception {
-    LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", 1, sd.length );
-  }
-  
-  private void testLeftOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int upper=(count-1)*distance + (distance/3) + startOffset;
-    LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    Document doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-    
-    q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
-    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-  }
-  
-  @Test
-  public void testLeftOpenRange_8bit() throws Exception {
-    testLeftOpenRange(8);
-  }
-  
-  @Test
-  public void testLeftOpenRange_4bit() throws Exception {
-    testLeftOpenRange(4);
-  }
-  
-  @Test
-  public void testLeftOpenRange_2bit() throws Exception {
-    testLeftOpenRange(2);
-  }
-  
-  private void testRightOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    int lower=(count-1)*distance + (distance/3) +startOffset;
-    LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    Document doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue());
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
-
-    q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
-    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue() );
-  }
-  
-  @Test
-  public void testRightOpenRange_8bit() throws Exception {
-    testRightOpenRange(8);
-  }
-  
-  @Test
-  public void testRightOpenRange_4bit() throws Exception {
-    testRightOpenRange(4);
-  }
-  
-  @Test
-  public void testRightOpenRange_2bit() throws Exception {
-    testRightOpenRange(2);
-  }
-  
-  @Test
-  public void testInfiniteValues() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-      newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new LegacyFloatField("float", Float.NEGATIVE_INFINITY, Field.Store.NO));
-    doc.add(new LegacyIntField("int", Integer.MIN_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new LegacyFloatField("float", Float.POSITIVE_INFINITY, Field.Store.NO));
-    doc.add(new LegacyIntField("int", Integer.MAX_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new LegacyFloatField("float", 0.0f, Field.Store.NO));
-    doc.add(new LegacyIntField("int", 0, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    for (float f : TestLegacyNumericUtils.FLOAT_NANs) {
-      doc = new Document();
-      doc.add(new LegacyFloatField("float", f, Field.Store.NO));
-      writer.addDocument(doc);
-    }
-    
-    writer.close();
-    
-    IndexReader r = DirectoryReader.open(dir);
-    IndexSearcher s = newSearcher(r);
-    
-    Query q= LegacyNumericRangeQuery.newIntRange("int", null, null, true, true);
-    TopDocs topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q= LegacyNumericRangeQuery.newIntRange("int", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", null, null, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NaN, Float.NaN, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", TestLegacyNumericUtils.FLOAT_NANs.length,  topDocs.scoreDocs.length );
-
-    r.close();
-    dir.close();
-  }
-  
-  private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC;
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset;
-      int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset;
-      if (lower>upper) {
-        int a=lower; lower=upper; upper=a;
-      }
-      final BytesRef lowerBytes, upperBytes;
-      BytesRefBuilder b = new BytesRefBuilder();
-      LegacyNumericUtils.intToPrefixCoded(lower, 0, b);
-      lowerBytes = b.toBytesRef();
-      LegacyNumericUtils.intToPrefixCoded(upper, 0, b);
-      upperBytes = b.toBytesRef();
-
-      // test inclusive range
-      LegacyNumericRangeQuery<Integer> tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-      TermRangeQuery cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      TopDocs cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test left exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test right exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-    }
-    
-    checkTermCounts(precisionStep, totalTermCountT, totalTermCountC);
-    if (VERBOSE && precisionStep != Integer.MAX_VALUE) {
-      System.out.println("Average number of terms during random search on '" + field + "':");
-      System.out.println(" Numeric query: " + (((double)totalTermCountT)/(num * 4)));
-      System.out.println(" Classical query: " + (((double)totalTermCountC)/(num * 4)));
-    }
-  }
-  
-  @Test
-  public void testEmptyEnums() throws Exception {
-    int count=3000;
-    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    // test empty enum
-    assert lower < upper;
-    assertTrue(0 < countTerms(LegacyNumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true)));
-    assertEquals(0, countTerms(LegacyNumericRangeQuery.newIntRange("field4", 4, upper, lower, true, true)));
-    // test empty enum outside of bounds
-    lower = distance*noDocs+startOffset;
-    upper = 2 * lower;
-    assert lower < upper;
-    assertEquals(0, countTerms(LegacyNumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true)));
-  }
-  
-  private int countTerms(MultiTermQuery q) throws Exception {
-    final Terms terms = MultiFields.getTerms(reader, q.getField());
-    if (terms == null)
-      return 0;
-    final TermsEnum termEnum = q.getTermsEnum(terms);
-    assertNotNull(termEnum);
-    int count = 0;
-    BytesRef cur, last = null;
-    while ((cur = termEnum.next()) != null) {
-      count++;
-      if (last != null) {
-        assertTrue(last.compareTo(cur) < 0);
-      }
-      last = BytesRef.deepCopyOf(cur);
-    } 
-    // LUCENE-3314: the results after next() has already returned null are undefined,
-    // assertNull(termEnum.next());
-    return count;
-  }
-  
-  private void checkTermCounts(int precisionStep, int termCountT, int termCountC) {
-    if (precisionStep == Integer.MAX_VALUE) {
-      assertEquals("Number of terms should be equal for unlimited precStep", termCountC, termCountT);
-    } else {
-      assertTrue("Number of terms for NRQ should be <= compared to classical TRQ", termCountT <= termCountC);
-    }
-  }
-
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_8bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(8);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_4bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(4);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_2bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(2);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_NoTrie() throws Exception {
-    testRandomTrieAndClassicRangeQuery(Integer.MAX_VALUE);
-  }
-  
-  private void testRangeSplit(int precisionStep) throws Exception {
-    String field="ascfield"+precisionStep;
-    // between 10 and 20 random tests
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int  i =0;  i< num; i++) {
-      int lower=(int)(random().nextDouble()*noDocs - noDocs/2);
-      int upper=(int)(random().nextDouble()*noDocs - noDocs/2);
-      if (lower>upper) {
-        int a=lower; lower=upper; upper=a;
-      }
-      // test inclusive range
-      Query tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-      // test exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
-      // test left exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-      // test right exclusive range
-      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-    }
-  }
-
-  @Test
-  public void testRangeSplit_8bit() throws Exception {
-    testRangeSplit(8);
-  }
-  
-  @Test
-  public void testRangeSplit_4bit() throws Exception {
-    testRangeSplit(4);
-  }
-  
-  @Test
-  public void testRangeSplit_2bit() throws Exception {
-    testRangeSplit(2);
-  }
-  
-  /** we fake a float test using the sortableIntToFloat conversion of NumericUtils */
-  private void testFloatRange(int precisionStep) throws Exception {
-    final String field="ascfield"+precisionStep;
-    final int lower=-1000, upper=+2000;
-    
-    Query tq= LegacyNumericRangeQuery.newFloatRange(field, precisionStep,
-        NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
-    TopDocs tTopDocs = searcher.search(tq, 1);
-    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-  }
-
-  @Test
-  public void testFloatRange_8bit() throws Exception {
-    testFloatRange(8);
-  }
-  
-  @Test
-  public void testFloatRange_4bit() throws Exception {
-    testFloatRange(4);
-  }
-  
-  @Test
-  public void testFloatRange_2bit() throws Exception {
-    testFloatRange(2);
-  }
-  
-  @Test
-  public void testEqualsAndHash() throws Exception {
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test2", 4, 10, 20, false, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test3", 4, 10, 20, true, false));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test4", 4, 10, 20, false, false));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test5", 4, 10, null, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test6", 4, null, 20, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test7", 4, null, null, true, true));
-    QueryUtils.checkEqual(
-      LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test9", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test9", 8, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test10a", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test10b", 4, 10, 20, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test11", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test11", 4, 20, 10, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, false, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newIntRange("test13", 4, 10, 20, true, true),
-      LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
-    );
-    // the following produces a hash collision, because Long and Integer have the same hashcode, so only test equality:
-    Query q1 = LegacyNumericRangeQuery.newIntRange("test14", 4, 10, 20, true, true);
-    Query q2 = LegacyNumericRangeQuery.newLongRange("test14", 4, 10L, 20L, true, true);
-    assertFalse(q1.equals(q2));
-    assertFalse(q2.equals(q1));
-  }
-  
-}
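
A side note on the hash-collision comment in testEqualsAndHash above: java.lang.Integer and java.lang.Long box numerically equal values to the same hashCode, so an int-range and a long-range query built from the same bounds can collide on hashCode even though they are never equal(), which is why the deleted test asserts only equals() for that pair. The following is a tiny standalone illustration of that fact (hypothetical, not part of the removed test):

public class HashCollisionDemo {
  public static void main(String[] args) {
    Integer i = 10;
    Long l = 10L;
    System.out.println(i.hashCode()); // 10
    System.out.println(l.hashCode()); // 10 -- same hash code...
    System.out.println(i.equals(l));  // false -- ...but the objects are not equal
  }
}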

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
deleted file mode 100644
index 7f63fbc..0000000
--- a/lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
+++ /dev/null
@@ -1,623 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyDoubleField;
-import org.apache.lucene.document.LegacyLongField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.TestLegacyNumericUtils;
-import org.apache.lucene.util.TestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestNumericRangeQuery64 extends LuceneTestCase {
-  // distance of entries
-  private static long distance;
-  // shift the start of the values to the left, so that there are also negative values:
-  private static final long startOffset = - 1L << 31;
-  // number of docs to generate for testing
-  private static int noDocs;
-  
-  private static Directory directory = null;
-  private static IndexReader reader = null;
-  private static IndexSearcher searcher = null;
-  
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    noDocs = atLeast(4096);
-    distance = (1L << 60) / noDocs;
-    directory = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
-        .setMergePolicy(newLogMergePolicy()));
-
-    final FieldType storedLong = new FieldType(LegacyLongField.TYPE_NOT_STORED);
-    storedLong.setStored(true);
-    storedLong.freeze();
-
-    final FieldType storedLong8 = new FieldType(storedLong);
-    storedLong8.setNumericPrecisionStep(8);
-
-    final FieldType storedLong4 = new FieldType(storedLong);
-    storedLong4.setNumericPrecisionStep(4);
-
-    final FieldType storedLong6 = new FieldType(storedLong);
-    storedLong6.setNumericPrecisionStep(6);
-
-    final FieldType storedLong2 = new FieldType(storedLong);
-    storedLong2.setNumericPrecisionStep(2);
-
-    final FieldType storedLongNone = new FieldType(storedLong);
-    storedLongNone.setNumericPrecisionStep(Integer.MAX_VALUE);
-
-    final FieldType unstoredLong = LegacyLongField.TYPE_NOT_STORED;
-
-    final FieldType unstoredLong8 = new FieldType(unstoredLong);
-    unstoredLong8.setNumericPrecisionStep(8);
-
-    final FieldType unstoredLong6 = new FieldType(unstoredLong);
-    unstoredLong6.setNumericPrecisionStep(6);
-
-    final FieldType unstoredLong4 = new FieldType(unstoredLong);
-    unstoredLong4.setNumericPrecisionStep(4);
-
-    final FieldType unstoredLong2 = new FieldType(unstoredLong);
-    unstoredLong2.setNumericPrecisionStep(2);
-
-    LegacyLongField
-      field8 = new LegacyLongField("field8", 0L, storedLong8),
-      field6 = new LegacyLongField("field6", 0L, storedLong6),
-      field4 = new LegacyLongField("field4", 0L, storedLong4),
-      field2 = new LegacyLongField("field2", 0L, storedLong2),
-      fieldNoTrie = new LegacyLongField("field"+Integer.MAX_VALUE, 0L, storedLongNone),
-      ascfield8 = new LegacyLongField("ascfield8", 0L, unstoredLong8),
-      ascfield6 = new LegacyLongField("ascfield6", 0L, unstoredLong6),
-      ascfield4 = new LegacyLongField("ascfield4", 0L, unstoredLong4),
-      ascfield2 = new LegacyLongField("ascfield2", 0L, unstoredLong2);
-
-    Document doc = new Document();
-    // add fields that have a distance, to test general functionality
-    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
-    // add ascending fields with a distance of 1, beginning at -noDocs/2, to test the correct splitting of the range and inclusive/exclusive bounds
-    doc.add(ascfield8); doc.add(ascfield6); doc.add(ascfield4); doc.add(ascfield2);
-    
-    // Add a series of noDocs docs with increasing long values, by updating the fields
-    for (int l=0; l<noDocs; l++) {
-      long val=distance*l+startOffset;
-      field8.setLongValue(val);
-      field6.setLongValue(val);
-      field4.setLongValue(val);
-      field2.setLongValue(val);
-      fieldNoTrie.setLongValue(val);
-
-      val=l-(noDocs/2);
-      ascfield8.setLongValue(val);
-      ascfield6.setLongValue(val);
-      ascfield4.setLongValue(val);
-      ascfield2.setLongValue(val);
-      writer.addDocument(doc);
-    }
-    reader = writer.getReader();
-    searcher=newSearcher(reader);
-    writer.close();
-  }
-  
-  @AfterClass
-  public static void afterClass() throws Exception {
-    searcher = null;
-    reader.close();
-    reader = null;
-    directory.close();
-    directory = null;
-  }
-  
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // set the theoretical maximum term count for 8bit (see docs for the number)
-    // super.tearDown will restore the default
-    BooleanQuery.setMaxClauseCount(7*255*2 + 255);
-  }
-  
-  /** test for both constant score and boolean query rewrites; the other tests only use the constant score mode */
-  private void testRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-    for (byte i=0; i<2; i++) {
-      TopDocs topDocs;
-      String type;
-      switch (i) {
-        case 0:
-          type = " (constant score filter rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
-          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-          break;
-        case 1:
-          type = " (constant score boolean rewrite)";
-          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
-          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-          break;
-        default:
-          return;
-      }
-      ScoreDoc[] sd = topDocs.scoreDocs;
-      assertNotNull(sd);
-      assertEquals("Score doc count"+type, count, sd.length );
-      Document doc=searcher.doc(sd[0].doc);
-      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().longValue() );
-      doc=searcher.doc(sd[sd.length-1].doc);
-      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    }
-  }
-
-  @Test
-  public void testRange_8bit() throws Exception {
-    testRange(8);
-  }
-  
-  @Test
-  public void testRange_6bit() throws Exception {
-    testRange(6);
-  }
-  
-  @Test
-  public void testRange_4bit() throws Exception {
-    testRange(4);
-  }
-  
-  @Test
-  public void testRange_2bit() throws Exception {
-    testRange(2);
-  }
-  
-  @Test
-  public void testOneMatchQuery() throws Exception {
-    LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", 1, sd.length );
-  }
-  
-  private void testLeftOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long upper=(count-1)*distance + (distance/3) + startOffset;
-    LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    Document doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-
-    q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
-    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-  }
-  
-  @Test
-  public void testLeftOpenRange_8bit() throws Exception {
-    testLeftOpenRange(8);
-  }
-  
-  @Test
-  public void testLeftOpenRange_6bit() throws Exception {
-    testLeftOpenRange(6);
-  }
-  
-  @Test
-  public void testLeftOpenRange_4bit() throws Exception {
-    testLeftOpenRange(4);
-  }
-  
-  @Test
-  public void testLeftOpenRange_2bit() throws Exception {
-    testLeftOpenRange(2);
-  }
-  
-  private void testRightOpenRange(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int count=3000;
-    long lower=(count-1)*distance + (distance/3) +startOffset;
-    LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, true);
-    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    ScoreDoc[] sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    Document doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-
-    q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
-    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
-    sd = topDocs.scoreDocs;
-    assertNotNull(sd);
-    assertEquals("Score doc count", noDocs-count, sd.length );
-    doc=searcher.doc(sd[0].doc);
-    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
-    doc=searcher.doc(sd[sd.length-1].doc);
-    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
-  }
-  
-  @Test
-  public void testRightOpenRange_8bit() throws Exception {
-    testRightOpenRange(8);
-  }
-  
-  @Test
-  public void testRightOpenRange_6bit() throws Exception {
-    testRightOpenRange(6);
-  }
-  
-  @Test
-  public void testRightOpenRange_4bit() throws Exception {
-    testRightOpenRange(4);
-  }
-  
-  @Test
-  public void testRightOpenRange_2bit() throws Exception {
-    testRightOpenRange(2);
-  }
-  
-  @Test
-  public void testInfiniteValues() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-      newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    doc.add(new LegacyDoubleField("double", Double.NEGATIVE_INFINITY, Field.Store.NO));
-    doc.add(new LegacyLongField("long", Long.MIN_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new LegacyDoubleField("double", Double.POSITIVE_INFINITY, Field.Store.NO));
-    doc.add(new LegacyLongField("long", Long.MAX_VALUE, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    doc = new Document();
-    doc.add(new LegacyDoubleField("double", 0.0, Field.Store.NO));
-    doc.add(new LegacyLongField("long", 0L, Field.Store.NO));
-    writer.addDocument(doc);
-    
-    for (double d : TestLegacyNumericUtils.DOUBLE_NANs) {
-      doc = new Document();
-      doc.add(new LegacyDoubleField("double", d, Field.Store.NO));
-      writer.addDocument(doc);
-    }
-    
-    writer.close();
-    
-    IndexReader r = DirectoryReader.open(dir);
-    IndexSearcher s = newSearcher(r);
-    
-    Query q= LegacyNumericRangeQuery.newLongRange("long", null, null, true, true);
-    TopDocs topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q= LegacyNumericRangeQuery.newLongRange("long", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-    
-    q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, false, false);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
-
-    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NaN, Double.NaN, true, true);
-    topDocs = s.search(q, 10);
-    assertEquals("Score doc count", TestLegacyNumericUtils.DOUBLE_NANs.length,  topDocs.scoreDocs.length );
-
-    r.close();
-    dir.close();
-  }
-  
-  private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
-    String field="field"+precisionStep;
-    int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC;
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset;
-      long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset;
-      if (lower>upper) {
-        long a=lower; lower=upper; upper=a;
-      }
-      final BytesRef lowerBytes, upperBytes;
-      BytesRefBuilder b = new BytesRefBuilder();
-      LegacyNumericUtils.longToPrefixCoded(lower, 0, b);
-      lowerBytes = b.toBytesRef();
-      LegacyNumericUtils.longToPrefixCoded(upper, 0, b);
-      upperBytes = b.toBytesRef();
-      
-      // test inclusive range
-      LegacyNumericRangeQuery<Long> tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-      TermRangeQuery cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      TopDocs cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test left exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-      // test right exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
-      cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      cTopDocs = searcher.search(cq, 1);
-      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
-      totalTermCountT += termCountT = countTerms(tq);
-      totalTermCountC += termCountC = countTerms(cq);
-      checkTermCounts(precisionStep, termCountT, termCountC);
-    }
-    
-    checkTermCounts(precisionStep, totalTermCountT, totalTermCountC);
-    if (VERBOSE && precisionStep != Integer.MAX_VALUE) {
-      System.out.println("Average number of terms during random search on '" + field + "':");
-      System.out.println(" Numeric query: " + (((double)totalTermCountT)/(num * 4)));
-      System.out.println(" Classical query: " + (((double)totalTermCountC)/(num * 4)));
-    }
-  }
-  
-  @Test
-  public void testEmptyEnums() throws Exception {
-    int count=3000;
-    long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
-    // test empty enum
-    assert lower < upper;
-    assertTrue(0 < countTerms(LegacyNumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true)));
-    assertEquals(0, countTerms(LegacyNumericRangeQuery.newLongRange("field4", 4, upper, lower, true, true)));
-    // test empty enum outside of bounds
-    lower = distance*noDocs+startOffset;
-    upper = 2L * lower;
-    assert lower < upper;
-    assertEquals(0, countTerms(LegacyNumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true)));
-  }
-  
-  private int countTerms(MultiTermQuery q) throws Exception {
-    final Terms terms = MultiFields.getTerms(reader, q.getField());
-    if (terms == null)
-      return 0;
-    final TermsEnum termEnum = q.getTermsEnum(terms);
-    assertNotNull(termEnum);
-    int count = 0;
-    BytesRef cur, last = null;
-    while ((cur = termEnum.next()) != null) {
-      count++;
-      if (last != null) {
-        assertTrue(last.compareTo(cur) < 0);
-      }
-      last = BytesRef.deepCopyOf(cur);
-    } 
-    // LUCENE-3314: the results after next() has already returned null are undefined,
-    // assertNull(termEnum.next());
-    return count;
-  }
-  
-  private void checkTermCounts(int precisionStep, int termCountT, int termCountC) {
-    if (precisionStep == Integer.MAX_VALUE) {
-      assertEquals("Number of terms should be equal for unlimited precStep", termCountC, termCountT);
-    } else {
-      assertTrue("Number of terms for NRQ should be <= compared to classical TRQ", termCountT <= termCountC);
-    }
-  }
-
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_8bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(8);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_6bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(6);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_4bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(4);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_2bit() throws Exception {
-    testRandomTrieAndClassicRangeQuery(2);
-  }
-  
-  @Test
-  public void testRandomTrieAndClassicRangeQuery_NoTrie() throws Exception {
-    testRandomTrieAndClassicRangeQuery(Integer.MAX_VALUE);
-  }
-  
-  private void testRangeSplit(int precisionStep) throws Exception {
-    String field="ascfield"+precisionStep;
-    // between 10 and 20 random tests
-    int num = TestUtil.nextInt(random(), 10, 20);
-    for (int i = 0; i < num; i++) {
-      long lower=(long)(random().nextDouble()*noDocs - noDocs/2);
-      long upper=(long)(random().nextDouble()*noDocs - noDocs/2);
-      if (lower>upper) {
-        long a=lower; lower=upper; upper=a;
-      }
-      // test inclusive range
-      Query tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
-      TopDocs tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-      // test exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
-      // test left exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-      // test right exclusive range
-      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
-      tTopDocs = searcher.search(tq, 1);
-      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
-    }
-  }
-
-  @Test
-  public void testRangeSplit_8bit() throws Exception {
-    testRangeSplit(8);
-  }
-  
-  @Test
-  public void testRangeSplit_6bit() throws Exception {
-    testRangeSplit(6);
-  }
-  
-  @Test
-  public void testRangeSplit_4bit() throws Exception {
-    testRangeSplit(4);
-  }
-  
-  @Test
-  public void testRangeSplit_2bit() throws Exception {
-    testRangeSplit(2);
-  }
-  
-  /** we fake a double test using the sortableLongToDouble conversion of NumericUtils */
-  private void testDoubleRange(int precisionStep) throws Exception {
-    final String field="ascfield"+precisionStep;
-    final long lower=-1000L, upper=+2000L;
-    
-    Query tq= LegacyNumericRangeQuery.newDoubleRange(field, precisionStep,
-        NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
-    TopDocs tTopDocs = searcher.search(tq, 1);
-    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
-  }
-
-  @Test
-  public void testDoubleRange_8bit() throws Exception {
-    testDoubleRange(8);
-  }
-  
-  @Test
-  public void testDoubleRange_6bit() throws Exception {
-    testDoubleRange(6);
-  }
-  
-  @Test
-  public void testDoubleRange_4bit() throws Exception {
-    testDoubleRange(4);
-  }
-  
-  @Test
-  public void testDoubleRange_2bit() throws Exception {
-    testDoubleRange(2);
-  }
-  
-  @Test
-  public void testEqualsAndHash() throws Exception {
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test2", 4, 10L, 20L, false, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test3", 4, 10L, 20L, true, false));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test4", 4, 10L, 20L, false, false));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test5", 4, 10L, null, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test6", 4, null, 20L, true, true));
-    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test7", 4, null, null, true, true));
-    QueryUtils.checkEqual(
-      LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test9", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test9", 8, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test10a", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test10b", 4, 10L, 20L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test11", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test11", 4, 20L, 10L, true, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, false, true)
-    );
-    QueryUtils.checkUnequal(
-      LegacyNumericRangeQuery.newLongRange("test13", 4, 10L, 20L, true, true),
-      LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
-    );
-     // the difference to the int range is tested in TestNumericRangeQuery32
-  }
-}


[7/7] lucene-solr:master: LUCENE-7413: move legacy numeric support to backwards module

Posted by rm...@apache.org.
LUCENE-7413: move legacy numeric support to backwards module


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/105c7eae
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/105c7eae
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/105c7eae

Branch: refs/heads/master
Commit: 105c7eae87896762cbcb295c73c8e8b1fd8f71f8
Parents: ba09fa7
Author: Robert Muir <rm...@apache.org>
Authored: Wed Aug 17 09:28:45 2016 -0400
Committer: Robert Muir <rm...@apache.org>
Committed: Wed Aug 17 09:28:45 2016 -0400

----------------------------------------------------------------------
 .../apache/lucene/legacy/LegacyDoubleField.java | 174 ++++++
 .../org/apache/lucene/legacy/LegacyField.java   |  90 +++
 .../apache/lucene/legacy/LegacyFieldType.java   | 149 +++++
 .../apache/lucene/legacy/LegacyFloatField.java  | 174 ++++++
 .../apache/lucene/legacy/LegacyIntField.java    | 175 ++++++
 .../apache/lucene/legacy/LegacyLongField.java   | 184 ++++++
 .../lucene/legacy/LegacyNumericRangeQuery.java  | 537 ++++++++++++++++
 .../lucene/legacy/LegacyNumericTokenStream.java | 357 +++++++++++
 .../apache/lucene/legacy/LegacyNumericType.java |  34 +
 .../lucene/legacy/LegacyNumericUtils.java       | 510 +++++++++++++++
 .../org/apache/lucene/legacy/package-info.java  |  21 +
 .../index/TestBackwardsCompatibility.java       |   8 +-
 .../apache/lucene/legacy/TestLegacyField.java   | 196 ++++++
 .../lucene/legacy/TestLegacyFieldReuse.java     |  81 +++
 .../lucene/legacy/TestLegacyNumericUtils.java   | 571 +++++++++++++++++
 .../apache/lucene/legacy/TestLegacyTerms.java   | 164 +++++
 .../TestMultiValuedNumericRangeQuery.java       |  84 +++
 .../lucene/legacy/TestNumericRangeQuery32.java  | 461 ++++++++++++++
 .../lucene/legacy/TestNumericRangeQuery64.java  | 490 +++++++++++++++
 .../lucene/legacy/TestNumericTokenStream.java   | 188 ++++++
 .../analysis/LegacyNumericTokenStream.java      | 357 -----------
 .../java/org/apache/lucene/document/Field.java  |  33 -
 .../org/apache/lucene/document/FieldType.java   |  98 +--
 .../lucene/document/LegacyDoubleField.java      | 172 -----
 .../lucene/document/LegacyFloatField.java       | 174 ------
 .../apache/lucene/document/LegacyIntField.java  | 174 ------
 .../apache/lucene/document/LegacyLongField.java | 182 ------
 .../lucene/search/LegacyNumericRangeQuery.java  | 536 ----------------
 .../apache/lucene/util/LegacyNumericUtils.java  | 508 ---------------
 .../lucene/analysis/TestNumericTokenStream.java | 169 -----
 .../org/apache/lucene/document/TestField.java   |  94 ---
 .../apache/lucene/document/TestFieldType.java   |   9 -
 .../org/apache/lucene/index/TestFieldReuse.java |  53 +-
 .../test/org/apache/lucene/index/TestTerms.java | 134 ----
 .../TestMultiValuedNumericRangeQuery.java       |  80 ---
 .../lucene/search/TestNumericRangeQuery32.java  | 589 ------------------
 .../lucene/search/TestNumericRangeQuery64.java  | 623 -------------------
 .../lucene/util/TestLegacyNumericUtils.java     | 564 -----------------
 lucene/join/build.xml                           |   6 +-
 .../search/join/DocValuesTermsCollector.java    |   9 +-
 .../org/apache/lucene/search/join/JoinUtil.java |   6 +-
 .../search/join/TermsIncludingScoreQuery.java   |   2 +-
 .../apache/lucene/search/join/TestJoinUtil.java |   6 +-
 .../memory/TestMemoryIndexAgainstRAMDir.java    |   7 -
 .../search/TestDiversifiedTopDocsCollector.java |   5 +-
 lucene/queryparser/build.xml                    |   6 +-
 .../LegacyNumericRangeQueryNodeBuilder.java     |  10 +-
 .../standard/config/LegacyNumericConfig.java    |  15 +-
 .../nodes/LegacyNumericRangeQueryNode.java      |   9 +-
 .../LegacyNumericRangeQueryBuilder.java         |   8 +-
 .../standard/TestLegacyNumericQueryParser.java  |  34 +-
 .../xml/CoreParserTestIndexData.java            |   2 +-
 .../builders/TestNumericRangeQueryBuilder.java  |   2 +-
 lucene/spatial-extras/build.xml                 |   8 +-
 .../lucene/spatial/bbox/BBoxStrategy.java       |  39 +-
 .../prefix/BytesRefIteratorTokenStream.java     |   2 +-
 .../spatial/vector/PointVectorStrategy.java     |  37 +-
 .../lucene/spatial/bbox/TestBBoxStrategy.java   |   8 +-
 .../solr/analytics/util/AnalyticsParsers.java   |   2 +-
 .../util/valuesource/DateFieldSource.java       |   2 +-
 .../solr/handler/component/StatsField.java      |  17 +-
 .../org/apache/solr/request/IntervalFacets.java |   2 +-
 .../org/apache/solr/request/NumericFacets.java  |   3 +-
 .../java/org/apache/solr/schema/BBoxField.java  |   7 +-
 .../java/org/apache/solr/schema/EnumField.java  |  17 +-
 .../java/org/apache/solr/schema/FieldType.java  |   3 +-
 .../schema/SpatialPointVectorFieldType.java     |   9 +-
 .../org/apache/solr/schema/TrieDoubleField.java |   2 +-
 .../java/org/apache/solr/schema/TrieField.java  |  44 +-
 .../org/apache/solr/schema/TrieFloatField.java  |   2 +-
 .../org/apache/solr/schema/TrieIntField.java    |   2 +-
 .../org/apache/solr/schema/TrieLongField.java   |   2 +-
 .../org/apache/solr/search/QueryParsing.java    |   2 +-
 .../apache/solr/search/QueryWrapperFilter.java  |   2 +-
 .../apache/solr/search/facet/FacetField.java    |   3 +-
 .../apache/solr/search/mlt/CloudMLTQParser.java |   2 +-
 .../solr/search/mlt/SimpleMLTQParser.java       |   2 +-
 .../org/apache/solr/uninverting/FieldCache.java |  20 +-
 .../solr/uninverting/UninvertingReader.java     |  16 +-
 .../org/apache/solr/update/VersionInfo.java     |   2 +-
 .../solr/search/TestMaxScoreQueryParser.java    |   1 +
 .../solr/search/function/TestOrdValues.java     |   4 +-
 .../solr/uninverting/TestDocTermOrds.java       |   6 +-
 .../TestFieldCacheSanityChecker.java            |   8 +-
 .../solr/uninverting/TestFieldCacheSort.java    |   8 +-
 .../solr/uninverting/TestLegacyFieldCache.java  |  10 +-
 .../solr/uninverting/TestNumericTerms32.java    |  14 +-
 .../solr/uninverting/TestNumericTerms64.java    |  16 +-
 .../solr/uninverting/TestUninvertingReader.java |  10 +-
 89 files changed, 4885 insertions(+), 4773 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyDoubleField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyDoubleField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyDoubleField.java
new file mode 100644
index 0000000..e98a4f0
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyDoubleField.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoublePoint;
+import org.apache.lucene.index.IndexOptions;
+
+
+/**
+ * <p>
+ * Field that indexes <code>double</code> values
+ * for efficient range filtering and sorting. Here's an example usage:
+ * 
+ * <pre class="prettyprint">
+ * document.add(new LegacyDoubleField(name, 6.0, Field.Store.NO));
+ * </pre>
+ * 
+ * For optimal performance, re-use the <code>LegacyDoubleField</code> and
+ * {@link Document} instance for more than one document:
+ * 
+ * <pre class="prettyprint">
+ *  LegacyDoubleField field = new LegacyDoubleField(name, 0.0, Field.Store.NO);
+ *  Document document = new Document();
+ *  document.add(field);
+ * 
+ *  for(all documents) {
+ *    ...
+ *    field.setDoubleValue(value)
+ *    writer.addDocument(document);
+ *    ...
+ *  }
+ * </pre>
+ *
+ * See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
+ * LegacyFloatField}.
+ *
+ * <p>To perform range querying or filtering against a
+ * <code>LegacyDoubleField</code>, use {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ * To sort according to a
+ * <code>LegacyDoubleField</code>, use the normal numeric sort types, eg
+ * {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>LegacyDoubleField</code>
+ * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
+ *
+ * <p>You may add the same field name as a <code>LegacyDoubleField</code> to
+ * the same document more than once.  Range querying and
+ * filtering will be the logical OR of all values; so a range query
+ * will hit all documents that have at least one value in
+ * the range. However sort behavior is not defined.  If you need to sort,
+ * you should separately index a single-valued <code>LegacyDoubleField</code>.</p>
+ *
+ * <p>A <code>LegacyDoubleField</code> will consume somewhat more disk space
+ * in the index than an ordinary single-valued field.
+ * However, for a typical index that includes substantial
+ * textual content per document, this increase will likely
+ * be in the noise. </p>
+ *
+ * <p>Within Lucene, each numeric value is indexed as a
+ * <em>trie</em> structure, where each term is logically
+ * assigned to larger and larger pre-defined brackets (which
+ * are simply lower-precision representations of the value).
+ * The step size between each successive bracket is called the
+ * <code>precisionStep</code>, measured in bits.  Smaller
+ * <code>precisionStep</code> values result in larger number
+ * of brackets, which consumes more disk space in the index
+ * but may result in faster range search performance.  The
+ * default value, 16, was selected for a reasonable tradeoff
+ * of disk space consumption versus performance.  You can
+ * create a custom {@link LegacyFieldType} and invoke the {@link
+ * LegacyFieldType#setNumericPrecisionStep} method if you'd
+ * like to change the value.  Note that you must also
+ * specify a congruent value when creating {@link
+ * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ * For low cardinality fields larger precision steps are good.
+ * If the cardinality is &lt; 100, it is fair
+ * to use {@link Integer#MAX_VALUE}, which produces one
+ * term per value.
+ *
+ * <p>For more information on the internals of numeric trie
+ * indexing, including the <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * configuration, see {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The format of
+ * indexed values is described in {@link org.apache.lucene.legacy.LegacyNumericUtils}.
+ *
+ * <p>If you only need to sort by numeric value, and never
+ * run range querying/filtering, you can index using a
+ * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+ * This will minimize disk space consumed. </p>
+ *
+ * <p>More advanced users can instead use {@link
+ * org.apache.lucene.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
+ * class is a wrapper around this token stream type for
+ * easier, more intuitive usage.</p>
+ *
+ * @deprecated Please use {@link DoublePoint} instead
+ *
+ * @since 2.9
+ */
+
+@Deprecated
+public final class LegacyDoubleField extends LegacyField {
+  
+  /** 
+   * Type for a LegacyDoubleField that is not stored:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
+  static {
+    TYPE_NOT_STORED.setTokenized(true);
+    TYPE_NOT_STORED.setOmitNorms(true);
+    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_NOT_STORED.setNumericType(LegacyNumericType.DOUBLE);
+    TYPE_NOT_STORED.freeze();
+  }
+
+  /** 
+   * Type for a stored LegacyDoubleField:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
+  static {
+    TYPE_STORED.setTokenized(true);
+    TYPE_STORED.setOmitNorms(true);
+    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_STORED.setNumericType(LegacyNumericType.DOUBLE);
+    TYPE_STORED.setStored(true);
+    TYPE_STORED.freeze();
+  }
+
+  /** Creates a stored or un-stored LegacyDoubleField with the provided value
+   *  and default <code>precisionStep</code> {@link
+   *  org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
+   *  @param name field name
+   *  @param value 64-bit double value
+   *  @param stored Store.YES if the content should also be stored
+   *  @throws IllegalArgumentException if the field name is null. 
+   */
+  public LegacyDoubleField(String name, double value, Store stored) {
+    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
+    fieldsData = Double.valueOf(value);
+  }
+  
+  /** Expert: allows you to customize the {@link
+   *  LegacyFieldType}. 
+   *  @param name field name
+   *  @param value 64-bit double value
+   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
+   *         of {@link LegacyNumericType#DOUBLE}.
+   *  @throws IllegalArgumentException if the field name or type is null, or
+   *          if the field type does not have a DOUBLE numericType()
+   */
+  public LegacyDoubleField(String name, double value, LegacyFieldType type) {
+    super(name, type);
+    if (type.numericType() != LegacyNumericType.DOUBLE) {
+      throw new IllegalArgumentException("type.numericType() must be DOUBLE but got " + type.numericType());
+    }
+    fieldsData = Double.valueOf(value);
+  }
+}
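
To make the relocation concrete, here is a minimal, self-contained sketch (not part of the commit) that indexes the moved LegacyDoubleField from its new org.apache.lucene.legacy package next to the DoublePoint replacement named in its deprecation notice, then runs both range queries. The field names, RAMDirectory, and analyzer choice are illustrative assumptions, and it presumes lucene-backward-codecs and lucene-analyzers-common are on the classpath.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.legacy.LegacyDoubleField;
import org.apache.lucene.legacy.LegacyNumericRangeQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class LegacyDoubleFieldSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      // deprecated trie-encoded field, now imported from org.apache.lucene.legacy
      doc.add(new LegacyDoubleField("weight", 6.0, Field.Store.NO));
      // the replacement the deprecation notice points to
      doc.add(new DoublePoint("weightPoint", 6.0));
      writer.addDocument(doc);
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      int legacyHits = searcher.count(
          LegacyNumericRangeQuery.newDoubleRange("weight", 0.0, 10.0, true, true));
      int pointHits = searcher.count(
          DoublePoint.newRangeQuery("weightPoint", 0.0, 10.0));
      System.out.println(legacyHits + " legacy hits, " + pointHits + " point hits"); // expect 1 and 1
    }
  }
}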

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyField.java
new file mode 100644
index 0000000..87ac0e5
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyField.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexOptions;
+
+/**
+ * Field extension with support for legacy numerics
+ * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+ */
+@Deprecated
+public class LegacyField extends Field {
+
+  /**
+   * Expert: creates a field with no initial value.
+   * Intended only for custom LegacyField subclasses.
+   * @param name field name
+   * @param type field type
+   * @throws IllegalArgumentException if either the name or type
+   *         is null.
+   */
+  public LegacyField(String name, LegacyFieldType type) {
+    super(name, type);
+  }
+  
+  @Override
+  public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) {
+    if (fieldType().indexOptions() == IndexOptions.NONE) {
+      // Not indexed
+      return null;
+    }
+    final LegacyFieldType fieldType = (LegacyFieldType) fieldType();
+    final LegacyNumericType numericType = fieldType.numericType();
+    if (numericType != null) {
+      if (!(reuse instanceof LegacyNumericTokenStream && ((LegacyNumericTokenStream)reuse).getPrecisionStep() == fieldType.numericPrecisionStep())) {
+        // lazy init the TokenStream as it is heavy to instantiate
+        // (attributes,...) if not needed (stored field loading)
+        reuse = new LegacyNumericTokenStream(fieldType.numericPrecisionStep());
+      }
+      final LegacyNumericTokenStream nts = (LegacyNumericTokenStream) reuse;
+      // initialize value in TokenStream
+      final Number val = (Number) fieldsData;
+      switch (numericType) {
+      case INT:
+        nts.setIntValue(val.intValue());
+        break;
+      case LONG:
+        nts.setLongValue(val.longValue());
+        break;
+      case FLOAT:
+        nts.setFloatValue(val.floatValue());
+        break;
+      case DOUBLE:
+        nts.setDoubleValue(val.doubleValue());
+        break;
+      default:
+        throw new AssertionError("Should never get here");
+      }
+      return reuse;
+    }
+    return super.tokenStream(analyzer, reuse);
+  }
+  
+  @Override
+  public void setTokenStream(TokenStream tokenStream) {
+    final LegacyFieldType fieldType = (LegacyFieldType) fieldType();
+    if (fieldType.numericType() != null) {
+      throw new IllegalArgumentException("cannot set private TokenStream on numeric fields");
+    }
+    super.setTokenStream(tokenStream);
+  }
+
+}
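
The tokenStream override above is what turns a single numeric value into multiple trie terms at index time. The following is a small sketch, not part of the commit, that drives the underlying LegacyNumericTokenStream directly, assuming the backward-codecs jar is on the classpath; for a 64-bit value with precisionStep=16 it should emit ceil(64/16) = 4 terms, one per shift, matching the indexedTermsPerValue formula quoted in the LegacyNumericRangeQuery javadoc below.

import org.apache.lucene.legacy.LegacyNumericTokenStream;
import org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute;

public class TrieTermSketch {
  public static void main(String[] args) throws Exception {
    // precisionStep = 16, the default used by the 64-bit legacy field types
    try (LegacyNumericTokenStream ts = new LegacyNumericTokenStream(16)) {
      ts.setLongValue(1234L);
      LegacyNumericTermAttribute termAtt = ts.getAttribute(LegacyNumericTermAttribute.class);
      ts.reset();
      int terms = 0;
      while (ts.incrementToken()) {
        terms++;
        // each successive term drops precisionStep more low-order bits
        System.out.println("shift=" + termAtt.getShift());
      }
      ts.end();
      System.out.println(terms + " trie terms"); // expected: 4
    }
  }
}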

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFieldType.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFieldType.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFieldType.java
new file mode 100644
index 0000000..1f4b0af
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFieldType.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.IndexOptions;
+
+/**
+ * FieldType extension with support for legacy numerics
+ * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+ */
+@Deprecated
+public final class LegacyFieldType extends FieldType {
+  private LegacyNumericType numericType;
+  private int numericPrecisionStep = LegacyNumericUtils.PRECISION_STEP_DEFAULT;
+
+  /**
+   * Create a new mutable LegacyFieldType with all of the properties from <code>ref</code>
+   */
+  public LegacyFieldType(LegacyFieldType ref) {
+    super(ref);
+    this.numericType = ref.numericType;
+    this.numericPrecisionStep = ref.numericPrecisionStep;
+  }
+  
+  /**
+   * Create a new FieldType with default properties.
+   */
+  public LegacyFieldType() {
+  }
+  
+  /**
+   * Specifies the field's numeric type.
+   * @param type numeric type, or null if the field has no numeric type.
+   * @throws IllegalStateException if this FieldType is frozen against
+   *         future modifications.
+   * @see #numericType()
+   *
+   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+   */
+  @Deprecated
+  public void setNumericType(LegacyNumericType type) {
+    checkIfFrozen();
+    numericType = type;
+  }
+  
+  /** 
+   * LegacyNumericType: if non-null then the field's value will be indexed
+   * numerically so that {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} can be used at
+   * search time. 
+   * <p>
+   * The default is <code>null</code> (no numeric type) 
+   * @see #setNumericType(LegacyNumericType)
+   *
+   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+   */
+  @Deprecated
+  public LegacyNumericType numericType() {
+    return numericType;
+  }
+  
+  /**
+   * Sets the numeric precision step for the field.
+   * @param precisionStep numeric precision step for the field
+   * @throws IllegalArgumentException if precisionStep is less than 1. 
+   * @throws IllegalStateException if this FieldType is frozen against
+   *         future modifications.
+   * @see #numericPrecisionStep()
+   *
+   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+   */
+  @Deprecated
+  public void setNumericPrecisionStep(int precisionStep) {
+    checkIfFrozen();
+    if (precisionStep < 1) {
+      throw new IllegalArgumentException("precisionStep must be >= 1 (got " + precisionStep + ")");
+    }
+    this.numericPrecisionStep = precisionStep;
+  }
+  
+  /** 
+   * Precision step for numeric field. 
+   * <p>
+   * This has no effect if {@link #numericType()} returns null.
+   * <p>
+   * The default is {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT}
+   * @see #setNumericPrecisionStep(int)
+   *
+   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+   */
+  @Deprecated
+  public int numericPrecisionStep() {
+    return numericPrecisionStep;
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    result = prime * result + numericPrecisionStep;
+    result = prime * result + ((numericType == null) ? 0 : numericType.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!super.equals(obj)) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) return false;
+    LegacyFieldType other = (LegacyFieldType) obj;
+    if (numericPrecisionStep != other.numericPrecisionStep) return false;
+    if (numericType != other.numericType) return false;
+    return true;
+  }
+
+  /** Prints a Field for human consumption. */
+  @Override
+  public String toString() {
+    StringBuilder result = new StringBuilder();
+    result.append(super.toString());
+    if (indexOptions() != IndexOptions.NONE) {
+      if (result.length() > 0) {
+        result.append(",");
+      }
+      if (numericType != null) {
+        result.append(",numericType=");
+        result.append(numericType);
+        result.append(",numericPrecisionStep=");
+        result.append(numericPrecisionStep);
+      }
+    }
+    return result.toString();
+  }
+}
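
LegacyFieldType is the knob for a non-default precisionStep. Below is a hedged sketch (not from the commit) that copies one of the frozen stock types, lowers the step, and builds a matching query; per the javadoc above, the query side must be given the same step that was used at index time. The field name and the step of 8 are arbitrary choices for illustration.

import org.apache.lucene.document.Field;
import org.apache.lucene.legacy.LegacyFieldType;
import org.apache.lucene.legacy.LegacyLongField;
import org.apache.lucene.legacy.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;

public class PrecisionStepSketch {
  public static void main(String[] args) {
    // the copy constructor yields a mutable type; the stock constants are frozen
    LegacyFieldType type = new LegacyFieldType(LegacyLongField.TYPE_NOT_STORED);
    type.setNumericPrecisionStep(8);   // default for the 64-bit legacy types is 16
    type.freeze();

    Field field = new LegacyLongField("timestamp", 0L, type);

    // the query must be built with the same precision step as the indexed field
    Query q = LegacyNumericRangeQuery.newLongRange("timestamp", 8, 0L, 1000L, true, true);
    System.out.println(field.fieldType() + " / " + q);
  }
}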

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFloatField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFloatField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFloatField.java
new file mode 100644
index 0000000..ea3b84a
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyFloatField.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FloatPoint;
+import org.apache.lucene.index.IndexOptions;
+
+/**
+ * <p>
+ * Field that indexes <code>float</code> values
+ * for efficient range filtering and sorting. Here's an example usage:
+ * 
+ * <pre class="prettyprint">
+ * document.add(new LegacyFloatField(name, 6.0F, Field.Store.NO));
+ * </pre>
+ * 
+ * For optimal performance, re-use the <code>LegacyFloatField</code> and
+ * {@link Document} instance for more than one document:
+ * 
+ * <pre class="prettyprint">
+ *  LegacyFloatField field = new LegacyFloatField(name, 0.0F, Field.Store.NO);
+ *  Document document = new Document();
+ *  document.add(field);
+ * 
+ *  for(all documents) {
+ *    ...
+ *    field.setFloatValue(value)
+ *    writer.addDocument(document);
+ *    ...
+ *  }
+ * </pre>
+ *
+ * See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
+ * LegacyDoubleField}.
+ *
+ * <p>To perform range querying or filtering against a
+ * <code>LegacyFloatField</code>, use {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ * To sort according to a
+ * <code>LegacyFloatField</code>, use the normal numeric sort types, eg
+ * {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>LegacyFloatField</code>
+ * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
+ *
+ * <p>You may add the same field name as a <code>LegacyFloatField</code> to
+ * the same document more than once.  Range querying and
+ * filtering will be the logical OR of all values; so a range query
+ * will hit all documents that have at least one value in
+ * the range. However sort behavior is not defined.  If you need to sort,
+ * you should separately index a single-valued <code>LegacyFloatField</code>.</p>
+ *
+ * <p>A <code>LegacyFloatField</code> will consume somewhat more disk space
+ * in the index than an ordinary single-valued field.
+ * However, for a typical index that includes substantial
+ * textual content per document, this increase will likely
+ * be in the noise. </p>
+ *
+ * <p>Within Lucene, each numeric value is indexed as a
+ * <em>trie</em> structure, where each term is logically
+ * assigned to larger and larger pre-defined brackets (which
+ * are simply lower-precision representations of the value).
+ * The step size between each successive bracket is called the
+ * <code>precisionStep</code>, measured in bits.  Smaller
+ * <code>precisionStep</code> values result in larger number
+ * of brackets, which consumes more disk space in the index
+ * but may result in faster range search performance.  The
+ * default value, 8, was selected for a reasonable tradeoff
+ * of disk space consumption versus performance.  You can
+ * create a custom {@link LegacyFieldType} and invoke the {@link
+ * LegacyFieldType#setNumericPrecisionStep} method if you'd
+ * like to change the value.  Note that you must also
+ * specify a congruent value when creating {@link
+ * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ * For low cardinality fields larger precision steps are good.
+ * If the cardinality is &lt; 100, it is fair
+ * to use {@link Integer#MAX_VALUE}, which produces one
+ * term per value.
+ *
+ * <p>For more information on the internals of numeric trie
+ * indexing, including the <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * configuration, see {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The format of
+ * indexed values is described in {@link org.apache.lucene.legacy.LegacyNumericUtils}.
+ *
+ * <p>If you only need to sort by numeric value, and never
+ * run range querying/filtering, you can index using a
+ * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+ * This will minimize disk space consumed. </p>
+ *
+ * <p>More advanced users can instead use {@link
+ * org.apache.lucene.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
+ * class is a wrapper around this token stream type for
+ * easier, more intuitive usage.</p>
+ *
+ * @deprecated Please use {@link FloatPoint} instead
+ *
+ * @since 2.9
+ */
+
+@Deprecated
+public final class LegacyFloatField extends LegacyField {
+  
+  /** 
+   * Type for a LegacyFloatField that is not stored:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
+  static {
+    TYPE_NOT_STORED.setTokenized(true);
+    TYPE_NOT_STORED.setOmitNorms(true);
+    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_NOT_STORED.setNumericType(LegacyNumericType.FLOAT);
+    TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
+    TYPE_NOT_STORED.freeze();
+  }
+
+  /** 
+   * Type for a stored LegacyFloatField:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
+  static {
+    TYPE_STORED.setTokenized(true);
+    TYPE_STORED.setOmitNorms(true);
+    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_STORED.setNumericType(LegacyNumericType.FLOAT);
+    TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
+    TYPE_STORED.setStored(true);
+    TYPE_STORED.freeze();
+  }
+
+  /** Creates a stored or un-stored LegacyFloatField with the provided value
+   *  and default <code>precisionStep</code> {@link
+   *  org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
+   *  @param name field name
+   *  @param value 32-bit float value
+   *  @param stored Store.YES if the content should also be stored
+   *  @throws IllegalArgumentException if the field name is null.
+   */
+  public LegacyFloatField(String name, float value, Store stored) {
+    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
+    fieldsData = Float.valueOf(value);
+  }
+  
+  /** Expert: allows you to customize the {@link
+   *  LegacyFieldType}. 
+   *  @param name field name
+   *  @param value 32-bit float value
+   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
+   *         of {@link LegacyNumericType#FLOAT}.
+   *  @throws IllegalArgumentException if the field name or type is null, or
+   *          if the field type does not have a FLOAT numericType()
+   */
+  public LegacyFloatField(String name, float value, LegacyFieldType type) {
+    super(name, type);
+    if (type.numericType() != LegacyNumericType.FLOAT) {
+      throw new IllegalArgumentException("type.numericType() must be FLOAT but got " + type.numericType());
+    }
+    fieldsData = Float.valueOf(value);
+  }
+}
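
The "re-use the field and Document instance" advice in the javadoc above amounts to the sketch below (illustrative only, not part of the commit): one Field and one Document, with setFloatValue() called per document. The directory, analyzer, field name, and values are assumptions.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.legacy.LegacyFloatField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class ReuseSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      // create the field and document once ...
      LegacyFloatField field = new LegacyFloatField("weight", 0.0f, Field.Store.NO);
      Document doc = new Document();
      doc.add(field);
      // ... then only swap the value for each document added
      for (float v : new float[] {1.5f, 2.5f, 42.0f}) {
        field.setFloatValue(v);
        writer.addDocument(doc);
      }
    }
  }
}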

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyIntField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyIntField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyIntField.java
new file mode 100644
index 0000000..e3ae965
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyIntField.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.IntPoint;
+import org.apache.lucene.index.IndexOptions;
+
+/**
+ * <p>
+ * Field that indexes <code>int</code> values
+ * for efficient range filtering and sorting. Here's an example usage:
+ * 
+ * <pre class="prettyprint">
+ * document.add(new LegacyIntField(name, 6, Field.Store.NO));
+ * </pre>
+ * 
+ * For optimal performance, re-use the <code>LegacyIntField</code> and
+ * {@link Document} instance for more than one document:
+ * 
+ * <pre class="prettyprint">
+ *  LegacyIntField field = new LegacyIntField(name, 6, Field.Store.NO);
+ *  Document document = new Document();
+ *  document.add(field);
+ * 
+ *  for(all documents) {
+ *    ...
+ *    field.setIntValue(value)
+ *    writer.addDocument(document);
+ *    ...
+ *  }
+ * </pre>
+ *
+ * See also {@link LegacyLongField}, {@link LegacyFloatField}, {@link
+ * LegacyDoubleField}.
+ *
+ * <p>To perform range querying or filtering against a
+ * <code>LegacyIntField</code>, use {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ * To sort according to a
+ * <code>LegacyIntField</code>, use the normal numeric sort types, eg
+ * {@link org.apache.lucene.search.SortField.Type#INT}. <code>LegacyIntField</code>
+ * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
+ *
+ * <p>You may add the same field name as a <code>LegacyIntField</code> to
+ * the same document more than once.  Range querying and
+ * filtering will be the logical OR of all values; so a range query
+ * will hit all documents that have at least one value in
+ * the range. However sort behavior is not defined.  If you need to sort,
+ * you should separately index a single-valued <code>LegacyIntField</code>.</p>
+ *
+ * <p>A <code>LegacyIntField</code> will consume somewhat more disk space
+ * in the index than an ordinary single-valued field.
+ * However, for a typical index that includes substantial
+ * textual content per document, this increase will likely
+ * be in the noise. </p>
+ *
+ * <p>Within Lucene, each numeric value is indexed as a
+ * <em>trie</em> structure, where each term is logically
+ * assigned to larger and larger pre-defined brackets (which
+ * are simply lower-precision representations of the value).
+ * The step size between each successive bracket is called the
+ * <code>precisionStep</code>, measured in bits.  Smaller
+ * <code>precisionStep</code> values result in larger number
+ * of brackets, which consumes more disk space in the index
+ * but may result in faster range search performance.  The
+ * default value, 8, was selected for a reasonable tradeoff
+ * of disk space consumption versus performance.  You can
+ * create a custom {@link LegacyFieldType} and invoke the {@link
+ * LegacyFieldType#setNumericPrecisionStep} method if you'd
+ * like to change the value.  Note that you must also
+ * specify a congruent value when creating {@link
+ * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ * For low cardinality fields larger precision steps are good.
+ * If the cardinality is &lt; 100, it is fair
+ * to use {@link Integer#MAX_VALUE}, which produces one
+ * term per value.
+ *
+ * <p>For more information on the internals of numeric trie
+ * indexing, including the <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * configuration, see {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The format of
+ * indexed values is described in {@link org.apache.lucene.legacy.LegacyNumericUtils}.
+ *
+ * <p>If you only need to sort by numeric value, and never
+ * run range querying/filtering, you can index using a
+ * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+ * This will minimize disk space consumed. </p>
+ *
+ * <p>More advanced users can instead use {@link
+ * org.apache.lucene.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
+ * class is a wrapper around this token stream type for
+ * easier, more intuitive usage.</p>
+ *
+ * @deprecated Please use {@link IntPoint} instead
+ *
+ * @since 2.9
+ */
+
+@Deprecated
+public final class LegacyIntField extends LegacyField {
+  
+  /** 
+   * Type for a LegacyIntField that is not stored:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
+  static {
+    TYPE_NOT_STORED.setTokenized(true);
+    TYPE_NOT_STORED.setOmitNorms(true);
+    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_NOT_STORED.setNumericType(LegacyNumericType.INT);
+    TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
+    TYPE_NOT_STORED.freeze();
+  }
+
+  /** 
+   * Type for a stored LegacyIntField:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
+  static {
+    TYPE_STORED.setTokenized(true);
+    TYPE_STORED.setOmitNorms(true);
+    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_STORED.setNumericType(LegacyNumericType.INT);
+    TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
+    TYPE_STORED.setStored(true);
+    TYPE_STORED.freeze();
+  }
+
+  /** Creates a stored or un-stored LegacyIntField with the provided value
+   *  and default <code>precisionStep</code> {@link
+   *  org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
+   *  @param name field name
+   *  @param value 32-bit integer value
+   *  @param stored Store.YES if the content should also be stored
+   *  @throws IllegalArgumentException if the field name is null.
+   */
+  public LegacyIntField(String name, int value, Store stored) {
+    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
+    fieldsData = Integer.valueOf(value);
+  }
+  
+  /** Expert: allows you to customize the {@link
+   *  LegacyFieldType}. 
+   *  @param name field name
+   *  @param value 32-bit integer value
+   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
+   *         of {@link LegacyNumericType#INT}.
+   *  @throws IllegalArgumentException if the field name or type is null, or
+   *          if the field type does not have an INT numericType()
+   */
+  public LegacyIntField(String name, int value, LegacyFieldType type) {
+    super(name, type);
+    if (type.numericType() != LegacyNumericType.INT) {
+      throw new IllegalArgumentException("type.numericType() must be INT but got " + type.numericType());
+    }
+    fieldsData = Integer.valueOf(value);
+  }
+}
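
The multi-valued behaviour described above (range matching is a logical OR over all values of the field) can be sanity-checked with a sketch like the following; none of it is in the commit, and the field name and values are made up.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.legacy.LegacyIntField;
import org.apache.lucene.legacy.LegacyNumericRangeQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class MultiValuedSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      // same field name added twice: the document is multi-valued for "size"
      doc.add(new LegacyIntField("size", 3, Field.Store.NO));
      doc.add(new LegacyIntField("size", 900, Field.Store.NO));
      writer.addDocument(doc);
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      // only one of the two values falls in [1,10], yet the document matches
      int hits = searcher.count(LegacyNumericRangeQuery.newIntRange("size", 1, 10, true, true));
      System.out.println(hits); // expected: 1
    }
  }
}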

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyLongField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyLongField.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyLongField.java
new file mode 100644
index 0000000..3e20b44
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyLongField.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.index.IndexOptions;
+
+
+/**
+ * <p>
+ * Field that indexes <code>long</code> values
+ * for efficient range filtering and sorting. Here's an example usage:
+ * 
+ * <pre class="prettyprint">
+ * document.add(new LegacyLongField(name, 6L, Field.Store.NO));
+ * </pre>
+ * 
+ * For optimal performance, re-use the <code>LegacyLongField</code> and
+ * {@link Document} instance for more than one document:
+ * 
+ * <pre class="prettyprint">
+ *  LegacyLongField field = new LegacyLongField(name, 0L, Field.Store.NO);
+ *  Document document = new Document();
+ *  document.add(field);
+ * 
+ *  for(all documents) {
+ *    ...
+ *    field.setLongValue(value)
+ *    writer.addDocument(document);
+ *    ...
+ *  }
+ * </pre>
+ *
+ * See also {@link LegacyIntField}, {@link LegacyFloatField}, {@link
+ * LegacyDoubleField}.
+ *
+ * Any type that can be converted to long can also be
+ * indexed.  For example, date/time values represented by a
+ * {@link java.util.Date} can be translated into a long
+ * value using the {@link java.util.Date#getTime} method.  If you
+ * don't need millisecond precision, you can quantize the
+ * value, either by dividing the result of
+ * {@link java.util.Date#getTime} or using the separate getters
+ * (for year, month, etc.) to construct an <code>int</code> or
+ * <code>long</code> value.
+ *
+ * <p>To perform range querying or filtering against a
+ * <code>LegacyLongField</code>, use {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ * To sort according to a
+ * <code>LegacyLongField</code>, use the normal numeric sort types, eg
+ * {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LegacyLongField</code>
+ * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.
+ *
+ * <p>You may add the same field name as a <code>LegacyLongField</code> to
+ * the same document more than once.  Range querying and
+ * filtering will be the logical OR of all values; so a range query
+ * will hit all documents that have at least one value in
+ * the range. However sort behavior is not defined.  If you need to sort,
+ * you should separately index a single-valued <code>LegacyLongField</code>.
+ *
+ * <p>A <code>LegacyLongField</code> will consume somewhat more disk space
+ * in the index than an ordinary single-valued field.
+ * However, for a typical index that includes substantial
+ * textual content per document, this increase will likely
+ * be in the noise. </p>
+ *
+ * <p>Within Lucene, each numeric value is indexed as a
+ * <em>trie</em> structure, where each term is logically
+ * assigned to larger and larger pre-defined brackets (which
+ * are simply lower-precision representations of the value).
+ * The step size between each successive bracket is called the
+ * <code>precisionStep</code>, measured in bits.  Smaller
+ * <code>precisionStep</code> values result in larger number
+ * of brackets, which consumes more disk space in the index
+ * but may result in faster range search performance.  The
+ * default value, 16, was selected for a reasonable tradeoff
+ * of disk space consumption versus performance.  You can
+ * create a custom {@link LegacyFieldType} and invoke the {@link
+ * LegacyFieldType#setNumericPrecisionStep} method if you'd
+ * like to change the value.  Note that you must also
+ * specify a congruent value when creating {@link
+ * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ * For low cardinality fields larger precision steps are good.
+ * If the cardinality is &lt; 100, it is fair
+ * to use {@link Integer#MAX_VALUE}, which produces one
+ * term per value.
+ *
+ * <p>For more information on the internals of numeric trie
+ * indexing, including the <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * configuration, see {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The format of
+ * indexed values is described in {@link org.apache.lucene.legacy.LegacyNumericUtils}.
+ *
+ * <p>If you only need to sort by numeric value, and never
+ * run range querying/filtering, you can index using a
+ * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+ * This will minimize disk space consumed.
+ *
+ * <p>More advanced users can instead use {@link
+ * org.apache.lucene.legacy.LegacyNumericTokenStream} directly, when indexing numbers. This
+ * class is a wrapper around this token stream type for
+ * easier, more intuitive usage.</p>
+ *
+ * @deprecated Please use {@link LongPoint} instead
+ *
+ * @since 2.9
+ */
+
+@Deprecated
+public final class LegacyLongField extends LegacyField {
+  
+  /** 
+   * Type for a LegacyLongField that is not stored:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_NOT_STORED = new LegacyFieldType();
+  static {
+    TYPE_NOT_STORED.setTokenized(true);
+    TYPE_NOT_STORED.setOmitNorms(true);
+    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_NOT_STORED.setNumericType(LegacyNumericType.LONG);
+    TYPE_NOT_STORED.freeze();
+  }
+
+  /** 
+   * Type for a stored LegacyLongField:
+   * normalization factors, frequencies, and positions are omitted.
+   */
+  public static final LegacyFieldType TYPE_STORED = new LegacyFieldType();
+  static {
+    TYPE_STORED.setTokenized(true);
+    TYPE_STORED.setOmitNorms(true);
+    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
+    TYPE_STORED.setNumericType(LegacyNumericType.LONG);
+    TYPE_STORED.setStored(true);
+    TYPE_STORED.freeze();
+  }
+
+  /** Creates a stored or un-stored LegacyLongField with the provided value
+   *  and default <code>precisionStep</code> {@link
+   *  org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
+   *  @param name field name
+   *  @param value 64-bit long value
+   *  @param stored Store.YES if the content should also be stored
+   *  @throws IllegalArgumentException if the field name is null.
+   */
+  public LegacyLongField(String name, long value, Store stored) {
+    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
+    fieldsData = Long.valueOf(value);
+  }
+  
+  /** Expert: allows you to customize the {@link
+   *  LegacyFieldType}. 
+   *  @param name field name
+   *  @param value 64-bit long value
+   *  @param type customized field type: must have {@link LegacyFieldType#numericType()}
+   *         of {@link LegacyNumericType#LONG}.
+   *  @throws IllegalArgumentException if the field name or type is null, or
+   *          if the field type does not have a LONG numericType()
+   */
+  public LegacyLongField(String name, long value, LegacyFieldType type) {
+    super(name, type);
+    if (type.numericType() != LegacyNumericType.LONG) {
+      throw new IllegalArgumentException("type.numericType() must be LONG but got " + type.numericType());
+    }
+    fieldsData = Long.valueOf(value);
+  }
+}
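
The date/time note in the LegacyLongField javadoc (quantize if millisecond precision is not needed) might look like the sketch below; the epoch-day encoding via java.time and the field name are assumptions, not something this commit prescribes.

import java.time.LocalDate;

import org.apache.lucene.document.Field;
import org.apache.lucene.legacy.LegacyLongField;
import org.apache.lucene.legacy.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;

public class DateAsLongSketch {
  public static void main(String[] args) {
    // quantize to day precision instead of indexing raw milliseconds
    long day = LocalDate.of(2016, 8, 17).toEpochDay();
    Field field = new LegacyLongField("pubDay", day, Field.Store.NO);

    // whole-year range over the same quantized encoding
    Query q = LegacyNumericRangeQuery.newLongRange("pubDay",
        LocalDate.of(2016, 1, 1).toEpochDay(),
        LocalDate.of(2016, 12, 31).toEpochDay(), true, true);
    System.out.println(field + " / " + q);
  }
}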

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericRangeQuery.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericRangeQuery.java
new file mode 100644
index 0000000..f172a20
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericRangeQuery.java
@@ -0,0 +1,537 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.Objects;
+
+import org.apache.lucene.document.DoublePoint;
+import org.apache.lucene.document.FloatPoint;
+import org.apache.lucene.document.IntPoint;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.index.FilteredTermsEnum;
+import org.apache.lucene.index.PointValues;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.index.Term; // for javadocs
+
+/**
+ * <p>A {@link Query} that matches numeric values within a
+ * specified range.  To use this, you must first index the
+ * numeric values using {@link org.apache.lucene.legacy.LegacyIntField}, {@link
+ * org.apache.lucene.legacy.LegacyFloatField}, {@link org.apache.lucene.legacy.LegacyLongField} or {@link org.apache.lucene.legacy.LegacyDoubleField} (expert: {@link
+ * org.apache.lucene.legacy.LegacyNumericTokenStream}).  If your terms are instead textual,
+ * you should use {@link TermRangeQuery}.</p>
+ *
+ * <p>You create a new LegacyNumericRangeQuery with the static
+ * factory methods, eg:
+ *
+ * <pre class="prettyprint">
+ * Query q = LegacyNumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
+ * </pre>
+ *
+ * matches all documents whose float valued "weight" field
+ * ranges from 0.03 to 0.10, inclusive.
+ *
+ * <p>The performance of LegacyNumericRangeQuery is much better
+ * than the corresponding {@link TermRangeQuery} because the
+ * number of terms that must be searched is usually far
+ * fewer, thanks to trie indexing, described below.</p>
+ *
+ * <p>You can optionally specify a <a
+ * href="#precisionStepDesc"><code>precisionStep</code></a>
+ * when creating this query.  This is necessary if you've
+ * changed this configuration from its default during
+ * indexing (16 for the 64 bit fields <code>LegacyLongField</code>/<code>LegacyDoubleField</code>,
+ * 8 for the 32 bit fields <code>LegacyIntField</code>/<code>LegacyFloatField</code>).
+ * Lower values consume more disk space but speed
+ * up searching.  Suitable values are between <b>1</b> and
+ * <b>8</b>; a good starting point to test is <b>4</b>.
+ * See <a href="#precisionStepDesc">below</a> for details.
+ *
+ * <p>This query defaults to {@linkplain
+ * MultiTermQuery#CONSTANT_SCORE_REWRITE}.
+ * With precision steps of &le;4, this query can be run with
+ * one of the BooleanQuery rewrite methods without changing
+ * BooleanQuery's default max clause count.
+ *
+ * <br><h3>How it works</h3>
+ *
+ * <p>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
+ * where this algorithm was described (referred to as <code>TrieRangeQuery</code>):
+ *
+ * <blockquote><strong>Schindler, U, Diepenbroek, M</strong>, 2008.
+ * <em>Generic XML-based Framework for Metadata Portals.</em>
+ * Computers &amp; Geosciences 34 (12), 1947-1955.
+ * <a href="http://dx.doi.org/10.1016/j.cageo.2008.02.023"
+ * target="_blank">doi:10.1016/j.cageo.2008.02.023</a></blockquote>
+ *
+ * <p><em>A quote from this paper:</em> Because Apache Lucene is a full-text
+ * search engine and not a conventional database, it cannot handle numerical ranges
+ * (e.g., field value is inside user defined bounds, even dates are numerical values).
+ * We have developed an extension to Apache Lucene that stores
+ * the numerical values in a special string-encoded format with variable precision
+ * (all numerical values like doubles, longs, floats, and ints are converted to
+ * lexicographic sortable string representations and stored with different precisions
+ * (for a more detailed description of how the values are stored,
+ * see {@link org.apache.lucene.legacy.LegacyNumericUtils}). A range is then divided recursively into multiple intervals for searching:
+ * The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
+ * while the boundaries are matched more exactly. This reduces the number of terms dramatically.</p>
+ *
+ * <p>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
+ * uses a lowest precision of 1 byte, the index contains only a maximum of 256 distinct values in the
+ * lowest precision. Overall, a range could consist of a theoretical maximum of
+ * <code>7*255*2 + 255 = 3825</code> distinct terms (when there is a term for every distinct value of an
+ * 8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
+ * because it would always be possible to reduce the full 256 values to one term with degraded precision).
+ * In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
+ * and a uniform value distribution).</p>
+ *
+ * <h3><a name="precisionStepDesc">Precision Step</a></h3>
+ * <p>You can choose any <code>precisionStep</code> when encoding values.
+ * Lower step values mean more precision levels and thus more terms in the index (and a larger index). The number
+ * of indexed terms per value is (those are generated by {@link org.apache.lucene.legacy.LegacyNumericTokenStream}):
+ * <p style="font-family:serif">
+ * &nbsp;&nbsp;indexedTermsPerValue = <b>ceil</b><big>(</big>bitsPerValue / precisionStep<big>)</big>
+ * </p>
+ * As the lower precision terms are shared by many values, the additional terms only
+ * slightly grow the term dictionary (approx. 7% for <code>precisionStep=4</code>), but have a larger
+ * impact on the postings (the postings file will have  more entries, as every document is linked to
+ * <code>indexedTermsPerValue</code> terms instead of one). The formula to estimate the growth
+ * of the term dictionary in comparison to one term per value:
+ * <p>
+ * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
+ * &nbsp;&nbsp;<img src="doc-files/nrq-formula-1.png" alt="\mathrm{termDictOverhead} = \sum\limits_{i=0}^{\mathrm{indexedTermsPerValue}-1} \frac{1}{2^{\mathrm{precisionStep}\cdot i}}">
+ * </p>
+ * <p>On the other hand, if the <code>precisionStep</code> is smaller, the maximum number of terms to match reduces,
+ * which optimizes query speed. The formula to calculate the maximum number of terms that will be visited while
+ * executing the query is:
+ * <p>
+ * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
+ * &nbsp;&nbsp;<img src="doc-files/nrq-formula-2.png" alt="\mathrm{maxQueryTerms} = \left[ \left( \mathrm{indexedTermsPerValue} - 1 \right) \cdot \left(2^\mathrm{precisionStep} - 1 \right) \cdot 2 \right] + \left( 2^\mathrm{precisionStep} - 1 \right)">
+ * </p>
+ * <p>For longs stored using a precision step of 4, <code>maxQueryTerms = 15*15*2 + 15 = 465</code>, and for a precision
+ * step of 2, <code>maxQueryTerms = 31*3*2 + 3 = 189</code>. But the gain in search speed is offset by more seeking
+ * in the term enum of the index. Because of this, the ideal <code>precisionStep</code> value can only
+ * be found by testing. <b>Important:</b> You can index with a lower precision step value and test search speed
+ * using a multiple of the original step value.</p>
+ *
+ * <p>Good values for <code>precisionStep</code> depend on usage and data type:
+ * <ul>
+ *  <li>When no <code>precisionStep</code> is given, the default is <b>16</b> for <em>64 bit</em> data types <em>(long, double)</em> and <b>8</b> for <em>32 bit</em> data types <em>(int, float)</em>.
+ *  <li>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.
+ *  <li>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.
+ *  <li>For low cardinality fields larger precision steps are good. If the cardinality is &lt; 100, it is
+ *  fair to use {@link Integer#MAX_VALUE} (see below).
+ *  <li>Steps <b>&ge;64</b> for <em>long/double</em> and <b>&ge;32</b> for <em>int/float</em> produces one token
+ *  per value in the index and querying is as slow as a conventional {@link TermRangeQuery}. But it can be used
+ *  to produce fields, that are solely used for sorting (in this case simply use {@link Integer#MAX_VALUE} as
+ *  <code>precisionStep</code>). Using {@link org.apache.lucene.legacy.LegacyIntField},
+ *  {@link org.apache.lucene.legacy.LegacyLongField}, {@link org.apache.lucene.legacy.LegacyFloatField} or {@link org.apache.lucene.legacy.LegacyDoubleField} for sorting
+ *  is ideal, because building the field cache is much faster than with text-only numbers.
+ *  These fields have one term per value and therefore also work with term enumeration for building distinct lists
+ *  (e.g. facets / preselected values to search for).
+ *  Sorting is also possible with range query optimized fields using one of the above <code>precisionSteps</code>.
+ * </ul>
+ *
+ * <p>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
+ * that {@link TermRangeQuery} in boolean rewrite mode (with raised {@link BooleanQuery} clause count)
+ * took about 30-40 secs to complete, {@link TermRangeQuery} in constant score filter rewrite mode took 5 secs
+ * and executing this class took &lt;100ms to complete (on an Opteron64 machine, Java 1.5, 8 bit
+ * precision step). This query type was developed for a geographic portal, where the performance for
+ * e.g. bounding boxes or exact date/time stamps is important.</p>
+ *
+ * @deprecated Instead index with {@link IntPoint}, {@link LongPoint}, {@link FloatPoint}, {@link DoublePoint}, and
+ *             create range queries with {@link IntPoint#newRangeQuery(String, int, int) IntPoint.newRangeQuery()},
+ *             {@link LongPoint#newRangeQuery(String, long, long) LongPoint.newRangeQuery()},
+ *             {@link FloatPoint#newRangeQuery(String, float, float) FloatPoint.newRangeQuery()},
+ *             {@link DoublePoint#newRangeQuery(String, double, double) DoublePoint.newRangeQuery()} respectively.
+ *             See {@link PointValues} for background information on Points.
+ *
+ * @since 2.9
+ **/
+
+@Deprecated
+public final class LegacyNumericRangeQuery<T extends Number> extends MultiTermQuery {
+
+  private LegacyNumericRangeQuery(final String field, final int precisionStep, final LegacyNumericType dataType,
+                                  T min, T max, final boolean minInclusive, final boolean maxInclusive) {
+    super(field);
+    if (precisionStep < 1)
+      throw new IllegalArgumentException("precisionStep must be >=1");
+    this.precisionStep = precisionStep;
+    this.dataType = Objects.requireNonNull(dataType, "LegacyNumericType must not be null");
+    this.min = min;
+    this.max = max;
+    this.minInclusive = minInclusive;
+    this.maxInclusive = maxInclusive;
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>long</code>
+   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Long> newLongRange(final String field, final int precisionStep,
+    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>long</code>
+   * range using the default <code>precisionStep</code> {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Long> newLongRange(final String field,
+    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries an <code>int</code>
+   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Integer> newIntRange(final String field, final int precisionStep,
+    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries an <code>int</code>
+   * range using the default <code>precisionStep</code> {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Integer> newIntRange(final String field,
+    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>double</code>
+   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>.
+   * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
+   * with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
+   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field, final int precisionStep,
+    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>double</code>
+   * range using the default <code>precisionStep</code> {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>.
+   * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
+   * with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
+   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field,
+    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>float</code>
+   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>.
+   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
+   * with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
+   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Float> newFloatRange(final String field, final int precisionStep,
+    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, precisionStep, LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
+  }
+  
+  /**
+   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>float</code>
+   * range using the default <code>precisionStep</code> {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
+   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
+   * by setting the min or max value to <code>null</code>.
+   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
+   * with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
+   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
+   */
+  public static LegacyNumericRangeQuery<Float> newFloatRange(final String field,
+    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
+  ) {
+    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
+  }
+
+  @Override @SuppressWarnings("unchecked")
+  protected TermsEnum getTermsEnum(final Terms terms, AttributeSource atts) throws IOException {
+    // very strange: java.lang.Number itself is not Comparable, but all subclasses used here are
+    if (min != null && max != null && ((Comparable<T>) min).compareTo(max) > 0) {
+      return TermsEnum.EMPTY;
+    }
+    return new NumericRangeTermsEnum(terms.iterator());
+  }
+
+  /** Returns <code>true</code> if the lower endpoint is inclusive */
+  public boolean includesMin() { return minInclusive; }
+  
+  /** Returns <code>true</code> if the upper endpoint is inclusive */
+  public boolean includesMax() { return maxInclusive; }
+
+  /** Returns the lower value of this range query */
+  public T getMin() { return min; }
+
+  /** Returns the upper value of this range query */
+  public T getMax() { return max; }
+  
+  /** Returns the precision step. */
+  public int getPrecisionStep() { return precisionStep; }
+  
+  @Override
+  public String toString(final String field) {
+    final StringBuilder sb = new StringBuilder();
+    if (!getField().equals(field)) sb.append(getField()).append(':');
+    return sb.append(minInclusive ? '[' : '{')
+      .append((min == null) ? "*" : min.toString())
+      .append(" TO ")
+      .append((max == null) ? "*" : max.toString())
+      .append(maxInclusive ? ']' : '}')
+      .toString();
+  }
+
+  @Override
+  @SuppressWarnings({"unchecked","rawtypes"})
+  public final boolean equals(final Object o) {
+    if (o==this) return true;
+    if (!super.equals(o))
+      return false;
+    if (o instanceof LegacyNumericRangeQuery) {
+      final LegacyNumericRangeQuery q=(LegacyNumericRangeQuery)o;
+      return (
+        (q.min == null ? min == null : q.min.equals(min)) &&
+        (q.max == null ? max == null : q.max.equals(max)) &&
+        minInclusive == q.minInclusive &&
+        maxInclusive == q.maxInclusive &&
+        precisionStep == q.precisionStep
+      );
+    }
+    return false;
+  }
+
+  @Override
+  public final int hashCode() {
+    int hash = super.hashCode();
+    hash = 31 * hash + precisionStep;
+    hash = 31 * hash + Objects.hashCode(min);
+    hash = 31 * hash + Objects.hashCode(max);
+    hash = 31 * hash + Objects.hashCode(minInclusive);
+    hash = 31 * hash + Objects.hashCode(maxInclusive);
+    return hash;
+  }
+
+  // members (package private, to be also fast accessible by NumericRangeTermEnum)
+  final int precisionStep;
+  final LegacyNumericType dataType;
+  final T min, max;
+  final boolean minInclusive,maxInclusive;
+
+  // used to handle float/double infinity correctly
+  static final long LONG_NEGATIVE_INFINITY =
+    NumericUtils.doubleToSortableLong(Double.NEGATIVE_INFINITY);
+  static final long LONG_POSITIVE_INFINITY =
+    NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
+  static final int INT_NEGATIVE_INFINITY =
+    NumericUtils.floatToSortableInt(Float.NEGATIVE_INFINITY);
+  static final int INT_POSITIVE_INFINITY =
+    NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
+
+  /**
+   * Subclass of FilteredTermsEnum for enumerating all terms that match the
+   * sub-ranges for trie range queries, using flex API.
+   * <p>
+   * WARNING: This term enumeration is not guaranteed to be always ordered by
+   * {@link Term#compareTo}.
+   * The ordering depends on how {@link org.apache.lucene.legacy.LegacyNumericUtils#splitLongRange} and
+   * {@link org.apache.lucene.legacy.LegacyNumericUtils#splitIntRange} generate the sub-ranges. For
+   * {@link MultiTermQuery}, ordering is not relevant.
+   */
+  private final class NumericRangeTermsEnum extends FilteredTermsEnum {
+
+    private BytesRef currentLowerBound, currentUpperBound;
+
+    private final LinkedList<BytesRef> rangeBounds = new LinkedList<>();
+
+    NumericRangeTermsEnum(final TermsEnum tenum) {
+      super(tenum);
+      switch (dataType) {
+        case LONG:
+        case DOUBLE: {
+          // lower
+          long minBound;
+          if (dataType == LegacyNumericType.LONG) {
+            minBound = (min == null) ? Long.MIN_VALUE : min.longValue();
+          } else {
+            assert dataType == LegacyNumericType.DOUBLE;
+            minBound = (min == null) ? LONG_NEGATIVE_INFINITY
+              : NumericUtils.doubleToSortableLong(min.doubleValue());
+          }
+          if (!minInclusive && min != null) {
+            if (minBound == Long.MAX_VALUE) break;
+            minBound++;
+          }
+          
+          // upper
+          long maxBound;
+          if (dataType == LegacyNumericType.LONG) {
+            maxBound = (max == null) ? Long.MAX_VALUE : max.longValue();
+          } else {
+            assert dataType == LegacyNumericType.DOUBLE;
+            maxBound = (max == null) ? LONG_POSITIVE_INFINITY
+              : NumericUtils.doubleToSortableLong(max.doubleValue());
+          }
+          if (!maxInclusive && max != null) {
+            if (maxBound == Long.MIN_VALUE) break;
+            maxBound--;
+          }
+          
+          LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
+            @Override
+            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
+              rangeBounds.add(minPrefixCoded);
+              rangeBounds.add(maxPrefixCoded);
+            }
+          }, precisionStep, minBound, maxBound);
+          break;
+        }
+          
+        case INT:
+        case FLOAT: {
+          // lower
+          int minBound;
+          if (dataType == LegacyNumericType.INT) {
+            minBound = (min == null) ? Integer.MIN_VALUE : min.intValue();
+          } else {
+            assert dataType == LegacyNumericType.FLOAT;
+            minBound = (min == null) ? INT_NEGATIVE_INFINITY
+              : NumericUtils.floatToSortableInt(min.floatValue());
+          }
+          if (!minInclusive && min != null) {
+            if (minBound == Integer.MAX_VALUE) break;
+            minBound++;
+          }
+          
+          // upper
+          int maxBound;
+          if (dataType == LegacyNumericType.INT) {
+            maxBound = (max == null) ? Integer.MAX_VALUE : max.intValue();
+          } else {
+            assert dataType == LegacyNumericType.FLOAT;
+            maxBound = (max == null) ? INT_POSITIVE_INFINITY
+              : NumericUtils.floatToSortableInt(max.floatValue());
+          }
+          if (!maxInclusive && max != null) {
+            if (maxBound == Integer.MIN_VALUE) break;
+            maxBound--;
+          }
+          
+          LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
+            @Override
+            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
+              rangeBounds.add(minPrefixCoded);
+              rangeBounds.add(maxPrefixCoded);
+            }
+          }, precisionStep, minBound, maxBound);
+          break;
+        }
+          
+        default:
+          // should never happen
+          throw new IllegalArgumentException("Invalid LegacyNumericType");
+      }
+    }
+    
+    private void nextRange() {
+      assert rangeBounds.size() % 2 == 0;
+
+      currentLowerBound = rangeBounds.removeFirst();
+      assert currentUpperBound == null || currentUpperBound.compareTo(currentLowerBound) <= 0 :
+        "The current upper bound must be <= the new lower bound";
+      
+      currentUpperBound = rangeBounds.removeFirst();
+    }
+    
+    @Override
+    protected final BytesRef nextSeekTerm(BytesRef term) {
+      while (rangeBounds.size() >= 2) {
+        nextRange();
+        
+        // if the new upper bound is before the term parameter, the sub-range is never a hit
+        if (term != null && term.compareTo(currentUpperBound) > 0)
+          continue;
+        // never seek backwards, so use current term if lower bound is smaller
+        return (term != null && term.compareTo(currentLowerBound) > 0) ?
+          term : currentLowerBound;
+      }
+      
+      // no more sub-range enums available
+      assert rangeBounds.isEmpty();
+      currentLowerBound = currentUpperBound = null;
+      return null;
+    }
+    
+    @Override
+    protected final AcceptStatus accept(BytesRef term) {
+      while (currentUpperBound == null || term.compareTo(currentUpperBound) > 0) {
+        if (rangeBounds.isEmpty())
+          return AcceptStatus.END;
+        // peek next sub-range, only seek if the current term is smaller than next lower bound
+        if (term.compareTo(rangeBounds.getFirst()) < 0)
+          return AcceptStatus.NO_AND_SEEK;
+        // step forward to next range without seeking, as next lower range bound is less or equal current term
+        nextRange();
+      }
+      return AcceptStatus.YES;
+    }
+
+  }
+  
+}
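
As the deprecation note in the class above suggests, existing LegacyNumericRangeQuery call
sites map directly onto the points API. Below is a minimal sketch of both forms, assuming a
hypothetical long field named "timestamp" and arbitrary example bounds:

  import org.apache.lucene.document.Document;
  import org.apache.lucene.document.Field;
  import org.apache.lucene.document.LongPoint;
  import org.apache.lucene.legacy.LegacyLongField;
  import org.apache.lucene.legacy.LegacyNumericRangeQuery;
  import org.apache.lucene.search.Query;

  // Inside some indexing/search setup method:
  // Legacy trie-encoded field plus range query (now in the backward-codecs module).
  Document legacyDoc = new Document();
  legacyDoc.add(new LegacyLongField("timestamp", 1471437600L, Field.Store.NO));
  Query legacyRange = LegacyNumericRangeQuery.newLongRange(
      "timestamp", 1471000000L, 1472000000L, true, true);   // both bounds inclusive
  Query halfOpen = LegacyNumericRangeQuery.newLongRange(
      "timestamp", 1471000000L, null, true, true);          // null upper bound = open end

  // Recommended replacement using points (lucene-core).
  Document pointDoc = new Document();
  pointDoc.add(new LongPoint("timestamp", 1471437600L));
  Query pointRange = LongPoint.newRangeQuery("timestamp", 1471000000L, 1472000000L);

The null bound in the second legacy query gives the half-open range described in the factory
Javadocs above.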


[6/7] lucene-solr:master: LUCENE-7413: move legacy numeric support to backwards module

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericTokenStream.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericTokenStream.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericTokenStream.java
new file mode 100644
index 0000000..a2aba19
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericTokenStream.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import java.util.Objects;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeFactory;
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.AttributeReflector;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.NumericUtils;
+
+/**
+ * <b>Expert:</b> This class provides a {@link TokenStream}
+ * for indexing numeric values that can be used by {@link
+ * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+ *
+ * <p>Note that for simple usage, {@link org.apache.lucene.legacy.LegacyIntField}, {@link
+ * org.apache.lucene.legacy.LegacyLongField}, {@link org.apache.lucene.legacy.LegacyFloatField} or {@link org.apache.lucene.legacy.LegacyDoubleField} is
+ * recommended.  These fields disable norms and
+ * term freqs, as they are not usually needed during
+ * searching.  If you need to change these settings, you
+ * should use this class.
+ *
+ * <p>Here's an example usage, for an <code>int</code> field:
+ *
+ * <pre class="prettyprint">
+ *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
+ *  fieldType.setOmitNorms(true);
+ *  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
+ *  Field field = new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value), fieldType);
+ *  document.add(field);
+ * </pre>
+ *
+ * <p>For optimal performance, re-use the TokenStream and Field instance
+ * for more than one document:
+ *
+ * <pre class="prettyprint">
+ *  LegacyNumericTokenStream stream = new LegacyNumericTokenStream(precisionStep);
+ *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
+ *  fieldType.setOmitNorms(true);
+ *  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
+ *  Field field = new Field(name, stream, fieldType);
+ *  Document document = new Document();
+ *  document.add(field);
+ *
+ *  for(all documents) {
+ *    stream.setIntValue(value)
+ *    writer.addDocument(document);
+ *  }
+ * </pre>
+ *
+ * <p>This stream is not intended to be used in analyzers;
+ * it's more for iterating the different precisions during
+ * indexing a specific numeric value.</p>
+ *
+ * <p><b>NOTE</b>: as token streams are only consumed once
+ * the document is added to the index, if you index more
+ * than one numeric field, use a separate <code>LegacyNumericTokenStream</code>
+ * instance for each.</p>
+ *
+ * <p>See {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} for more details on the
+ * <a
+ * href="LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+ * parameter as well as how numeric fields work under the hood.</p>
+ *
+ * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+ *
+ * @since 2.9
+ */
+@Deprecated
+public final class LegacyNumericTokenStream extends TokenStream {
+
+  /** The full precision token gets this token type assigned. */
+  public static final String TOKEN_TYPE_FULL_PREC  = "fullPrecNumeric";
+
+  /** The lower precision tokens get this token type assigned. */
+  public static final String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
+  
+  /** <b>Expert:</b> Use this attribute to get the details of the currently generated token.
+   * @lucene.experimental
+   * @since 4.0
+   */
+  public interface LegacyNumericTermAttribute extends Attribute {
+    /** Returns current shift value, undefined before first token */
+    int getShift();
+    /** Returns current token's raw value as {@code long} with all {@link #getShift} applied, undefined before first token */
+    long getRawValue();
+    /** Returns value size in bits (32 for {@code float}, {@code int}; 64 for {@code double}, {@code long}) */
+    int getValueSize();
+    
+    /** <em>Don't call this method!</em>
+      * @lucene.internal */
+    void init(long value, int valSize, int precisionStep, int shift);
+
+    /** <em>Don't call this method!</em>
+      * @lucene.internal */
+    void setShift(int shift);
+
+    /** <em>Don't call this method!</em>
+      * @lucene.internal */
+    int incShift();
+  }
+  
+  // just a wrapper to prevent adding CTA
+  private static final class NumericAttributeFactory extends AttributeFactory {
+    private final AttributeFactory delegate;
+
+    NumericAttributeFactory(AttributeFactory delegate) {
+      this.delegate = delegate;
+    }
+  
+    @Override
+    public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
+      if (CharTermAttribute.class.isAssignableFrom(attClass))
+        throw new IllegalArgumentException("LegacyNumericTokenStream does not support CharTermAttribute.");
+      return delegate.createAttributeInstance(attClass);
+    }
+  }
+
+  /** Implementation of {@link org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute}.
+   * @lucene.internal
+   * @since 4.0
+   */
+  public static final class LegacyNumericTermAttributeImpl extends AttributeImpl implements LegacyNumericTermAttribute,TermToBytesRefAttribute {
+    private long value = 0L;
+    private int valueSize = 0, shift = 0, precisionStep = 0;
+    private BytesRefBuilder bytes = new BytesRefBuilder();
+    
+    /** 
+     * Creates, but does not yet initialize this attribute instance
+     * @see #init(long, int, int, int)
+     */
+    public LegacyNumericTermAttributeImpl() {}
+
+    @Override
+    public BytesRef getBytesRef() {
+      assert valueSize == 64 || valueSize == 32;
+      if (shift >= valueSize) {
+        bytes.clear();
+      } else if (valueSize == 64) {
+        LegacyNumericUtils.longToPrefixCoded(value, shift, bytes);
+      } else {
+        LegacyNumericUtils.intToPrefixCoded((int) value, shift, bytes);
+      }
+      return bytes.get();
+    }
+
+    @Override
+    public int getShift() { return shift; }
+    @Override
+    public void setShift(int shift) { this.shift = shift; }
+    @Override
+    public int incShift() {
+      return (shift += precisionStep);
+    }
+
+    @Override
+    public long getRawValue() { return value  & ~((1L << shift) - 1L); }
+    @Override
+    public int getValueSize() { return valueSize; }
+
+    @Override
+    public void init(long value, int valueSize, int precisionStep, int shift) {
+      this.value = value;
+      this.valueSize = valueSize;
+      this.precisionStep = precisionStep;
+      this.shift = shift;
+    }
+
+    @Override
+    public void clear() {
+      // this attribute has no contents to clear!
+      // we keep it untouched as it's fully controlled by the outer class.
+    }
+    
+    @Override
+    public void reflectWith(AttributeReflector reflector) {
+      reflector.reflect(TermToBytesRefAttribute.class, "bytes", getBytesRef());
+      reflector.reflect(LegacyNumericTermAttribute.class, "shift", shift);
+      reflector.reflect(LegacyNumericTermAttribute.class, "rawValue", getRawValue());
+      reflector.reflect(LegacyNumericTermAttribute.class, "valueSize", valueSize);
+    }
+  
+    @Override
+    public void copyTo(AttributeImpl target) {
+      final LegacyNumericTermAttribute a = (LegacyNumericTermAttribute) target;
+      a.init(value, valueSize, precisionStep, shift);
+    }
+    
+    @Override
+    public LegacyNumericTermAttributeImpl clone() {
+      LegacyNumericTermAttributeImpl t = (LegacyNumericTermAttributeImpl)super.clone();
+      // Do a deep clone
+      t.bytes = new BytesRefBuilder();
+      t.bytes.copyBytes(getBytesRef());
+      return t;
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(precisionStep, shift, value, valueSize);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) return true;
+      if (obj == null) return false;
+      if (getClass() != obj.getClass()) return false;
+      LegacyNumericTermAttributeImpl other = (LegacyNumericTermAttributeImpl) obj;
+      if (precisionStep != other.precisionStep) return false;
+      if (shift != other.shift) return false;
+      if (value != other.value) return false;
+      if (valueSize != other.valueSize) return false;
+      return true;
+    }
+  }
+  
+  /**
+   * Creates a token stream for numeric values using the default <code>precisionStep</code>
+   * {@link org.apache.lucene.legacy.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16). The stream is not yet initialized;
+   * before using it, set a value using one of the set<em>???</em>Value() methods.
+   */
+  public LegacyNumericTokenStream() {
+    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, LegacyNumericUtils.PRECISION_STEP_DEFAULT);
+  }
+  
+  /**
+   * Creates a token stream for numeric values with the specified
+   * <code>precisionStep</code>. The stream is not yet initialized;
+   * before using it, set a value using one of the set<em>???</em>Value() methods.
+   */
+  public LegacyNumericTokenStream(final int precisionStep) {
+    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, precisionStep);
+  }
+
+  /**
+   * Expert: Creates a token stream for numeric values with the specified
+   * <code>precisionStep</code> using the given
+   * {@link org.apache.lucene.util.AttributeFactory}.
+   * The stream is not yet initialized;
+   * before using it, set a value using one of the set<em>???</em>Value() methods.
+   */
+  public LegacyNumericTokenStream(AttributeFactory factory, final int precisionStep) {
+    super(new NumericAttributeFactory(factory));
+    if (precisionStep < 1)
+      throw new IllegalArgumentException("precisionStep must be >=1");
+    this.precisionStep = precisionStep;
+    numericAtt.setShift(-precisionStep);
+  }
+
+  /**
+   * Initializes the token stream with the supplied <code>long</code> value.
+   * @param value the value, for which this TokenStream should enumerate tokens.
+   * @return this instance, so you can use it the following way:
+   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setLongValue(value))</code>
+   */
+  public LegacyNumericTokenStream setLongValue(final long value) {
+    numericAtt.init(value, valSize = 64, precisionStep, -precisionStep);
+    return this;
+  }
+  
+  /**
+   * Initializes the token stream with the supplied <code>int</code> value.
+   * @param value the value, for which this TokenStream should enumerate tokens.
+   * @return this instance, so you can use it the following way:
+   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value))</code>
+   */
+  public LegacyNumericTokenStream setIntValue(final int value) {
+    numericAtt.init(value, valSize = 32, precisionStep, -precisionStep);
+    return this;
+  }
+  
+  /**
+   * Initializes the token stream with the supplied <code>double</code> value.
+   * @param value the value, for which this TokenStream should enumerate tokens.
+   * @return this instance, so you can use it the following way:
+   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setDoubleValue(value))</code>
+   */
+  public LegacyNumericTokenStream setDoubleValue(final double value) {
+    numericAtt.init(NumericUtils.doubleToSortableLong(value), valSize = 64, precisionStep, -precisionStep);
+    return this;
+  }
+  
+  /**
+   * Initializes the token stream with the supplied <code>float</code> value.
+   * @param value the value, for which this TokenStream should enumerate tokens.
+   * @return this instance, so you can use it the following way:
+   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setFloatValue(value))</code>
+   */
+  public LegacyNumericTokenStream setFloatValue(final float value) {
+    numericAtt.init(NumericUtils.floatToSortableInt(value), valSize = 32, precisionStep, -precisionStep);
+    return this;
+  }
+  
+  @Override
+  public void reset() {
+    if (valSize == 0)
+      throw new IllegalStateException("call set???Value() before usage");
+    numericAtt.setShift(-precisionStep);
+  }
+
+  @Override
+  public boolean incrementToken() {
+    if (valSize == 0)
+      throw new IllegalStateException("call set???Value() before usage");
+    
+    // this will only clear all other attributes in this TokenStream
+    clearAttributes();
+
+    final int shift = numericAtt.incShift();
+    typeAtt.setType((shift == 0) ? TOKEN_TYPE_FULL_PREC : TOKEN_TYPE_LOWER_PREC);
+    posIncrAtt.setPositionIncrement((shift == 0) ? 1 : 0);
+    return (shift < valSize);
+  }
+
+  /** Returns the precision step. */
+  public int getPrecisionStep() {
+    return precisionStep;
+  }
+
+  @Override
+  public String toString() {
+    // We override the default because it can throw a cryptic "illegal shift value":
+    return getClass().getSimpleName() + "(precisionStep=" + precisionStep + " valueSize=" + numericAtt.getValueSize() + " shift=" + numericAtt.getShift() + ")";
+  }
+  
+  // members
+  private final LegacyNumericTermAttribute numericAtt = addAttribute(LegacyNumericTermAttribute.class);
+  private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+  
+  private int valSize = 0; // valSize==0 means not initialized
+  private final int precisionStep;
+}
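
The Javadoc above notes that this stream is not meant for analyzers but for iterating the
different precisions of a single numeric value during indexing. A minimal sketch of consuming
it directly, assuming precisionStep=16 and an arbitrary example value (for a 64-bit value this
yields tokens with shifts 0, 16, 32 and 48):

  import org.apache.lucene.legacy.LegacyNumericTokenStream;

  // Assumes an enclosing method that declares IOException.
  try (LegacyNumericTokenStream stream = new LegacyNumericTokenStream(16)) {
    stream.setLongValue(1471437600L);
    LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt =
        stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
      // shift == 0 is the full-precision token; larger shifts are the lower-precision prefixes
      System.out.println("shift=" + numericAtt.getShift() + " rawValue=" + numericAtt.getRawValue());
    }
    stream.end();
  }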

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericType.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericType.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericType.java
new file mode 100644
index 0000000..345b497
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericType.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+/** Data type of the numeric value
+ * @since 3.2
+ *
+ * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
+ */
+@Deprecated
+public enum LegacyNumericType {
+  /** 32-bit integer numeric type */
+  INT, 
+  /** 64-bit long numeric type */
+  LONG, 
+  /** 32-bit float numeric type */
+  FLOAT, 
+  /** 64-bit double numeric type */
+  DOUBLE
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
new file mode 100644
index 0000000..e6659d7
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
@@ -0,0 +1,510 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import java.io.IOException;
+
+import org.apache.lucene.index.FilterLeafReader;
+import org.apache.lucene.index.FilteredTermsEnum;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+
+/**
+ * This is a helper class to generate prefix-encoded representations for numerical values
+ * and supplies converters to represent float/double values as sortable integers/longs.
+ *
+ * <p>To quickly execute range queries in Apache Lucene, a range is divided recursively
+ * into multiple intervals for searching: The center of the range is searched only with
+ * the lowest possible precision in the trie, while the boundaries are matched
+ * more exactly. This reduces the number of terms dramatically.
+ *
+ * <p>This class generates terms to achieve this: First the numerical integer values need to
+ * be converted to bytes. For that, the integer values (32 bit or 64 bit) are made unsigned
+ * and the bits are encoded into ASCII chars, 7 bits per char. The resulting byte[] is
+ * sortable like the original integer value (even using UTF-8 sort order). Each value is also
+ * prefixed (in the first char) by the <code>shift</code> value (number of bits removed) used
+ * during encoding.
+ *
+ * <p>For easy usage, the trie algorithm is implemented for indexing inside
+ * {@link org.apache.lucene.legacy.LegacyNumericTokenStream} that can index <code>int</code>, <code>long</code>,
+ * <code>float</code>, and <code>double</code>. For querying,
+ * {@link org.apache.lucene.legacy.LegacyNumericRangeQuery} implements the query part
+ * for the same data types.
+ *
+ * @lucene.internal
+ *
+ * @deprecated Please use {@link org.apache.lucene.index.PointValues} instead.
+ *
+ * @since 2.9, API changed non backwards-compliant in 4.0
+ */
+
+@Deprecated
+public final class LegacyNumericUtils {
+
+  private LegacyNumericUtils() {} // no instance!
+  
+  /**
+   * The default precision step used by {@link org.apache.lucene.legacy.LegacyLongField},
+   * {@link org.apache.lucene.legacy.LegacyDoubleField}, {@link org.apache.lucene.legacy.LegacyNumericTokenStream}, {@link
+   * org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+   */
+  public static final int PRECISION_STEP_DEFAULT = 16;
+  
+  /**
+   * The default precision step used by {@link org.apache.lucene.legacy.LegacyIntField} and
+   * {@link org.apache.lucene.legacy.LegacyFloatField}.
+   */
+  public static final int PRECISION_STEP_DEFAULT_32 = 8;
+  
+  /**
+   * Longs are stored at lower precision by shifting off lower bits. The shift count is
+   * stored as <code>SHIFT_START_LONG+shift</code> in the first byte
+   */
+  public static final byte SHIFT_START_LONG = 0x20;
+
+  /**
+   * The maximum term length (used for <code>byte[]</code> buffer size)
+   * for encoding <code>long</code> values.
+   * @see #longToPrefixCoded
+   */
+  public static final int BUF_SIZE_LONG = 63/7 + 2;
+
+  /**
+   * Integers are stored at lower precision by shifting off lower bits. The shift count is
+   * stored as <code>SHIFT_START_INT+shift</code> in the first byte
+   */
+  public static final byte SHIFT_START_INT  = 0x60;
+
+  /**
+   * The maximum term length (used for <code>byte[]</code> buffer size)
+   * for encoding <code>int</code> values.
+   * @see #intToPrefixCoded
+   */
+  public static final int BUF_SIZE_INT = 31/7 + 2;
+
+  /**
+   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
+   * This method is used by {@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
+   * After encoding, {@code bytes.offset} will always be 0. 
+   * @param val the numeric value
+   * @param shift how many bits to strip from the right
+   * @param bytes will contain the encoded value
+   */
+  public static void longToPrefixCoded(final long val, final int shift, final BytesRefBuilder bytes) {
+    // ensure shift is 0..63
+    if ((shift & ~0x3f) != 0) {
+      throw new IllegalArgumentException("Illegal shift value, must be 0..63; got shift=" + shift);
+    }
+    int nChars = (((63-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
+    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
+    bytes.grow(BUF_SIZE_LONG);
+    bytes.setByteAt(0, (byte)(SHIFT_START_LONG + shift));
+    long sortableBits = val ^ 0x8000000000000000L;
+    sortableBits >>>= shift;
+    while (nChars > 0) {
+      // Store 7 bits per byte for compatibility
+      // with UTF-8 encoding of terms
+      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
+      sortableBits >>>= 7;
+    }
+  }
+
+  /**
+   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
+   * This method is used by {@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
+   * After encoding, {@code bytes.offset} will always be 0.
+   * @param val the numeric value
+   * @param shift how many bits to strip from the right
+   * @param bytes will contain the encoded value
+   */
+  public static void intToPrefixCoded(final int val, final int shift, final BytesRefBuilder bytes) {
+    // ensure shift is 0..31
+    if ((shift & ~0x1f) != 0) {
+      throw new IllegalArgumentException("Illegal shift value, must be 0..31; got shift=" + shift);
+    }
+    int nChars = (((31-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
+    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
+    bytes.grow(LegacyNumericUtils.BUF_SIZE_LONG);  // use the max
+    bytes.setByteAt(0, (byte)(SHIFT_START_INT + shift));
+    int sortableBits = val ^ 0x80000000;
+    sortableBits >>>= shift;
+    while (nChars > 0) {
+      // Store 7 bits per byte for compatibility
+      // with UTF-8 encoding of terms
+      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
+      sortableBits >>>= 7;
+    }
+  }
+
+
+  /**
+   * Returns the shift value from a prefix encoded {@code long}.
+   * @throws NumberFormatException if the supplied {@link BytesRef} is
+   * not correctly prefix encoded.
+   */
+  public static int getPrefixCodedLongShift(final BytesRef val) {
+    final int shift = val.bytes[val.offset] - SHIFT_START_LONG;
+    if (shift > 63 || shift < 0)
+      throw new NumberFormatException("Invalid shift value (" + shift + ") in prefixCoded bytes (is encoded value really an INT?)");
+    return shift;
+  }
+
+  /**
+   * Returns the shift value from a prefix encoded {@code int}.
+   * @throws NumberFormatException if the supplied {@link BytesRef} is
+   * not correctly prefix encoded.
+   */
+  public static int getPrefixCodedIntShift(final BytesRef val) {
+    final int shift = val.bytes[val.offset] - SHIFT_START_INT;
+    if (shift > 31 || shift < 0)
+      throw new NumberFormatException("Invalid shift value in prefixCoded bytes (is encoded value really an INT?)");
+    return shift;
+  }
+
+  /**
+   * Returns a long from prefixCoded bytes.
+   * Rightmost bits will be zero for lower precision codes.
+   * This method can be used to decode a term's value.
+   * @throws NumberFormatException if the supplied {@link BytesRef} is
+   * not correctly prefix encoded.
+   * @see #longToPrefixCoded
+   */
+  public static long prefixCodedToLong(final BytesRef val) {
+    long sortableBits = 0L;
+    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
+      sortableBits <<= 7;
+      final byte b = val.bytes[i];
+      if (b < 0) {
+        throw new NumberFormatException(
+          "Invalid prefixCoded numerical value representation (byte "+
+          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
+        );
+      }
+      sortableBits |= b;
+    }
+    return (sortableBits << getPrefixCodedLongShift(val)) ^ 0x8000000000000000L;
+  }
+
+  /**
+   * Returns an int from prefixCoded bytes.
+   * Rightmost bits will be zero for lower precision codes.
+   * This method can be used to decode a term's value.
+   * @throws NumberFormatException if the supplied {@link BytesRef} is
+   * not correctly prefix encoded.
+   * @see #intToPrefixCoded
+   */
+  public static int prefixCodedToInt(final BytesRef val) {
+    int sortableBits = 0;
+    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
+      sortableBits <<= 7;
+      final byte b = val.bytes[i];
+      if (b < 0) {
+        throw new NumberFormatException(
+          "Invalid prefixCoded numerical value representation (byte "+
+          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
+        );
+      }
+      sortableBits |= b;
+    }
+    return (sortableBits << getPrefixCodedIntShift(val)) ^ 0x80000000;
+  }
+
+  /**
+   * Splits a long range recursively.
+   * You may implement a builder that adds clauses to a
+   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
+   * {@link LongRangeBuilder#addRange(BytesRef,BytesRef)}
+   * method.
+   * <p>This method is used by {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+   */
+  public static void splitLongRange(final LongRangeBuilder builder,
+    final int precisionStep,  final long minBound, final long maxBound
+  ) {
+    splitRange(builder, 64, precisionStep, minBound, maxBound);
+  }
+  
+  /**
+   * Splits an int range recursively.
+   * You may implement a builder that adds clauses to a
+   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
+   * {@link IntRangeBuilder#addRange(BytesRef,BytesRef)}
+   * method.
+   * <p>This method is used by {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}.
+   */
+  public static void splitIntRange(final IntRangeBuilder builder,
+    final int precisionStep,  final int minBound, final int maxBound
+  ) {
+    splitRange(builder, 32, precisionStep, minBound, maxBound);
+  }
+  
+  /** This helper does the splitting for both 32 and 64 bit. */
+  private static void splitRange(
+    final Object builder, final int valSize,
+    final int precisionStep, long minBound, long maxBound
+  ) {
+    if (precisionStep < 1)
+      throw new IllegalArgumentException("precisionStep must be >=1");
+    if (minBound > maxBound) return;
+    for (int shift=0; ; shift += precisionStep) {
+      // calculate new bounds for inner precision
+      final long diff = 1L << (shift+precisionStep),
+        mask = ((1L<<precisionStep) - 1L) << shift;
+      final boolean
+        hasLower = (minBound & mask) != 0L,
+        hasUpper = (maxBound & mask) != mask;
+      final long
+        nextMinBound = (hasLower ? (minBound + diff) : minBound) & ~mask,
+        nextMaxBound = (hasUpper ? (maxBound - diff) : maxBound) & ~mask;
+      final boolean
+        lowerWrapped = nextMinBound < minBound,
+        upperWrapped = nextMaxBound > maxBound;
+      
+      if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) {
+        // We are in the lowest precision or the next precision is not available.
+        addRange(builder, valSize, minBound, maxBound, shift);
+        // exit the split recursion loop
+        break;
+      }
+      
+      if (hasLower)
+        addRange(builder, valSize, minBound, minBound | mask, shift);
+      if (hasUpper)
+        addRange(builder, valSize, maxBound & ~mask, maxBound, shift);
+      
+      // recurse to next precision
+      minBound = nextMinBound;
+      maxBound = nextMaxBound;
+    }
+  }
+  
+  /** Helper that delegates to correct range builder */
+  private static void addRange(
+    final Object builder, final int valSize,
+    long minBound, long maxBound,
+    final int shift
+  ) {
+    // for the max bound set all lower bits (that were shifted away):
+    // this is important for testing or other usages of the split range
+    // (e.g. to reconstruct the full range). The prefixEncoding will remove
+    // the bits anyway, so they do not hurt!
+    maxBound |= (1L << shift) - 1L;
+    // delegate to correct range builder
+    switch(valSize) {
+      case 64:
+        ((LongRangeBuilder)builder).addRange(minBound, maxBound, shift);
+        break;
+      case 32:
+        ((IntRangeBuilder)builder).addRange((int)minBound, (int)maxBound, shift);
+        break;
+      default:
+        // Should not happen!
+        throw new IllegalArgumentException("valSize must be 32 or 64.");
+    }
+  }
+
+  /**
+   * Callback for {@link #splitLongRange}.
+   * You need to override only one of the methods.
+   * @lucene.internal
+   * @since 2.9, API changed non backwards-compliant in 4.0
+   */
+  public static abstract class LongRangeBuilder {
+    
+    /**
+     * Override this method if you want to receive the already prefix-encoded range bounds.
+     * You can directly build classical (inclusive) range queries from them.
+     */
+    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
+      throw new UnsupportedOperationException();
+    }
+    
+    /**
+     * Override this method if you want to receive the raw long range bounds.
+     * You can use this e.g. for debugging purposes (printing out the range bounds).
+     */
+    public void addRange(final long min, final long max, final int shift) {
+      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
+      longToPrefixCoded(min, shift, minBytes);
+      longToPrefixCoded(max, shift, maxBytes);
+      addRange(minBytes.get(), maxBytes.get());
+    }
+  
+  }
+  
+  /**
+   * Callback for {@link #splitIntRange}.
+   * You need to override only one of the methods.
+   * @lucene.internal
+   * @since 2.9, API changed non backwards-compliant in 4.0
+   */
+  public static abstract class IntRangeBuilder {
+    
+    /**
+     * Override this method if you want to receive the already prefix-encoded range bounds.
+     * You can directly build classical (inclusive) range queries from them.
+     */
+    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
+      throw new UnsupportedOperationException();
+    }
+    
+    /**
+     * Override this method if you want to receive the raw int range bounds.
+     * You can use this e.g. for debugging purposes (printing out the range bounds).
+     */
+    public void addRange(final int min, final int max, final int shift) {
+      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
+      intToPrefixCoded(min, shift, minBytes);
+      intToPrefixCoded(max, shift, maxBytes);
+      addRange(minBytes.get(), maxBytes.get());
+    }
+  
+  }
+  
+  /**
+   * Filters the given {@link TermsEnum} by accepting only prefix coded 64 bit
+   * terms with a shift value of <tt>0</tt>.
+   * 
+   * @param termsEnum
+   *          the terms enum to filter
+   * @return a filtered {@link TermsEnum} that only returns prefix coded 64 bit
+   *         terms with a shift value of <tt>0</tt>.
+   */
+  public static TermsEnum filterPrefixCodedLongs(TermsEnum termsEnum) {
+    return new SeekingNumericFilteredTermsEnum(termsEnum) {
+
+      @Override
+      protected AcceptStatus accept(BytesRef term) {
+        return LegacyNumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
+      }
+    };
+  }
+
+  /**
+   * Filters the given {@link TermsEnum} by accepting only prefix coded 32 bit
+   * terms with a shift value of <tt>0</tt>.
+   * 
+   * @param termsEnum
+   *          the terms enum to filter
+   * @return a filtered {@link TermsEnum} that only returns prefix coded 32 bit
+   *         terms with a shift value of <tt>0</tt>.
+   */
+  public static TermsEnum filterPrefixCodedInts(TermsEnum termsEnum) {
+    return new SeekingNumericFilteredTermsEnum(termsEnum) {
+      
+      @Override
+      protected AcceptStatus accept(BytesRef term) {
+        return LegacyNumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
+      }
+    };
+  }
+
+  /** Just like FilteredTermsEnum, except it adds a limited
+   *  seekCeil implementation that only works with {@link
+   *  #filterPrefixCodedInts} and {@link
+   *  #filterPrefixCodedLongs}. */
+  private static abstract class SeekingNumericFilteredTermsEnum extends FilteredTermsEnum {
+    public SeekingNumericFilteredTermsEnum(final TermsEnum tenum) {
+      super(tenum, false);
+    }
+
+    @Override
+    @SuppressWarnings("fallthrough")
+    public SeekStatus seekCeil(BytesRef term) throws IOException {
+
+      // NOTE: This is not general!!  It only handles YES
+      // and END, because that's all we need for the numeric
+      // case here
+
+      SeekStatus status = tenum.seekCeil(term);
+      if (status == SeekStatus.END) {
+        return SeekStatus.END;
+      }
+
+      actualTerm = tenum.term();
+
+      if (accept(actualTerm) == AcceptStatus.YES) {
+        return status;
+      } else {
+        return SeekStatus.END;
+      }
+    }
+  }
+
+  private static Terms intTerms(Terms terms) {
+    return new FilterLeafReader.FilterTerms(terms) {
+        @Override
+        public TermsEnum iterator() throws IOException {
+          return filterPrefixCodedInts(in.iterator());
+        }
+      };
+  }
+
+  private static Terms longTerms(Terms terms) {
+    return new FilterLeafReader.FilterTerms(terms) {
+        @Override
+        public TermsEnum iterator() throws IOException {
+          return filterPrefixCodedLongs(in.iterator());
+        }
+      };
+  }
+    
+  /**
+   * Returns the minimum int value indexed into this
+   * numeric field or null if no terms exist.
+   */
+  public static Integer getMinInt(Terms terms) throws IOException {
+    // All shift=0 terms are sorted first, so we don't need
+    // to filter the incoming terms; we can just get the
+    // min:
+    BytesRef min = terms.getMin();
+    return (min != null) ? LegacyNumericUtils.prefixCodedToInt(min) : null;
+  }
+
+  /**
+   * Returns the maximum int value indexed into this
+   * numeric field or null if no terms exist.
+   */
+  public static Integer getMaxInt(Terms terms) throws IOException {
+    BytesRef max = intTerms(terms).getMax();
+    return (max != null) ? LegacyNumericUtils.prefixCodedToInt(max) : null;
+  }
+
+  /**
+   * Returns the minimum long value indexed into this
+   * numeric field or null if no terms exist.
+   */
+  public static Long getMinLong(Terms terms) throws IOException {
+    // All shift=0 terms are sorted first, so we don't need
+    // to filter the incoming terms; we can just get the
+    // min:
+    BytesRef min = terms.getMin();
+    return (min != null) ? LegacyNumericUtils.prefixCodedToLong(min) : null;
+  }
+
+  /**
+   * Returns the maximum long value indexed into this
+   * numeric field or null if no terms exist.
+   */
+  public static Long getMaxLong(Terms terms) throws IOException {
+    BytesRef max = longTerms(terms).getMax();
+    return (max != null) ? LegacyNumericUtils.prefixCodedToLong(max) : null;
+  }
+  
+}
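
The class Javadoc above describes the two building blocks: prefix encoding (7 bits per byte,
with the shift stored in the first byte) and recursive range splitting as used by
LegacyNumericRangeQuery. A small sketch of both, using arbitrary example values and a
precisionStep of 4 chosen only for illustration:

  import org.apache.lucene.legacy.LegacyNumericUtils;
  import org.apache.lucene.util.BytesRefBuilder;

  // Prefix-coded round trip: encode a long at full precision (shift=0) and decode it back.
  BytesRefBuilder bytes = new BytesRefBuilder();
  LegacyNumericUtils.longToPrefixCoded(1471437600L, 0, bytes);
  long decoded = LegacyNumericUtils.prefixCodedToLong(bytes.get());  // == 1471437600L

  // Range splitting: print the raw sub-ranges the trie algorithm produces for [1000, 5000].
  LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
    @Override
    public void addRange(long min, long max, int shift) {
      System.out.println("shift=" + shift + " [" + min + " TO " + max + "]");
    }
  }, 4, 1000L, 5000L);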

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/java/org/apache/lucene/legacy/package-info.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/package-info.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/package-info.java
new file mode 100644
index 0000000..d0167f8
--- /dev/null
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+/** 
+ * Deprecated trie-based numeric field, token stream, and range query support, kept for backwards compatibility.
+ */
+package org.apache.lucene.legacy;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 8226022..03480d7 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -47,8 +47,6 @@ import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FloatDocValuesField;
 import org.apache.lucene.document.FloatPoint;
 import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
@@ -57,9 +55,12 @@ import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.BaseDirectoryWrapper;
@@ -72,7 +73,6 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InfoStream;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyField.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyField.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyField.java
new file mode 100644
index 0000000..65ff096
--- /dev/null
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyField.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestLegacyField extends LuceneTestCase {
+  
+  public void testLegacyDoubleField() throws Exception {
+    Field fields[] = new Field[] {
+        new LegacyDoubleField("foo", 5d, Field.Store.NO),
+        new LegacyDoubleField("foo", 5d, Field.Store.YES)
+    };
+
+    for (Field field : fields) {
+      trySetBoost(field);
+      trySetByteValue(field);
+      trySetBytesValue(field);
+      trySetBytesRefValue(field);
+      field.setDoubleValue(6d); // ok
+      trySetIntValue(field);
+      trySetFloatValue(field);
+      trySetLongValue(field);
+      trySetReaderValue(field);
+      trySetShortValue(field);
+      trySetStringValue(field);
+      trySetTokenStreamValue(field);
+    
+      assertEquals(6d, field.numericValue().doubleValue(), 0.0d);
+    }
+  }
+  
+  public void testLegacyFloatField() throws Exception {
+    Field fields[] = new Field[] {
+        new LegacyFloatField("foo", 5f, Field.Store.NO),
+        new LegacyFloatField("foo", 5f, Field.Store.YES)
+    };
+
+    for (Field field : fields) {
+      trySetBoost(field);
+      trySetByteValue(field);
+      trySetBytesValue(field);
+      trySetBytesRefValue(field);
+      trySetDoubleValue(field);
+      trySetIntValue(field);
+      field.setFloatValue(6f); // ok
+      trySetLongValue(field);
+      trySetReaderValue(field);
+      trySetShortValue(field);
+      trySetStringValue(field);
+      trySetTokenStreamValue(field);
+      
+      assertEquals(6f, field.numericValue().floatValue(), 0.0f);
+    }
+  }
+  
+  public void testLegacyIntField() throws Exception {
+    Field fields[] = new Field[] {
+        new LegacyIntField("foo", 5, Field.Store.NO),
+        new LegacyIntField("foo", 5, Field.Store.YES)
+    };
+
+    for (Field field : fields) {
+      trySetBoost(field);
+      trySetByteValue(field);
+      trySetBytesValue(field);
+      trySetBytesRefValue(field);
+      trySetDoubleValue(field);
+      field.setIntValue(6); // ok
+      trySetFloatValue(field);
+      trySetLongValue(field);
+      trySetReaderValue(field);
+      trySetShortValue(field);
+      trySetStringValue(field);
+      trySetTokenStreamValue(field);
+      
+      assertEquals(6, field.numericValue().intValue());
+    }
+  }
+  
+  public void testLegacyLongField() throws Exception {
+    Field fields[] = new Field[] {
+        new LegacyLongField("foo", 5L, Field.Store.NO),
+        new LegacyLongField("foo", 5L, Field.Store.YES)
+    };
+
+    for (Field field : fields) {
+      trySetBoost(field);
+      trySetByteValue(field);
+      trySetBytesValue(field);
+      trySetBytesRefValue(field);
+      trySetDoubleValue(field);
+      trySetIntValue(field);
+      trySetFloatValue(field);
+      field.setLongValue(6); // ok
+      trySetReaderValue(field);
+      trySetShortValue(field);
+      trySetStringValue(field);
+      trySetTokenStreamValue(field);
+      
+      assertEquals(6L, field.numericValue().longValue());
+    }
+  }
+  
+  private void trySetByteValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setByteValue((byte) 10);
+    });
+  }
+
+  private void trySetBytesValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setBytesValue(new byte[] { 5, 5 });
+    });
+  }
+  
+  private void trySetBytesRefValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setBytesValue(new BytesRef("bogus"));
+    });
+  }
+  
+  private void trySetDoubleValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setDoubleValue(Double.MAX_VALUE);
+    });
+  }
+  
+  private void trySetIntValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setIntValue(Integer.MAX_VALUE);
+    });
+  }
+  
+  private void trySetLongValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setLongValue(Long.MAX_VALUE);
+    });
+  }
+  
+  private void trySetFloatValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setFloatValue(Float.MAX_VALUE);
+    });
+  }
+  
+  private void trySetReaderValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setReaderValue(new StringReader("BOO!"));
+    });
+  }
+  
+  private void trySetShortValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setShortValue(Short.MAX_VALUE);
+    });
+  }
+  
+  private void trySetStringValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setStringValue("BOO!");
+    });
+  }
+  
+  private void trySetTokenStreamValue(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setTokenStream(new CannedTokenStream(new Token("foo", 0, 3)));
+    });
+  }
+  
+  private void trySetBoost(Field f) {
+    expectThrows(IllegalArgumentException.class, () -> {
+      f.setBoost(5.0f);
+    });
+  }
+}

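The test above pins down the type safety of the legacy fields: each legacy numeric field accepts only the setter matching its own numeric type and rejects everything else with IllegalArgumentException. A small sketch of that behavior outside the test framework (the class name LegacySetterSketch is invented for the example):

    import org.apache.lucene.document.Field;
    import org.apache.lucene.legacy.LegacyIntField;

    public class LegacySetterSketch {
      public static void main(String[] args) {
        LegacyIntField f = new LegacyIntField("foo", 5, Field.Store.NO);
        f.setIntValue(6); // accepted: matches the field's numeric type
        try {
          f.setLongValue(7L); // rejected: wrong value type for a LegacyIntField
        } catch (IllegalArgumentException expected) {
          System.out.println("rejected, as the test expects: " + expected);
        }
      }
    }
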
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyFieldReuse.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyFieldReuse.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyFieldReuse.java
new file mode 100644
index 0000000..9335290
--- /dev/null
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyFieldReuse.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyNumericTokenStream;
+import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute;
+
+/** test tokenstream reuse by DefaultIndexingChain */
+public class TestLegacyFieldReuse extends BaseTokenStreamTestCase {
+  
+  public void testNumericReuse() throws IOException {
+    LegacyIntField legacyIntField = new LegacyIntField("foo", 5, Field.Store.NO);
+    
+    // passing null
+    TokenStream ts = legacyIntField.tokenStream(null, null);
+    assertTrue(ts instanceof LegacyNumericTokenStream);
+    assertEquals(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, ((LegacyNumericTokenStream)ts).getPrecisionStep());
+    assertNumericContents(5, ts);
+
+    // now reuse previous stream
+    legacyIntField = new LegacyIntField("foo", 20, Field.Store.NO);
+    TokenStream ts2 = legacyIntField.tokenStream(null, ts);
+    assertSame(ts, ts2);
+    assertNumericContents(20, ts);
+    
+    // pass a bogus stream and ensure it's still ok
+    legacyIntField = new LegacyIntField("foo", 2343, Field.Store.NO);
+    TokenStream bogus = new CannedTokenStream(new Token("bogus", 0, 5));
+    ts = legacyIntField.tokenStream(null, bogus);
+    assertNotSame(bogus, ts);
+    assertNumericContents(2343, ts);
+    
+    // pass another bogus stream (numeric, but different precision step!)
+    legacyIntField = new LegacyIntField("foo", 42, Field.Store.NO);
+    assert 3 != LegacyNumericUtils.PRECISION_STEP_DEFAULT;
+    bogus = new LegacyNumericTokenStream(3);
+    ts = legacyIntField.tokenStream(null, bogus);
+    assertNotSame(bogus, ts);
+    assertNumericContents(42, ts);
+  }
+   
+  private void assertNumericContents(int value, TokenStream ts) throws IOException {
+    assertTrue(ts instanceof LegacyNumericTokenStream);
+    LegacyNumericTermAttribute numericAtt = ts.getAttribute(LegacyNumericTermAttribute.class);
+    ts.reset();
+    boolean seen = false;
+    while (ts.incrementToken()) {
+      if (numericAtt.getShift() == 0) {
+        assertEquals(value, numericAtt.getRawValue());
+        seen = true;
+      }
+    }
+    ts.end();
+    ts.close();
+    assertTrue(seen);
+  }
+}

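For anyone tracing what DefaultIndexingChain reuses here: a legacy numeric field produces a LegacyNumericTokenStream that emits one term per precision level, with shift 0 carrying the full-precision value. A sketch of consuming that stream directly, mirroring assertNumericContents above (the class name NumericStreamSketch is illustrative):

    import java.io.IOException;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.legacy.LegacyIntField;
    import org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttribute;

    public class NumericStreamSketch {
      public static void main(String[] args) throws IOException {
        LegacyIntField field = new LegacyIntField("foo", 42, Field.Store.NO);
        TokenStream ts = field.tokenStream(null, null); // numeric fields ignore the analyzer
        LegacyNumericTermAttribute att = ts.getAttribute(LegacyNumericTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          // shift == 0 is the full-precision term; larger shifts are the trie prefix terms
          System.out.println("shift=" + att.getShift() + " raw=" + att.getRawValue());
        }
        ts.end();
        ts.close();
      }
    }
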
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyNumericUtils.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyNumericUtils.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyNumericUtils.java
new file mode 100644
index 0000000..8607efd
--- /dev/null
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyNumericUtils.java
@@ -0,0 +1,571 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Random;
+
+import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LongBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+
+public class TestLegacyNumericUtils extends LuceneTestCase {
+
+  public void testLongConversionAndOrdering() throws Exception {
+    // generate a series of encoded longs, each numerical one bigger than the one before
+    BytesRefBuilder last = new BytesRefBuilder();
+    BytesRefBuilder act = new BytesRefBuilder();
+    for (long l=-100000L; l<100000L; l++) {
+      LegacyNumericUtils.longToPrefixCoded(l, 0, act);
+      if (last!=null) {
+        // test if smaller
+        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
+        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
+      }
+      // test if back and forward conversion works
+      assertEquals("forward and back conversion should generate same long", l, LegacyNumericUtils.prefixCodedToLong(act.get()));
+      // next step
+      last.copyBytes(act);
+    }
+  }
+
+  public void testIntConversionAndOrdering() throws Exception {
+    // generate a series of encoded ints, each numerical one bigger than the one before
+    BytesRefBuilder act = new BytesRefBuilder();
+    BytesRefBuilder last = new BytesRefBuilder();
+    for (int i=-100000; i<100000; i++) {
+      LegacyNumericUtils.intToPrefixCoded(i, 0, act);
+      if (last!=null) {
+        // test if smaller
+        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
+        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
+      }
+      // test if back and forward conversion works
+      assertEquals("forward and back conversion should generate same int", i, LegacyNumericUtils.prefixCodedToInt(act.get()));
+      // next step
+      last.copyBytes(act.get());
+    }
+  }
+
+  public void testLongSpecialValues() throws Exception {
+    long[] vals=new long[]{
+      Long.MIN_VALUE, Long.MIN_VALUE+1, Long.MIN_VALUE+2, -5003400000000L,
+      -4000L, -3000L, -2000L, -1000L, -1L, 0L, 1L, 10L, 300L, 50006789999999999L, Long.MAX_VALUE-2, Long.MAX_VALUE-1, Long.MAX_VALUE
+    };
+    BytesRefBuilder[] prefixVals = new BytesRefBuilder[vals.length];
+    
+    for (int i=0; i<vals.length; i++) {
+      prefixVals[i] = new BytesRefBuilder();
+      LegacyNumericUtils.longToPrefixCoded(vals[i], 0, prefixVals[i]);
+      
+      // check forward and back conversion
+      assertEquals( "forward and back conversion should generate same long", vals[i], LegacyNumericUtils.prefixCodedToLong(prefixVals[i].get()) );
+
+      // test if decoding values as int fails correctly
+      final int index = i;
+      expectThrows(NumberFormatException.class, () -> {
+        LegacyNumericUtils.prefixCodedToInt(prefixVals[index].get());
+      });
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<prefixVals.length; i++) {
+      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
+    }
+        
+    // check the prefix encoding: lower precision values should differ from the original value by exactly the removed lower bits
+    final BytesRefBuilder ref = new BytesRefBuilder();
+    for (int i=0; i<vals.length; i++) {
+      for (int j=0; j<64; j++) {
+        LegacyNumericUtils.longToPrefixCoded(vals[i], j, ref);
+        long prefixVal= LegacyNumericUtils.prefixCodedToLong(ref.get());
+        long mask=(1L << j) - 1L;
+        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
+      }
+    }
+  }
+
+  public void testIntSpecialValues() throws Exception {
+    int[] vals=new int[]{
+      Integer.MIN_VALUE, Integer.MIN_VALUE+1, Integer.MIN_VALUE+2, -64765767,
+      -4000, -3000, -2000, -1000, -1, 0, 1, 10, 300, 765878989, Integer.MAX_VALUE-2, Integer.MAX_VALUE-1, Integer.MAX_VALUE
+    };
+    BytesRefBuilder[] prefixVals=new BytesRefBuilder[vals.length];
+    
+    for (int i=0; i<vals.length; i++) {
+      prefixVals[i] = new BytesRefBuilder();
+      LegacyNumericUtils.intToPrefixCoded(vals[i], 0, prefixVals[i]);
+      
+      // check forward and back conversion
+      assertEquals( "forward and back conversion should generate same int", vals[i], LegacyNumericUtils.prefixCodedToInt(prefixVals[i].get()) );
+      
+      // test if decoding values as long fails correctly
+      final int index = i;
+      expectThrows(NumberFormatException.class, () -> {
+        LegacyNumericUtils.prefixCodedToLong(prefixVals[index].get());
+      });
+    }
+    
+    // check sort order (prefixVals should be ascending)
+    for (int i=1; i<prefixVals.length; i++) {
+      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
+    }
+    
+    // check the prefix encoding: lower precision values should differ from the original value by exactly the removed lower bits
+    final BytesRefBuilder ref = new BytesRefBuilder();
+    for (int i=0; i<vals.length; i++) {
+      for (int j=0; j<32; j++) {
+        LegacyNumericUtils.intToPrefixCoded(vals[i], j, ref);
+        int prefixVal= LegacyNumericUtils.prefixCodedToInt(ref.get());
+        int mask=(1 << j) - 1;
+        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
+      }
+    }
+  }
+
+  public void testDoubles() throws Exception {
+    double[] vals=new double[]{
+      Double.NEGATIVE_INFINITY, -2.3E25, -1.0E15, -1.0, -1.0E-1, -1.0E-2, -0.0, 
+      +0.0, 1.0E-2, 1.0E-1, 1.0, 1.0E15, 2.3E25, Double.POSITIVE_INFINITY, Double.NaN
+    };
+    long[] longVals=new long[vals.length];
+    
+    // check forward and back conversion
+    for (int i=0; i<vals.length; i++) {
+      longVals[i]= NumericUtils.doubleToSortableLong(vals[i]);
+      assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], NumericUtils.sortableLongToDouble(longVals[i]))==0 );
+    }
+    
+    // check sort order (longVals should be ascending)
+    for (int i=1; i<longVals.length; i++) {
+      assertTrue( "check sort order", longVals[i-1] < longVals[i] );
+    }
+  }
+
+  public static final double[] DOUBLE_NANs = {
+    Double.NaN,
+    Double.longBitsToDouble(0x7ff0000000000001L),
+    Double.longBitsToDouble(0x7fffffffffffffffL),
+    Double.longBitsToDouble(0xfff0000000000001L),
+    Double.longBitsToDouble(0xffffffffffffffffL)
+  };
+
+  public void testSortableDoubleNaN() {
+    final long plusInf = NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
+    for (double nan : DOUBLE_NANs) {
+      assertTrue(Double.isNaN(nan));
+      final long sortable = NumericUtils.doubleToSortableLong(nan);
+      assertTrue("Double not sorted correctly: " + nan + ", long repr: " 
+          + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
+    }
+  }
+  
+  public void testFloats() throws Exception {
+    float[] vals=new float[]{
+      Float.NEGATIVE_INFINITY, -2.3E25f, -1.0E15f, -1.0f, -1.0E-1f, -1.0E-2f, -0.0f, 
+      +0.0f, 1.0E-2f, 1.0E-1f, 1.0f, 1.0E15f, 2.3E25f, Float.POSITIVE_INFINITY, Float.NaN
+    };
+    int[] intVals=new int[vals.length];
+    
+    // check forward and back conversion
+    for (int i=0; i<vals.length; i++) {
+      intVals[i]= NumericUtils.floatToSortableInt(vals[i]);
+      assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], NumericUtils.sortableIntToFloat(intVals[i]))==0 );
+    }
+    
+    // check sort order (intVals should be ascending)
+    for (int i=1; i<intVals.length; i++) {
+      assertTrue( "check sort order", intVals[i-1] < intVals[i] );
+    }
+  }
+
+  public static final float[] FLOAT_NANs = {
+    Float.NaN,
+    Float.intBitsToFloat(0x7f800001),
+    Float.intBitsToFloat(0x7fffffff),
+    Float.intBitsToFloat(0xff800001),
+    Float.intBitsToFloat(0xffffffff)
+  };
+
+  public void testSortableFloatNaN() {
+    final int plusInf = NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
+    for (float nan : FLOAT_NANs) {
+      assertTrue(Float.isNaN(nan));
+      final int sortable = NumericUtils.floatToSortableInt(nan);
+      assertTrue("Float not sorted correctly: " + nan + ", int repr: " 
+          + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
+    }
+  }
+
+  // INFO: Tests for trieCodeLong()/trieCodeInt() not needed because implicitly tested by range filter tests
+  
+  /** Note: The neededBounds Iterable must contain unsigned values (unsigned values make it easier to follow what is happening) */
+  private void assertLongRangeSplit(final long lower, final long upper, int precisionStep,
+    final boolean useBitSet, final Iterable<Long> expectedBounds, final Iterable<Integer> expectedShifts
+  ) {
+    // Cannot use FixedBitSet since the range could be long:
+    final LongBitSet bits=useBitSet ? new LongBitSet(upper-lower+1) : null;
+    final Iterator<Long> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
+    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
+
+    LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
+      @Override
+      public void addRange(long min, long max, int shift) {
+        assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
+        if (useBitSet) for (long l = min; l <= max; l++) {
+          assertFalse("ranges should not overlap", bits.getAndSet(l - lower));
+          // extra exit condition to prevent overflow on MAX_VALUE
+          if (l == max) break;
+        }
+        if (neededBounds == null || neededShifts == null)
+          return;
+        // make unsigned longs for easier display and understanding
+        min ^= 0x8000000000000000L;
+        max ^= 0x8000000000000000L;
+        //System.out.println("0x"+Long.toHexString(min>>>shift)+"L,0x"+Long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
+        assertEquals("shift", neededShifts.next().intValue(), shift);
+        assertEquals("inner min bound", neededBounds.next().longValue(), min >>> shift);
+        assertEquals("inner max bound", neededBounds.next().longValue(), max >>> shift);
+      }
+    }, precisionStep, lower, upper);
+    
+    if (useBitSet) {
+      // after flipping all bits in the range, the cardinality should be zero
+      bits.flip(0,upper-lower+1);
+      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
+    }
+  }
+  
+  /** LUCENE-2541: LegacyNumericRangeQuery errors with endpoints near long min and max values */
+  public void testLongExtremeValues() throws Exception {
+    // upper end extremes
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 1, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 2, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 6, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 8, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 64, true, Arrays.asList(
+      0xffffffffffffffffL,0xffffffffffffffffL
+    ), Arrays.asList(
+      0
+    ));
+
+    assertLongRangeSplit(Long.MAX_VALUE-0xfL, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xfffffffffffffffL,0xfffffffffffffffL
+    ), Arrays.asList(
+      4
+    ));
+    assertLongRangeSplit(Long.MAX_VALUE-0x10L, Long.MAX_VALUE, 4, true, Arrays.asList(
+      0xffffffffffffffefL,0xffffffffffffffefL,
+      0xfffffffffffffffL,0xfffffffffffffffL
+    ), Arrays.asList(
+      0, 4
+    ));
+
+    // lower end extremes
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 1, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 2, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 4, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 6, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 8, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 64, true, Arrays.asList(
+      0x0000000000000000L,0x0000000000000000L
+    ), Arrays.asList(
+      0
+    ));
+
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0xfL, 4, true, Arrays.asList(
+      0x000000000000000L,0x000000000000000L
+    ), Arrays.asList(
+      4
+    ));
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0x10L, 4, true, Arrays.asList(
+      0x0000000000000010L,0x0000000000000010L,
+      0x000000000000000L,0x000000000000000L
+    ), Arrays.asList(
+      0, 4
+    ));
+  }
+  
+  public void testRandomSplit() throws Exception {
+    long num = (long) atLeast(10);
+    for (long i=0; i < num; i++) {
+      executeOneRandomSplit(random());
+    }
+  }
+  
+  private void executeOneRandomSplit(final Random random) throws Exception {
+    long lower = randomLong(random);
+    long len = random.nextInt(16384*1024); // not too large bitsets, else OOME!
+    while (lower + len < lower) { // overflow
+      lower >>= 1;
+    }
+    assertLongRangeSplit(lower, lower + len, random.nextInt(64) + 1, true, null, null);
+  }
+  
+  private long randomLong(final Random random) {
+    long val;
+    switch(random.nextInt(4)) {
+      case 0:
+        val = 1L << (random.nextInt(63)); //  patterns like 0x000000100000 (-1 yields patterns like 0x0000fff)
+        break;
+      case 1:
+        val = -1L << (random.nextInt(63)); // patterns like 0xfffff00000
+        break;
+      default:
+        val = random.nextLong();
+    }
+
+    val += random.nextInt(5)-2;
+
+    if (random.nextBoolean()) {
+      if (random.nextBoolean()) val += random.nextInt(100)-50;
+      if (random.nextBoolean()) val = ~val;
+      if (random.nextBoolean()) val = val<<1;
+      if (random.nextBoolean()) val = val>>>1;
+    }
+
+    return val;
+  }
+  
+  public void testSplitLongRange() throws Exception {
+    // a hard-coded "standard" range
+    assertLongRangeSplit(-5000L, 9500L, 4, true, Arrays.asList(
+      0x7fffffffffffec78L,0x7fffffffffffec7fL,
+      0x8000000000002510L,0x800000000000251cL,
+      0x7fffffffffffec8L, 0x7fffffffffffecfL,
+      0x800000000000250L, 0x800000000000250L,
+      0x7fffffffffffedL,  0x7fffffffffffefL,
+      0x80000000000020L,  0x80000000000024L,
+      0x7ffffffffffffL,   0x8000000000001L
+    ), Arrays.asList(
+      0, 0,
+      4, 4,
+      8, 8,
+      12
+    ));
+    
+    // the same with no range splitting
+    assertLongRangeSplit(-5000L, 9500L, 64, true, Arrays.asList(
+      0x7fffffffffffec78L,0x800000000000251cL
+    ), Arrays.asList(
+      0
+    ));
+    
+    // this tests optimized range splitting, if one of the inner bounds
+    // is also the bound of the next lower precision, it should be used completely
+    assertLongRangeSplit(0L, 1024L+63L, 4, true, Arrays.asList(
+      0x800000000000040L, 0x800000000000043L,
+      0x80000000000000L,  0x80000000000003L
+    ), Arrays.asList(
+      4, 8
+    ));
+    
+    // the full long range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 8, false, Arrays.asList(
+      0x00L,0xffL
+    ), Arrays.asList(
+      56
+    ));
+
+    // the same with precisionStep=4
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 4, false, Arrays.asList(
+      0x0L,0xfL
+    ), Arrays.asList(
+      60
+    ));
+
+    // the same with precisionStep=2
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 2, false, Arrays.asList(
+      0x0L,0x3L
+    ), Arrays.asList(
+      62
+    ));
+
+    // the same with precisionStep=1
+    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 1, false, Arrays.asList(
+      0x0L,0x1L
+    ), Arrays.asList(
+      63
+    ));
+
+    // an inverse range should produce no sub-ranges
+    assertLongRangeSplit(9500L, -5000L, 4, false, Collections.<Long>emptyList(), Collections.<Integer>emptyList());    
+
+    // a 0-length range should reproduce the range itself
+    assertLongRangeSplit(9500L, 9500L, 4, false, Arrays.asList(
+      0x800000000000251cL,0x800000000000251cL
+    ), Arrays.asList(
+      0
+    ));
+  }
+
+  /** Note: The neededBounds Iterable must contain unsigned values (unsigned values make it easier to follow what is happening) */
+  private void assertIntRangeSplit(final int lower, final int upper, int precisionStep,
+    final boolean useBitSet, final Iterable<Integer> expectedBounds, final Iterable<Integer> expectedShifts
+  ) {
+    final FixedBitSet bits=useBitSet ? new FixedBitSet(upper-lower+1) : null;
+    final Iterator<Integer> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
+    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
+    
+    LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
+      @Override
+      public void addRange(int min, int max, int shift) {
+        assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
+        if (useBitSet) for (int i = min; i <= max; i++) {
+          assertFalse("ranges should not overlap", bits.getAndSet(i - lower));
+          // extra exit condition to prevent overflow on MAX_VALUE
+          if (i == max) break;
+        }
+        if (neededBounds == null)
+          return;
+        // make unsigned ints for easier display and understanding
+        min ^= 0x80000000;
+        max ^= 0x80000000;
+        //System.out.println("0x"+Integer.toHexString(min>>>shift)+",0x"+Integer.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
+        assertEquals("shift", neededShifts.next().intValue(), shift);
+        assertEquals("inner min bound", neededBounds.next().intValue(), min >>> shift);
+        assertEquals("inner max bound", neededBounds.next().intValue(), max >>> shift);
+      }
+    }, precisionStep, lower, upper);
+    
+    if (useBitSet) {
+      // after flipping all bits in the range, the cardinality should be zero
+      bits.flip(0, upper-lower+1);
+      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
+    }
+  }
+  
+  public void testSplitIntRange() throws Exception {
+    // a hard-coded "standard" range
+    assertIntRangeSplit(-5000, 9500, 4, true, Arrays.asList(
+      0x7fffec78,0x7fffec7f,
+      0x80002510,0x8000251c,
+      0x7fffec8, 0x7fffecf,
+      0x8000250, 0x8000250,
+      0x7fffed,  0x7fffef,
+      0x800020,  0x800024,
+      0x7ffff,   0x80001
+    ), Arrays.asList(
+      0, 0,
+      4, 4,
+      8, 8,
+      12
+    ));
+    
+    // the same with no range splitting
+    assertIntRangeSplit(-5000, 9500, 32, true, Arrays.asList(
+      0x7fffec78,0x8000251c
+    ), Arrays.asList(
+      0
+    ));
+    
+    // this tests optimized range splitting, if one of the inner bounds
+    // is also the bound of the next lower precision, it should be used completely
+    assertIntRangeSplit(0, 1024+63, 4, true, Arrays.asList(
+      0x8000040, 0x8000043,
+      0x800000,  0x800003
+    ), Arrays.asList(
+      4, 8
+    ));
+    
+    // the full int range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 8, false, Arrays.asList(
+      0x00,0xff
+    ), Arrays.asList(
+      24
+    ));
+
+    // the same with precisionStep=4
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 4, false, Arrays.asList(
+      0x0,0xf
+    ), Arrays.asList(
+      28
+    ));
+
+    // the same with precisionStep=2
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 2, false, Arrays.asList(
+      0x0,0x3
+    ), Arrays.asList(
+      30
+    ));
+
+    // the same with precisionStep=1
+    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 1, false, Arrays.asList(
+      0x0,0x1
+    ), Arrays.asList(
+      31
+    ));
+
+    // an inverse range should produce no sub-ranges
+    assertIntRangeSplit(9500, -5000, 4, false, Collections.<Integer>emptyList(), Collections.<Integer>emptyList());    
+
+    // a 0-length range should reproduce the range itself
+    assertIntRangeSplit(9500, 9500, 4, false, Arrays.asList(
+      0x8000251c,0x8000251c
+    ), Arrays.asList(
+      0
+    ));
+  }
+
+}

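Most of the assertions above boil down to the prefix-coded term encoding being order preserving and lossless at shift 0. A minimal sketch of that round trip (PrefixCodeSketch is an invented name):

    import org.apache.lucene.legacy.LegacyNumericUtils;
    import org.apache.lucene.util.BytesRefBuilder;

    public class PrefixCodeSketch {
      public static void main(String[] args) {
        BytesRefBuilder bytes = new BytesRefBuilder();
        long value = 9500L;
        // shift 0 keeps full precision; larger shifts drop that many low-order bits (trie prefixes)
        LegacyNumericUtils.longToPrefixCoded(value, 0, bytes);
        long decoded = LegacyNumericUtils.prefixCodedToLong(bytes.get());
        System.out.println(value == decoded); // true, per the round-trip tests above
      }
    }
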
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
new file mode 100644
index 0000000..27fae15
--- /dev/null
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.legacy.LegacyDoubleField;
+import org.apache.lucene.legacy.LegacyFloatField;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+
+public class TestLegacyTerms extends LuceneTestCase {
+
+  public void testEmptyIntFieldMinMax() throws Exception {
+    assertNull(LegacyNumericUtils.getMinInt(EMPTY_TERMS));
+    assertNull(LegacyNumericUtils.getMaxInt(EMPTY_TERMS));
+  }
+  
+  public void testIntFieldMinMax() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int numDocs = atLeast(100);
+    int minValue = Integer.MAX_VALUE;
+    int maxValue = Integer.MIN_VALUE;
+    for(int i=0;i<numDocs;i++ ){
+      Document doc = new Document();
+      int num = random().nextInt();
+      minValue = Math.min(num, minValue);
+      maxValue = Math.max(num, maxValue);
+      doc.add(new LegacyIntField("field", num, Field.Store.NO));
+      w.addDocument(doc);
+    }
+    
+    IndexReader r = w.getReader();
+    Terms terms = MultiFields.getTerms(r, "field");
+    assertEquals(new Integer(minValue), LegacyNumericUtils.getMinInt(terms));
+    assertEquals(new Integer(maxValue), LegacyNumericUtils.getMaxInt(terms));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testEmptyLongFieldMinMax() throws Exception {
+    assertNull(LegacyNumericUtils.getMinLong(EMPTY_TERMS));
+    assertNull(LegacyNumericUtils.getMaxLong(EMPTY_TERMS));
+  }
+  
+  public void testLongFieldMinMax() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int numDocs = atLeast(100);
+    long minValue = Long.MAX_VALUE;
+    long maxValue = Long.MIN_VALUE;
+    for(int i=0;i<numDocs;i++ ){
+      Document doc = new Document();
+      long num = random().nextLong();
+      minValue = Math.min(num, minValue);
+      maxValue = Math.max(num, maxValue);
+      doc.add(new LegacyLongField("field", num, Field.Store.NO));
+      w.addDocument(doc);
+    }
+    
+    IndexReader r = w.getReader();
+
+    Terms terms = MultiFields.getTerms(r, "field");
+    assertEquals(new Long(minValue), LegacyNumericUtils.getMinLong(terms));
+    assertEquals(new Long(maxValue), LegacyNumericUtils.getMaxLong(terms));
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testFloatFieldMinMax() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int numDocs = atLeast(100);
+    float minValue = Float.POSITIVE_INFINITY;
+    float maxValue = Float.NEGATIVE_INFINITY;
+    for(int i=0;i<numDocs;i++ ){
+      Document doc = new Document();
+      float num = random().nextFloat();
+      minValue = Math.min(num, minValue);
+      maxValue = Math.max(num, maxValue);
+      doc.add(new LegacyFloatField("field", num, Field.Store.NO));
+      w.addDocument(doc);
+    }
+    
+    IndexReader r = w.getReader();
+    Terms terms = MultiFields.getTerms(r, "field");
+    assertEquals(minValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms)), 0.0f);
+    assertEquals(maxValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms)), 0.0f);
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testDoubleFieldMinMax() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int numDocs = atLeast(100);
+    double minValue = Double.POSITIVE_INFINITY;
+    double maxValue = Double.NEGATIVE_INFINITY;
+    for(int i=0;i<numDocs;i++ ){
+      Document doc = new Document();
+      double num = random().nextDouble();
+      minValue = Math.min(num, minValue);
+      maxValue = Math.max(num, maxValue);
+      doc.add(new LegacyDoubleField("field", num, Field.Store.NO));
+      w.addDocument(doc);
+    }
+    
+    IndexReader r = w.getReader();
+
+    Terms terms = MultiFields.getTerms(r, "field");
+
+    assertEquals(minValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)), 0.0);
+    assertEquals(maxValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)), 0.0);
+
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  /**
+   * A completely empty Terms instance that has no terms and supports no optional statistics
+   */
+  private static Terms EMPTY_TERMS = new Terms() {
+    public TermsEnum iterator() { return TermsEnum.EMPTY; }
+    public long size() { return -1; }
+    public long getSumTotalTermFreq() { return -1; }
+    public long getSumDocFreq() { return -1; }
+    public int getDocCount() { return -1; }
+    public boolean hasFreqs() { return false; }
+    public boolean hasOffsets() { return false; }
+    public boolean hasPositions() { return false; }
+    public boolean hasPayloads() { return false; }
+  };
+}

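The min/max helpers exercised above walk the prefix-coded terms of a legacy numeric field; float and double fields need an extra decode step because their terms are stored as sortable ints/longs. A sketch against an already-open IndexReader (class and method names are invented; the reader and field are assumed to exist):

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiFields;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.legacy.LegacyNumericUtils;
    import org.apache.lucene.util.NumericUtils;

    public class LegacyMinMaxSketch {
      /** Min/max of a LegacyIntField; prints "empty" if the field has no terms. */
      static void printIntMinMax(IndexReader reader, String field) throws IOException {
        Terms terms = MultiFields.getTerms(reader, field);
        if (terms == null) { System.out.println("no such field"); return; }
        Integer min = LegacyNumericUtils.getMinInt(terms);
        Integer max = LegacyNumericUtils.getMaxInt(terms);
        System.out.println(min == null ? "empty" : min + " .. " + max);
      }

      /** Min/max of a LegacyFloatField; its terms are sortable ints and must be decoded. */
      static void printFloatMinMax(IndexReader reader, String field) throws IOException {
        Terms terms = MultiFields.getTerms(reader, field); // assumed non-null and non-empty here
        System.out.println(NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms))
            + " .. " + NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms)));
      }
    }
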
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestMultiValuedNumericRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestMultiValuedNumericRangeQuery.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestMultiValuedNumericRangeQuery.java
new file mode 100644
index 0000000..386ec17
--- /dev/null
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestMultiValuedNumericRangeQuery.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import java.util.Locale;
+import java.text.DecimalFormat;
+import java.text.DecimalFormatSymbols;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
+
+  /** Tests LegacyNumericRangeQuery on a multi-valued field (multiple numeric values per document).
+   * This test ensures that a classical TermRangeQuery returns exactly the same document numbers as
+   * LegacyNumericRangeQuery (see SOLR-1322 for discussion), and that the multiple precision terms per numeric value
+   * do not interfere with multiple numeric values.
+   */
+  public void testMultiValuedNRQ() throws Exception {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+        newIndexWriterConfig(new MockAnalyzer(random()))
+        .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
+    
+    DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.ROOT));
+    
+    int num = atLeast(500);
+    for (int l = 0; l < num; l++) {
+      Document doc = new Document();
+      for (int m=0, c=random().nextInt(10); m<=c; m++) {
+        int value = random().nextInt(Integer.MAX_VALUE);
+        doc.add(newStringField("asc", format.format(value), Field.Store.NO));
+        doc.add(new LegacyIntField("trie", value, Field.Store.NO));
+      }
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher searcher=newSearcher(reader);
+    num = atLeast(50);
+    for (int i = 0; i < num; i++) {
+      int lower=random().nextInt(Integer.MAX_VALUE);
+      int upper=random().nextInt(Integer.MAX_VALUE);
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
+      TermRangeQuery cq=TermRangeQuery.newStringRange("asc", format.format(lower), format.format(upper), true, true);
+      LegacyNumericRangeQuery<Integer> tq= LegacyNumericRangeQuery.newIntRange("trie", lower, upper, true, true);
+      TopDocs trTopDocs = searcher.search(cq, 1);
+      TopDocs nrTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
+    }
+    reader.close();
+    directory.close();
+  }
+  
+}

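And for completeness, a sketch of issuing the same kind of range query that the test compares against TermRangeQuery; the "trie" field name follows the test, the class and method names are invented, and the reader is assumed to be open over an index containing LegacyIntField values:

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.legacy.LegacyNumericRangeQuery;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TopDocs;

    public class LegacyRangeQuerySketch {
      /** Counts docs whose (possibly multi-valued) "trie" field falls in [lower, upper]. */
      static long countInRange(IndexReader reader, int lower, int upper) throws IOException {
        IndexSearcher searcher = new IndexSearcher(reader);
        LegacyNumericRangeQuery<Integer> q =
            LegacyNumericRangeQuery.newIntRange("trie", lower, upper, true, true);
        TopDocs top = searcher.search(q, 1); // only the hit count is needed here
        return top.totalHits;
      }
    }
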

[2/7] lucene-solr:master: LUCENE-7413: move legacy numeric support to backwards module

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/util/TestLegacyNumericUtils.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestLegacyNumericUtils.java b/lucene/core/src/test/org/apache/lucene/util/TestLegacyNumericUtils.java
deleted file mode 100644
index 2fb20d1..0000000
--- a/lucene/core/src/test/org/apache/lucene/util/TestLegacyNumericUtils.java
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.util;
-
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Random;
-
-public class TestLegacyNumericUtils extends LuceneTestCase {
-
-  public void testLongConversionAndOrdering() throws Exception {
-    // generate a series of encoded longs, each numerical one bigger than the one before
-    BytesRefBuilder last = new BytesRefBuilder();
-    BytesRefBuilder act = new BytesRefBuilder();
-    for (long l=-100000L; l<100000L; l++) {
-      LegacyNumericUtils.longToPrefixCoded(l, 0, act);
-      if (last!=null) {
-        // test if smaller
-        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
-        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
-      }
-      // test is back and forward conversion works
-      assertEquals("forward and back conversion should generate same long", l, LegacyNumericUtils.prefixCodedToLong(act.get()));
-      // next step
-      last.copyBytes(act);
-    }
-  }
-
-  public void testIntConversionAndOrdering() throws Exception {
-    // generate a series of encoded ints, each numerical one bigger than the one before
-    BytesRefBuilder act = new BytesRefBuilder();
-    BytesRefBuilder last = new BytesRefBuilder();
-    for (int i=-100000; i<100000; i++) {
-      LegacyNumericUtils.intToPrefixCoded(i, 0, act);
-      if (last!=null) {
-        // test if smaller
-        assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
-        assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
-      }
-      // test is back and forward conversion works
-      assertEquals("forward and back conversion should generate same int", i, LegacyNumericUtils.prefixCodedToInt(act.get()));
-      // next step
-      last.copyBytes(act.get());
-    }
-  }
-
-  public void testLongSpecialValues() throws Exception {
-    long[] vals=new long[]{
-      Long.MIN_VALUE, Long.MIN_VALUE+1, Long.MIN_VALUE+2, -5003400000000L,
-      -4000L, -3000L, -2000L, -1000L, -1L, 0L, 1L, 10L, 300L, 50006789999999999L, Long.MAX_VALUE-2, Long.MAX_VALUE-1, Long.MAX_VALUE
-    };
-    BytesRefBuilder[] prefixVals = new BytesRefBuilder[vals.length];
-    
-    for (int i=0; i<vals.length; i++) {
-      prefixVals[i] = new BytesRefBuilder();
-      LegacyNumericUtils.longToPrefixCoded(vals[i], 0, prefixVals[i]);
-      
-      // check forward and back conversion
-      assertEquals( "forward and back conversion should generate same long", vals[i], LegacyNumericUtils.prefixCodedToLong(prefixVals[i].get()) );
-
-      // test if decoding values as int fails correctly
-      final int index = i;
-      expectThrows(NumberFormatException.class, () -> {
-        LegacyNumericUtils.prefixCodedToInt(prefixVals[index].get());
-      });
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<prefixVals.length; i++) {
-      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
-    }
-        
-    // check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
-    final BytesRefBuilder ref = new BytesRefBuilder();
-    for (int i=0; i<vals.length; i++) {
-      for (int j=0; j<64; j++) {
-        LegacyNumericUtils.longToPrefixCoded(vals[i], j, ref);
-        long prefixVal= LegacyNumericUtils.prefixCodedToLong(ref.get());
-        long mask=(1L << j) - 1L;
-        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
-      }
-    }
-  }
-
-  public void testIntSpecialValues() throws Exception {
-    int[] vals=new int[]{
-      Integer.MIN_VALUE, Integer.MIN_VALUE+1, Integer.MIN_VALUE+2, -64765767,
-      -4000, -3000, -2000, -1000, -1, 0, 1, 10, 300, 765878989, Integer.MAX_VALUE-2, Integer.MAX_VALUE-1, Integer.MAX_VALUE
-    };
-    BytesRefBuilder[] prefixVals=new BytesRefBuilder[vals.length];
-    
-    for (int i=0; i<vals.length; i++) {
-      prefixVals[i] = new BytesRefBuilder();
-      LegacyNumericUtils.intToPrefixCoded(vals[i], 0, prefixVals[i]);
-      
-      // check forward and back conversion
-      assertEquals( "forward and back conversion should generate same int", vals[i], LegacyNumericUtils.prefixCodedToInt(prefixVals[i].get()) );
-      
-      // test if decoding values as long fails correctly
-      final int index = i;
-      expectThrows(NumberFormatException.class, () -> {
-        LegacyNumericUtils.prefixCodedToLong(prefixVals[index].get());
-      });
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<prefixVals.length; i++) {
-      assertTrue( "check sort order", prefixVals[i-1].get().compareTo(prefixVals[i].get()) < 0 );
-    }
-    
-    // check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
-    final BytesRefBuilder ref = new BytesRefBuilder();
-    for (int i=0; i<vals.length; i++) {
-      for (int j=0; j<32; j++) {
-        LegacyNumericUtils.intToPrefixCoded(vals[i], j, ref);
-        int prefixVal= LegacyNumericUtils.prefixCodedToInt(ref.get());
-        int mask=(1 << j) - 1;
-        assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
-      }
-    }
-  }
-
-  public void testDoubles() throws Exception {
-    double[] vals=new double[]{
-      Double.NEGATIVE_INFINITY, -2.3E25, -1.0E15, -1.0, -1.0E-1, -1.0E-2, -0.0, 
-      +0.0, 1.0E-2, 1.0E-1, 1.0, 1.0E15, 2.3E25, Double.POSITIVE_INFINITY, Double.NaN
-    };
-    long[] longVals=new long[vals.length];
-    
-    // check forward and back conversion
-    for (int i=0; i<vals.length; i++) {
-      longVals[i]= NumericUtils.doubleToSortableLong(vals[i]);
-      assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], NumericUtils.sortableLongToDouble(longVals[i]))==0 );
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<longVals.length; i++) {
-      assertTrue( "check sort order", longVals[i-1] < longVals[i] );
-    }
-  }
-
-  public static final double[] DOUBLE_NANs = {
-    Double.NaN,
-    Double.longBitsToDouble(0x7ff0000000000001L),
-    Double.longBitsToDouble(0x7fffffffffffffffL),
-    Double.longBitsToDouble(0xfff0000000000001L),
-    Double.longBitsToDouble(0xffffffffffffffffL)
-  };
-
-  public void testSortableDoubleNaN() {
-    final long plusInf = NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
-    for (double nan : DOUBLE_NANs) {
-      assertTrue(Double.isNaN(nan));
-      final long sortable = NumericUtils.doubleToSortableLong(nan);
-      assertTrue("Double not sorted correctly: " + nan + ", long repr: " 
-          + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
-    }
-  }
-  
-  public void testFloats() throws Exception {
-    float[] vals=new float[]{
-      Float.NEGATIVE_INFINITY, -2.3E25f, -1.0E15f, -1.0f, -1.0E-1f, -1.0E-2f, -0.0f, 
-      +0.0f, 1.0E-2f, 1.0E-1f, 1.0f, 1.0E15f, 2.3E25f, Float.POSITIVE_INFINITY, Float.NaN
-    };
-    int[] intVals=new int[vals.length];
-    
-    // check forward and back conversion
-    for (int i=0; i<vals.length; i++) {
-      intVals[i]= NumericUtils.floatToSortableInt(vals[i]);
-      assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], NumericUtils.sortableIntToFloat(intVals[i]))==0 );
-    }
-    
-    // check sort order (prefixVals should be ascending)
-    for (int i=1; i<intVals.length; i++) {
-      assertTrue( "check sort order", intVals[i-1] < intVals[i] );
-    }
-  }
-
-  public static final float[] FLOAT_NANs = {
-    Float.NaN,
-    Float.intBitsToFloat(0x7f800001),
-    Float.intBitsToFloat(0x7fffffff),
-    Float.intBitsToFloat(0xff800001),
-    Float.intBitsToFloat(0xffffffff)
-  };
-
-  public void testSortableFloatNaN() {
-    final int plusInf = NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
-    for (float nan : FLOAT_NANs) {
-      assertTrue(Float.isNaN(nan));
-      final int sortable = NumericUtils.floatToSortableInt(nan);
-      assertTrue("Float not sorted correctly: " + nan + ", int repr: " 
-          + sortable + ", positive inf.: " + plusInf, sortable > plusInf);
-    }
-  }
-
-  // INFO: Tests for trieCodeLong()/trieCodeInt() not needed because implicitely tested by range filter tests
-  
-  /** Note: The neededBounds Iterable must be unsigned (easier understanding what's happening) */
-  private void assertLongRangeSplit(final long lower, final long upper, int precisionStep,
-    final boolean useBitSet, final Iterable<Long> expectedBounds, final Iterable<Integer> expectedShifts
-  ) {
-    // Cannot use FixedBitSet since the range could be long:
-    final LongBitSet bits=useBitSet ? new LongBitSet(upper-lower+1) : null;
-    final Iterator<Long> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
-    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
-
-    LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
-      @Override
-      public void addRange(long min, long max, int shift) {
-        assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
-        if (useBitSet) for (long l = min; l <= max; l++) {
-          assertFalse("ranges should not overlap", bits.getAndSet(l - lower));
-          // extra exit condition to prevent overflow on MAX_VALUE
-          if (l == max) break;
-        }
-        if (neededBounds == null || neededShifts == null)
-          return;
-        // make unsigned longs for easier display and understanding
-        min ^= 0x8000000000000000L;
-        max ^= 0x8000000000000000L;
-        //System.out.println("0x"+Long.toHexString(min>>>shift)+"L,0x"+Long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
-        assertEquals("shift", neededShifts.next().intValue(), shift);
-        assertEquals("inner min bound", neededBounds.next().longValue(), min >>> shift);
-        assertEquals("inner max bound", neededBounds.next().longValue(), max >>> shift);
-      }
-    }, precisionStep, lower, upper);
-    
-    if (useBitSet) {
-      // after flipping all bits in the range, the cardinality should be zero
-      bits.flip(0,upper-lower+1);
-      assertEquals("The sub-range concenated should match the whole range", 0, bits.cardinality());
-    }
-  }
-  
-  /** LUCENE-2541: LegacyNumericRangeQuery errors with endpoints near long min and max values */
-  public void testLongExtremeValues() throws Exception {
-    // upper end extremes
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 1, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 2, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 6, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 8, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 64, true, Arrays.asList(
-      0xffffffffffffffffL,0xffffffffffffffffL
-    ), Arrays.asList(
-      0
-    ));
-
-    assertLongRangeSplit(Long.MAX_VALUE-0xfL, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xfffffffffffffffL,0xfffffffffffffffL
-    ), Arrays.asList(
-      4
-    ));
-    assertLongRangeSplit(Long.MAX_VALUE-0x10L, Long.MAX_VALUE, 4, true, Arrays.asList(
-      0xffffffffffffffefL,0xffffffffffffffefL,
-      0xfffffffffffffffL,0xfffffffffffffffL
-    ), Arrays.asList(
-      0, 4
-    ));
-
-    // lower end extremes
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 1, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 2, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 4, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 6, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 8, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE, 64, true, Arrays.asList(
-      0x0000000000000000L,0x0000000000000000L
-    ), Arrays.asList(
-      0
-    ));
-
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0xfL, 4, true, Arrays.asList(
-      0x000000000000000L,0x000000000000000L
-    ), Arrays.asList(
-      4
-    ));
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MIN_VALUE+0x10L, 4, true, Arrays.asList(
-      0x0000000000000010L,0x0000000000000010L,
-      0x000000000000000L,0x000000000000000L
-    ), Arrays.asList(
-      0, 4
-    ));
-  }
-  
-  public void testRandomSplit() throws Exception {
-    long num = (long) atLeast(10);
-    for (long i=0; i < num; i++) {
-      executeOneRandomSplit(random());
-    }
-  }
-  
-  private void executeOneRandomSplit(final Random random) throws Exception {
-    long lower = randomLong(random);
-    long len = random.nextInt(16384*1024); // not too large bitsets, else OOME!
-    while (lower + len < lower) { // overflow
-      lower >>= 1;
-    }
-    assertLongRangeSplit(lower, lower + len, random.nextInt(64) + 1, true, null, null);
-  }
-  
-  private long randomLong(final Random random) {
-    long val;
-    switch(random.nextInt(4)) {
-      case 0:
-        val = 1L << (random.nextInt(63)); //  patterns like 0x000000100000 (-1 yields patterns like 0x0000fff)
-        break;
-      case 1:
-        val = -1L << (random.nextInt(63)); // patterns like 0xfffff00000
-        break;
-      default:
-        val = random.nextLong();
-    }
-
-    val += random.nextInt(5)-2;
-
-    if (random.nextBoolean()) {
-      if (random.nextBoolean()) val += random.nextInt(100)-50;
-      if (random.nextBoolean()) val = ~val;
-      if (random.nextBoolean()) val = val<<1;
-      if (random.nextBoolean()) val = val>>>1;
-    }
-
-    return val;
-  }
-  
-  public void testSplitLongRange() throws Exception {
-    // a hard-coded "standard" range
-    assertLongRangeSplit(-5000L, 9500L, 4, true, Arrays.asList(
-      0x7fffffffffffec78L,0x7fffffffffffec7fL,
-      0x8000000000002510L,0x800000000000251cL,
-      0x7fffffffffffec8L, 0x7fffffffffffecfL,
-      0x800000000000250L, 0x800000000000250L,
-      0x7fffffffffffedL,  0x7fffffffffffefL,
-      0x80000000000020L,  0x80000000000024L,
-      0x7ffffffffffffL,   0x8000000000001L
-    ), Arrays.asList(
-      0, 0,
-      4, 4,
-      8, 8,
-      12
-    ));
-    
-    // the same with no range splitting
-    assertLongRangeSplit(-5000L, 9500L, 64, true, Arrays.asList(
-      0x7fffffffffffec78L,0x800000000000251cL
-    ), Arrays.asList(
-      0
-    ));
-    
-    // this tests optimized range splitting, if one of the inner bounds
-    // is also the bound of the next lower precision, it should be used completely
-    assertLongRangeSplit(0L, 1024L+63L, 4, true, Arrays.asList(
-      0x800000000000040L, 0x800000000000043L,
-      0x80000000000000L,  0x80000000000003L
-    ), Arrays.asList(
-      4, 8
-    ));
-    
-    // the full long range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 8, false, Arrays.asList(
-      0x00L,0xffL
-    ), Arrays.asList(
-      56
-    ));
-
-    // the same with precisionStep=4
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 4, false, Arrays.asList(
-      0x0L,0xfL
-    ), Arrays.asList(
-      60
-    ));
-
-    // the same with precisionStep=2
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 2, false, Arrays.asList(
-      0x0L,0x3L
-    ), Arrays.asList(
-      62
-    ));
-
-    // the same with precisionStep=1
-    assertLongRangeSplit(Long.MIN_VALUE, Long.MAX_VALUE, 1, false, Arrays.asList(
-      0x0L,0x1L
-    ), Arrays.asList(
-      63
-    ));
-
-    // an inverse range should produce no sub-ranges
-    assertLongRangeSplit(9500L, -5000L, 4, false, Collections.<Long>emptyList(), Collections.<Integer>emptyList());    
-
-    // a 0-length range should reproduce the range itself
-    assertLongRangeSplit(9500L, 9500L, 4, false, Arrays.asList(
-      0x800000000000251cL,0x800000000000251cL
-    ), Arrays.asList(
-      0
-    ));
-  }
-
-  /** Note: The neededBounds Iterable must be unsigned (easier understanding what's happening) */
-  private void assertIntRangeSplit(final int lower, final int upper, int precisionStep,
-    final boolean useBitSet, final Iterable<Integer> expectedBounds, final Iterable<Integer> expectedShifts
-  ) {
-    final FixedBitSet bits=useBitSet ? new FixedBitSet(upper-lower+1) : null;
-    final Iterator<Integer> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
-    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
-    
-    LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
-      @Override
-      public void addRange(int min, int max, int shift) {
-        assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
-        if (useBitSet) for (int i = min; i <= max; i++) {
-          assertFalse("ranges should not overlap", bits.getAndSet(i - lower));
-          // extra exit condition to prevent overflow on MAX_VALUE
-          if (i == max) break;
-        }
-        if (neededBounds == null)
-          return;
-        // make unsigned ints for easier display and understanding
-        min ^= 0x80000000;
-        max ^= 0x80000000;
-        //System.out.println("0x"+Integer.toHexString(min>>>shift)+",0x"+Integer.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
-        assertEquals("shift", neededShifts.next().intValue(), shift);
-        assertEquals("inner min bound", neededBounds.next().intValue(), min >>> shift);
-        assertEquals("inner max bound", neededBounds.next().intValue(), max >>> shift);
-      }
-    }, precisionStep, lower, upper);
-    
-    if (useBitSet) {
-      // after flipping all bits in the range, the cardinality should be zero
-      bits.flip(0, upper-lower+1);
-      assertEquals("The sub-range concatenated should match the whole range", 0, bits.cardinality());
-    }
-  }
-  
-  public void testSplitIntRange() throws Exception {
-    // a hard-coded "standard" range
-    assertIntRangeSplit(-5000, 9500, 4, true, Arrays.asList(
-      0x7fffec78,0x7fffec7f,
-      0x80002510,0x8000251c,
-      0x7fffec8, 0x7fffecf,
-      0x8000250, 0x8000250,
-      0x7fffed,  0x7fffef,
-      0x800020,  0x800024,
-      0x7ffff,   0x80001
-    ), Arrays.asList(
-      0, 0,
-      4, 4,
-      8, 8,
-      12
-    ));
-    
-    // the same with no range splitting
-    assertIntRangeSplit(-5000, 9500, 32, true, Arrays.asList(
-      0x7fffec78,0x8000251c
-    ), Arrays.asList(
-      0
-    ));
-    
-    // this tests optimized range splitting, if one of the inner bounds
-    // is also the bound of the next lower precision, it should be used completely
-    assertIntRangeSplit(0, 1024+63, 4, true, Arrays.asList(
-      0x8000040, 0x8000043,
-      0x800000,  0x800003
-    ), Arrays.asList(
-      4, 8
-    ));
-    
-    // the full int range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 8, false, Arrays.asList(
-      0x00,0xff
-    ), Arrays.asList(
-      24
-    ));
-
-    // the same with precisionStep=4
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 4, false, Arrays.asList(
-      0x0,0xf
-    ), Arrays.asList(
-      28
-    ));
-
-    // the same with precisionStep=2
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 2, false, Arrays.asList(
-      0x0,0x3
-    ), Arrays.asList(
-      30
-    ));
-
-    // the same with precisionStep=1
-    assertIntRangeSplit(Integer.MIN_VALUE, Integer.MAX_VALUE, 1, false, Arrays.asList(
-      0x0,0x1
-    ), Arrays.asList(
-      31
-    ));
-
-    // an inverse range should produce no sub-ranges
-    assertIntRangeSplit(9500, -5000, 4, false, Collections.<Integer>emptyList(), Collections.<Integer>emptyList());    
-
-    // a 0-length range should reproduce the range itself
-    assertIntRangeSplit(9500, 9500, 4, false, Arrays.asList(
-      0x8000251c,0x8000251c
-    ), Arrays.asList(
-      0
-    ));
-  }
-
-}
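The removed assertions above drive LegacyNumericUtils.splitLongRange(), which decomposes a signed long range into prefix-coded sub-ranges, one group per precision level; after this change the same entry point lives in the backward-codecs module under org.apache.lucene.legacy. A minimal sketch of calling it directly, assuming the backward-codecs jar is on the classpath; the bounds and precision step mirror the removed "hard-coded standard range" test, and the printing is purely illustrative.

    import org.apache.lucene.legacy.LegacyNumericUtils;

    public class SplitLongRangeSketch {
      public static void main(String[] args) {
        // Split [-5000, 9500] with precisionStep=4; the callback receives one
        // sub-range per precision level, identified by its bit shift.
        LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
          @Override
          public void addRange(long min, long max, int shift) {
            System.out.println("shift=" + shift + " [" + min + ".." + max + "]");
          }
        }, 4, -5000L, 9500L);
      }
    }

Each emitted sub-range corresponds to one pair of bounds asserted by assertLongRangeSplit above.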

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/join/build.xml
----------------------------------------------------------------------
diff --git a/lucene/join/build.xml b/lucene/join/build.xml
index b5360c4..b6878b8 100644
--- a/lucene/join/build.xml
+++ b/lucene/join/build.xml
@@ -26,6 +26,7 @@
 
   <path id="classpath">
     <pathelement path="${grouping.jar}"/>
+    <pathelement path="${backward-codecs.jar}"/>
     <path refid="base.classpath"/>
   </path>
 
@@ -34,13 +35,14 @@
     <pathelement location="${build.dir}/classes/java"/>
   </path>
 
-  <target name="init" depends="module-build.init,jar-grouping"/>
+  <target name="init" depends="module-build.init,jar-grouping,jar-backward-codecs"/>
 
-  <target name="javadocs" depends="javadocs-grouping,compile-core,check-javadocs-uptodate"
+  <target name="javadocs" depends="javadocs-grouping,javadocs-backward-codecs,compile-core,check-javadocs-uptodate"
           unless="javadocs-uptodate-${name}">
     <invoke-module-javadoc>
       <links>
         <link href="../grouping"/>
+        <link href="../backward-codecs"/>
       </links>
     </invoke-module-javadoc>
   </target>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/join/src/java/org/apache/lucene/search/join/DocValuesTermsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/DocValuesTermsCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/DocValuesTermsCollector.java
index a9b11ed..4bb692a 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/DocValuesTermsCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/DocValuesTermsCollector.java
@@ -19,8 +19,6 @@ package org.apache.lucene.search.join;
 import java.io.IOException;
 import java.util.function.LongConsumer;
 
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReader;
@@ -28,10 +26,11 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.legacy.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
 
 abstract class DocValuesTermsCollector<DV> extends SimpleCollector {
   
@@ -85,13 +84,13 @@ abstract class DocValuesTermsCollector<DV> extends SimpleCollector {
         return (l) -> LegacyNumericUtils.longToPrefixCoded(l, 0, bytes);
       default:
         throw new IllegalArgumentException("Unsupported "+type+
-            ". Only "+ LegacyNumericType.INT+" and "+ FieldType.LegacyNumericType.LONG+" are supported."
+            ". Only "+ LegacyNumericType.INT+" and "+ LegacyNumericType.LONG+" are supported."
             + "Field "+fieldName );
     }
   }
   
   /** this adapter is quite weird. ords are per doc index, don't use ords across different docs*/
-  static Function<SortedSetDocValues> sortedNumericAsSortedSetDocValues(String field, FieldType.LegacyNumericType numTyp) {
+  static Function<SortedSetDocValues> sortedNumericAsSortedSetDocValues(String field, LegacyNumericType numTyp) {
     return (ctx) -> {
       final SortedNumericDocValues numerics = DocValues.getSortedNumeric(ctx, field);
       final BytesRefBuilder bytes = new BytesRefBuilder();
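The hunk above now pulls LegacyNumericType and LegacyNumericUtils from org.apache.lucene.legacy; its LONG branch encodes raw long doc values into the prefix-coded term form that legacy numeric fields index. A standalone sketch of that encode/decode round trip at shift 0 (the value 42 is illustrative), assuming the backward-codecs jar is on the classpath:

    import org.apache.lucene.legacy.LegacyNumericUtils;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.BytesRefBuilder;

    public class PrefixCodedSketch {
      public static void main(String[] args) {
        BytesRefBuilder bytes = new BytesRefBuilder();
        // Encode the value the same way the collector's LONG branch does.
        LegacyNumericUtils.longToPrefixCoded(42L, 0, bytes);
        BytesRef term = bytes.get();
        // prefixCodedToLong inverts the shift-0 encoding.
        System.out.println(LegacyNumericUtils.prefixCodedToLong(term)); // prints 42
      }
    }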

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java b/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
index b0133e5..4942394 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
@@ -26,7 +26,7 @@ import java.util.function.BiConsumer;
 import java.util.function.LongFunction;
 
 import org.apache.lucene.document.DoublePoint;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.document.FloatPoint;
 import org.apache.lucene.document.IntPoint;
 import org.apache.lucene.document.LongPoint;
@@ -123,8 +123,8 @@ public final class JoinUtil {
    * @param multipleValuesPerDocument Whether the from field has multiple terms per document
    *                                  when true fromField might be {@link DocValuesType#SORTED_NUMERIC},
    *                                  otherwise fromField should be {@link DocValuesType#NUMERIC}
-   * @param toField                   The to field to join to, should be {@link org.apache.lucene.document.LegacyIntField} or {@link org.apache.lucene.document.LegacyLongField}
-   * @param numericType               either {@link org.apache.lucene.document.FieldType.LegacyNumericType#INT} or {@link org.apache.lucene.document.FieldType.LegacyNumericType#LONG}, it should correspond to fromField and toField types
+   * @param toField                   The to field to join to, should be {@link org.apache.lucene.legacy.LegacyIntField} or {@link org.apache.lucene.legacy.LegacyLongField}
+   * @param numericType               either {@link LegacyNumericType#INT} or {@link LegacyNumericType#LONG}, it should correspond to fromField and toField types
    * @param fromQuery                 The query to match documents on the from side
    * @param fromSearcher              The searcher that executed the specified fromQuery
    * @param scoreMode                 Instructs how scores from the fromQuery are mapped to the returned query
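The javadoc hunk above now points numeric joins at org.apache.lucene.legacy.LegacyIntField/LegacyLongField and LegacyNumericType. A hedged sketch of a call site shaped after the documented parameter list; the directory, query, score mode and the field name "productId" are assumptions, not part of this commit:

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.legacy.LegacyNumericType;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.join.JoinUtil;
    import org.apache.lucene.search.join.ScoreMode;
    import org.apache.lucene.store.Directory;

    public class NumericJoinSketch {
      static Query joinOnLongField(Directory fromDir) throws Exception {
        try (DirectoryReader reader = DirectoryReader.open(fromDir)) {
          IndexSearcher fromSearcher = new IndexSearcher(reader);
          // Per the javadoc above: the from field carries NUMERIC doc values
          // (single-valued, hence multipleValuesPerDocument=false) and the to
          // field is indexed as a LegacyLongField.
          return JoinUtil.createJoinQuery("productId", false, "productId",
              LegacyNumericType.LONG, new MatchAllDocsQuery(), fromSearcher,
              ScoreMode.None);
        }
      }
    }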

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
index 3b03bd3..a39c25f 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
@@ -27,6 +27,7 @@ import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
@@ -37,7 +38,6 @@ import org.apache.lucene.util.BitSetIterator;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.FixedBitSet;
-import org.apache.lucene.util.LegacyNumericUtils;
 
 class TermsIncludingScoreQuery extends Query {
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
index b29e9ff..6d9eb2a 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
@@ -37,12 +37,9 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DoubleDocValuesField;
 import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
 import org.apache.lucene.document.FloatDocValuesField;
 import org.apache.lucene.document.FloatPoint;
 import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
@@ -59,6 +56,9 @@ import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.MultiDocValues.OrdinalMap;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.NumericDocValues;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
----------------------------------------------------------------------
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
index 45e9551..a785720 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
@@ -45,7 +45,6 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FloatPoint;
 import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
@@ -457,9 +456,6 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
     Document doc = new Document();
     long randomLong = random().nextLong();
     doc.add(new NumericDocValuesField("numeric", randomLong));
-    if (random().nextBoolean()) {
-      doc.add(new LegacyLongField("numeric", randomLong, Field.Store.NO));
-    }
     int numValues = atLeast(5);
     for (int i = 0; i < numValues; i++) {
       randomLong = random().nextLong();
@@ -468,9 +464,6 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
         // randomly duplicate field/value
         doc.add(new SortedNumericDocValuesField("sorted_numeric", randomLong));
       }
-      if (random().nextBoolean()) {
-        doc.add(new LegacyLongField("numeric", randomLong, Field.Store.NO));
-      }
     }
     BytesRef randomTerm = new BytesRef(randomTerm());
     doc.add(new BinaryDocValuesField("binary", randomTerm));
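The hunk above stops adding a LegacyLongField alongside the doc-values fields in the randomized test document. For reference, a minimal sketch of the non-deprecated way to index a long for range queries, sorting and retrieval, which is the combination this commit steers callers toward (the field name is illustrative):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.document.StoredField;

    public class LongFieldSketch {
      static Document docWithLong(long value) {
        Document doc = new Document();
        doc.add(new LongPoint("numeric", value));              // indexed for range queries
        doc.add(new NumericDocValuesField("numeric", value));  // doc values for sorting/faceting
        doc.add(new StoredField("numeric", value));            // optional stored copy
        return doc;
      }
    }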

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java b/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java
index da9fdc5..05a3b23 100644
--- a/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java
+++ b/lucene/misc/src/test/org/apache/lucene/search/TestDiversifiedTopDocsCollector.java
@@ -21,11 +21,10 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FloatDocValuesField;
-import org.apache.lucene.document.LegacyFloatField;
 import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.FieldInvertState;
@@ -331,7 +330,7 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase {
         new BytesRef(""));
     Field weeksAtNumberOneField = new FloatDocValuesField("weeksAtNumberOne",
         0.0F);
-    Field weeksStoredField = new LegacyFloatField("weeks", 0.0F, Store.YES);
+    Field weeksStoredField = new StoredField("weeks", 0.0F);
     Field idField = newStringField("id", "", Field.Store.YES);
     Field songField = newTextField("song", "", Field.Store.NO);
     Field storedArtistField = newTextField("artistName", "", Field.Store.NO);
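The hunk above replaces a LegacyFloatField with a plain StoredField, which suggests the test only needed the stored value, not the deprecated trie indexing. A small sketch of the two variants side by side; the helper methods are illustrative, the field name and value come from the test:

    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StoredField;
    import org.apache.lucene.legacy.LegacyFloatField;

    public class StoredFloatSketch {
      // After the change: stored only, nothing indexed for this field.
      static Field storedOnly(float weeks) {
        return new StoredField("weeks", weeks);
      }

      // Before the change: a deprecated legacy field, stored and trie-indexed.
      @SuppressWarnings("deprecation")
      static Field legacyStored(float weeks) {
        return new LegacyFloatField("weeks", weeks, Field.Store.YES);
      }
    }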

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/queryparser/build.xml
----------------------------------------------------------------------
diff --git a/lucene/queryparser/build.xml b/lucene/queryparser/build.xml
index b6e43c2..f1d59a3 100644
--- a/lucene/queryparser/build.xml
+++ b/lucene/queryparser/build.xml
@@ -25,15 +25,17 @@
   <path id="classpath">
     <pathelement path="${queries.jar}"/>
     <pathelement path="${sandbox.jar}"/>
+    <pathelement path="${backward-codecs.jar}"/>
     <path refid="base.classpath"/>
   </path>
 
-  <target name="compile-core" depends="jar-queries,jar-sandbox,common.compile-core"/>
+  <target name="compile-core" depends="jar-backward-codecs,jar-queries,jar-sandbox,common.compile-core"/>
 
-  <target name="javadocs" depends="javadocs-queries,javadocs-sandbox,compile-core,check-javadocs-uptodate"
+  <target name="javadocs" depends="javadocs-backward-codecs,javadocs-queries,javadocs-sandbox,compile-core,check-javadocs-uptodate"
           unless="javadocs-uptodate-${name}">
     <invoke-module-javadoc>
       <links>
+        <link href="../backward-codecs"/>
         <link href="../queries"/>
         <link href="../sandbox"/>
       </links>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/LegacyNumericRangeQueryNodeBuilder.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/LegacyNumericRangeQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/LegacyNumericRangeQueryNodeBuilder.java
index 8ae7d5e..0781afb 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/LegacyNumericRangeQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/LegacyNumericRangeQueryNodeBuilder.java
@@ -16,7 +16,8 @@
  */
 package org.apache.lucene.queryparser.flexible.standard.builders;
 
-import org.apache.lucene.document.FieldType;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
@@ -25,12 +26,11 @@ import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.queryparser.flexible.standard.config.LegacyNumericConfig;
 import org.apache.lucene.queryparser.flexible.standard.nodes.LegacyNumericQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.LegacyNumericRangeQueryNode;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 
 /**
- * Builds {@link org.apache.lucene.search.LegacyNumericRangeQuery}s out of {@link LegacyNumericRangeQueryNode}s.
+ * Builds {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}s out of {@link LegacyNumericRangeQueryNode}s.
  *
- * @see org.apache.lucene.search.LegacyNumericRangeQuery
+ * @see org.apache.lucene.legacy.LegacyNumericRangeQuery
  * @see LegacyNumericRangeQueryNode
  * @deprecated Index with points and use {@link PointRangeQueryNodeBuilder} instead.
  */
@@ -56,7 +56,7 @@ public class LegacyNumericRangeQueryNodeBuilder implements StandardQueryBuilder
     Number upperNumber = upperNumericNode.getValue();
     
     LegacyNumericConfig numericConfig = numericRangeNode.getNumericConfig();
-    FieldType.LegacyNumericType numberType = numericConfig.getType();
+    LegacyNumericType numberType = numericConfig.getType();
     String field = StringUtils.toString(numericRangeNode.getField());
     boolean minInclusive = numericRangeNode.isLowerInclusive();
     boolean maxInclusive = numericRangeNode.isUpperInclusive();
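The builder above maps the node's LegacyNumericConfig type onto the corresponding LegacyNumericRangeQuery factory, now imported from org.apache.lucene.legacy. A hedged sketch of the kind of query its LONG branch ultimately produces; the field name, bounds and precision step are illustrative:

    import org.apache.lucene.legacy.LegacyNumericRangeQuery;
    import org.apache.lucene.search.Query;

    public class LegacyRangeQuerySketch {
      @SuppressWarnings("deprecation")
      static Query priceBetween(long lower, long upper) {
        // precisionStep 8 matches the Solr/trie default used elsewhere in this commit.
        return LegacyNumericRangeQuery.newLongRange("price", 8, lower, upper,
            true /*minInclusive*/, true /*maxInclusive*/);
      }
    }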

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/LegacyNumericConfig.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/LegacyNumericConfig.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/LegacyNumericConfig.java
index 6cd3c49..038023e 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/LegacyNumericConfig.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/config/LegacyNumericConfig.java
@@ -19,14 +19,13 @@ package org.apache.lucene.queryparser.flexible.standard.config;
 import java.text.NumberFormat;
 import java.util.Objects;
 
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericType;
 
 /**
  * This class holds the configuration used to parse numeric queries and create
- * {@link org.apache.lucene.search.LegacyNumericRangeQuery}s.
+ * {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}s.
  * 
- * @see org.apache.lucene.search.LegacyNumericRangeQuery
+ * @see org.apache.lucene.legacy.LegacyNumericRangeQuery
  * @see NumberFormat
  * @deprecated Index with Points instead and use {@link PointsConfig}
  */
@@ -37,7 +36,7 @@ public class LegacyNumericConfig {
   
   private NumberFormat format;
   
-  private FieldType.LegacyNumericType type;
+  private LegacyNumericType type;
   
   /**
    * Constructs a {@link LegacyNumericConfig} object.
@@ -52,7 +51,7 @@ public class LegacyNumericConfig {
    * 
    * @see LegacyNumericConfig#setPrecisionStep(int)
    * @see LegacyNumericConfig#setNumberFormat(NumberFormat)
-   * @see #setType(org.apache.lucene.document.FieldType.LegacyNumericType)
+   * @see #setType(LegacyNumericType)
    */
   public LegacyNumericConfig(int precisionStep, NumberFormat format,
       LegacyNumericType type) {
@@ -67,7 +66,7 @@ public class LegacyNumericConfig {
    * 
    * @return the precision used to index the numeric values
    * 
-   * @see org.apache.lucene.search.LegacyNumericRangeQuery#getPrecisionStep()
+   * @see org.apache.lucene.legacy.LegacyNumericRangeQuery#getPrecisionStep()
    */
   public int getPrecisionStep() {
     return precisionStep;
@@ -79,7 +78,7 @@ public class LegacyNumericConfig {
    * @param precisionStep
    *          the precision used to index the numeric values
    * 
-   * @see org.apache.lucene.search.LegacyNumericRangeQuery#getPrecisionStep()
+   * @see org.apache.lucene.legacy.LegacyNumericRangeQuery#getPrecisionStep()
    */
   public void setPrecisionStep(int precisionStep) {
     this.precisionStep = precisionStep;
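As the constructor hunk shows, LegacyNumericConfig now takes the relocated LegacyNumericType directly. A minimal sketch of building one for a long-valued field; the precision step, locale and number-format choice are assumptions:

    import java.text.NumberFormat;
    import java.util.Locale;

    import org.apache.lucene.legacy.LegacyNumericType;
    import org.apache.lucene.queryparser.flexible.standard.config.LegacyNumericConfig;

    public class NumericConfigSketch {
      @SuppressWarnings("deprecation")
      static LegacyNumericConfig longConfig() {
        // precisionStep, parse format and numeric type, in the constructor's order.
        return new LegacyNumericConfig(8, NumberFormat.getNumberInstance(Locale.ROOT),
            LegacyNumericType.LONG);
      }
    }

Such a config is what LegacyNumericRangeQueryNodeBuilder reads back via getNumericConfig() in the hunk further up.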

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/LegacyNumericRangeQueryNode.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/LegacyNumericRangeQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/LegacyNumericRangeQueryNode.java
index 27c285e..20cde35 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/LegacyNumericRangeQueryNode.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/LegacyNumericRangeQueryNode.java
@@ -16,8 +16,7 @@
  */
 package org.apache.lucene.queryparser.flexible.standard.nodes;
 
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
 import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
@@ -57,13 +56,13 @@ public class LegacyNumericRangeQueryNode extends
   private static LegacyNumericType getNumericDataType(Number number) throws QueryNodeException {
     
     if (number instanceof Long) {
-      return FieldType.LegacyNumericType.LONG;
+      return LegacyNumericType.LONG;
     } else if (number instanceof Integer) {
-      return FieldType.LegacyNumericType.INT;
+      return LegacyNumericType.INT;
     } else if (number instanceof Double) {
       return LegacyNumericType.DOUBLE;
     } else if (number instanceof Float) {
-      return FieldType.LegacyNumericType.FLOAT;
+      return LegacyNumericType.FLOAT;
     } else {
       throw new QueryNodeException(
           new MessageImpl(

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LegacyNumericRangeQueryBuilder.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LegacyNumericRangeQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LegacyNumericRangeQueryBuilder.java
index f7aef3f..9f4505f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LegacyNumericRangeQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LegacyNumericRangeQueryBuilder.java
@@ -16,19 +16,19 @@
  */
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.util.LegacyNumericUtils;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queryparser.xml.DOMUtils;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.apache.lucene.queryparser.xml.QueryBuilder;
 import org.w3c.dom.Element;
 
 /**
- * Creates a {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The table below specifies the required
+ * Creates a {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}. The table below specifies the required
  * attributes and the defaults if optional attributes are omitted. For more
  * detail on what each of the attributes actually do, consult the documentation
- * for {@link org.apache.lucene.search.LegacyNumericRangeQuery}:
+ * for {@link org.apache.lucene.legacy.LegacyNumericRangeQuery}:
  * <table summary="supported attributes">
  * <tr>
  * <th>Attribute name</th>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestLegacyNumericQueryParser.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestLegacyNumericQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestLegacyNumericQueryParser.java
index c6ab7f5..398923e 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestLegacyNumericQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestLegacyNumericQueryParser.java
@@ -32,15 +32,15 @@ import java.util.TimeZone;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.LegacyDoubleField;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyFloatField;
-import org.apache.lucene.document.LegacyIntField;
-import org.apache.lucene.document.LegacyLongField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.legacy.LegacyDoubleField;
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyFloatField;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyLongField;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax;
 import org.apache.lucene.queryparser.flexible.standard.config.NumberDateFormat;
@@ -179,7 +179,7 @@ public class TestLegacyNumericQueryParser extends LuceneTestCase {
       ;
     
     randomNumberMap.put(LegacyNumericType.LONG.name(), randomLong);
-    randomNumberMap.put(FieldType.LegacyNumericType.INT.name(), randomInt);
+    randomNumberMap.put(LegacyNumericType.INT.name(), randomInt);
     randomNumberMap.put(LegacyNumericType.FLOAT.name(), randomFloat);
     randomNumberMap.put(LegacyNumericType.DOUBLE.name(), randomDouble);
     randomNumberMap.put(DATE_FIELD_NAME, randomDate);
@@ -201,7 +201,7 @@ public class TestLegacyNumericQueryParser extends LuceneTestCase {
       numericConfigMap.put(type.name(), new LegacyNumericConfig(PRECISION_STEP,
           NUMBER_FORMAT, type));
 
-      FieldType ft = new FieldType(LegacyIntField.TYPE_NOT_STORED);
+      LegacyFieldType ft = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
       ft.setNumericType(type);
       ft.setStored(true);
       ft.setNumericPrecisionStep(PRECISION_STEP);
@@ -231,7 +231,7 @@ public class TestLegacyNumericQueryParser extends LuceneTestCase {
     
     numericConfigMap.put(DATE_FIELD_NAME, new LegacyNumericConfig(PRECISION_STEP,
         DATE_FORMAT, LegacyNumericType.LONG));
-    FieldType ft = new FieldType(LegacyLongField.TYPE_NOT_STORED);
+    LegacyFieldType ft = new LegacyFieldType(LegacyLongField.TYPE_NOT_STORED);
     ft.setStored(true);
     ft.setNumericPrecisionStep(PRECISION_STEP);
     LegacyLongField dateField = new LegacyLongField(DATE_FIELD_NAME, 0l, ft);
@@ -268,10 +268,10 @@ public class TestLegacyNumericQueryParser extends LuceneTestCase {
             || DATE_FIELD_NAME.equals(fieldName)) {
           number = -number.longValue();
           
-        } else if (FieldType.LegacyNumericType.DOUBLE.name().equals(fieldName)) {
+        } else if (LegacyNumericType.DOUBLE.name().equals(fieldName)) {
           number = -number.doubleValue();
           
-        } else if (FieldType.LegacyNumericType.FLOAT.name().equals(fieldName)) {
+        } else if (LegacyNumericType.FLOAT.name().equals(fieldName)) {
           number = -number.floatValue();
           
         } else if (LegacyNumericType.INT.name().equals(fieldName)) {
@@ -299,16 +299,16 @@ public class TestLegacyNumericQueryParser extends LuceneTestCase {
     numericFieldMap.get(LegacyNumericType.DOUBLE.name()).setDoubleValue(
         number.doubleValue());
     
-    number = getNumberType(numberType, FieldType.LegacyNumericType.INT.name());
-    numericFieldMap.get(FieldType.LegacyNumericType.INT.name()).setIntValue(
+    number = getNumberType(numberType, LegacyNumericType.INT.name());
+    numericFieldMap.get(LegacyNumericType.INT.name()).setIntValue(
         number.intValue());
     
     number = getNumberType(numberType, LegacyNumericType.LONG.name());
-    numericFieldMap.get(FieldType.LegacyNumericType.LONG.name()).setLongValue(
+    numericFieldMap.get(LegacyNumericType.LONG.name()).setLongValue(
         number.longValue());
     
-    number = getNumberType(numberType, FieldType.LegacyNumericType.FLOAT.name());
-    numericFieldMap.get(FieldType.LegacyNumericType.FLOAT.name()).setFloatValue(
+    number = getNumberType(numberType, LegacyNumericType.FLOAT.name());
+    numericFieldMap.get(LegacyNumericType.FLOAT.name()).setFloatValue(
         number.floatValue());
     
     number = getNumberType(numberType, DATE_FIELD_NAME);
@@ -456,7 +456,7 @@ public class TestLegacyNumericQueryParser extends LuceneTestCase {
 
     StringBuilder sb = new StringBuilder();
     
-    for (LegacyNumericType type : FieldType.LegacyNumericType.values()) {
+    for (LegacyNumericType type : LegacyNumericType.values()) {
       String boundStr = numberToString(getNumberType(boundType, type.name()));
       
       sb.append("+").append(type.name()).append(operator).append('"').append(boundStr).append('"').append(' ');
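The test hunks above switch the numeric field configuration from FieldType to LegacyFieldType, which is where setNumericType and setNumericPrecisionStep now live. A minimal sketch of that pattern, mirroring the date-field setup in the hunk; the field name and precision step are illustrative:

    import org.apache.lucene.legacy.LegacyFieldType;
    import org.apache.lucene.legacy.LegacyLongField;

    public class LegacyFieldTypeSketch {
      @SuppressWarnings("deprecation")
      static LegacyLongField timestampField(long value) {
        // Copy the not-stored defaults (numeric type LONG), then adjust.
        LegacyFieldType ft = new LegacyFieldType(LegacyLongField.TYPE_NOT_STORED);
        ft.setStored(true);
        ft.setNumericPrecisionStep(8);
        return new LegacyLongField("timestamp", value, ft);
      }
    }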

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java
index 71b627e..4763005 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/CoreParserTestIndexData.java
@@ -20,10 +20,10 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.LegacyIntField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.legacy.LegacyIntField;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java
index 8fc0641..0bc0195 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/builders/TestNumericRangeQueryBuilder.java
@@ -16,9 +16,9 @@
  */
 package org.apache.lucene.queryparser.xml.builders;
 
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
 import org.apache.lucene.queryparser.xml.ParserException;
 import org.w3c.dom.Document;
 import org.xml.sax.SAXException;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/spatial-extras/build.xml
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/build.xml b/lucene/spatial-extras/build.xml
index a77f9ea..2e425fd 100644
--- a/lucene/spatial-extras/build.xml
+++ b/lucene/spatial-extras/build.xml
@@ -31,6 +31,7 @@
   <path id="classpath">
     <path refid="base.classpath"/>
     <path refid="spatialjar"/>
+    <pathelement path="${backward-codecs.jar}" />
     <pathelement path="${queries.jar}" />
     <pathelement path="${misc.jar}" />
     <pathelement path="${spatial3d.jar}" />
@@ -42,16 +43,17 @@
     <pathelement path="src/test-files" />
   </path>
 
-  <target name="compile-core" depends="jar-queries,jar-misc,jar-spatial3d,common.compile-core" />
+  <target name="compile-core" depends="jar-backward-codecs,jar-queries,jar-misc,jar-spatial3d,common.compile-core" />
 
-  <target name="javadocs" depends="javadocs-queries,javadocs-misc,javadocs-spatial3d,compile-core,check-javadocs-uptodate"
+  <target name="javadocs" depends="javadocs-backward-codecs,javadocs-queries,javadocs-misc,javadocs-spatial3d,compile-core,check-javadocs-uptodate"
           unless="javadocs-uptodate-${name}">
     <invoke-module-javadoc>
       <links>
+        <link href="../backward-codecs"/>
         <link href="../queries"/>
         <link href="../misc"/>
         <link href="../spatial3d"/>
       </links>
     </invoke-module-javadoc>
   </target>
-</project>
\ No newline at end of file
+</project>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
index 63a1138..90e36d8 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/bbox/BBoxStrategy.java
@@ -20,17 +20,20 @@ import org.apache.lucene.document.DoubleDocValuesField;
 import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyDoubleField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.legacy.LegacyDoubleField;
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.legacy.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.spatial.SpatialStrategy;
@@ -39,7 +42,6 @@ import org.apache.lucene.spatial.query.SpatialOperation;
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
 import org.apache.lucene.spatial.util.DistanceToShapeValueSource;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.locationtech.spatial4j.context.SpatialContext;
 import org.locationtech.spatial4j.shape.Point;
@@ -87,7 +89,7 @@ public class BBoxStrategy extends SpatialStrategy {
   public static FieldType DEFAULT_FIELDTYPE;
 
   @Deprecated
-  public static FieldType LEGACY_FIELDTYPE;
+  public static LegacyFieldType LEGACY_FIELDTYPE;
   static {
     // Default: pointValues + docValues
     FieldType type = new FieldType();
@@ -97,14 +99,14 @@ public class BBoxStrategy extends SpatialStrategy {
     type.freeze();
     DEFAULT_FIELDTYPE = type;
     // Legacy default: legacyNumerics + docValues
-    type = new FieldType();
-    type.setIndexOptions(IndexOptions.DOCS);
-    type.setNumericType(FieldType.LegacyNumericType.DOUBLE);
-    type.setNumericPrecisionStep(8);// same as solr default
-    type.setDocValuesType(DocValuesType.NUMERIC);//docValues
-    type.setStored(false);
-    type.freeze();
-    LEGACY_FIELDTYPE = type;
+    LegacyFieldType legacyType = new LegacyFieldType();
+    legacyType.setIndexOptions(IndexOptions.DOCS);
+    legacyType.setNumericType(LegacyNumericType.DOUBLE);
+    legacyType.setNumericPrecisionStep(8);// same as solr default
+    legacyType.setDocValuesType(DocValuesType.NUMERIC);//docValues
+    legacyType.setStored(false);
+    legacyType.freeze();
+    LEGACY_FIELDTYPE = legacyType;
   }
 
   public static final String SUFFIX_MINX = "__minX";
@@ -130,7 +132,7 @@ public class BBoxStrategy extends SpatialStrategy {
   private final boolean hasDocVals;
   private final boolean hasPointVals;
   // equiv to "hasLegacyNumerics":
-  private final FieldType legacyNumericFieldType; // not stored; holds precision step.
+  private final LegacyFieldType legacyNumericFieldType; // not stored; holds precision step.
   private final FieldType xdlFieldType;
 
   /**
@@ -177,16 +179,17 @@ public class BBoxStrategy extends SpatialStrategy {
     if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
       numQuads++;
     }
-    if (fieldType.indexOptions() != IndexOptions.NONE && fieldType.numericType() != null) {
+    if (fieldType.indexOptions() != IndexOptions.NONE && fieldType instanceof LegacyFieldType && ((LegacyFieldType)fieldType).numericType() != null) {
       if (hasPointVals) {
         throw new IllegalArgumentException("pointValues and LegacyNumericType are mutually exclusive");
       }
-      if (fieldType.numericType() != FieldType.LegacyNumericType.DOUBLE) {
-        throw new IllegalArgumentException(getClass() + " does not support " + fieldType.numericType());
+      final LegacyFieldType legacyType = (LegacyFieldType) fieldType;
+      if (legacyType.numericType() != LegacyNumericType.DOUBLE) {
+        throw new IllegalArgumentException(getClass() + " does not support " + legacyType.numericType());
       }
       numQuads++;
-      legacyNumericFieldType = new FieldType(LegacyDoubleField.TYPE_NOT_STORED);
-      legacyNumericFieldType.setNumericPrecisionStep(fieldType.numericPrecisionStep());
+      legacyNumericFieldType = new LegacyFieldType(LegacyDoubleField.TYPE_NOT_STORED);
+      legacyNumericFieldType.setNumericPrecisionStep(legacyType.numericPrecisionStep());
       legacyNumericFieldType.freeze();
     } else {
       legacyNumericFieldType = null;
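Because numericType() is no longer on FieldType, the constructor hunk above detects a legacy configuration with an instanceof LegacyFieldType check before reading the numeric type. The same detection pattern in isolation, as a minimal sketch (the helper name is illustrative):

    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.legacy.LegacyFieldType;
    import org.apache.lucene.legacy.LegacyNumericType;

    public class LegacyDetectionSketch {
      /** Returns the legacy numeric type configured on ft, or null for points/doc-values-only types. */
      static LegacyNumericType legacyNumericType(FieldType ft) {
        if (ft.indexOptions() != IndexOptions.NONE && ft instanceof LegacyFieldType) {
          // numericType() only exists on LegacyFieldType after this change.
          return ((LegacyFieldType) ft).numericType();
        }
        return null;
      }
    }

PointVectorStrategy below applies the identical check in its constructor.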

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java
index e724ab0..757e2bd 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/BytesRefIteratorTokenStream.java
@@ -26,7 +26,7 @@ import org.apache.lucene.util.BytesRefIterator;
 /**
  * A TokenStream used internally by {@link org.apache.lucene.spatial.prefix.PrefixTreeStrategy}.
  *
- * This is modelled after {@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
+ * This is modelled after {@link org.apache.lucene.legacy.LegacyNumericTokenStream}.
  *
  * @lucene.internal
  */

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
index 197547c..59aff49 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/vector/PointVectorStrategy.java
@@ -20,16 +20,18 @@ import org.apache.lucene.document.DoubleDocValuesField;
 import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyDoubleField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.legacy.LegacyDoubleField;
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.queries.function.FunctionRangeQuery;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.spatial.SpatialStrategy;
 import org.apache.lucene.spatial.query.SpatialArgs;
@@ -85,7 +87,7 @@ public class PointVectorStrategy extends SpatialStrategy {
   public static FieldType DEFAULT_FIELDTYPE;
 
   @Deprecated
-  public static FieldType LEGACY_FIELDTYPE;
+  public static LegacyFieldType LEGACY_FIELDTYPE;
   static {
     // Default: pointValues + docValues
     FieldType type = new FieldType();
@@ -95,14 +97,14 @@ public class PointVectorStrategy extends SpatialStrategy {
     type.freeze();
     DEFAULT_FIELDTYPE = type;
     // Legacy default: legacyNumerics
-    type = new FieldType();
-    type.setIndexOptions(IndexOptions.DOCS);
-    type.setNumericType(FieldType.LegacyNumericType.DOUBLE);
-    type.setNumericPrecisionStep(8);// same as solr default
-    type.setDocValuesType(DocValuesType.NONE);//no docValues!
-    type.setStored(false);
-    type.freeze();
-    LEGACY_FIELDTYPE = type;
+    LegacyFieldType legacyType = new LegacyFieldType();
+    legacyType.setIndexOptions(IndexOptions.DOCS);
+    legacyType.setNumericType(LegacyNumericType.DOUBLE);
+    legacyType.setNumericPrecisionStep(8);// same as solr default
+    legacyType.setDocValuesType(DocValuesType.NONE);//no docValues!
+    legacyType.setStored(false);
+    legacyType.freeze();
+    LEGACY_FIELDTYPE = legacyType;
   }
 
   public static final String SUFFIX_X = "__x";
@@ -116,7 +118,7 @@ public class PointVectorStrategy extends SpatialStrategy {
   private final boolean hasDocVals;
   private final boolean hasPointVals;
   // equiv to "hasLegacyNumerics":
-  private final FieldType legacyNumericFieldType; // not stored; holds precision step.
+  private final LegacyFieldType legacyNumericFieldType; // not stored; holds precision step.
 
   /**
    * Create a new {@link PointVectorStrategy} instance that uses {@link DoublePoint} and {@link DoublePoint#newRangeQuery}
@@ -157,16 +159,17 @@ public class PointVectorStrategy extends SpatialStrategy {
     if ((this.hasPointVals = fieldType.pointDimensionCount() > 0)) {
       numPairs++;
     }
-    if (fieldType.indexOptions() != IndexOptions.NONE && fieldType.numericType() != null) {
+    if (fieldType.indexOptions() != IndexOptions.NONE && fieldType instanceof LegacyFieldType && ((LegacyFieldType)fieldType).numericType() != null) {
       if (hasPointVals) {
         throw new IllegalArgumentException("pointValues and LegacyNumericType are mutually exclusive");
       }
-      if (fieldType.numericType() != FieldType.LegacyNumericType.DOUBLE) {
-        throw new IllegalArgumentException(getClass() + " does not support " + fieldType.numericType());
+      final LegacyFieldType legacyType = (LegacyFieldType) fieldType;
+      if (legacyType.numericType() != LegacyNumericType.DOUBLE) {
+        throw new IllegalArgumentException(getClass() + " does not support " + legacyType.numericType());
       }
       numPairs++;
-      legacyNumericFieldType = new FieldType(LegacyDoubleField.TYPE_NOT_STORED);
-      legacyNumericFieldType.setNumericPrecisionStep(fieldType.numericPrecisionStep());
+      legacyNumericFieldType = new LegacyFieldType(LegacyDoubleField.TYPE_NOT_STORED);
+      legacyNumericFieldType.setNumericPrecisionStep(legacyType.numericPrecisionStep());
       legacyNumericFieldType.freeze();
     } else {
       legacyNumericFieldType = null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
----------------------------------------------------------------------
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
index 01e9259..20df730 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
@@ -22,6 +22,7 @@ import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.legacy.LegacyFieldType;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.spatial.SpatialMatchConcern;
 import org.apache.lucene.spatial.prefix.RandomSpatialOpStrategyTestCase;
@@ -100,7 +101,12 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
     }
     //test we can disable docValues for predicate tests
     if (random().nextBoolean()) {
-      FieldType fieldType = new FieldType(((BBoxStrategy)strategy).getFieldType());
+      FieldType fieldType = ((BBoxStrategy)strategy).getFieldType();
+      if (fieldType instanceof LegacyFieldType) {
+        fieldType = new LegacyFieldType((LegacyFieldType)fieldType);
+      } else {
+        fieldType = new FieldType(fieldType);
+      }
       fieldType.setDocValuesType(DocValuesType.NONE);
       strategy = new BBoxStrategy(ctx, strategy.getFieldName(), fieldType);
     }
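Since LegacyFieldType subclasses FieldType, cloning a strategy's field type before tweaking it has to preserve the runtime class, which is what the test hunk above does before disabling doc values. A minimal sketch of that copy step as a reusable helper (a sketch only, not the test's exact code):

    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.legacy.LegacyFieldType;

    public class CopyFieldTypeSketch {
      /** Copies ft so it can be modified, keeping LegacyFieldType instances legacy. */
      static FieldType mutableCopy(FieldType ft) {
        if (ft instanceof LegacyFieldType) {
          return new LegacyFieldType((LegacyFieldType) ft); // keeps numericType/precisionStep
        }
        return new FieldType(ft);
      }
    }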

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
----------------------------------------------------------------------
diff --git a/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
index 7a7e697..aadb9e2 100644
--- a/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
+++ b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/AnalyticsParsers.java
@@ -20,8 +20,8 @@ import java.io.IOException;
 import java.time.Instant;
 import java.util.Arrays;
 
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.solr.schema.FieldType;
 import org.apache.solr.schema.TrieDateField;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
----------------------------------------------------------------------
diff --git a/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
index 4d66e00..22dde4c 100644
--- a/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
+++ b/solr/contrib/analytics/src/java/org/apache/solr/analytics/util/valuesource/DateFieldSource.java
@@ -24,12 +24,12 @@ import java.util.Map;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.LongDocValues;
 import org.apache.lucene.queries.function.valuesource.LongFieldSource;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueDate;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/handler/component/StatsField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsField.java b/solr/core/src/java/org/apache/solr/handler/component/StatsField.java
index 4c2a2b6..5df1b45 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/StatsField.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/StatsField.java
@@ -29,8 +29,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.function.FunctionQuery;
 import org.apache.lucene.queries.function.ValueSource;
@@ -637,13 +636,13 @@ public class StatsField {
         return null;
       }
 
-      final FieldType.LegacyNumericType hashableNumType = getHashableNumericType(field);
+      final LegacyNumericType hashableNumType = getHashableNumericType(field);
 
       // some sane defaults
       int log2m = 13;   // roughly equivalent to "cardinality='0.33'"
       int regwidth = 6; // with decent hash, this is plenty for all valid long hashes
 
-      if (LegacyNumericType.FLOAT.equals(hashableNumType) || FieldType.LegacyNumericType.INT.equals(hashableNumType)) {
+      if (LegacyNumericType.FLOAT.equals(hashableNumType) || LegacyNumericType.INT.equals(hashableNumType)) {
         // for 32bit values, we can adjust our default regwidth down a bit
         regwidth--;
 
@@ -707,7 +706,7 @@ public class StatsField {
       if (null == hasher) {
         // if this is a function, or a non Long field, pre-hashed is invalid
         // NOTE: we ignore hashableNumType - it's LONG for non numerics like Strings
-        if (null == field || !FieldType.LegacyNumericType.LONG.equals(field.getType().getNumericType())) {
+        if (null == field || !LegacyNumericType.LONG.equals(field.getType().getNumericType())) {
           throw new SolrException(ErrorCode.BAD_REQUEST, "hllPreHashed is only supported with Long based fields");
         }
       }
@@ -740,16 +739,16 @@ public class StatsField {
   }
 
   /**
-   * Returns the effective {@link org.apache.lucene.document.FieldType.LegacyNumericType} for the field for the purposes of hash values.
+   * Returns the effective {@link LegacyNumericType} for the field for the purposes of hash values.
    * ie: If the field has an explicit LegacyNumericType, that is returned; If the field has no explicit
-   * LegacyNumericType then {@link org.apache.lucene.document.FieldType.LegacyNumericType#LONG} is returned;  If field is null, then
-   * {@link org.apache.lucene.document.FieldType.LegacyNumericType#FLOAT} is assumed for ValueSource.
+   * LegacyNumericType then {@link LegacyNumericType#LONG} is returned;  If field is null, then
+   * {@link LegacyNumericType#FLOAT} is assumed for ValueSource.
    */
   private static LegacyNumericType getHashableNumericType(SchemaField field) {
     if (null == field) {
       return LegacyNumericType.FLOAT;
     }
     final LegacyNumericType result = field.getType().getNumericType();
-    return null == result ? FieldType.LegacyNumericType.LONG : result;
+    return null == result ? LegacyNumericType.LONG : result;
   }
 }
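
A minimal sketch (not part of the commit; the helper class is illustrative) of the register-width default chosen above, written against the relocated org.apache.lucene.legacy.LegacyNumericType enum: 32-bit hash inputs (INT and FLOAT) get a slightly smaller default register width than 64-bit ones.

    import org.apache.lucene.legacy.LegacyNumericType;

    final class HllDefaults {
      static int defaultRegwidth(LegacyNumericType hashableNumType) {
        int regwidth = 6; // with a decent hash, plenty for all valid long hashes
        if (LegacyNumericType.FLOAT.equals(hashableNumType) || LegacyNumericType.INT.equals(hashableNumType)) {
          regwidth--;     // 32-bit values can use one bit less
        }
        return regwidth;
      }
    }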

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/request/IntervalFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/IntervalFacets.java b/solr/core/src/java/org/apache/solr/request/IntervalFacets.java
index fedc7fe..db26e12 100644
--- a/solr/core/src/java/org/apache/solr/request/IntervalFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/IntervalFacets.java
@@ -25,7 +25,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
 
-import org.apache.lucene.document.FieldType.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.NumericDocValues;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/request/NumericFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/NumericFacets.java b/solr/core/src/java/org/apache/solr/request/NumericFacets.java
index 1034947..fd85d3f 100644
--- a/solr/core/src/java/org/apache/solr/request/NumericFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/NumericFacets.java
@@ -33,6 +33,7 @@ import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.util.Bits;
@@ -132,7 +133,7 @@ final class NumericFacets {
     mincount = Math.max(mincount, 1);
     final SchemaField sf = searcher.getSchema().getField(fieldName);
     final FieldType ft = sf.getType();
-    final org.apache.lucene.document.FieldType.LegacyNumericType numericType = ft.getNumericType();
+    final LegacyNumericType numericType = ft.getNumericType();
     if (numericType == null) {
       throw new IllegalStateException();
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/BBoxField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/BBoxField.java b/solr/core/src/java/org/apache/solr/schema/BBoxField.java
index e41d3c6..d7fda7c 100644
--- a/solr/core/src/java/org/apache/solr/schema/BBoxField.java
+++ b/solr/core/src/java/org/apache/solr/schema/BBoxField.java
@@ -23,6 +23,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.legacy.LegacyFieldType;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.spatial.bbox.BBoxOverlapRatioValueSource;
 import org.apache.lucene.spatial.bbox.BBoxStrategy;
@@ -141,7 +142,11 @@ public class BBoxField extends AbstractSpatialFieldType<BBoxStrategy> implements
     
     //and annoyingly this Field isn't going to have a docValues format because Solr uses a separate Field for that
     if (solrNumField.hasDocValues()) {
-      luceneType = new org.apache.lucene.document.FieldType(luceneType);
+      if (luceneType instanceof LegacyFieldType) {
+        luceneType = new LegacyFieldType((LegacyFieldType)luceneType);
+      } else {
+        luceneType = new org.apache.lucene.document.FieldType(luceneType);
+      }
       luceneType.setDocValuesType(DocValuesType.NUMERIC);
     }
     return new BBoxStrategy(ctx, fieldName, luceneType);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/EnumField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/EnumField.java b/solr/core/src/java/org/apache/solr/schema/EnumField.java
index 27f3a0a..4820e77 100644
--- a/solr/core/src/java/org/apache/solr/schema/EnumField.java
+++ b/solr/core/src/java/org/apache/solr/schema/EnumField.java
@@ -32,24 +32,25 @@ import javax.xml.xpath.XPathConstants;
 import javax.xml.xpath.XPathExpressionException;
 import javax.xml.xpath.XPathFactory;
 
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.LegacyIntField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyIntField;
+import org.apache.lucene.legacy.LegacyNumericRangeQuery;
+import org.apache.lucene.legacy.LegacyNumericType;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.EnumFieldSource;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocValuesRangeQuery;
-import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.solr.common.EnumFieldValue;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
@@ -234,8 +235,8 @@ public class EnumField extends PrimitiveFieldType {
    * {@inheritDoc}
    */
   @Override
-  public FieldType.LegacyNumericType getNumericType() {
-    return FieldType.LegacyNumericType.INT;
+  public LegacyNumericType getNumericType() {
+    return LegacyNumericType.INT;
   }
 
   /**
@@ -387,7 +388,7 @@ public class EnumField extends PrimitiveFieldType {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown value for enum field: " + value.toString());
 
     String intAsString =  intValue.toString();
-    final FieldType newType = new FieldType();
+    final LegacyFieldType newType = new LegacyFieldType();
 
     newType.setTokenized(field.isTokenized());
     newType.setStored(field.stored());
@@ -397,7 +398,7 @@ public class EnumField extends PrimitiveFieldType {
     newType.setStoreTermVectorOffsets(field.storeTermOffsets());
     newType.setStoreTermVectorPositions(field.storeTermPositions());
     newType.setStoreTermVectorPayloads(field.storeTermPayloads());
-    newType.setNumericType(FieldType.LegacyNumericType.INT);
+    newType.setNumericType(LegacyNumericType.INT);
     newType.setNumericPrecisionStep(DEFAULT_PRECISION_STEP);
 
     final org.apache.lucene.document.Field f;
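
A minimal sketch (not part of the commit; names are illustrative) of building an indexable legacy int field the way EnumField now does it: configure a LegacyFieldType with the INT numeric type and a precision step, freeze it, and pass it to a LegacyIntField.

    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.legacy.LegacyFieldType;
    import org.apache.lucene.legacy.LegacyIntField;
    import org.apache.lucene.legacy.LegacyNumericType;

    final class LegacyIntFields {
      static Field create(String name, int value, boolean stored, int precisionStep) {
        LegacyFieldType type = new LegacyFieldType();
        type.setIndexOptions(IndexOptions.DOCS);      // index the trie-encoded terms
        type.setTokenized(true);                      // values are emitted as a numeric token stream
        type.setOmitNorms(true);
        type.setStored(stored);
        type.setNumericType(LegacyNumericType.INT);
        type.setNumericPrecisionStep(precisionStep);
        type.freeze();
        return new LegacyIntField(name, value, type);
      }
    }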

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/FieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index 6556ddb..32a91d9 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -38,6 +38,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.DocValuesRangeQuery;
 import org.apache.lucene.search.DocValuesRewriteMethod;
@@ -621,7 +622,7 @@ public abstract class FieldType extends FieldProperties {
 
   /** Return the numeric type of this field, or null if this field is not a
    *  numeric field. */
-  public org.apache.lucene.document.FieldType.LegacyNumericType getNumericType() {
+  public LegacyNumericType getNumericType() {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java b/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
index 18d80a3..f6bb782 100644
--- a/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
@@ -20,6 +20,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.lucene.legacy.LegacyFieldType;
+import org.apache.lucene.legacy.LegacyNumericType;
 import org.apache.lucene.spatial.vector.PointVectorStrategy;
 
 /**
@@ -78,8 +80,8 @@ public class SpatialPointVectorFieldType extends AbstractSpatialFieldType<PointV
   }
 
   @Override
-  public org.apache.lucene.document.FieldType.LegacyNumericType getNumericType() {
-    return org.apache.lucene.document.FieldType.LegacyNumericType.DOUBLE;
+  public LegacyNumericType getNumericType() {
+    return LegacyNumericType.DOUBLE;
   }
 
   @Override
@@ -88,8 +90,7 @@ public class SpatialPointVectorFieldType extends AbstractSpatialFieldType<PointV
     if (this.getNumericType() != null) {
       // create strategy based on legacy numerics
       // todo remove in 7.0
-      org.apache.lucene.document.FieldType fieldType =
-          new org.apache.lucene.document.FieldType(PointVectorStrategy.LEGACY_FIELDTYPE);
+      LegacyFieldType fieldType = new LegacyFieldType(PointVectorStrategy.LEGACY_FIELDTYPE);
       fieldType.setNumericPrecisionStep(precisionStep);
       return new PointVectorStrategy(ctx, fieldName, fieldType);
     } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
index edaa0c4..cb409a1 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
@@ -23,13 +23,13 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.legacy.LegacyNumericUtils;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
 import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueDouble;


[5/7] lucene-solr:master: LUCENE-7413: move legacy numeric support to backwards module

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery32.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery32.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery32.java
new file mode 100644
index 0000000..acd0c04
--- /dev/null
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery32.java
@@ -0,0 +1,461 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestNumericRangeQuery32 extends LuceneTestCase {
+  // distance of entries
+  private static int distance;
+  // shift the starting of the values to the left, to also have negative values:
+  private static final int startOffset = - 1 << 15;
+  // number of docs to generate for testing
+  private static int noDocs;
+  
+  private static Directory directory = null;
+  private static IndexReader reader = null;
+  private static IndexSearcher searcher = null;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    noDocs = atLeast(4096);
+    distance = (1 << 30) / noDocs;
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+        newIndexWriterConfig(new MockAnalyzer(random()))
+        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
+        .setMergePolicy(newLogMergePolicy()));
+    
+    final LegacyFieldType storedInt = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
+    storedInt.setStored(true);
+    storedInt.freeze();
+
+    final LegacyFieldType storedInt8 = new LegacyFieldType(storedInt);
+    storedInt8.setNumericPrecisionStep(8);
+
+    final LegacyFieldType storedInt4 = new LegacyFieldType(storedInt);
+    storedInt4.setNumericPrecisionStep(4);
+
+    final LegacyFieldType storedInt2 = new LegacyFieldType(storedInt);
+    storedInt2.setNumericPrecisionStep(2);
+
+    final LegacyFieldType storedIntNone = new LegacyFieldType(storedInt);
+    storedIntNone.setNumericPrecisionStep(Integer.MAX_VALUE);
+
+    final LegacyFieldType unstoredInt = LegacyIntField.TYPE_NOT_STORED;
+
+    final LegacyFieldType unstoredInt8 = new LegacyFieldType(unstoredInt);
+    unstoredInt8.setNumericPrecisionStep(8);
+
+    final LegacyFieldType unstoredInt4 = new LegacyFieldType(unstoredInt);
+    unstoredInt4.setNumericPrecisionStep(4);
+
+    final LegacyFieldType unstoredInt2 = new LegacyFieldType(unstoredInt);
+    unstoredInt2.setNumericPrecisionStep(2);
+
+    LegacyIntField
+      field8 = new LegacyIntField("field8", 0, storedInt8),
+      field4 = new LegacyIntField("field4", 0, storedInt4),
+      field2 = new LegacyIntField("field2", 0, storedInt2),
+      fieldNoTrie = new LegacyIntField("field"+Integer.MAX_VALUE, 0, storedIntNone),
+      ascfield8 = new LegacyIntField("ascfield8", 0, unstoredInt8),
+      ascfield4 = new LegacyIntField("ascfield4", 0, unstoredInt4),
+      ascfield2 = new LegacyIntField("ascfield2", 0, unstoredInt2);
+    
+    Document doc = new Document();
+    // add fields that have a distance, to test general functionality
+    doc.add(field8); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
+    // add ascending fields with a distance of 1, beginning at -noDocs/2, to test correct splitting of ranges and inclusive/exclusive bounds
+    doc.add(ascfield8); doc.add(ascfield4); doc.add(ascfield2);
+    
+    // Add a series of noDocs docs with increasing int values
+    for (int l=0; l<noDocs; l++) {
+      int val=distance*l+startOffset;
+      field8.setIntValue(val);
+      field4.setIntValue(val);
+      field2.setIntValue(val);
+      fieldNoTrie.setIntValue(val);
+
+      val=l-(noDocs/2);
+      ascfield8.setIntValue(val);
+      ascfield4.setIntValue(val);
+      ascfield2.setIntValue(val);
+      writer.addDocument(doc);
+    }
+  
+    reader = writer.getReader();
+    searcher=newSearcher(reader);
+    writer.close();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // set the theoretical maximum term count for 8bit (see docs for the number)
+    // super.tearDown will restore the default
+    BooleanQuery.setMaxClauseCount(3*255*2 + 255);
+  }
+  
+  /** test for both constant score and boolean query rewrites; the other tests only use the constant score mode */
+  private void testRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
+    LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+    for (byte i=0; i<2; i++) {
+      TopDocs topDocs;
+      String type;
+      switch (i) {
+        case 0:
+          type = " (constant score filter rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
+          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+          break;
+        case 1:
+          type = " (constant score boolean rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
+          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+          break;
+        default:
+          return;
+      }
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      assertEquals("Score doc count"+type, count, sd.length );
+      Document doc=searcher.doc(sd[0].doc);
+      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().intValue());
+      doc=searcher.doc(sd[sd.length-1].doc);
+      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().intValue());
+    }
+  }
+
+  @Test
+  public void testRange_8bit() throws Exception {
+    testRange(8);
+  }
+  
+  @Test
+  public void testRange_4bit() throws Exception {
+    testRange(4);
+  }
+  
+  @Test
+  public void testRange_2bit() throws Exception {
+    testRange(2);
+  }
+  
+  @Test
+  public void testOneMatchQuery() throws Exception {
+    LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", 1, sd.length );
+  }
+  
+  private void testLeftOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int upper=(count-1)*distance + (distance/3) + startOffset;
+    LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
+    
+    q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
+    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
+  }
+  
+  @Test
+  public void testLeftOpenRange_8bit() throws Exception {
+    testLeftOpenRange(8);
+  }
+  
+  @Test
+  public void testLeftOpenRange_4bit() throws Exception {
+    testLeftOpenRange(4);
+  }
+  
+  @Test
+  public void testLeftOpenRange_2bit() throws Exception {
+    testLeftOpenRange(2);
+  }
+  
+  private void testRightOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    int lower=(count-1)*distance + (distance/3) +startOffset;
+    LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue());
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
+
+    q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
+    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().intValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue() );
+  }
+  
+  @Test
+  public void testRightOpenRange_8bit() throws Exception {
+    testRightOpenRange(8);
+  }
+  
+  @Test
+  public void testRightOpenRange_4bit() throws Exception {
+    testRightOpenRange(4);
+  }
+  
+  @Test
+  public void testRightOpenRange_2bit() throws Exception {
+    testRightOpenRange(2);
+  }
+  
+  @Test
+  public void testInfiniteValues() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+      newIndexWriterConfig(new MockAnalyzer(random())));
+    Document doc = new Document();
+    doc.add(new LegacyFloatField("float", Float.NEGATIVE_INFINITY, Field.Store.NO));
+    doc.add(new LegacyIntField("int", Integer.MIN_VALUE, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LegacyFloatField("float", Float.POSITIVE_INFINITY, Field.Store.NO));
+    doc.add(new LegacyIntField("int", Integer.MAX_VALUE, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LegacyFloatField("float", 0.0f, Field.Store.NO));
+    doc.add(new LegacyIntField("int", 0, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    for (float f : TestLegacyNumericUtils.FLOAT_NANs) {
+      doc = new Document();
+      doc.add(new LegacyFloatField("float", f, Field.Store.NO));
+      writer.addDocument(doc);
+    }
+    
+    writer.close();
+    
+    IndexReader r = DirectoryReader.open(dir);
+    IndexSearcher s = newSearcher(r);
+    
+    Query q= LegacyNumericRangeQuery.newIntRange("int", null, null, true, true);
+    TopDocs topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q= LegacyNumericRangeQuery.newIntRange("int", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", null, null, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newFloatRange("float", Float.NaN, Float.NaN, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", TestLegacyNumericUtils.FLOAT_NANs.length,  topDocs.scoreDocs.length );
+
+    r.close();
+    dir.close();
+  }
+  
+  private void testRangeSplit(int precisionStep) throws Exception {
+    String field="ascfield"+precisionStep;
+    // 10 random tests
+    int num = TestUtil.nextInt(random(), 10, 20);
+    for (int  i =0;  i< num; i++) {
+      int lower=(int)(random().nextDouble()*noDocs - noDocs/2);
+      int upper=(int)(random().nextDouble()*noDocs - noDocs/2);
+      if (lower>upper) {
+        int a=lower; lower=upper; upper=a;
+      }
+      // test inclusive range
+      Query tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
+      TopDocs tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+      // test exclusive range
+      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
+      // test left exclusive range
+      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+      // test right exclusive range
+      tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+    }
+  }
+
+  @Test
+  public void testRangeSplit_8bit() throws Exception {
+    testRangeSplit(8);
+  }
+  
+  @Test
+  public void testRangeSplit_4bit() throws Exception {
+    testRangeSplit(4);
+  }
+  
+  @Test
+  public void testRangeSplit_2bit() throws Exception {
+    testRangeSplit(2);
+  }
+  
+  /** we fake a float test using int2float conversion of LegacyNumericUtils */
+  private void testFloatRange(int precisionStep) throws Exception {
+    final String field="ascfield"+precisionStep;
+    final int lower=-1000, upper=+2000;
+    
+    Query tq= LegacyNumericRangeQuery.newFloatRange(field, precisionStep,
+        NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
+    TopDocs tTopDocs = searcher.search(tq, 1);
+    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+  }
+
+  @Test
+  public void testFloatRange_8bit() throws Exception {
+    testFloatRange(8);
+  }
+  
+  @Test
+  public void testFloatRange_4bit() throws Exception {
+    testFloatRange(4);
+  }
+  
+  @Test
+  public void testFloatRange_2bit() throws Exception {
+    testFloatRange(2);
+  }
+  
+  @Test
+  public void testEqualsAndHash() throws Exception {
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test2", 4, 10, 20, false, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test3", 4, 10, 20, true, false));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test4", 4, 10, 20, false, false));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test5", 4, 10, null, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test6", 4, null, 20, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test7", 4, null, null, true, true));
+    QueryUtils.checkEqual(
+      LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test9", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test9", 8, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test10a", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test10b", 4, 10, 20, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test11", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test11", 4, 20, 10, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, false, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newIntRange("test13", 4, 10, 20, true, true),
+      LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
+    );
+    // the following produces a hash collision, because Long and Integer have the same hashcode, so only test equality:
+    Query q1 = LegacyNumericRangeQuery.newIntRange("test14", 4, 10, 20, true, true);
+    Query q2 = LegacyNumericRangeQuery.newLongRange("test14", 4, 10L, 20L, true, true);
+    assertFalse(q1.equals(q2));
+    assertFalse(q2.equals(q1));
+  }
+  
+}
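
A compact usage sketch (not part of the commit; names and values are illustrative, and the analyzer choice is left to the caller) showing the relocated classes exercised by the test above working together: index trie-encoded ints with LegacyIntField, then match them with LegacyNumericRangeQuery.

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.legacy.LegacyIntField;
    import org.apache.lucene.legacy.LegacyNumericRangeQuery;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    final class LegacyIntRangeExample {
      /** Indexes 0..99 and counts the docs in the inclusive range [10, 20] (expected: 11). */
      static long countInRange(Analyzer analyzer) throws Exception {
        Directory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
          for (int i = 0; i < 100; i++) {
            Document doc = new Document();
            doc.add(new LegacyIntField("value", i, Field.Store.NO)); // trie-encoded int terms
            writer.addDocument(doc);
          }
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
          IndexSearcher searcher = new IndexSearcher(reader);
          TopDocs hits = searcher.search(
              LegacyNumericRangeQuery.newIntRange("value", 10, 20, true, true), 100);
          return hits.totalHits;
        }
      }
    }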

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery64.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery64.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery64.java
new file mode 100644
index 0000000..b3ce55a
--- /dev/null
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericRangeQuery64.java
@@ -0,0 +1,490 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestNumericRangeQuery64 extends LuceneTestCase {
+  // distance of entries
+  private static long distance;
+  // shift the starting of the values to the left, to also have negative values:
+  private static final long startOffset = - 1L << 31;
+  // number of docs to generate for testing
+  private static int noDocs;
+  
+  private static Directory directory = null;
+  private static IndexReader reader = null;
+  private static IndexSearcher searcher = null;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    noDocs = atLeast(4096);
+    distance = (1L << 60) / noDocs;
+    directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
+        newIndexWriterConfig(new MockAnalyzer(random()))
+        .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
+        .setMergePolicy(newLogMergePolicy()));
+
+    final LegacyFieldType storedLong = new LegacyFieldType(LegacyLongField.TYPE_NOT_STORED);
+    storedLong.setStored(true);
+    storedLong.freeze();
+
+    final LegacyFieldType storedLong8 = new LegacyFieldType(storedLong);
+    storedLong8.setNumericPrecisionStep(8);
+
+    final LegacyFieldType storedLong4 = new LegacyFieldType(storedLong);
+    storedLong4.setNumericPrecisionStep(4);
+
+    final LegacyFieldType storedLong6 = new LegacyFieldType(storedLong);
+    storedLong6.setNumericPrecisionStep(6);
+
+    final LegacyFieldType storedLong2 = new LegacyFieldType(storedLong);
+    storedLong2.setNumericPrecisionStep(2);
+
+    final LegacyFieldType storedLongNone = new LegacyFieldType(storedLong);
+    storedLongNone.setNumericPrecisionStep(Integer.MAX_VALUE);
+
+    final LegacyFieldType unstoredLong = LegacyLongField.TYPE_NOT_STORED;
+
+    final LegacyFieldType unstoredLong8 = new LegacyFieldType(unstoredLong);
+    unstoredLong8.setNumericPrecisionStep(8);
+
+    final LegacyFieldType unstoredLong6 = new LegacyFieldType(unstoredLong);
+    unstoredLong6.setNumericPrecisionStep(6);
+
+    final LegacyFieldType unstoredLong4 = new LegacyFieldType(unstoredLong);
+    unstoredLong4.setNumericPrecisionStep(4);
+
+    final LegacyFieldType unstoredLong2 = new LegacyFieldType(unstoredLong);
+    unstoredLong2.setNumericPrecisionStep(2);
+
+    LegacyLongField
+      field8 = new LegacyLongField("field8", 0L, storedLong8),
+      field6 = new LegacyLongField("field6", 0L, storedLong6),
+      field4 = new LegacyLongField("field4", 0L, storedLong4),
+      field2 = new LegacyLongField("field2", 0L, storedLong2),
+      fieldNoTrie = new LegacyLongField("field"+Integer.MAX_VALUE, 0L, storedLongNone),
+      ascfield8 = new LegacyLongField("ascfield8", 0L, unstoredLong8),
+      ascfield6 = new LegacyLongField("ascfield6", 0L, unstoredLong6),
+      ascfield4 = new LegacyLongField("ascfield4", 0L, unstoredLong4),
+      ascfield2 = new LegacyLongField("ascfield2", 0L, unstoredLong2);
+
+    Document doc = new Document();
+    // add fields that have a distance, to test general functionality
+    doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
+    // add ascending fields with a distance of 1, beginning at -noDocs/2, to test correct splitting of ranges and inclusive/exclusive bounds
+    doc.add(ascfield8); doc.add(ascfield6); doc.add(ascfield4); doc.add(ascfield2);
+    
+    // Add a series of noDocs docs with increasing long values by updating the fields
+    for (int l=0; l<noDocs; l++) {
+      long val=distance*l+startOffset;
+      field8.setLongValue(val);
+      field6.setLongValue(val);
+      field4.setLongValue(val);
+      field2.setLongValue(val);
+      fieldNoTrie.setLongValue(val);
+
+      val=l-(noDocs/2);
+      ascfield8.setLongValue(val);
+      ascfield6.setLongValue(val);
+      ascfield4.setLongValue(val);
+      ascfield2.setLongValue(val);
+      writer.addDocument(doc);
+    }
+    reader = writer.getReader();
+    searcher=newSearcher(reader);
+    writer.close();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    searcher = null;
+    reader.close();
+    reader = null;
+    directory.close();
+    directory = null;
+  }
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    // set the theoretical maximum term count for 8bit (see docs for the number)
+    // super.tearDown will restore the default
+    BooleanQuery.setMaxClauseCount(7*255*2 + 255);
+  }
+  
+  /** test for both constant score and boolean query rewrites; the other tests only use the constant score mode */
+  private void testRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
+    LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+    for (byte i=0; i<2; i++) {
+      TopDocs topDocs;
+      String type;
+      switch (i) {
+        case 0:
+          type = " (constant score filter rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
+          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+          break;
+        case 1:
+          type = " (constant score boolean rewrite)";
+          q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
+          topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+          break;
+        default:
+          return;
+      }
+      ScoreDoc[] sd = topDocs.scoreDocs;
+      assertNotNull(sd);
+      assertEquals("Score doc count"+type, count, sd.length );
+      Document doc=searcher.doc(sd[0].doc);
+      assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().longValue() );
+      doc=searcher.doc(sd[sd.length-1].doc);
+      assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+    }
+  }
+
+  @Test
+  public void testRange_8bit() throws Exception {
+    testRange(8);
+  }
+  
+  @Test
+  public void testRange_6bit() throws Exception {
+    testRange(6);
+  }
+  
+  @Test
+  public void testRange_4bit() throws Exception {
+    testRange(4);
+  }
+  
+  @Test
+  public void testRange_2bit() throws Exception {
+    testRange(2);
+  }
+  
+  @Test
+  public void testOneMatchQuery() throws Exception {
+    LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", 1, sd.length );
+  }
+  
+  private void testLeftOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long upper=(count-1)*distance + (distance/3) + startOffset;
+    LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+
+    q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
+    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+  }
+  
+  @Test
+  public void testLeftOpenRange_8bit() throws Exception {
+    testLeftOpenRange(8);
+  }
+  
+  @Test
+  public void testLeftOpenRange_6bit() throws Exception {
+    testLeftOpenRange(6);
+  }
+  
+  @Test
+  public void testLeftOpenRange_4bit() throws Exception {
+    testLeftOpenRange(4);
+  }
+  
+  @Test
+  public void testLeftOpenRange_2bit() throws Exception {
+    testLeftOpenRange(2);
+  }
+  
+  private void testRightOpenRange(int precisionStep) throws Exception {
+    String field="field"+precisionStep;
+    int count=3000;
+    long lower=(count-1)*distance + (distance/3) +startOffset;
+    LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, true);
+    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    ScoreDoc[] sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    Document doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+
+    q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
+    topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
+    sd = topDocs.scoreDocs;
+    assertNotNull(sd);
+    assertEquals("Score doc count", noDocs-count, sd.length );
+    doc=searcher.doc(sd[0].doc);
+    assertEquals("First doc", count*distance+startOffset, doc.getField(field).numericValue().longValue() );
+    doc=searcher.doc(sd[sd.length-1].doc);
+    assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
+  }
+  
+  @Test
+  public void testRightOpenRange_8bit() throws Exception {
+    testRightOpenRange(8);
+  }
+  
+  @Test
+  public void testRightOpenRange_6bit() throws Exception {
+    testRightOpenRange(6);
+  }
+  
+  @Test
+  public void testRightOpenRange_4bit() throws Exception {
+    testRightOpenRange(4);
+  }
+  
+  @Test
+  public void testRightOpenRange_2bit() throws Exception {
+    testRightOpenRange(2);
+  }
+  
+  @Test
+  public void testInfiniteValues() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
+      newIndexWriterConfig(new MockAnalyzer(random())));
+    Document doc = new Document();
+    doc.add(new LegacyDoubleField("double", Double.NEGATIVE_INFINITY, Field.Store.NO));
+    doc.add(new LegacyLongField("long", Long.MIN_VALUE, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LegacyDoubleField("double", Double.POSITIVE_INFINITY, Field.Store.NO));
+    doc.add(new LegacyLongField("long", Long.MAX_VALUE, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    doc = new Document();
+    doc.add(new LegacyDoubleField("double", 0.0, Field.Store.NO));
+    doc.add(new LegacyLongField("long", 0L, Field.Store.NO));
+    writer.addDocument(doc);
+    
+    for (double d : TestLegacyNumericUtils.DOUBLE_NANs) {
+      doc = new Document();
+      doc.add(new LegacyDoubleField("double", d, Field.Store.NO));
+      writer.addDocument(doc);
+    }
+    
+    writer.close();
+    
+    IndexReader r = DirectoryReader.open(dir);
+    IndexSearcher s = newSearcher(r);
+    
+    Query q= LegacyNumericRangeQuery.newLongRange("long", null, null, true, true);
+    TopDocs topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q= LegacyNumericRangeQuery.newLongRange("long", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+    
+    q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 3,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, false, false);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", 1,  topDocs.scoreDocs.length );
+
+    q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NaN, Double.NaN, true, true);
+    topDocs = s.search(q, 10);
+    assertEquals("Score doc count", TestLegacyNumericUtils.DOUBLE_NANs.length,  topDocs.scoreDocs.length );
+
+    r.close();
+    dir.close();
+  }
+  
+  private void testRangeSplit(int precisionStep) throws Exception {
+    String field="ascfield"+precisionStep;
+    // 10 random tests
+    int num = TestUtil.nextInt(random(), 10, 20);
+    for (int i = 0; i < num; i++) {
+      long lower=(long)(random().nextDouble()*noDocs - noDocs/2);
+      long upper=(long)(random().nextDouble()*noDocs - noDocs/2);
+      if (lower>upper) {
+        long a=lower; lower=upper; upper=a;
+      }
+      // test inclusive range
+      Query tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
+      TopDocs tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+      // test exclusive range
+      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
+      // test left exclusive range
+      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+      // test right exclusive range
+      tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
+      tTopDocs = searcher.search(tq, 1);
+      assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
+    }
+  }
+
+  @Test
+  public void testRangeSplit_8bit() throws Exception {
+    testRangeSplit(8);
+  }
+  
+  @Test
+  public void testRangeSplit_6bit() throws Exception {
+    testRangeSplit(6);
+  }
+  
+  @Test
+  public void testRangeSplit_4bit() throws Exception {
+    testRangeSplit(4);
+  }
+  
+  @Test
+  public void testRangeSplit_2bit() throws Exception {
+    testRangeSplit(2);
+  }
+  
+  /** we fake a double test using long2double conversion of LegacyNumericUtils */
+  private void testDoubleRange(int precisionStep) throws Exception {
+    final String field="ascfield"+precisionStep;
+    final long lower=-1000L, upper=+2000L;
+    
+    Query tq= LegacyNumericRangeQuery.newDoubleRange(field, precisionStep,
+        NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
+    TopDocs tTopDocs = searcher.search(tq, 1);
+    assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
+  }
+
+  @Test
+  public void testDoubleRange_8bit() throws Exception {
+    testDoubleRange(8);
+  }
+  
+  @Test
+  public void testDoubleRange_6bit() throws Exception {
+    testDoubleRange(6);
+  }
+  
+  @Test
+  public void testDoubleRange_4bit() throws Exception {
+    testDoubleRange(4);
+  }
+  
+  @Test
+  public void testDoubleRange_2bit() throws Exception {
+    testDoubleRange(2);
+  }
+  
+  @Test
+  public void testEqualsAndHash() throws Exception {
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test2", 4, 10L, 20L, false, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test3", 4, 10L, 20L, true, false));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test4", 4, 10L, 20L, false, false));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test5", 4, 10L, null, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test6", 4, null, 20L, true, true));
+    QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test7", 4, null, null, true, true));
+    QueryUtils.checkEqual(
+      LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test9", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test9", 8, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test10a", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test10b", 4, 10L, 20L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test11", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test11", 4, 20L, 10L, true, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, false, true)
+    );
+    QueryUtils.checkUnequal(
+      LegacyNumericRangeQuery.newLongRange("test13", 4, 10L, 20L, true, true),
+      LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
+    );
+     // difference to int range is tested in TestNumericRangeQuery32
+  }
+}
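
For reference, a construction-only sketch (not part of this patch) of the bound arithmetic the assertions above check: for a field containing every long from lower to upper exactly once, the four inclusive/exclusive combinations match the counts noted in the comments. The field name and precision step are illustrative; the query factory is the relocated org.apache.lucene.legacy.LegacyNumericRangeQuery.

    import org.apache.lucene.legacy.LegacyNumericRangeQuery;
    import org.apache.lucene.search.Query;

    public class RangeBoundsSketch {
      public static void main(String[] args) {
        final String field = "field8";          // illustrative: a field indexed with precisionStep=8
        final long lower = -1000L, upper = 2000L;

        // both ends inclusive: upper - lower + 1 values match
        Query incl = LegacyNumericRangeQuery.newLongRange(field, 8, lower, upper, true, true);
        // both ends exclusive: max(upper - lower - 1, 0) values match
        Query excl = LegacyNumericRangeQuery.newLongRange(field, 8, lower, upper, false, false);
        // one end exclusive: upper - lower values match
        Query halfOpen = LegacyNumericRangeQuery.newLongRange(field, 8, lower, upper, false, true);

        System.out.println(incl + "\n" + excl + "\n" + halfOpen);
      }
    }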

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericTokenStream.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericTokenStream.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericTokenStream.java
new file mode 100644
index 0000000..a507af0
--- /dev/null
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestNumericTokenStream.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.legacy;
+
+
+import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.legacy.LegacyNumericTokenStream;
+import org.apache.lucene.legacy.LegacyNumericUtils;
+import org.apache.lucene.legacy.LegacyNumericTokenStream.LegacyNumericTermAttributeImpl;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl;
+
+@Deprecated
+public class TestNumericTokenStream extends BaseTokenStreamTestCase {
+
+  final long lvalue = random().nextLong();
+  final int ivalue = random().nextInt();
+
+  public void testLongStream() throws Exception {
+    @SuppressWarnings("resource")
+    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setLongValue(lvalue);
+    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
+    assertNotNull(bytesAtt);
+    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
+    assertNotNull(typeAtt);
+    final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
+    assertNotNull(numericAtt);
+    stream.reset();
+    assertEquals(64, numericAtt.getValueSize());
+    for (int shift=0; shift<64; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
+      assertTrue("New token is available", stream.incrementToken());
+      assertEquals("Shift value wrong", shift, numericAtt.getShift());
+      assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), LegacyNumericUtils.prefixCodedToLong(bytesAtt.getBytesRef()));
+      assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
+      assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
+    }
+    assertFalse("More tokens available", stream.incrementToken());
+    stream.end();
+    stream.close();
+  }
+
+  public void testIntStream() throws Exception {
+    @SuppressWarnings("resource")
+    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setIntValue(ivalue);
+    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
+    assertNotNull(bytesAtt);
+    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
+    assertNotNull(typeAtt);
+    final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
+    assertNotNull(numericAtt);
+    stream.reset();
+    assertEquals(32, numericAtt.getValueSize());
+    for (int shift=0; shift<32; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
+      assertTrue("New token is available", stream.incrementToken());
+      assertEquals("Shift value wrong", shift, numericAtt.getShift());
+      assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), LegacyNumericUtils.prefixCodedToInt(bytesAtt.getBytesRef()));
+      assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
+      assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
+    }
+    assertFalse("More tokens available", stream.incrementToken());
+    stream.end();
+    stream.close();
+  }
+  
+  public void testNotInitialized() throws Exception {
+    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
+    
+    expectThrows(IllegalStateException.class, () -> {
+      stream.reset();
+    });
+
+    expectThrows(IllegalStateException.class, () -> {
+      stream.incrementToken();
+    });
+    
+    stream.close();
+  }
+  
+  public static interface TestAttribute extends CharTermAttribute {}
+  public static class TestAttributeImpl extends CharTermAttributeImpl implements TestAttribute {}
+  
+  public void testCTA() throws Exception {
+    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
+    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
+      stream.addAttribute(CharTermAttribute.class);
+    });
+    assertTrue(e.getMessage().startsWith("LegacyNumericTokenStream does not support"));
+
+    e = expectThrows(IllegalArgumentException.class, () -> {
+      stream.addAttribute(TestAttribute.class);
+    });
+    assertTrue(e.getMessage().startsWith("LegacyNumericTokenStream does not support"));
+    stream.close();
+  }
+  
+  /** LUCENE-7027 */
+  public void testCaptureStateAfterExhausted() throws Exception {
+    // default precstep
+    try (LegacyNumericTokenStream stream=new LegacyNumericTokenStream()) {
+      // int
+      stream.setIntValue(ivalue);
+      stream.reset();
+      while (stream.incrementToken());
+      stream.captureState();
+      stream.end();
+      stream.captureState();
+      // long
+      stream.setLongValue(lvalue);
+      stream.reset();
+      while (stream.incrementToken());
+      stream.captureState();
+      stream.end();
+      stream.captureState();
+    }
+    // huge precstep
+    try (LegacyNumericTokenStream stream=new LegacyNumericTokenStream(Integer.MAX_VALUE)) {
+      // int
+      stream.setIntValue(ivalue);
+      stream.reset();
+      while (stream.incrementToken());
+      stream.captureState();
+      stream.end();
+      stream.captureState();
+      // long
+      stream.setLongValue(lvalue);
+      stream.reset();
+      while (stream.incrementToken());
+      stream.captureState();
+      stream.end();
+      stream.captureState();
+    }
+  }
+  
+  public void testAttributeClone() throws Exception {
+    LegacyNumericTermAttributeImpl att = new LegacyNumericTermAttributeImpl();
+    att.init(lvalue, 64, 8, 0); // set some value, to make getBytesRef() work
+    LegacyNumericTermAttributeImpl copy = assertCloneIsEqual(att);
+    assertNotSame(att.getBytesRef(), copy.getBytesRef());
+    LegacyNumericTermAttributeImpl copy2 = assertCopyIsEqual(att);
+    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
+    
+    // LUCENE-7027 test
+    att.init(lvalue, 64, 8, 64); // Exhausted TokenStream -> should return empty BytesRef
+    assertEquals(new BytesRef(), att.getBytesRef());
+    copy = assertCloneIsEqual(att);
+    assertEquals(new BytesRef(), copy.getBytesRef());
+    assertNotSame(att.getBytesRef(), copy.getBytesRef());
+    copy2 = assertCopyIsEqual(att);
+    assertEquals(new BytesRef(), copy2.getBytesRef());
+    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
+  }
+  
+  public static <T extends AttributeImpl> T assertCloneIsEqual(T att) {
+    @SuppressWarnings("unchecked")
+    T clone = (T) att.clone();
+    assertEquals("Clone must be equal", att, clone);
+    assertEquals("Clone's hashcode must be equal", att.hashCode(), clone.hashCode());
+    return clone;
+  }
+
+  public static <T extends AttributeImpl> T assertCopyIsEqual(T att) throws Exception {
+    @SuppressWarnings("unchecked")
+    T copy = (T) att.getClass().newInstance();
+    att.copyTo(copy);
+    assertEquals("Copied instance must be equal", att, copy);
+    assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
+    return copy;
+  }
+  
+}
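
Outside the test framework, the same token enumeration can be inspected directly. This standalone sketch (not part of the patch) prints the shift, raw value, and decoded prefix-coded term for each emitted token, using the classes at their new org.apache.lucene.legacy location.

    import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
    import org.apache.lucene.legacy.LegacyNumericTokenStream;
    import org.apache.lucene.legacy.LegacyNumericUtils;

    public class InspectNumericTokens {
      public static void main(String[] args) throws Exception {
        try (LegacyNumericTokenStream stream = new LegacyNumericTokenStream(8).setLongValue(123456789L)) {
          TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
          LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt =
              stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
          stream.reset();
          while (stream.incrementToken()) {
            // each token is the value with the lowest `shift` bits cleared
            System.out.println("shift=" + numericAtt.getShift()
                + " raw=" + numericAtt.getRawValue()
                + " decoded=" + LegacyNumericUtils.prefixCodedToLong(bytesAtt.getBytesRef()));
          }
          stream.end();
        }
      }
    }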

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/analysis/LegacyNumericTokenStream.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/LegacyNumericTokenStream.java b/lucene/core/src/java/org/apache/lucene/analysis/LegacyNumericTokenStream.java
deleted file mode 100644
index 19f7d37..0000000
--- a/lucene/core/src/java/org/apache/lucene/analysis/LegacyNumericTokenStream.java
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.analysis;
-
-
-import java.util.Objects;
-
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Attribute;
-import org.apache.lucene.util.AttributeFactory;
-import org.apache.lucene.util.AttributeImpl;
-import org.apache.lucene.util.AttributeReflector;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LegacyNumericUtils;
-import org.apache.lucene.util.NumericUtils;
-
-/**
- * <b>Expert:</b> This class provides a {@link TokenStream}
- * for indexing numeric values that can be used by {@link
- * org.apache.lucene.search.LegacyNumericRangeQuery}.
- *
- * <p>Note that for simple usage, {@link org.apache.lucene.document.LegacyIntField}, {@link
- * org.apache.lucene.document.LegacyLongField}, {@link org.apache.lucene.document.LegacyFloatField} or {@link org.apache.lucene.document.LegacyDoubleField} is
- * recommended.  These fields disable norms and
- * term freqs, as they are not usually needed during
- * searching.  If you need to change these settings, you
- * should use this class.
- *
- * <p>Here's an example usage, for an <code>int</code> field:
- *
- * <pre class="prettyprint">
- *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
- *  fieldType.setOmitNorms(true);
- *  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
- *  Field field = new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value), fieldType);
- *  document.add(field);
- * </pre>
- *
- * <p>For optimal performance, re-use the TokenStream and Field instance
- * for more than one document:
- *
- * <pre class="prettyprint">
- *  LegacyNumericTokenStream stream = new LegacyNumericTokenStream(precisionStep);
- *  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
- *  fieldType.setOmitNorms(true);
- *  fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
- *  Field field = new Field(name, stream, fieldType);
- *  Document document = new Document();
- *  document.add(field);
- *
- *  for(all documents) {
- *    stream.setIntValue(value)
- *    writer.addDocument(document);
- *  }
- * </pre>
- *
- * <p>This stream is not intended to be used in analyzers;
- * it's more for iterating the different precisions during
- * indexing a specific numeric value.</p>
-
- * <p><b>NOTE</b>: as token streams are only consumed once
- * the document is added to the index, if you index more
- * than one numeric field, use a separate <code>LegacyNumericTokenStream</code>
- * instance for each.</p>
- *
- * <p>See {@link org.apache.lucene.search.LegacyNumericRangeQuery} for more details on the
- * <a
- * href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * parameter as well as how numeric fields work under the hood.</p>
- *
- * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
- *
- * @since 2.9
- */
-@Deprecated
-public final class LegacyNumericTokenStream extends TokenStream {
-
-  /** The full precision token gets this token type assigned. */
-  public static final String TOKEN_TYPE_FULL_PREC  = "fullPrecNumeric";
-
-  /** The lower precision tokens gets this token type assigned. */
-  public static final String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
-  
-  /** <b>Expert:</b> Use this attribute to get the details of the currently generated token.
-   * @lucene.experimental
-   * @since 4.0
-   */
-  public interface LegacyNumericTermAttribute extends Attribute {
-    /** Returns current shift value, undefined before first token */
-    int getShift();
-    /** Returns current token's raw value as {@code long} with all {@link #getShift} applied, undefined before first token */
-    long getRawValue();
-    /** Returns value size in bits (32 for {@code float}, {@code int}; 64 for {@code double}, {@code long}) */
-    int getValueSize();
-    
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    void init(long value, int valSize, int precisionStep, int shift);
-
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    void setShift(int shift);
-
-    /** <em>Don't call this method!</em>
-      * @lucene.internal */
-    int incShift();
-  }
-  
-  // just a wrapper to prevent adding CTA
-  private static final class NumericAttributeFactory extends AttributeFactory {
-    private final AttributeFactory delegate;
-
-    NumericAttributeFactory(AttributeFactory delegate) {
-      this.delegate = delegate;
-    }
-  
-    @Override
-    public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
-      if (CharTermAttribute.class.isAssignableFrom(attClass))
-        throw new IllegalArgumentException("LegacyNumericTokenStream does not support CharTermAttribute.");
-      return delegate.createAttributeInstance(attClass);
-    }
-  }
-
-  /** Implementation of {@link org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttribute}.
-   * @lucene.internal
-   * @since 4.0
-   */
-  public static final class LegacyNumericTermAttributeImpl extends AttributeImpl implements LegacyNumericTermAttribute,TermToBytesRefAttribute {
-    private long value = 0L;
-    private int valueSize = 0, shift = 0, precisionStep = 0;
-    private BytesRefBuilder bytes = new BytesRefBuilder();
-    
-    /** 
-     * Creates, but does not yet initialize this attribute instance
-     * @see #init(long, int, int, int)
-     */
-    public LegacyNumericTermAttributeImpl() {}
-
-    @Override
-    public BytesRef getBytesRef() {
-      assert valueSize == 64 || valueSize == 32;
-      if (shift >= valueSize) {
-        bytes.clear();
-      } else if (valueSize == 64) {
-        LegacyNumericUtils.longToPrefixCoded(value, shift, bytes);
-      } else {
-        LegacyNumericUtils.intToPrefixCoded((int) value, shift, bytes);
-      }
-      return bytes.get();
-    }
-
-    @Override
-    public int getShift() { return shift; }
-    @Override
-    public void setShift(int shift) { this.shift = shift; }
-    @Override
-    public int incShift() {
-      return (shift += precisionStep);
-    }
-
-    @Override
-    public long getRawValue() { return value  & ~((1L << shift) - 1L); }
-    @Override
-    public int getValueSize() { return valueSize; }
-
-    @Override
-    public void init(long value, int valueSize, int precisionStep, int shift) {
-      this.value = value;
-      this.valueSize = valueSize;
-      this.precisionStep = precisionStep;
-      this.shift = shift;
-    }
-
-    @Override
-    public void clear() {
-      // this attribute has no contents to clear!
-      // we keep it untouched as it's fully controlled by outer class.
-    }
-    
-    @Override
-    public void reflectWith(AttributeReflector reflector) {
-      reflector.reflect(TermToBytesRefAttribute.class, "bytes", getBytesRef());
-      reflector.reflect(LegacyNumericTermAttribute.class, "shift", shift);
-      reflector.reflect(LegacyNumericTermAttribute.class, "rawValue", getRawValue());
-      reflector.reflect(LegacyNumericTermAttribute.class, "valueSize", valueSize);
-    }
-  
-    @Override
-    public void copyTo(AttributeImpl target) {
-      final LegacyNumericTermAttribute a = (LegacyNumericTermAttribute) target;
-      a.init(value, valueSize, precisionStep, shift);
-    }
-    
-    @Override
-    public LegacyNumericTermAttributeImpl clone() {
-      LegacyNumericTermAttributeImpl t = (LegacyNumericTermAttributeImpl)super.clone();
-      // Do a deep clone
-      t.bytes = new BytesRefBuilder();
-      t.bytes.copyBytes(getBytesRef());
-      return t;
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(precisionStep, shift, value, valueSize);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (this == obj) return true;
-      if (obj == null) return false;
-      if (getClass() != obj.getClass()) return false;
-      LegacyNumericTermAttributeImpl other = (LegacyNumericTermAttributeImpl) obj;
-      if (precisionStep != other.precisionStep) return false;
-      if (shift != other.shift) return false;
-      if (value != other.value) return false;
-      if (valueSize != other.valueSize) return false;
-      return true;
-    }
-  }
-  
-  /**
-   * Creates a token stream for numeric values using the default <code>precisionStep</code>
-   * {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16). The stream is not yet initialized,
-   * before using set a value using the various set<em>???</em>Value() methods.
-   */
-  public LegacyNumericTokenStream() {
-    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, LegacyNumericUtils.PRECISION_STEP_DEFAULT);
-  }
-  
-  /**
-   * Creates a token stream for numeric values with the specified
-   * <code>precisionStep</code>. The stream is not yet initialized,
-   * before using set a value using the various set<em>???</em>Value() methods.
-   */
-  public LegacyNumericTokenStream(final int precisionStep) {
-    this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, precisionStep);
-  }
-
-  /**
-   * Expert: Creates a token stream for numeric values with the specified
-   * <code>precisionStep</code> using the given
-   * {@link org.apache.lucene.util.AttributeFactory}.
-   * The stream is not yet initialized,
-   * before using set a value using the various set<em>???</em>Value() methods.
-   */
-  public LegacyNumericTokenStream(AttributeFactory factory, final int precisionStep) {
-    super(new NumericAttributeFactory(factory));
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    this.precisionStep = precisionStep;
-    numericAtt.setShift(-precisionStep);
-  }
-
-  /**
-   * Initializes the token stream with the supplied <code>long</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
-   * @return this instance, because of this you can use it the following way:
-   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setLongValue(value))</code>
-   */
-  public LegacyNumericTokenStream setLongValue(final long value) {
-    numericAtt.init(value, valSize = 64, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>int</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
-   * @return this instance, because of this you can use it the following way:
-   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value))</code>
-   */
-  public LegacyNumericTokenStream setIntValue(final int value) {
-    numericAtt.init(value, valSize = 32, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>double</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
-   * @return this instance, because of this you can use it the following way:
-   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setDoubleValue(value))</code>
-   */
-  public LegacyNumericTokenStream setDoubleValue(final double value) {
-    numericAtt.init(NumericUtils.doubleToSortableLong(value), valSize = 64, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  /**
-   * Initializes the token stream with the supplied <code>float</code> value.
-   * @param value the value, for which this TokenStream should enumerate tokens.
-   * @return this instance, because of this you can use it the following way:
-   * <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setFloatValue(value))</code>
-   */
-  public LegacyNumericTokenStream setFloatValue(final float value) {
-    numericAtt.init(NumericUtils.floatToSortableInt(value), valSize = 32, precisionStep, -precisionStep);
-    return this;
-  }
-  
-  @Override
-  public void reset() {
-    if (valSize == 0)
-      throw new IllegalStateException("call set???Value() before usage");
-    numericAtt.setShift(-precisionStep);
-  }
-
-  @Override
-  public boolean incrementToken() {
-    if (valSize == 0)
-      throw new IllegalStateException("call set???Value() before usage");
-    
-    // this will only clear all other attributes in this TokenStream
-    clearAttributes();
-
-    final int shift = numericAtt.incShift();
-    typeAtt.setType((shift == 0) ? TOKEN_TYPE_FULL_PREC : TOKEN_TYPE_LOWER_PREC);
-    posIncrAtt.setPositionIncrement((shift == 0) ? 1 : 0);
-    return (shift < valSize);
-  }
-
-  /** Returns the precision step. */
-  public int getPrecisionStep() {
-    return precisionStep;
-  }
-
-  @Override
-  public String toString() {
-    // We override default because it can throw cryptic "illegal shift value":
-    return getClass().getSimpleName() + "(precisionStep=" + precisionStep + " valueSize=" + numericAtt.getValueSize() + " shift=" + numericAtt.getShift() + ")";
-  }
-  
-  // members
-  private final LegacyNumericTermAttribute numericAtt = addAttribute(LegacyNumericTermAttribute.class);
-  private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
-  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
-  
-  private int valSize = 0; // valSize==0 means not initialized
-  private final int precisionStep;
-}
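
The deprecation note in the removed class points at the points API. A minimal migration sketch (illustrative field name, not part of this commit): a LongPoint replaces the hand-built token stream, an optional NumericDocValuesField covers sorting, and range queries come from LongPoint.newRangeQuery with no precisionStep to tune.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.search.Query;

    public class PointMigrationSketch {
      public static void main(String[] args) {
        Document doc = new Document();
        doc.add(new LongPoint("timestamp", 1471440000000L));              // indexed for range queries
        doc.add(new NumericDocValuesField("timestamp", 1471440000000L));  // doc values if sorting is needed
        Query q = LongPoint.newRangeQuery("timestamp", 0L, Long.MAX_VALUE);
        System.out.println(q);
      }
    }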

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/document/Field.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/Field.java b/lucene/core/src/java/org/apache/lucene/document/Field.java
index 8798610..8f5f869 100644
--- a/lucene/core/src/java/org/apache/lucene/document/Field.java
+++ b/lucene/core/src/java/org/apache/lucene/document/Field.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.io.Reader;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@@ -426,9 +425,6 @@ public class Field implements IndexableField {
     if (type.indexOptions() == IndexOptions.NONE || !type.tokenized()) {
       throw new IllegalArgumentException("TokenStream fields must be indexed and tokenized");
     }
-    if (type.numericType() != null) {
-      throw new IllegalArgumentException("cannot set private TokenStream on numeric fields");
-    }
     this.tokenStream = tokenStream;
   }
   
@@ -511,35 +507,6 @@ public class Field implements IndexableField {
       return null;
     }
 
-    final FieldType.LegacyNumericType numericType = fieldType().numericType();
-    if (numericType != null) {
-      if (!(reuse instanceof LegacyNumericTokenStream && ((LegacyNumericTokenStream)reuse).getPrecisionStep() == type.numericPrecisionStep())) {
-        // lazy init the TokenStream as it is heavy to instantiate
-        // (attributes,...) if not needed (stored field loading)
-        reuse = new LegacyNumericTokenStream(type.numericPrecisionStep());
-      }
-      final LegacyNumericTokenStream nts = (LegacyNumericTokenStream) reuse;
-      // initialize value in TokenStream
-      final Number val = (Number) fieldsData;
-      switch (numericType) {
-      case INT:
-        nts.setIntValue(val.intValue());
-        break;
-      case LONG:
-        nts.setLongValue(val.longValue());
-        break;
-      case FLOAT:
-        nts.setFloatValue(val.floatValue());
-        break;
-      case DOUBLE:
-        nts.setDoubleValue(val.doubleValue());
-        break;
-      default:
-        throw new AssertionError("Should never get here");
-      }
-      return reuse;
-    }
-
     if (!fieldType().tokenized()) {
       if (stringValue() != null) {
         if (!(reuse instanceof StringTokenStream)) {
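
With the numericType() branch removed, Field.tokenStream() no longer builds a LegacyNumericTokenStream behind the scenes; callers who still want legacy trie terms have to hand the stream to the Field themselves, as the javadoc of the removed LegacyNumericTokenStream above shows. A hedged sketch of that explicit wiring against the relocated legacy package (field name and values are illustrative):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.legacy.LegacyNumericTokenStream;

    public class ExplicitNumericStreamSketch {
      public static void main(String[] args) {
        FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
        fieldType.setOmitNorms(true);
        fieldType.setIndexOptions(IndexOptions.DOCS);
        // the stream is supplied directly; core Field no longer creates it for numeric types
        Field field = new Field("legacyLong", new LegacyNumericTokenStream(16).setLongValue(42L), fieldType);
        Document doc = new Document();
        doc.add(field);
        System.out.println(doc);
      }
    }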

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/document/FieldType.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/FieldType.java b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
index e0f058f..6f206a4 100644
--- a/lucene/core/src/java/org/apache/lucene/document/FieldType.java
+++ b/lucene/core/src/java/org/apache/lucene/document/FieldType.java
@@ -22,30 +22,12 @@ import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.index.PointValues;
-import org.apache.lucene.util.LegacyNumericUtils;
 
 /**
  * Describes the properties of a field.
  */
 public class FieldType implements IndexableFieldType  {
 
-  /** Data type of the numeric value
-   * @since 3.2
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public enum LegacyNumericType {
-    /** 32-bit integer numeric type */
-    INT, 
-    /** 64-bit long numeric type */
-    LONG, 
-    /** 32-bit float numeric type */
-    FLOAT, 
-    /** 64-bit double numeric type */
-    DOUBLE
-  }
-
   private boolean stored;
   private boolean tokenized = true;
   private boolean storeTermVectors;
@@ -54,9 +36,7 @@ public class FieldType implements IndexableFieldType  {
   private boolean storeTermVectorPayloads;
   private boolean omitNorms;
   private IndexOptions indexOptions = IndexOptions.NONE;
-  private LegacyNumericType numericType;
   private boolean frozen;
-  private int numericPrecisionStep = LegacyNumericUtils.PRECISION_STEP_DEFAULT;
   private DocValuesType docValuesType = DocValuesType.NONE;
   private int dimensionCount;
   private int dimensionNumBytes;
@@ -73,8 +53,6 @@ public class FieldType implements IndexableFieldType  {
     this.storeTermVectorPayloads = ref.storeTermVectorPayloads();
     this.omitNorms = ref.omitNorms();
     this.indexOptions = ref.indexOptions();
-    this.numericType = ref.numericType();
-    this.numericPrecisionStep = ref.numericPrecisionStep();
     this.docValuesType = ref.docValuesType();
     this.dimensionCount = ref.dimensionCount;
     this.dimensionNumBytes = ref.dimensionNumBytes;
@@ -298,70 +276,6 @@ public class FieldType implements IndexableFieldType  {
   }
 
   /**
-   * Specifies the field's numeric type.
-   * @param type numeric type, or null if the field has no numeric type.
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #numericType()
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public void setNumericType(LegacyNumericType type) {
-    checkIfFrozen();
-    numericType = type;
-  }
-
-  /** 
-   * LegacyNumericType: if non-null then the field's value will be indexed
-   * numerically so that {@link org.apache.lucene.search.LegacyNumericRangeQuery} can be used at
-   * search time. 
-   * <p>
-   * The default is <code>null</code> (no numeric type) 
-   * @see #setNumericType(org.apache.lucene.document.FieldType.LegacyNumericType)
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public LegacyNumericType numericType() {
-    return numericType;
-  }
-
-  /**
-   * Sets the numeric precision step for the field.
-   * @param precisionStep numeric precision step for the field
-   * @throws IllegalArgumentException if precisionStep is less than 1. 
-   * @throws IllegalStateException if this FieldType is frozen against
-   *         future modifications.
-   * @see #numericPrecisionStep()
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public void setNumericPrecisionStep(int precisionStep) {
-    checkIfFrozen();
-    if (precisionStep < 1) {
-      throw new IllegalArgumentException("precisionStep must be >= 1 (got " + precisionStep + ")");
-    }
-    this.numericPrecisionStep = precisionStep;
-  }
-
-  /** 
-   * Precision step for numeric field. 
-   * <p>
-   * This has no effect if {@link #numericType()} returns null.
-   * <p>
-   * The default is {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT}
-   * @see #setNumericPrecisionStep(int)
-   *
-   * @deprecated Please switch to {@link org.apache.lucene.index.PointValues} instead
-   */
-  @Deprecated
-  public int numericPrecisionStep() {
-    return numericPrecisionStep;
-  }
-
-  /**
    * Enables points indexing.
    */
   public void setDimensions(int dimensionCount, int dimensionNumBytes) {
@@ -403,7 +317,7 @@ public class FieldType implements IndexableFieldType  {
 
   /** Prints a Field for human consumption. */
   @Override
-  public final String toString() {
+  public String toString() {
     StringBuilder result = new StringBuilder();
     if (stored()) {
       result.append("stored");
@@ -434,12 +348,6 @@ public class FieldType implements IndexableFieldType  {
         result.append(",indexOptions=");
         result.append(indexOptions);
       }
-      if (numericType != null) {
-        result.append(",numericType=");
-        result.append(numericType);
-        result.append(",numericPrecisionStep=");
-        result.append(numericPrecisionStep);
-      }
     }
     if (dimensionCount != 0) {
       if (result.length() > 0) {
@@ -495,8 +403,6 @@ public class FieldType implements IndexableFieldType  {
     result = prime * result + dimensionNumBytes;
     result = prime * result + ((docValuesType == null) ? 0 : docValuesType.hashCode());
     result = prime * result + indexOptions.hashCode();
-    result = prime * result + numericPrecisionStep;
-    result = prime * result + ((numericType == null) ? 0 : numericType.hashCode());
     result = prime * result + (omitNorms ? 1231 : 1237);
     result = prime * result + (storeTermVectorOffsets ? 1231 : 1237);
     result = prime * result + (storeTermVectorPayloads ? 1231 : 1237);
@@ -517,8 +423,6 @@ public class FieldType implements IndexableFieldType  {
     if (dimensionNumBytes != other.dimensionNumBytes) return false;
     if (docValuesType != other.docValuesType) return false;
     if (indexOptions != other.indexOptions) return false;
-    if (numericPrecisionStep != other.numericPrecisionStep) return false;
-    if (numericType != other.numericType) return false;
     if (omitNorms != other.omitNorms) return false;
     if (storeTermVectorOffsets != other.storeTermVectorOffsets) return false;
     if (storeTermVectorPayloads != other.storeTermVectorPayloads) return false;
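
After this change the only numeric knob left on core FieldType is the point configuration; setDimensions(), visible in the surviving hunk, is what IntPoint, LongPoint and friends set up internally. A purely illustrative sketch:

    import org.apache.lucene.document.FieldType;

    public class PointTypeSketch {
      public static void main(String[] args) {
        FieldType ft = new FieldType();
        ft.setDimensions(1, Long.BYTES); // one 8-byte dimension, the shape LongPoint uses
        ft.freeze();
        // toString() now reports the point dimensions instead of numericType/numericPrecisionStep
        System.out.println(ft);
      }
    }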

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/document/LegacyDoubleField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/LegacyDoubleField.java b/lucene/core/src/java/org/apache/lucene/document/LegacyDoubleField.java
deleted file mode 100644
index 55ba81c..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/LegacyDoubleField.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.document;
-
-
-import org.apache.lucene.index.IndexOptions;
-
-
-/**
- * <p>
- * Field that indexes <code>double</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LegacyDoubleField(name, 6.0, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LegacyDoubleField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LegacyDoubleField field = new LegacyDoubleField(name, 0.0, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setDoubleValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
- * LegacyFloatField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>LegacyDoubleField</code>, use {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
- * To sort according to a
- * <code>LegacyDoubleField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>LegacyDoubleField</code>
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as an <code>LegacyDoubleField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LegacyDoubleField</code>.</p>
- *
- * <p>A <code>LegacyDoubleField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 16, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link FieldType} and invoke the {@link
- * FieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * org.apache.lucene.search.LegacyNumericRangeQuery}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The format of
- * indexed values is described in {@link org.apache.lucene.util.LegacyNumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * org.apache.lucene.analysis.LegacyNumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @deprecated Please use {@link DoublePoint} instead
- *
- * @since 2.9
- */
-
-@Deprecated
-public final class LegacyDoubleField extends Field {
-  
-  /** 
-   * Type for a LegacyDoubleField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(FieldType.LegacyNumericType.DOUBLE);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LegacyDoubleField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_STORED = new FieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(FieldType.LegacyNumericType.DOUBLE);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LegacyDoubleField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
-   *  @param name field name
-   *  @param value 64-bit double value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null. 
-   */
-  public LegacyDoubleField(String name, double value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Double.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  FieldType}. 
-   *  @param name field name
-   *  @param value 64-bit double value
-   *  @param type customized field type: must have {@link FieldType#numericType()}
-   *         of {@link org.apache.lucene.document.FieldType.LegacyNumericType#DOUBLE}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a DOUBLE numericType()
-   */
-  public LegacyDoubleField(String name, double value, FieldType type) {
-    super(name, type);
-    if (type.numericType() != FieldType.LegacyNumericType.DOUBLE) {
-      throw new IllegalArgumentException("type.numericType() must be DOUBLE but got " + type.numericType());
-    }
-    fieldsData = Double.valueOf(value);
-  }
-}
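
For readers migrating off LegacyDoubleField, a self-contained sketch of the replacement the @deprecated tag names: DoublePoint for range queries plus a DoubleDocValuesField for sorting. The field name, sample values, and the no-argument IndexWriterConfig constructor are assumptions of this sketch, not part of the commit.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.DoubleDocValuesField;
    import org.apache.lucene.document.DoublePoint;
    import org.apache.lucene.document.StoredField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class DoublePointSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        // assumes the no-arg IndexWriterConfig ctor (default analyzer); pass an Analyzer on older versions
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
          for (double v : new double[] {1.5, 2.5, 6.0, 9.75}) {
            Document doc = new Document();
            doc.add(new DoublePoint("price", v));           // indexed for range queries
            doc.add(new DoubleDocValuesField("price", v));  // doc values for sorting
            doc.add(new StoredField("price", v));           // stored for retrieval
            writer.addDocument(doc);
          }
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
          IndexSearcher searcher = new IndexSearcher(reader);
          TopDocs hits = searcher.search(DoublePoint.newRangeQuery("price", 2.0, 10.0), 10,
              new Sort(new SortField("price", SortField.Type.DOUBLE)));
          System.out.println("hits in [2.0, 10.0]: " + hits.totalHits);  // 3 of the 4 sample values
        }
        dir.close();
      }
    }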


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/document/LegacyFloatField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/LegacyFloatField.java b/lucene/core/src/java/org/apache/lucene/document/LegacyFloatField.java
deleted file mode 100644
index e24bf30..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/LegacyFloatField.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.document;
-
-
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.util.LegacyNumericUtils;
-
-/**
- * <p>
- * Field that indexes <code>float</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LegacyFloatField(name, 6.0F, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LegacyFloatField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LegacyFloatField field = new LegacyFloatField(name, 0.0F, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setFloatValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
- * LegacyDoubleField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>LegacyFloatField</code>, use {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
- * To sort according to a
- * <code>LegacyFloatField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>LegacyFloatField</code>
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as an <code>LegacyFloatField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LegacyFloatField</code>.</p>
- *
- * <p>A <code>LegacyFloatField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 8, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link FieldType} and invoke the {@link
- * FieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * org.apache.lucene.search.LegacyNumericRangeQuery}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The format of
- * indexed values is described in {@link org.apache.lucene.util.LegacyNumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * org.apache.lucene.analysis.LegacyNumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @deprecated Please use {@link FloatPoint} instead
- *
- * @since 2.9
- */
-
-@Deprecated
-public final class LegacyFloatField extends Field {
-  
-  /** 
-   * Type for a LegacyFloatField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(FieldType.LegacyNumericType.FLOAT);
-    TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LegacyFloatField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_STORED = new FieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(FieldType.LegacyNumericType.FLOAT);
-    TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LegacyFloatField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   *  @param name field name
-   *  @param value 32-bit float value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public LegacyFloatField(String name, float value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Float.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  FieldType}. 
-   *  @param name field name
-   *  @param value 32-bit float value
-   *  @param type customized field type: must have {@link FieldType#numericType()}
-   *         of {@link org.apache.lucene.document.FieldType.LegacyNumericType#FLOAT}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a FLOAT numericType()
-   */
-  public LegacyFloatField(String name, float value, FieldType type) {
-    super(name, type);
-    if (type.numericType() != FieldType.LegacyNumericType.FLOAT) {
-      throw new IllegalArgumentException("type.numericType() must be FLOAT but got " + type.numericType());
-    }
-    fieldsData = Float.valueOf(value);
-  }
-}
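
Both the removed trie terms and their point-based replacement lean on the same property for floats: the sortable-int encoding orders exactly like the float values themselves. A tiny sketch using the NumericUtils helpers that remain in core:

    import org.apache.lucene.util.NumericUtils;

    public class SortableFloatSketch {
      public static void main(String[] args) {
        float[] values = {-3.5f, -1.0f, 0.0f, 2.25f, Float.MAX_VALUE};
        for (float v : values) {
          int sortable = NumericUtils.floatToSortableInt(v);
          // round-trips exactly, and the int ordering matches the float ordering
          System.out.println(v + " -> " + sortable + " -> " + NumericUtils.sortableIntToFloat(sortable));
        }
      }
    }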

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/document/LegacyIntField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/LegacyIntField.java b/lucene/core/src/java/org/apache/lucene/document/LegacyIntField.java
deleted file mode 100644
index 6eb0376..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/LegacyIntField.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.document;
-
-
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.util.LegacyNumericUtils;
-
-/**
- * <p>
- * Field that indexes <code>int</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LegacyIntField(name, 6, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LegacyIntField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LegacyIntField field = new LegacyIntField(name, 6, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setIntValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LegacyLongField}, {@link LegacyFloatField}, {@link
- * LegacyDoubleField}.
- *
- * <p>To perform range querying or filtering against a
- * <code>LegacyIntField</code>, use {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
- * To sort according to a
- * <code>LegacyIntField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#INT}. <code>LegacyIntField</code>
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
- *
- * <p>You may add the same field name as an <code>LegacyIntField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LegacyIntField</code>.</p>
- *
- * <p>An <code>LegacyIntField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 8, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link FieldType} and invoke the {@link
- * FieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * org.apache.lucene.search.LegacyNumericRangeQuery}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The format of
- * indexed values is described in {@link org.apache.lucene.util.LegacyNumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed. </p>
- *
- * <p>More advanced users can instead use {@link
- * org.apache.lucene.analysis.LegacyNumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @deprecated Please use {@link IntPoint} instead
- *
- * @since 2.9
- */
-
-@Deprecated
-public final class LegacyIntField extends Field {
-  
-  /** 
-   * Type for an LegacyIntField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(FieldType.LegacyNumericType.INT);
-    TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LegacyIntField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_STORED = new FieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(FieldType.LegacyNumericType.INT);
-    TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LegacyIntField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   *  @param name field name
-   *  @param value 32-bit integer value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public LegacyIntField(String name, int value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Integer.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  FieldType}. 
-   *  @param name field name
-   *  @param value 32-bit integer value
-   *  @param type customized field type: must have {@link FieldType#numericType()}
-   *         of {@link org.apache.lucene.document.FieldType.LegacyNumericType#INT}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have an INT numericType()
-   */
-  public LegacyIntField(String name, int value, FieldType type) {
-    super(name, type);
-    if (type.numericType() != FieldType.LegacyNumericType.INT) {
-      throw new IllegalArgumentException("type.numericType() must be INT but got " + type.numericType());
-    }
-    fieldsData = Integer.valueOf(value);
-  }
-}
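
As a rough sketch of the replacement that the deprecation tag above points at (IntPoint): the field name, values, and helper class below are invented for illustration and are not part of this patch.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.document.StoredField;
    import org.apache.lucene.search.Query;

    public class IntPointMigrationSketch {
      /** Roughly what "new LegacyIntField("weight", 42, Field.Store.YES)" becomes with points. */
      static Document buildDoc(String field, int value) {
        Document doc = new Document();
        doc.add(new IntPoint(field, value));              // indexed for exact/range queries
        doc.add(new StoredField(field, value));           // the Field.Store.YES equivalent
        doc.add(new NumericDocValuesField(field, value)); // only needed if the field is sorted or faceted on
        return doc;
      }

      /** Roughly replaces LegacyNumericRangeQuery.newIntRange(field, 10, 100, true, true). */
      static Query rangeQuery(String field) {
        return IntPoint.newRangeQuery(field, 10, 100);
      }
    }

Unlike the trie fields, a point is neither stored nor doc-valued, which is why the StoredField and NumericDocValuesField appear alongside the IntPoint when storage or sorting is still wanted.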

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/document/LegacyLongField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/document/LegacyLongField.java b/lucene/core/src/java/org/apache/lucene/document/LegacyLongField.java
deleted file mode 100644
index fa1851f..0000000
--- a/lucene/core/src/java/org/apache/lucene/document/LegacyLongField.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.document;
-
-
-import org.apache.lucene.index.IndexOptions;
-
-
-/**
- * <p>
- * Field that indexes <code>long</code> values
- * for efficient range filtering and sorting. Here's an example usage:
- * 
- * <pre class="prettyprint">
- * document.add(new LegacyLongField(name, 6L, Field.Store.NO));
- * </pre>
- * 
- * For optimal performance, re-use the <code>LegacyLongField</code> and
- * {@link Document} instance for more than one document:
- * 
- * <pre class="prettyprint">
- *  LegacyLongField field = new LegacyLongField(name, 0L, Field.Store.NO);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setLongValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * </pre>
- *
- * See also {@link LegacyIntField}, {@link LegacyFloatField}, {@link
- * LegacyDoubleField}.
- *
- * Any type that can be converted to long can also be
- * indexed.  For example, date/time values represented by a
- * {@link java.util.Date} can be translated into a long
- * value using the {@link java.util.Date#getTime} method.  If you
- * don't need millisecond precision, you can quantize the
- * value, either by dividing the result of
- * {@link java.util.Date#getTime} or using the separate getters
- * (for year, month, etc.) to construct an <code>int</code> or
- * <code>long</code> value.
- *
- * <p>To perform range querying or filtering against a
- * <code>LegacyLongField</code>, use {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
- * To sort according to a
- * <code>LegacyLongField</code>, use the normal numeric sort types, eg
- * {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LegacyLongField</code>
- * values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.
- *
- * <p>You may add the same field name as a <code>LegacyLongField</code> to
- * the same document more than once.  Range querying and
- * filtering will be the logical OR of all values; so a range query
- * will hit all documents that have at least one value in
- * the range. However, sort behavior is not defined.  If you need to sort,
- * you should separately index a single-valued <code>LegacyLongField</code>.
- *
- * <p>A <code>LegacyLongField</code> will consume somewhat more disk space
- * in the index than an ordinary single-valued field.
- * However, for a typical index that includes substantial
- * textual content per document, this increase will likely
- * be in the noise. </p>
- *
- * <p>Within Lucene, each numeric value is indexed as a
- * <em>trie</em> structure, where each term is logically
- * assigned to larger and larger pre-defined brackets (which
- * are simply lower-precision representations of the value).
- * The step size between each successive bracket is called the
- * <code>precisionStep</code>, measured in bits.  Smaller
- * <code>precisionStep</code> values result in a larger number
- * of brackets, which consumes more disk space in the index
- * but may result in faster range search performance.  The
- * default value, 16, was selected for a reasonable tradeoff
- * of disk space consumption versus performance.  You can
- * create a custom {@link FieldType} and invoke the {@link
- * FieldType#setNumericPrecisionStep} method if you'd
- * like to change the value.  Note that you must also
- * specify a congruent value when creating {@link
- * org.apache.lucene.search.LegacyNumericRangeQuery}.
- * For low cardinality fields larger precision steps are good.
- * If the cardinality is &lt; 100, it is fair
- * to use {@link Integer#MAX_VALUE}, which produces one
- * term per value.
- *
- * <p>For more information on the internals of numeric trie
- * indexing, including the <a
- * href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
- * configuration, see {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The format of
- * indexed values is described in {@link org.apache.lucene.util.LegacyNumericUtils}.
- *
- * <p>If you only need to sort by numeric value, and never
- * run range querying/filtering, you can index using a
- * <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
- * This will minimize disk space consumed.
- *
- * <p>More advanced users can instead use {@link
- * org.apache.lucene.analysis.LegacyNumericTokenStream} directly, when indexing numbers. This
- * class is a wrapper around this token stream type for
- * easier, more intuitive usage.</p>
- *
- * @deprecated Please use {@link LongPoint} instead
- *
- * @since 2.9
- */
-
-@Deprecated
-public final class LegacyLongField extends Field {
-  
-  /** 
-   * Type for a LegacyLongField that is not stored:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_NOT_STORED = new FieldType();
-  static {
-    TYPE_NOT_STORED.setTokenized(true);
-    TYPE_NOT_STORED.setOmitNorms(true);
-    TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_NOT_STORED.setNumericType(FieldType.LegacyNumericType.LONG);
-    TYPE_NOT_STORED.freeze();
-  }
-
-  /** 
-   * Type for a stored LegacyLongField:
-   * normalization factors, frequencies, and positions are omitted.
-   */
-  public static final FieldType TYPE_STORED = new FieldType();
-  static {
-    TYPE_STORED.setTokenized(true);
-    TYPE_STORED.setOmitNorms(true);
-    TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
-    TYPE_STORED.setNumericType(FieldType.LegacyNumericType.LONG);
-    TYPE_STORED.setStored(true);
-    TYPE_STORED.freeze();
-  }
-
-  /** Creates a stored or un-stored LegacyLongField with the provided value
-   *  and default <code>precisionStep</code> {@link
-   *  org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
-   *  @param name field name
-   *  @param value 64-bit long value
-   *  @param stored Store.YES if the content should also be stored
-   *  @throws IllegalArgumentException if the field name is null.
-   */
-  public LegacyLongField(String name, long value, Store stored) {
-    super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
-    fieldsData = Long.valueOf(value);
-  }
-  
-  /** Expert: allows you to customize the {@link
-   *  FieldType}. 
-   *  @param name field name
-   *  @param value 64-bit long value
-   *  @param type customized field type: must have {@link FieldType#numericType()}
-   *         of {@link org.apache.lucene.document.FieldType.LegacyNumericType#LONG}.
-   *  @throws IllegalArgumentException if the field name or type is null, or
-   *          if the field type does not have a LONG numericType()
-   */
-  public LegacyLongField(String name, long value, FieldType type) {
-    super(name, type);
-    if (type.numericType() != FieldType.LegacyNumericType.LONG) {
-      throw new IllegalArgumentException("type.numericType() must be LONG but got " + type.numericType());
-    }
-    fieldsData = Long.valueOf(value);
-  }
-}
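
For the Date-as-long pattern the javadoc above describes, a hypothetical equivalent using LongPoint (the deprecation target named above) could look like the following; the field name, the seven-day window, and the helper class are invented for illustration.

    import java.util.Date;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.search.Query;

    public class LongPointDateSketch {
      static Document buildDoc(String field, Date date) {
        long millis = date.getTime();                      // Date -> long, as the javadoc suggests
        Document doc = new Document();
        doc.add(new LongPoint(field, millis));             // indexed for range filtering
        doc.add(new NumericDocValuesField(field, millis)); // enables SortField.Type.LONG sorting
        return doc;
      }

      /** Roughly replaces LegacyNumericRangeQuery.newLongRange(field, from, now, true, true). */
      static Query lastSevenDays(String field, long nowMillis) {
        long from = nowMillis - 7L * 24 * 60 * 60 * 1000;  // quantize or offset however the application needs
        return LongPoint.newRangeQuery(field, from, nowMillis);
      }
    }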

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/search/LegacyNumericRangeQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/LegacyNumericRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/LegacyNumericRangeQuery.java
deleted file mode 100644
index fe6c9e2..0000000
--- a/lucene/core/src/java/org/apache/lucene/search/LegacyNumericRangeQuery.java
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.Objects;
-
-import org.apache.lucene.document.DoublePoint;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
-import org.apache.lucene.document.FloatPoint;
-import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.LongPoint;
-import org.apache.lucene.index.FilteredTermsEnum;
-import org.apache.lucene.index.PointValues;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.index.Term; // for javadocs
-
-/**
- * <p>A {@link Query} that matches numeric values within a
- * specified range.  To use this, you must first index the
- * numeric values using {@link org.apache.lucene.document.LegacyIntField}, {@link
- * org.apache.lucene.document.LegacyFloatField}, {@link org.apache.lucene.document.LegacyLongField} or {@link org.apache.lucene.document.LegacyDoubleField} (expert: {@link
- * org.apache.lucene.analysis.LegacyNumericTokenStream}).  If your terms are instead textual,
- * you should use {@link TermRangeQuery}.</p>
- *
- * <p>You create a new LegacyNumericRangeQuery with the static
- * factory methods, e.g.:
- *
- * <pre class="prettyprint">
- * Query q = LegacyNumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
- * </pre>
- *
- * matches all documents whose float valued "weight" field
- * ranges from 0.03 to 0.10, inclusive.
- *
- * <p>The performance of LegacyNumericRangeQuery is much better
- * than the corresponding {@link TermRangeQuery} because the
- * number of terms that must be searched is usually far
- * smaller, thanks to trie indexing, described below.</p>
- *
- * <p>You can optionally specify a <a
- * href="#precisionStepDesc"><code>precisionStep</code></a>
- * when creating this query.  This is necessary if you've
- * changed this configuration from its default (16 for
- * <code>long</code>/<code>double</code>, 8 for <code>int</code>/<code>float</code>)
- * during indexing.  Lower values consume more disk space
- * but speed up searching.  Suitable values are between
- * <b>1</b> and <b>8</b>; a good starting point to test is
- * <b>4</b>.  See <a href="#precisionStepDesc">below</a> for details.
- *
- * <p>This query defaults to {@linkplain
- * MultiTermQuery#CONSTANT_SCORE_REWRITE}.
- * With precision steps of &le;4, this query can be run with
- * one of the BooleanQuery rewrite methods without changing
- * BooleanQuery's default max clause count.
- *
- * <br><h3>How it works</h3>
- *
- * <p>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
- * where this algorithm was described (referred to as <code>TrieRangeQuery</code>):
- *
- * <blockquote><strong>Schindler, U, Diepenbroek, M</strong>, 2008.
- * <em>Generic XML-based Framework for Metadata Portals.</em>
- * Computers &amp; Geosciences 34 (12), 1947-1955.
- * <a href="http://dx.doi.org/10.1016/j.cageo.2008.02.023"
- * target="_blank">doi:10.1016/j.cageo.2008.02.023</a></blockquote>
- *
- * <p><em>A quote from this paper:</em> Because Apache Lucene is a full-text
- * search engine and not a conventional database, it cannot handle numerical ranges
- * (e.g., field value is inside user defined bounds, even dates are numerical values).
- * We have developed an extension to Apache Lucene that stores
- * the numerical values in a special string-encoded format with variable precision
- * (all numerical values like doubles, longs, floats, and ints are converted to
- * lexicographic sortable string representations and stored with different precisions
- * (for a more detailed description of how the values are stored,
- * see {@link org.apache.lucene.util.LegacyNumericUtils}). A range is then divided recursively into multiple intervals for searching:
- * The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
- * while the boundaries are matched more exactly. This reduces the number of terms dramatically.</p>
- *
- * <p>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
- * uses a lowest precision of 1 byte, the index contains only a maximum of 256 distinct values in the
- * lowest precision. Overall, a range could consist of a theoretical maximum of
- * <code>7*255*2 + 255 = 3825</code> distinct terms (when there is a term for every distinct value of an
- * 8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
- * because it would always be possible to reduce the full 256 values to one term with degraded precision).
- * In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
- * and a uniform value distribution).</p>
- *
- * <h3><a name="precisionStepDesc">Precision Step</a></h3>
- * <p>You can choose any <code>precisionStep</code> when encoding values.
- * Lower step values mean more precision levels and therefore more terms in the index (so the index gets larger). The number
- * of indexed terms per value (generated by {@link org.apache.lucene.analysis.LegacyNumericTokenStream}) is:
- * <p style="font-family:serif">
- * &nbsp;&nbsp;indexedTermsPerValue = <b>ceil</b><big>(</big>bitsPerValue / precisionStep<big>)</big>
- * </p>
- * As the lower precision terms are shared by many values, the additional terms only
- * slightly grow the term dictionary (approx. 7% for <code>precisionStep=4</code>), but have a larger
- * impact on the postings (the postings file will have more entries, as every document is linked to
- * <code>indexedTermsPerValue</code> terms instead of one). The formula to estimate the growth
- * of the term dictionary in comparison to one term per value:
- * <p>
- * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
- * &nbsp;&nbsp;<img src="doc-files/nrq-formula-1.png" alt="\mathrm{termDictOverhead} = \sum\limits_{i=0}^{\mathrm{indexedTermsPerValue}-1} \frac{1}{2^{\mathrm{precisionStep}\cdot i}}">
- * </p>
- * <p>On the other hand, if the <code>precisionStep</code> is smaller, the maximum number of terms to match reduces,
- * which optimizes query speed. The formula to calculate the maximum number of terms that will be visited while
- * executing the query is:
- * <p>
- * <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
- * &nbsp;&nbsp;<img src="doc-files/nrq-formula-2.png" alt="\mathrm{maxQueryTerms} = \left[ \left( \mathrm{indexedTermsPerValue} - 1 \right) \cdot \left(2^\mathrm{precisionStep} - 1 \right) \cdot 2 \right] + \left( 2^\mathrm{precisionStep} - 1 \right)">
- * </p>
- * <p>For longs stored using a precision step of 4, <code>maxQueryTerms = 15*15*2 + 15 = 465</code>, and for a precision
- * step of 2, <code>maxQueryTerms = 31*3*2 + 3 = 189</code>. But the gain in search speed is partly offset by more seeking
- * in the term enum of the index. Because of this, the ideal <code>precisionStep</code> value can only
- * be found by testing. <b>Important:</b> You can index with a lower precision step value and test search speed
- * using a multiple of the original step value.</p>
- *
- * <p>Good values for <code>precisionStep</code> depend on usage and data type:
- * <ul>
- *  <li>The defaults, used when no <code>precisionStep</code> is given, are <b>16</b> for <em>long/double</em> and <b>8</b> for <em>int/float</em> (see {@link org.apache.lucene.util.LegacyNumericUtils}).
- *  <li>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.
- *  <li>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.
- *  <li>For low cardinality fields larger precision steps are good. If the cardinality is &lt; 100, it is
- *  fair to use {@link Integer#MAX_VALUE} (see below).
- *  <li>Steps <b>&ge;64</b> for <em>long/double</em> and <b>&ge;32</b> for <em>int/float</em> produce one token
- *  per value in the index and querying is as slow as a conventional {@link TermRangeQuery}. But it can be used
- *  to produce fields that are solely used for sorting (in this case simply use {@link Integer#MAX_VALUE} as
- *  <code>precisionStep</code>). Using {@link org.apache.lucene.document.LegacyIntField},
- *  {@link org.apache.lucene.document.LegacyLongField}, {@link org.apache.lucene.document.LegacyFloatField} or {@link org.apache.lucene.document.LegacyDoubleField} for sorting
- *  is ideal, because building the field cache is much faster than with text-only numbers.
- *  These fields have one term per value and therefore also work with term enumeration for building distinct lists
- *  (e.g. facets / preselected values to search for).
- *  Sorting is also possible with range query optimized fields using one of the above <code>precisionSteps</code>.
- * </ul>
- *
- * <p>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
- * that {@link TermRangeQuery} in boolean rewrite mode (with raised {@link BooleanQuery} clause count)
- * took about 30-40 secs to complete, {@link TermRangeQuery} in constant score filter rewrite mode took 5 secs
- * and executing this class took &lt;100ms to complete (on an Opteron64 machine, Java 1.5, 8 bit
- * precision step). This query type was developed for a geographic portal, where the performance for
- * e.g. bounding boxes or exact date/time stamps is important.</p>
- *
- * @deprecated Instead index with {@link IntPoint}, {@link LongPoint}, {@link FloatPoint}, {@link DoublePoint}, and
- *             create range queries with {@link IntPoint#newRangeQuery(String, int, int) IntPoint.newRangeQuery()},
- *             {@link LongPoint#newRangeQuery(String, long, long) LongPoint.newRangeQuery()},
- *             {@link FloatPoint#newRangeQuery(String, float, float) FloatPoint.newRangeQuery()},
- *             {@link DoublePoint#newRangeQuery(String, double, double) DoublePoint.newRangeQuery()} respectively.
- *             See {@link PointValues} for background information on Points.
- *
- * @since 2.9
- **/
-
-@Deprecated
-public final class LegacyNumericRangeQuery<T extends Number> extends MultiTermQuery {
-
-  private LegacyNumericRangeQuery(final String field, final int precisionStep, final LegacyNumericType dataType,
-                                  T min, T max, final boolean minInclusive, final boolean maxInclusive) {
-    super(field);
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    this.precisionStep = precisionStep;
-    this.dataType = Objects.requireNonNull(dataType, "LegacyNumericType must not be null");
-    this.min = min;
-    this.max = max;
-    this.minInclusive = minInclusive;
-    this.maxInclusive = maxInclusive;
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>long</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Long> newLongRange(final String field, final int precisionStep,
-    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, precisionStep, FieldType.LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>long</code>
-   * range using the default <code>precisionStep</code> {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Long> newLongRange(final String field,
-    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, FieldType.LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries an <code>int</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Integer> newIntRange(final String field, final int precisionStep,
-    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, precisionStep, FieldType.LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries an <code>int</code>
-   * range using the default <code>precisionStep</code> {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Integer> newIntRange(final String field,
-    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, FieldType.LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>double</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
-   * with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field, final int precisionStep,
-    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, precisionStep, FieldType.LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>double</code>
-   * range using the default <code>precisionStep</code> {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
-   * with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field,
-    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, FieldType.LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>float</code>
-   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
-   * with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Float> newFloatRange(final String field, final int precisionStep,
-    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, precisionStep, FieldType.LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
-  }
-  
-  /**
-   * Factory that creates a <code>LegacyNumericRangeQuery</code> that queries a <code>float</code>
-   * range using the default <code>precisionStep</code> {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
-   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
-   * by setting the min or max value to <code>null</code>.
-   * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
-   * with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
-   * match all documents excluding the bounds; with inclusive on, the boundaries are hits, too.
-   */
-  public static LegacyNumericRangeQuery<Float> newFloatRange(final String field,
-    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
-  ) {
-    return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, FieldType.LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
-  }
-
-  @Override @SuppressWarnings("unchecked")
-  protected TermsEnum getTermsEnum(final Terms terms, AttributeSource atts) throws IOException {
-    // very strange: java.lang.Number itself is not Comparable, but all subclasses used here are
-    if (min != null && max != null && ((Comparable<T>) min).compareTo(max) > 0) {
-      return TermsEnum.EMPTY;
-    }
-    return new NumericRangeTermsEnum(terms.iterator());
-  }
-
-  /** Returns <code>true</code> if the lower endpoint is inclusive */
-  public boolean includesMin() { return minInclusive; }
-  
-  /** Returns <code>true</code> if the upper endpoint is inclusive */
-  public boolean includesMax() { return maxInclusive; }
-
-  /** Returns the lower value of this range query */
-  public T getMin() { return min; }
-
-  /** Returns the upper value of this range query */
-  public T getMax() { return max; }
-  
-  /** Returns the precision step. */
-  public int getPrecisionStep() { return precisionStep; }
-  
-  @Override
-  public String toString(final String field) {
-    final StringBuilder sb = new StringBuilder();
-    if (!getField().equals(field)) sb.append(getField()).append(':');
-    return sb.append(minInclusive ? '[' : '{')
-      .append((min == null) ? "*" : min.toString())
-      .append(" TO ")
-      .append((max == null) ? "*" : max.toString())
-      .append(maxInclusive ? ']' : '}')
-      .toString();
-  }
-
-  @Override
-  @SuppressWarnings({"unchecked","rawtypes"})
-  public final boolean equals(final Object o) {
-    if (o==this) return true;
-    if (!super.equals(o))
-      return false;
-    if (o instanceof LegacyNumericRangeQuery) {
-      final LegacyNumericRangeQuery q=(LegacyNumericRangeQuery)o;
-      return (
-        (q.min == null ? min == null : q.min.equals(min)) &&
-        (q.max == null ? max == null : q.max.equals(max)) &&
-        minInclusive == q.minInclusive &&
-        maxInclusive == q.maxInclusive &&
-        precisionStep == q.precisionStep
-      );
-    }
-    return false;
-  }
-
-  @Override
-  public final int hashCode() {
-    int hash = super.hashCode();
-    hash = 31 * hash + precisionStep;
-    hash = 31 * hash + Objects.hashCode(min);
-    hash = 31 * hash + Objects.hashCode(max);
-    hash = 31 * hash + Objects.hashCode(minInclusive);
-    hash = 31 * hash + Objects.hashCode(maxInclusive);
-    return hash;
-  }
-
-  // members (package private, so they are also fast to access from NumericRangeTermsEnum)
-  final int precisionStep;
-  final FieldType.LegacyNumericType dataType;
-  final T min, max;
-  final boolean minInclusive,maxInclusive;
-
-  // used to handle float/double infinity correctly
-  static final long LONG_NEGATIVE_INFINITY =
-    NumericUtils.doubleToSortableLong(Double.NEGATIVE_INFINITY);
-  static final long LONG_POSITIVE_INFINITY =
-    NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
-  static final int INT_NEGATIVE_INFINITY =
-    NumericUtils.floatToSortableInt(Float.NEGATIVE_INFINITY);
-  static final int INT_POSITIVE_INFINITY =
-    NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
-
-  /**
-   * Subclass of FilteredTermsEnum for enumerating all terms that match the
-   * sub-ranges for trie range queries, using flex API.
-   * <p>
-   * WARNING: This term enumeration is not guaranteed to be always ordered by
-   * {@link Term#compareTo}.
-   * The ordering depends on how {@link org.apache.lucene.util.LegacyNumericUtils#splitLongRange} and
-   * {@link org.apache.lucene.util.LegacyNumericUtils#splitIntRange} generates the sub-ranges. For
-   * {@link MultiTermQuery} ordering is not relevant.
-   */
-  private final class NumericRangeTermsEnum extends FilteredTermsEnum {
-
-    private BytesRef currentLowerBound, currentUpperBound;
-
-    private final LinkedList<BytesRef> rangeBounds = new LinkedList<>();
-
-    NumericRangeTermsEnum(final TermsEnum tenum) {
-      super(tenum);
-      switch (dataType) {
-        case LONG:
-        case DOUBLE: {
-          // lower
-          long minBound;
-          if (dataType == FieldType.LegacyNumericType.LONG) {
-            minBound = (min == null) ? Long.MIN_VALUE : min.longValue();
-          } else {
-            assert dataType == FieldType.LegacyNumericType.DOUBLE;
-            minBound = (min == null) ? LONG_NEGATIVE_INFINITY
-              : NumericUtils.doubleToSortableLong(min.doubleValue());
-          }
-          if (!minInclusive && min != null) {
-            if (minBound == Long.MAX_VALUE) break;
-            minBound++;
-          }
-          
-          // upper
-          long maxBound;
-          if (dataType == FieldType.LegacyNumericType.LONG) {
-            maxBound = (max == null) ? Long.MAX_VALUE : max.longValue();
-          } else {
-            assert dataType == FieldType.LegacyNumericType.DOUBLE;
-            maxBound = (max == null) ? LONG_POSITIVE_INFINITY
-              : NumericUtils.doubleToSortableLong(max.doubleValue());
-          }
-          if (!maxInclusive && max != null) {
-            if (maxBound == Long.MIN_VALUE) break;
-            maxBound--;
-          }
-          
-          LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
-            @Override
-            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-              rangeBounds.add(minPrefixCoded);
-              rangeBounds.add(maxPrefixCoded);
-            }
-          }, precisionStep, minBound, maxBound);
-          break;
-        }
-          
-        case INT:
-        case FLOAT: {
-          // lower
-          int minBound;
-          if (dataType == FieldType.LegacyNumericType.INT) {
-            minBound = (min == null) ? Integer.MIN_VALUE : min.intValue();
-          } else {
-            assert dataType == FieldType.LegacyNumericType.FLOAT;
-            minBound = (min == null) ? INT_NEGATIVE_INFINITY
-              : NumericUtils.floatToSortableInt(min.floatValue());
-          }
-          if (!minInclusive && min != null) {
-            if (minBound == Integer.MAX_VALUE) break;
-            minBound++;
-          }
-          
-          // upper
-          int maxBound;
-          if (dataType == LegacyNumericType.INT) {
-            maxBound = (max == null) ? Integer.MAX_VALUE : max.intValue();
-          } else {
-            assert dataType == FieldType.LegacyNumericType.FLOAT;
-            maxBound = (max == null) ? INT_POSITIVE_INFINITY
-              : NumericUtils.floatToSortableInt(max.floatValue());
-          }
-          if (!maxInclusive && max != null) {
-            if (maxBound == Integer.MIN_VALUE) break;
-            maxBound--;
-          }
-          
-          LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
-            @Override
-            public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-              rangeBounds.add(minPrefixCoded);
-              rangeBounds.add(maxPrefixCoded);
-            }
-          }, precisionStep, minBound, maxBound);
-          break;
-        }
-          
-        default:
-          // should never happen
-          throw new IllegalArgumentException("Invalid LegacyNumericType");
-      }
-    }
-    
-    private void nextRange() {
-      assert rangeBounds.size() % 2 == 0;
-
-      currentLowerBound = rangeBounds.removeFirst();
-      assert currentUpperBound == null || currentUpperBound.compareTo(currentLowerBound) <= 0 :
-        "The current upper bound must be <= the new lower bound";
-      
-      currentUpperBound = rangeBounds.removeFirst();
-    }
-    
-    @Override
-    protected final BytesRef nextSeekTerm(BytesRef term) {
-      while (rangeBounds.size() >= 2) {
-        nextRange();
-        
-        // if the new upper bound is before the term parameter, the sub-range is never a hit
-        if (term != null && term.compareTo(currentUpperBound) > 0)
-          continue;
-        // never seek backwards, so use current term if lower bound is smaller
-        return (term != null && term.compareTo(currentLowerBound) > 0) ?
-          term : currentLowerBound;
-      }
-      
-      // no more sub-range enums available
-      assert rangeBounds.isEmpty();
-      currentLowerBound = currentUpperBound = null;
-      return null;
-    }
-    
-    @Override
-    protected final AcceptStatus accept(BytesRef term) {
-      while (currentUpperBound == null || term.compareTo(currentUpperBound) > 0) {
-        if (rangeBounds.isEmpty())
-          return AcceptStatus.END;
-        // peek next sub-range, only seek if the current term is smaller than next lower bound
-        if (term.compareTo(rangeBounds.getFirst()) < 0)
-          return AcceptStatus.NO_AND_SEEK;
-        // step forward to next range without seeking, as the next lower range bound is less than or equal to the current term
-        nextRange();
-      }
-      return AcceptStatus.YES;
-    }
-
-  }
-  
-}
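
The term-count arithmetic quoted in the javadoc above is easy to sanity-check. This small standalone sketch (not part of the patch; class and method names are invented) just re-evaluates the two formulas, indexedTermsPerValue = ceil(bitsPerValue / precisionStep) and maxQueryTerms = ((indexedTermsPerValue - 1) * (2^precisionStep - 1) * 2) + (2^precisionStep - 1), and reproduces the quoted 465 and 189 for 64-bit values:

    public class PrecisionStepMath {
      /** ceil(bitsPerValue / precisionStep), done with integer math. */
      static long indexedTermsPerValue(int bitsPerValue, int precisionStep) {
        return (bitsPerValue + precisionStep - 1) / precisionStep;
      }

      /** ((indexedTermsPerValue - 1) * (2^precisionStep - 1) * 2) + (2^precisionStep - 1) */
      static long maxQueryTerms(int bitsPerValue, int precisionStep) {
        long perValue = indexedTermsPerValue(bitsPerValue, precisionStep);
        long bracket = (1L << precisionStep) - 1;   // the (2^precisionStep - 1) factor from the formula above
        return (perValue - 1) * bracket * 2 + bracket;
      }

      public static void main(String[] args) {
        System.out.println(maxQueryTerms(64, 4));   // 15*15*2 + 15 = 465
        System.out.println(maxQueryTerms(64, 2));   // 31*3*2 + 3  = 189
      }
    }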

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/java/org/apache/lucene/util/LegacyNumericUtils.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/LegacyNumericUtils.java b/lucene/core/src/java/org/apache/lucene/util/LegacyNumericUtils.java
deleted file mode 100644
index 9a26bfa..0000000
--- a/lucene/core/src/java/org/apache/lucene/util/LegacyNumericUtils.java
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.util;
-
-
-import java.io.IOException;
-
-import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.FilteredTermsEnum;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-
-/**
- * This is a helper class to generate prefix-encoded representations for numerical values
- * and supplies converters to represent float/double values as sortable integers/longs.
- *
- * <p>To quickly execute range queries in Apache Lucene, a range is divided recursively
- * into multiple intervals for searching: The center of the range is searched only with
- * the lowest possible precision in the trie, while the boundaries are matched
- * more exactly. This reduces the number of terms dramatically.
- *
- * <p>This class generates terms to achieve this: First the numerical integer values need to
- * be converted to bytes. For that, the integer values (32 bit or 64 bit) are made unsigned
- * and encoded 7 bits per byte, keeping each byte in the ASCII range. The resulting byte[] is
- * sortable like the original integer value (even using UTF-8 sort order). Each value is also
- * prefixed (in the first char) by the <code>shift</code> value (number of bits removed) used
- * during encoding.
- *
- * <p>For easy usage, the trie algorithm is implemented for indexing inside
- * {@link org.apache.lucene.analysis.LegacyNumericTokenStream} that can index <code>int</code>, <code>long</code>,
- * <code>float</code>, and <code>double</code>. For querying,
- * {@link org.apache.lucene.search.LegacyNumericRangeQuery} implements the query part
- * for the same data types.
- *
- * @lucene.internal
- *
- * @deprecated Please use {@link org.apache.lucene.index.PointValues} instead.
- *
- * @since 2.9, API changed non backwards-compliant in 4.0
- */
-
-@Deprecated
-public final class LegacyNumericUtils {
-
-  private LegacyNumericUtils() {} // no instance!
-  
-  /**
-   * The default precision step used by {@link org.apache.lucene.document.LegacyLongField},
-   * {@link org.apache.lucene.document.LegacyDoubleField}, {@link org.apache.lucene.analysis.LegacyNumericTokenStream}, {@link
-   * org.apache.lucene.search.LegacyNumericRangeQuery}.
-   */
-  public static final int PRECISION_STEP_DEFAULT = 16;
-  
-  /**
-   * The default precision step used by {@link org.apache.lucene.document.LegacyIntField} and
-   * {@link org.apache.lucene.document.LegacyFloatField}.
-   */
-  public static final int PRECISION_STEP_DEFAULT_32 = 8;
-  
-  /**
-   * Longs are stored at lower precision by shifting off lower bits. The shift count is
-   * stored as <code>SHIFT_START_LONG+shift</code> in the first byte
-   */
-  public static final byte SHIFT_START_LONG = 0x20;
-
-  /**
-   * The maximum term length (used for <code>byte[]</code> buffer size)
-   * for encoding <code>long</code> values.
-   * @see #longToPrefixCoded
-   */
-  public static final int BUF_SIZE_LONG = 63/7 + 2;
-
-  /**
-   * Integers are stored at lower precision by shifting off lower bits. The shift count is
-   * stored as <code>SHIFT_START_INT+shift</code> in the first byte
-   */
-  public static final byte SHIFT_START_INT  = 0x60;
-
-  /**
-   * The maximum term length (used for <code>byte[]</code> buffer size)
-   * for encoding <code>int</code> values.
-   * @see #intToPrefixCoded
-   */
-  public static final int BUF_SIZE_INT = 31/7 + 2;
-
-  /**
-   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-   * This method is used by {@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
-   * After encoding, {@code bytes.offset} will always be 0. 
-   * @param val the numeric value
-   * @param shift how many bits to strip from the right
-   * @param bytes will contain the encoded value
-   */
-  public static void longToPrefixCoded(final long val, final int shift, final BytesRefBuilder bytes) {
-    // ensure shift is 0..63
-    if ((shift & ~0x3f) != 0) {
-      throw new IllegalArgumentException("Illegal shift value, must be 0..63; got shift=" + shift);
-    }
-    int nChars = (((63-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
-    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
-    bytes.grow(BUF_SIZE_LONG);
-    bytes.setByteAt(0, (byte)(SHIFT_START_LONG + shift));
-    long sortableBits = val ^ 0x8000000000000000L;
-    sortableBits >>>= shift;
-    while (nChars > 0) {
-      // Store 7 bits per byte for compatibility
-      // with UTF-8 encoding of terms
-      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
-      sortableBits >>>= 7;
-    }
-  }
-
-  /**
-   * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-   * This method is used by {@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
-   * After encoding, {@code bytes.offset} will always be 0.
-   * @param val the numeric value
-   * @param shift how many bits to strip from the right
-   * @param bytes will contain the encoded value
-   */
-  public static void intToPrefixCoded(final int val, final int shift, final BytesRefBuilder bytes) {
-    // ensure shift is 0..31
-    if ((shift & ~0x1f) != 0) {
-      throw new IllegalArgumentException("Illegal shift value, must be 0..31; got shift=" + shift);
-    }
-    int nChars = (((31-shift)*37)>>8) + 1;    // i/7 is the same as (i*37)>>8 for i in 0..63
-    bytes.setLength(nChars+1);   // one extra for the byte that contains the shift info
-    bytes.grow(LegacyNumericUtils.BUF_SIZE_LONG);  // use the max
-    bytes.setByteAt(0, (byte)(SHIFT_START_INT + shift));
-    int sortableBits = val ^ 0x80000000;
-    sortableBits >>>= shift;
-    while (nChars > 0) {
-      // Store 7 bits per byte for compatibility
-      // with UTF-8 encoding of terms
-      bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
-      sortableBits >>>= 7;
-    }
-  }
-
-
-  /**
-   * Returns the shift value from a prefix encoded {@code long}.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   */
-  public static int getPrefixCodedLongShift(final BytesRef val) {
-    final int shift = val.bytes[val.offset] - SHIFT_START_LONG;
-    if (shift > 63 || shift < 0)
-      throw new NumberFormatException("Invalid shift value (" + shift + ") in prefixCoded bytes (is encoded value really a LONG?)");
-    return shift;
-  }
-
-  /**
-   * Returns the shift value from a prefix encoded {@code int}.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   */
-  public static int getPrefixCodedIntShift(final BytesRef val) {
-    final int shift = val.bytes[val.offset] - SHIFT_START_INT;
-    if (shift > 31 || shift < 0)
-      throw new NumberFormatException("Invalid shift value in prefixCoded bytes (is encoded value really an INT?)");
-    return shift;
-  }
-
-  /**
-   * Returns a long from prefixCoded bytes.
-   * Rightmost bits will be zero for lower precision codes.
-   * This method can be used to decode a term's value.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   * @see #longToPrefixCoded
-   */
-  public static long prefixCodedToLong(final BytesRef val) {
-    long sortableBits = 0L;
-    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
-      sortableBits <<= 7;
-      final byte b = val.bytes[i];
-      if (b < 0) {
-        throw new NumberFormatException(
-          "Invalid prefixCoded numerical value representation (byte "+
-          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
-        );
-      }
-      sortableBits |= b;
-    }
-    return (sortableBits << getPrefixCodedLongShift(val)) ^ 0x8000000000000000L;
-  }
-
-  /**
-   * Returns an int from prefixCoded bytes.
-   * Rightmost bits will be zero for lower precision codes.
-   * This method can be used to decode a term's value.
-   * @throws NumberFormatException if the supplied {@link BytesRef} is
-   * not correctly prefix encoded.
-   * @see #intToPrefixCoded
-   */
-  public static int prefixCodedToInt(final BytesRef val) {
-    int sortableBits = 0;
-    for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
-      sortableBits <<= 7;
-      final byte b = val.bytes[i];
-      if (b < 0) {
-        throw new NumberFormatException(
-          "Invalid prefixCoded numerical value representation (byte "+
-          Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
-        );
-      }
-      sortableBits |= b;
-    }
-    return (sortableBits << getPrefixCodedIntShift(val)) ^ 0x80000000;
-  }
-
-  /**
-   * Splits a long range recursively.
-   * You may implement a builder that adds clauses to a
-   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
-   * {@link LongRangeBuilder#addRange(BytesRef,BytesRef)}
-   * method.
-   * <p>This method is used by {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
-   */
-  public static void splitLongRange(final LongRangeBuilder builder,
-    final int precisionStep,  final long minBound, final long maxBound
-  ) {
-    splitRange(builder, 64, precisionStep, minBound, maxBound);
-  }
-  
-  /**
-   * Splits an int range recursively.
-   * You may implement a builder that adds clauses to a
-   * {@link org.apache.lucene.search.BooleanQuery} for each call to its
-   * {@link IntRangeBuilder#addRange(BytesRef,BytesRef)}
-   * method.
-   * <p>This method is used by {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
-   */
-  public static void splitIntRange(final IntRangeBuilder builder,
-    final int precisionStep,  final int minBound, final int maxBound
-  ) {
-    splitRange(builder, 32, precisionStep, minBound, maxBound);
-  }
-  
-  /** This helper does the splitting for both 32 and 64 bit. */
-  private static void splitRange(
-    final Object builder, final int valSize,
-    final int precisionStep, long minBound, long maxBound
-  ) {
-    if (precisionStep < 1)
-      throw new IllegalArgumentException("precisionStep must be >=1");
-    if (minBound > maxBound) return;
-    for (int shift=0; ; shift += precisionStep) {
-      // calculate new bounds for inner precision
-      final long diff = 1L << (shift+precisionStep),
-        mask = ((1L<<precisionStep) - 1L) << shift;
-      final boolean
-        hasLower = (minBound & mask) != 0L,
-        hasUpper = (maxBound & mask) != mask;
-      final long
-        nextMinBound = (hasLower ? (minBound + diff) : minBound) & ~mask,
-        nextMaxBound = (hasUpper ? (maxBound - diff) : maxBound) & ~mask;
-      final boolean
-        lowerWrapped = nextMinBound < minBound,
-        upperWrapped = nextMaxBound > maxBound;
-      
-      if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) {
-        // We are in the lowest precision or the next precision is not available.
-        addRange(builder, valSize, minBound, maxBound, shift);
-        // exit the split recursion loop
-        break;
-      }
-      
-      if (hasLower)
-        addRange(builder, valSize, minBound, minBound | mask, shift);
-      if (hasUpper)
-        addRange(builder, valSize, maxBound & ~mask, maxBound, shift);
-      
-      // recurse to next precision
-      minBound = nextMinBound;
-      maxBound = nextMaxBound;
-    }
-  }
-  
-  /** Helper that delegates to correct range builder */
-  private static void addRange(
-    final Object builder, final int valSize,
-    long minBound, long maxBound,
-    final int shift
-  ) {
-    // for the max bound set all lower bits (that were shifted away):
-    // this is important for testing or other usages of the split range
-    // (e.g. to reconstruct the full range). The prefixEncoding will remove
-    // the bits anyway, so they do not hurt!
-    maxBound |= (1L << shift) - 1L;
-    // delegate to correct range builder
-    switch(valSize) {
-      case 64:
-        ((LongRangeBuilder)builder).addRange(minBound, maxBound, shift);
-        break;
-      case 32:
-        ((IntRangeBuilder)builder).addRange((int)minBound, (int)maxBound, shift);
-        break;
-      default:
-        // Should not happen!
-        throw new IllegalArgumentException("valSize must be 32 or 64.");
-    }
-  }
-
-  /**
-   * Callback for {@link #splitLongRange}.
-   * You need to override only one of the methods.
-   * @lucene.internal
-   * @since 2.9, API changed non backwards-compliant in 4.0
-   */
-  public static abstract class LongRangeBuilder {
-    
-    /**
-     * Override this method if you want to receive the already prefix encoded range bounds.
-     * You can directly build classical (inclusive) range queries from them.
-     */
-    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-      throw new UnsupportedOperationException();
-    }
-    
-    /**
-     * Override this method if you want to receive the raw long range bounds.
-     * You can use this for e.g. debugging purposes (print out range bounds).
-     */
-    public void addRange(final long min, final long max, final int shift) {
-      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
-      longToPrefixCoded(min, shift, minBytes);
-      longToPrefixCoded(max, shift, maxBytes);
-      addRange(minBytes.get(), maxBytes.get());
-    }
-  
-  }
-  
-  /**
-   * Callback for {@link #splitIntRange}.
-   * You need to override only one of the methods.
-   * @lucene.internal
-   * @since 2.9, API changed non backwards-compliant in 4.0
-   */
-  public static abstract class IntRangeBuilder {
-    
-    /**
-     * Override this method if you want to receive the already prefix encoded range bounds.
-     * You can directly build classical range (inclusive) queries from them.
-     */
-    public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
-      throw new UnsupportedOperationException();
-    }
-    
-    /**
-     * Override this method if you want to receive the raw int range bounds.
-     * You can use this for e.g. debugging purposes (print out range bounds).
-     */
-    public void addRange(final int min, final int max, final int shift) {
-      final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
-      intToPrefixCoded(min, shift, minBytes);
-      intToPrefixCoded(max, shift, maxBytes);
-      addRange(minBytes.get(), maxBytes.get());
-    }
-  
-  }
-  
-  /**
-   * Filters the given {@link TermsEnum} by accepting only prefix coded 64 bit
-   * terms with a shift value of <tt>0</tt>.
-   * 
-   * @param termsEnum
-   *          the terms enum to filter
-   * @return a filtered {@link TermsEnum} that only returns prefix coded 64 bit
-   *         terms with a shift value of <tt>0</tt>.
-   */
-  public static TermsEnum filterPrefixCodedLongs(TermsEnum termsEnum) {
-    return new SeekingNumericFilteredTermsEnum(termsEnum) {
-
-      @Override
-      protected AcceptStatus accept(BytesRef term) {
-        return LegacyNumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
-      }
-    };
-  }
-
-  /**
-   * Filters the given {@link TermsEnum} by accepting only prefix coded 32 bit
-   * terms with a shift value of <tt>0</tt>.
-   * 
-   * @param termsEnum
-   *          the terms enum to filter
-   * @return a filtered {@link TermsEnum} that only returns prefix coded 32 bit
-   *         terms with a shift value of <tt>0</tt>.
-   */
-  public static TermsEnum filterPrefixCodedInts(TermsEnum termsEnum) {
-    return new SeekingNumericFilteredTermsEnum(termsEnum) {
-      
-      @Override
-      protected AcceptStatus accept(BytesRef term) {
-        return LegacyNumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
-      }
-    };
-  }
-
-  /** Just like FilteredTermsEnum, except it adds a limited
-   *  seekCeil implementation that only works with {@link
-   *  #filterPrefixCodedInts} and {@link
-   *  #filterPrefixCodedLongs}. */
-  private static abstract class SeekingNumericFilteredTermsEnum extends FilteredTermsEnum {
-    public SeekingNumericFilteredTermsEnum(final TermsEnum tenum) {
-      super(tenum, false);
-    }
-
-    @Override
-    @SuppressWarnings("fallthrough")
-    public SeekStatus seekCeil(BytesRef term) throws IOException {
-
-      // NOTE: This is not general!!  It only handles YES
-      // and END, because that's all we need for the numeric
-      // case here
-
-      SeekStatus status = tenum.seekCeil(term);
-      if (status == SeekStatus.END) {
-        return SeekStatus.END;
-      }
-
-      actualTerm = tenum.term();
-
-      if (accept(actualTerm) == AcceptStatus.YES) {
-        return status;
-      } else {
-        return SeekStatus.END;
-      }
-    }
-  }
-
-  private static Terms intTerms(Terms terms) {
-    return new FilterLeafReader.FilterTerms(terms) {
-        @Override
-        public TermsEnum iterator() throws IOException {
-          return filterPrefixCodedInts(in.iterator());
-        }
-      };
-  }
-
-  private static Terms longTerms(Terms terms) {
-    return new FilterLeafReader.FilterTerms(terms) {
-        @Override
-        public TermsEnum iterator() throws IOException {
-          return filterPrefixCodedLongs(in.iterator());
-        }
-      };
-  }
-    
-  /**
-   * Returns the minimum int value indexed into this
-   * numeric field or null if no terms exist.
-   */
-  public static Integer getMinInt(Terms terms) throws IOException {
-    // All shift=0 terms are sorted first, so we don't need
-    // to filter the incoming terms; we can just get the
-    // min:
-    BytesRef min = terms.getMin();
-    return (min != null) ? LegacyNumericUtils.prefixCodedToInt(min) : null;
-  }
-
-  /**
-   * Returns the maximum int value indexed into this
-   * numeric field or null if no terms exist.
-   */
-  public static Integer getMaxInt(Terms terms) throws IOException {
-    BytesRef max = intTerms(terms).getMax();
-    return (max != null) ? LegacyNumericUtils.prefixCodedToInt(max) : null;
-  }
-
-  /**
-   * Returns the minimum long value indexed into this
-   * numeric field or null if no terms exist.
-   */
-  public static Long getMinLong(Terms terms) throws IOException {
-    // All shift=0 terms are sorted first, so we don't need
-    // to filter the incoming terms; we can just get the
-    // min:
-    BytesRef min = terms.getMin();
-    return (min != null) ? LegacyNumericUtils.prefixCodedToLong(min) : null;
-  }
-
-  /**
-   * Returns the maximum long value indexed into this
-   * numeric field or null if no terms exist.
-   */
-  public static Long getMaxLong(Terms terms) throws IOException {
-    BytesRef max = longTerms(terms).getMax();
-    return (max != null) ? LegacyNumericUtils.prefixCodedToLong(max) : null;
-  }
-  
-}
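For context on the range-builder callback removed above: these builders are normally driven by the splitIntRange/splitLongRange helpers on the same class. Below is a minimal sketch of overriding the raw-bounds variant, assuming LegacyNumericUtils keeps this pre-move API under its new org.apache.lucene.legacy package; the precision step and bounds are illustrative only.

import org.apache.lucene.legacy.LegacyNumericUtils;

final class PrintIntRanges {
  public static void main(String[] args) {
    // Split the value range [100..100000] into its prefix-coded sub-ranges.
    // Overriding the raw-bounds variant of addRange() means the BytesRef
    // variant (which throws by default) is never invoked.
    LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
      @Override
      public void addRange(int min, int max, int shift) {
        // Invoked once per prefix-coded sub-range with the raw bounds.
        System.out.println("shift=" + shift + " [" + min + ".." + max + "]");
      }
    }, 8, 100, 100000);  // precisionStep=8 is illustrative
  }
}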
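The getMin*/getMax* helpers removed here read the smallest and largest indexed values straight from the term dictionary, using the seek-limited filtered enums above for the max case. A small usage sketch, assuming these signatures survive the move to org.apache.lucene.legacy unchanged; the reader and the "price" field name are placeholders.

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.legacy.LegacyNumericUtils;

final class LegacyMinMaxExample {
  /** Prints the min/max of a legacy long field; the helpers return null if no terms exist. */
  static void printMinMax(IndexReader reader, String field) throws IOException {
    Terms terms = MultiFields.getTerms(reader, field);
    if (terms == null) {
      return; // field is absent from the index
    }
    Long min = LegacyNumericUtils.getMinLong(terms);
    Long max = LegacyNumericUtils.getMaxLong(terms);
    System.out.println(field + ": min=" + min + " max=" + max);
  }
}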

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java b/lucene/core/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
deleted file mode 100644
index dfaa20e..0000000
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.analysis;
-
-
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LegacyNumericUtils;
-import org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttributeImpl;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.analysis.tokenattributes.TestCharTermAttributeImpl;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl;
-
-@Deprecated
-public class TestNumericTokenStream extends BaseTokenStreamTestCase {
-
-  final long lvalue = random().nextLong();
-  final int ivalue = random().nextInt();
-
-  public void testLongStream() throws Exception {
-    @SuppressWarnings("resource")
-    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setLongValue(lvalue);
-    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
-    assertNotNull(bytesAtt);
-    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
-    assertNotNull(typeAtt);
-    final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
-    assertNotNull(numericAtt);
-    stream.reset();
-    assertEquals(64, numericAtt.getValueSize());
-    for (int shift=0; shift<64; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
-      assertTrue("New token is available", stream.incrementToken());
-      assertEquals("Shift value wrong", shift, numericAtt.getShift());
-      assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), LegacyNumericUtils.prefixCodedToLong(bytesAtt.getBytesRef()));
-      assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
-      assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
-    }
-    assertFalse("More tokens available", stream.incrementToken());
-    stream.end();
-    stream.close();
-  }
-
-  public void testIntStream() throws Exception {
-    @SuppressWarnings("resource")
-    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setIntValue(ivalue);
-    final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
-    assertNotNull(bytesAtt);
-    final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
-    assertNotNull(typeAtt);
-    final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
-    assertNotNull(numericAtt);
-    stream.reset();
-    assertEquals(32, numericAtt.getValueSize());
-    for (int shift=0; shift<32; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
-      assertTrue("New token is available", stream.incrementToken());
-      assertEquals("Shift value wrong", shift, numericAtt.getShift());
-      assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), LegacyNumericUtils.prefixCodedToInt(bytesAtt.getBytesRef()));
-      assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
-      assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
-    }
-    assertFalse("More tokens available", stream.incrementToken());
-    stream.end();
-    stream.close();
-  }
-  
-  public void testNotInitialized() throws Exception {
-    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
-    
-    expectThrows(IllegalStateException.class, () -> {
-      stream.reset();
-    });
-
-    expectThrows(IllegalStateException.class, () -> {
-      stream.incrementToken();
-    });
-    
-    stream.close();
-  }
-  
-  public static interface TestAttribute extends CharTermAttribute {}
-  public static class TestAttributeImpl extends CharTermAttributeImpl implements TestAttribute {}
-  
-  public void testCTA() throws Exception {
-    final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
-    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
-      stream.addAttribute(CharTermAttribute.class);
-    });
-    assertTrue(e.getMessage().startsWith("LegacyNumericTokenStream does not support"));
-
-    e = expectThrows(IllegalArgumentException.class, () -> {
-      stream.addAttribute(TestAttribute.class);
-    });
-    assertTrue(e.getMessage().startsWith("LegacyNumericTokenStream does not support"));
-    stream.close();
-  }
-  
-  /** LUCENE-7027 */
-  public void testCaptureStateAfterExhausted() throws Exception {
-    // default precstep
-    try (LegacyNumericTokenStream stream=new LegacyNumericTokenStream()) {
-      // int
-      stream.setIntValue(ivalue);
-      stream.reset();
-      while (stream.incrementToken());
-      stream.captureState();
-      stream.end();
-      stream.captureState();
-      // long
-      stream.setLongValue(lvalue);
-      stream.reset();
-      while (stream.incrementToken());
-      stream.captureState();
-      stream.end();
-      stream.captureState();
-    }
-    // huge precstep
-    try (LegacyNumericTokenStream stream=new LegacyNumericTokenStream(Integer.MAX_VALUE)) {
-      // int
-      stream.setIntValue(ivalue);
-      stream.reset();
-      while (stream.incrementToken());
-      stream.captureState();
-      stream.end();
-      stream.captureState();
-      // long
-      stream.setLongValue(lvalue);
-      stream.reset();
-      while (stream.incrementToken());
-      stream.captureState();
-      stream.end();
-      stream.captureState();
-    }
-  }
-  
-  public void testAttributeClone() throws Exception {
-    LegacyNumericTermAttributeImpl att = new LegacyNumericTermAttributeImpl();
-    att.init(lvalue, 64, 8, 0); // set some value, to make getBytesRef() work
-    LegacyNumericTermAttributeImpl copy = TestCharTermAttributeImpl.assertCloneIsEqual(att);
-    assertNotSame(att.getBytesRef(), copy.getBytesRef());
-    LegacyNumericTermAttributeImpl copy2 = TestCharTermAttributeImpl.assertCopyIsEqual(att);
-    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
-    
-    // LUCENE-7027 test
-    att.init(lvalue, 64, 8, 64); // Exhausted TokenStream -> should return empty BytesRef
-    assertEquals(new BytesRef(), att.getBytesRef());
-    copy = TestCharTermAttributeImpl.assertCloneIsEqual(att);
-    assertEquals(new BytesRef(), copy.getBytesRef());
-    assertNotSame(att.getBytesRef(), copy.getBytesRef());
-    copy2 = TestCharTermAttributeImpl.assertCopyIsEqual(att);
-    assertEquals(new BytesRef(), copy2.getBytesRef());
-    assertNotSame(att.getBytesRef(), copy2.getBytesRef());
-  }
-  
-}
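The deleted test exercised the trie-encoding token stream behind the legacy numeric fields; that machinery now lives outside core. New code is expected to index numerics with the points API instead, which produces no token stream at all. A brief sketch, with hypothetical field names:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LongPoint;

final class PointsInsteadOfNumericTokens {
  static Document buildDoc(int count, long timestamp) {
    Document doc = new Document();
    // Points are encoded dimensionally at index time; no per-precision-step tokens are produced.
    doc.add(new IntPoint("count", count));
    doc.add(new LongPoint("timestamp", timestamp));
    return doc;
  }
}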

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/document/TestField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestField.java b/lucene/core/src/test/org/apache/lucene/document/TestField.java
index 92d6a83..4ef7ffb 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestField.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestField.java
@@ -79,29 +79,7 @@ public class TestField extends LuceneTestCase {
     assertEquals("DoublePoint <foo:6.0,7.0>", field.toString());
   }
   
-  public void testLegacyDoubleField() throws Exception {
-    Field fields[] = new Field[] {
-        new LegacyDoubleField("foo", 5d, Field.Store.NO),
-        new LegacyDoubleField("foo", 5d, Field.Store.YES)
-    };
 
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      field.setDoubleValue(6d); // ok
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-    
-      assertEquals(6d, field.numericValue().doubleValue(), 0.0d);
-    }
-  }
   
   public void testDoubleDocValuesField() throws Exception {
     DoubleDocValuesField field = new DoubleDocValuesField("foo", 5d);
@@ -185,30 +163,6 @@ public class TestField extends LuceneTestCase {
     assertEquals("FloatPoint <foo:6.0,7.0>", field.toString());
   }
   
-  public void testLegacyFloatField() throws Exception {
-    Field fields[] = new Field[] {
-        new LegacyFloatField("foo", 5f, Field.Store.NO),
-        new LegacyFloatField("foo", 5f, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      field.setFloatValue(6f); // ok
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6f, field.numericValue().floatValue(), 0.0f);
-    }
-  }
-  
   public void testIntPoint() throws Exception {
     Field field = new IntPoint("foo", 5);
 
@@ -253,30 +207,6 @@ public class TestField extends LuceneTestCase {
     assertEquals("IntPoint <foo:6,7>", field.toString());
   }
   
-  public void testLegacyIntField() throws Exception {
-    Field fields[] = new Field[] {
-        new LegacyIntField("foo", 5, Field.Store.NO),
-        new LegacyIntField("foo", 5, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      field.setIntValue(6); // ok
-      trySetFloatValue(field);
-      trySetLongValue(field);
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6, field.numericValue().intValue());
-    }
-  }
-  
   public void testNumericDocValuesField() throws Exception {
     NumericDocValuesField field = new NumericDocValuesField("foo", 5L);
 
@@ -340,30 +270,6 @@ public class TestField extends LuceneTestCase {
     assertEquals("LongPoint <foo:6,7>", field.toString());
   }
   
-  public void testLegacyLongField() throws Exception {
-    Field fields[] = new Field[] {
-        new LegacyLongField("foo", 5L, Field.Store.NO),
-        new LegacyLongField("foo", 5L, Field.Store.YES)
-    };
-
-    for (Field field : fields) {
-      trySetBoost(field);
-      trySetByteValue(field);
-      trySetBytesValue(field);
-      trySetBytesRefValue(field);
-      trySetDoubleValue(field);
-      trySetIntValue(field);
-      trySetFloatValue(field);
-      field.setLongValue(6); // ok
-      trySetReaderValue(field);
-      trySetShortValue(field);
-      trySetStringValue(field);
-      trySetTokenStreamValue(field);
-      
-      assertEquals(6L, field.numericValue().longValue());
-    }
-  }
-  
   public void testSortedBytesDocValuesField() throws Exception {
     SortedDocValuesField field = new SortedDocValuesField("foo", new BytesRef("bar"));
 

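With the legacy numeric field tests dropped, TestField.java now covers only the point-based types. For new code, the usual stand-in for something like LegacyLongField with Field.Store.YES is a point field paired with a stored field (and a doc-values field if sorting is needed). A minimal sketch; the "price" field name is a placeholder.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StoredField;

final class LongFieldReplacement {
  static Document priceDoc(long price) {
    Document doc = new Document();
    doc.add(new LongPoint("price", price));              // exact and range queries
    doc.add(new StoredField("price", price));            // retrieval of the stored value
    doc.add(new NumericDocValuesField("price", price));  // sorting on the field
    return doc;
  }
}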
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/105c7eae/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java b/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
index da76f40..9214cb9 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestFieldType.java
@@ -18,7 +18,6 @@ package org.apache.lucene.document;
 
 
 import java.lang.reflect.Method;
-import org.apache.lucene.document.FieldType.LegacyNumericType;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.PointValues;
@@ -58,14 +57,6 @@ public class TestFieldType extends LuceneTestCase {
     ft7.setOmitNorms(true);
     assertFalse(ft7.equals(ft));
     
-    FieldType ft8 = new FieldType();
-    ft8.setNumericType(LegacyNumericType.DOUBLE);
-    assertFalse(ft8.equals(ft));
-    
-    FieldType ft9 = new FieldType();
-    ft9.setNumericPrecisionStep(3);
-    assertFalse(ft9.equals(ft));
-    
     FieldType ft10 = new FieldType();
     ft10.setStoreTermVectors(true);
     assertFalse(ft10.equals(ft));