Posted to commits@lucene.apache.org by mi...@apache.org on 2014/11/27 12:34:48 UTC

svn commit: r1642110 [6/12] - in /lucene/dev/branches/lucene6005/lucene: analysis/uima/src/test/org/apache/lucene/analysis/uima/ backward-codecs/src/test/org/apache/lucene/index/ benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ benchmark/sr...

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java?rev=1642110&r1=1642109&r2=1642110&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java Thu Nov 27 11:34:43 2014
@@ -28,9 +28,11 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.analysis.MockTokenFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Token;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
@@ -60,15 +62,12 @@ public class FastVectorHighlighterTest e
   public void testSimpleHighlightTest() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
-    Field field = new Field("field", "This is a test where foo is highlighed and should be highlighted", type);
-    
-    doc.add(field);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorPositions("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    Document2 doc = writer.newDocument();
+    doc.addLargeText("field", "This is a test where foo is highlighed and should be highlighted");
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     
@@ -90,15 +89,13 @@ public class FastVectorHighlighterTest e
   public void testPhraseHighlightLongTextTest() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
-    Field text = new Field("text", 
-        "Netscape was the general name for a series of web browsers originally produced by Netscape Communications Corporation, now a subsidiary of AOL The original browser was once the dominant browser in terms of usage share, but as a result of the first browser war it lost virtually all of its share to Internet Explorer Netscape was discontinued and support for all Netscape browsers and client products was terminated on March 1, 2008 Netscape Navigator was the name of Netscape\u0027s web browser from versions 1.0 through 4.8 The first beta release versions of the browser were released in 1994 and known as Mosaic and then Mosaic Netscape until a legal challenge from the National Center for Supercomputing Applications (makers of NCSA Mosaic, which many of Netscape\u0027s founders used to develop), led to the name change to Netscape Navigator The company\u0027s name also changed from Mosaic Communications Corporation to Netscape Communications Corporation The browser was easily the 
 most advanced...", type);
-    doc.add(text);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("text");
+    fieldTypes.enableTermVectorPositions("text");
+    fieldTypes.enableTermVectorOffsets("text");
+    Document2 doc = writer.newDocument();
+    doc.addLargeText("text", 
+        "Netscape was the general name for a series of web browsers originally produced by Netscape Communications Corporation, now a subsidiary of AOL The original browser was once the dominant browser in terms of usage share, but as a result of the first browser war it lost virtually all of its share to Internet Explorer Netscape was discontinued and support for all Netscape browsers and client products was terminated on March 1, 2008 Netscape Navigator was the name of Netscape\u0027s web browser from versions 1.0 through 4.8 The first beta release versions of the browser were released in 1994 and known as Mosaic and then Mosaic Netscape until a legal challenge from the National Center for Supercomputing Applications (makers of NCSA Mosaic, which many of Netscape\u0027s founders used to develop), led to the name change to Netscape Navigator The company\u0027s name also changed from Mosaic Communications Corporation to Netscape Communications Corporation The browser was easily the 
 most advanced...");
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     IndexReader reader = DirectoryReader.open(writer, true);
@@ -136,17 +133,15 @@ public class FastVectorHighlighterTest e
   public void testPhraseHighlightTest() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
-    Field longTermField = new Field("long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted", type);
-    Field noLongTermField = new Field("no_long_term", "This is a test where foo is highlighed and should be highlighted", type);
-
-    doc.add(longTermField);
-    doc.add(noLongTermField);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"long_term", "no_long_term"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
+    Document2 doc = writer.newDocument();
+    doc.addLargeText("long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted");
+    doc.addLargeText("no_long_term", "This is a test where foo is highlighed and should be highlighted");
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     IndexReader reader = DirectoryReader.open(writer, true);
@@ -263,12 +258,13 @@ public class FastVectorHighlighterTest e
   public void testBoostedPhraseHighlightTest() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(new MockAnalyzer( random() ) ) );
-    Document doc = new Document();
-    FieldType type = new FieldType( TextField.TYPE_STORED  );
-    type.setStoreTermVectorOffsets( true );
-    type.setStoreTermVectorPositions( true );
-    type.setStoreTermVectors( true );
-    type.freeze();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"text"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
+    Document2 doc = writer.newDocument();
     StringBuilder text = new StringBuilder();
     text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk ");
     for ( int i = 0; i<10; i++ ) {
@@ -278,7 +274,7 @@ public class FastVectorHighlighterTest e
     for ( int i = 0; i<10; i++ ) {
       text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk ");
     }
-    doc.add( new Field( "text", text.toString().trim(), type ) );
+    doc.addLargeText("text", text.toString().trim() );
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     IndexReader reader = DirectoryReader.open(writer, true);
@@ -308,11 +304,12 @@ public class FastVectorHighlighterTest e
   public void testCommonTermsQueryHighlight() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)));
-    FieldType type = new FieldType(TextField.TYPE_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"field"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
     String[] texts = {
         "Hello this is a piece of text that is very long and contains too much preamble and the meat is really here which says kennedy has been shot",
         "This piece of text refers to Kennedy at the beginning then has a longer piece of text that is very long in the middle and finally ends with another reference to Kennedy",
@@ -320,9 +317,8 @@ public class FastVectorHighlighterTest e
         "This text has a typo in referring to Keneddy",
         "wordx wordy wordz wordx wordy wordx worda wordb wordy wordc", "y z x y z a b", "lets is a the lets is a the lets is a the lets" };
     for (int i = 0; i < texts.length; i++) {
-      Document doc = new Document();
-      Field field = new Field("field", texts[i], type);
-      doc.add(field);
+      Document2 doc = writer.newDocument();
+      doc.addLargeText("field", texts[i]);
       writer.addDocument(doc);
     }
     CommonTermsQuery query = new CommonTermsQuery(Occur.MUST, Occur.SHOULD, 2);
@@ -462,16 +458,18 @@ public class FastVectorHighlighterTest e
   public void testMultiValuedSortByScore() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(new MockAnalyzer( random() ) ) );
-    Document doc = new Document();
-    FieldType type = new FieldType( TextField.TYPE_STORED );
-    type.setStoreTermVectorOffsets( true );
-    type.setStoreTermVectorPositions( true );
-    type.setStoreTermVectors( true );
-    type.freeze();
-    doc.add( new Field( "field", "zero if naught", type ) ); // The first two fields contain the best match
-    doc.add( new Field( "field", "hero of legend", type ) ); // but total a lower score (3) than the bottom
-    doc.add( new Field( "field", "naught of hero", type ) ); // two fields (4)
-    doc.add( new Field( "field", "naught of hero", type ) );
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"field"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      fieldTypes.setMultiValued(fieldName);
+    }
+    Document2 doc = writer.newDocument();
+    doc.addLargeText( "field", "zero if naught"); // The first two fields contain the best match
+    doc.addLargeText( "field", "hero of legend" ); // but total a lower score (3) than the bottom
+    doc.addLargeText( "field", "naught of hero" ); // two fields (4)
+    doc.addLargeText( "field", "naught of hero" );
     writer.addDocument(doc);
 
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
@@ -510,12 +508,14 @@ public class FastVectorHighlighterTest e
   public void testBooleanPhraseWithSynonym() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType type = new FieldType(TextField.TYPE_NOT_STORED);
-    type.setStoreTermVectorOffsets(true);
-    type.setStoreTermVectorPositions(true);
-    type.setStoreTermVectors(true);
-    type.freeze();
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String fieldName : new String[] {"field"}) {
+      fieldTypes.disableHighlighting(fieldName);
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+    }
+    Document2 doc = writer.newDocument();
     Token syn = new Token("httpwwwfacebookcom", 6, 29);
     syn.setPositionIncrement(0);
     CannedTokenStream ts = new CannedTokenStream(
@@ -526,9 +526,7 @@ public class FastVectorHighlighterTest e
         new Token("facebook", 17, 25),
         new Token("com", 26, 29)
     );
-    Field field = new Field("field", ts, type);
-    doc.add(field);
-    doc.add(new StoredField("field", "Test: http://www.facebook.com"));
+    doc.addLargeText("field", "Test: http://www.facebook.com", ts, 1.0f);
     writer.addDocument(doc);
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
     
@@ -542,6 +540,7 @@ public class FastVectorHighlighterTest e
     pq.add(new Term("field", "www"));
     pq.add(new Term("field", "facebook"));
     pq.add(new Term("field", "com"));
+    TopDocs hits = newSearcher(reader).search(pq, 1);
     FieldQuery fieldQuery  = highlighter.getFieldQuery(pq, reader);
     String[] bestFragments = highlighter.getBestFragments(fieldQuery, reader, docId, "field", 54, 1);
     assertEquals("<b>Test: http://www.facebook.com</b>", bestFragments[0]);
@@ -575,29 +574,7 @@ public class FastVectorHighlighterTest e
   }
 
   private void matchedFieldsTestCase( boolean useMatchedFields, boolean fieldMatch, String fieldValue, String expected, Query... queryClauses ) throws IOException {
-    Document doc = new Document();
-    FieldType stored = new FieldType( TextField.TYPE_STORED );
-    stored.setStoreTermVectorOffsets( true );
-    stored.setStoreTermVectorPositions( true );
-    stored.setStoreTermVectors( true );
-    stored.freeze();
-    FieldType matched = new FieldType( TextField.TYPE_NOT_STORED );
-    matched.setStoreTermVectorOffsets( true );
-    matched.setStoreTermVectorPositions( true );
-    matched.setStoreTermVectors( true );
-    matched.freeze();
-    doc.add( new Field( "field", fieldValue, stored ) );               // Whitespace tokenized with English stop words
-    doc.add( new Field( "field_exact", fieldValue, matched ) );        // Whitespace tokenized without stop words
-    doc.add( new Field( "field_super_exact", fieldValue, matched ) );  // Whitespace tokenized without toLower
-    doc.add( new Field( "field_characters", fieldValue, matched ) );   // Each letter is a token
-    doc.add( new Field( "field_tripples", fieldValue, matched ) );     // Every three letters is a token
-    doc.add( new Field( "field_sliced", fieldValue.substring( 0,       // Sliced at 10 chars then analyzed just like field
-      Math.min( fieldValue.length() - 1 , 10 ) ), matched ) );
-    doc.add( new Field( "field_der_red", new CannedTokenStream(        // Hacky field containing "der" and "red" at pos = 0
-          token( "der", 1, 0, 3 ),
-          token( "red", 0, 0, 3 )
-        ), matched ) );
-
+    Directory dir = newDirectory();
     final Map<String, Analyzer> fieldAnalyzers = new TreeMap<>();
     fieldAnalyzers.put( "field", new MockAnalyzer( random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET ) );
     fieldAnalyzers.put( "field_exact", new MockAnalyzer( random() ) );
@@ -612,8 +589,31 @@ public class FastVectorHighlighterTest e
       }
     };
 
-    Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(analyzer));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.disableExistsFilters();
+    for(String fieldName : new String[] {"field", "field_exact", "field_super_exact", "field_characters", "field_tripples", "field_sliced", "field_der_red"}) {
+      fieldTypes.enableTermVectors(fieldName);
+      fieldTypes.enableTermVectorPositions(fieldName);
+      fieldTypes.enableTermVectorOffsets(fieldName);
+      fieldTypes.setMultiValued(fieldName);
+      if (fieldName.equals("field") == false) {
+        fieldTypes.disableStored(fieldName);
+      }
+    }
+    Document2 doc = writer.newDocument();
+    doc.addLargeText( "field", fieldValue );               // Whitespace tokenized with English stop words
+    doc.addLargeText( "field_exact", fieldValue );        // Whitespace tokenized without stop words
+    doc.addLargeText( "field_super_exact", fieldValue );  // Whitespace tokenized without toLower
+    doc.addLargeText( "field_characters", fieldValue );   // Each letter is a token
+    doc.addLargeText( "field_tripples", fieldValue );     // Every three letters is a token
+    doc.addLargeText( "field_sliced", fieldValue.substring( 0,       // Sliced at 10 chars then analyzed just like field
+      Math.min( fieldValue.length() - 1 , 10 ) ) );
+    doc.addLargeText( "field_der_red", new CannedTokenStream(        // Hacky field containing "der" and "red" at pos = 0
+          token( "der", 1, 0, 3 ),
+          token( "red", 0, 0, 3 )
+        ) );
+
     writer.addDocument( doc );
 
     FastVectorHighlighter highlighter = new FastVectorHighlighter();
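
Every hunk in this file follows the same migration: term-vector flags that were set per Field through a frozen FieldType become index-wide settings on the writer's FieldTypes, and documents are created by the writer itself so they pick those settings up. A condensed before/after sketch, using only the lucene6005-branch calls visible in this diff (Document2 and FieldTypes exist only on that branch):

    // old: configuration travels with each Field instance
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectors(true);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectorOffsets(true);
    type.freeze();
    Document doc = new Document();
    doc.add(new Field("field", "some text", type));
    writer.addDocument(doc);

    // new: configuration lives once in the index schema
    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.enableTermVectors("field");
    fieldTypes.enableTermVectorPositions("field");
    fieldTypes.enableTermVectorOffsets("field");
    Document2 doc2 = writer.newDocument();
    doc2.addLargeText("field", "some text");
    writer.addDocument(doc2);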

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java?rev=1642110&r1=1642109&r2=1642110&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java Thu Nov 27 11:34:43 2014
@@ -17,16 +17,25 @@ package org.apache.lucene.search.vectorh
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
@@ -37,13 +46,6 @@ import org.apache.lucene.search.highligh
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.TestUtil;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 public class SimpleFragmentsBuilderTest extends AbstractTestCase {
   
   public void test1TermIndex() throws Exception {
@@ -149,13 +151,13 @@ public class SimpleFragmentsBuilderTest 
   
   protected void makeUnstoredIndex() throws Exception {
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzerW).setOpenMode(OpenMode.CREATE));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorOffsets(true);
-    customType.setStoreTermVectorPositions(true);
-    doc.add( new Field( F, "aaa", customType) );
-    //doc.add( new Field( F, "aaa", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) );
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors(F);
+    fieldTypes.enableTermVectorOffsets(F);
+    fieldTypes.enableTermVectorPositions(F);
+    fieldTypes.disableStored(F);
+    Document2 doc = writer.newDocument();
+    doc.addLargeText(F, "aaa");
     writer.addDocument( doc );
     writer.close();
     if (reader != null) reader.close();
@@ -238,19 +240,20 @@ public class SimpleFragmentsBuilderTest 
         dir,
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
 
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorOffsets(true);
-    customType.setStoreTermVectorPositions(true);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors(F);
+    fieldTypes.enableTermVectorOffsets(F);
+    fieldTypes.enableTermVectorPositions(F);
+    fieldTypes.setMultiValued(F);
 
     int numDocs = randomValues.length * 5;
     int numFields = 2 + random().nextInt(5);
     int numTerms = 2 + random().nextInt(3);
     List<Doc> docs = new ArrayList<>(numDocs);
-    List<Document> documents = new ArrayList<>(numDocs);
+    List<Document2> documents = new ArrayList<>(numDocs);
     Map<String, Set<Integer>> valueToDocId = new HashMap<>();
     for (int i = 0; i < numDocs; i++) {
-      Document document = new Document();
+      Document2 document = writer.newDocument();
       String[][] fields = new String[numFields][numTerms];
       for (int j = 0; j < numFields; j++) {
         String[] fieldValues = new String[numTerms];
@@ -260,7 +263,7 @@ public class SimpleFragmentsBuilderTest 
           fieldValues[k] = getRandomValue(randomValues, valueToDocId, i);
           builder.append(' ').append(fieldValues[k]);
         }
-        document.add(new Field(F, builder.toString(), customType));
+        document.addLargeText(F, builder.toString());
         fields[j] = fieldValues;
       }
       docs.add(new Doc(fields));
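
Note the added fieldTypes.setMultiValued(F): the hunks here and in testMultiValuedSortByScore above suggest that with the branch API a document may add the same field more than once only after the schema marks it multi-valued, which this randomized test needs since it adds F once per generated field. A minimal sketch of that pattern, assuming the behavior implied by these hunks:

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setMultiValued(F);          // declare before adding twice
    Document2 doc = writer.newDocument();
    doc.addLargeText(F, "first value");
    doc.addLargeText(F, "second value");   // legal once multi-valued
    writer.addDocument(doc);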

Modified: lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java?rev=1642110&r1=1642109&r2=1642110&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java (original)
+++ lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java Thu Nov 27 11:34:43 2014
@@ -37,28 +37,27 @@ import org.apache.lucene.util.*;
 public class TestBlockJoin extends LuceneTestCase {
 
   // One resume...
-  private Document makeResume(String name, String country) {
-    Document resume = new Document();
-    resume.add(newStringField("docType", "resume", Field.Store.NO));
-    resume.add(newStringField("name", name, Field.Store.YES));
-    resume.add(newStringField("country", country, Field.Store.NO));
+  private Document2 makeResume(IndexWriter w, String name, String country) {
+    Document2 resume = w.newDocument();
+    resume.addAtom("docType", "resume");
+    resume.addAtom("name", name);
+    resume.addAtom("country", country);
     return resume;
   }
 
   // ... has multiple jobs
-  private Document makeJob(String skill, int year) {
-    Document job = new Document();
-    job.add(newStringField("skill", skill, Field.Store.YES));
-    job.add(new IntField("year", year, Field.Store.NO));
-    job.add(new StoredField("year", year));
+  private Document2 makeJob(IndexWriter w, String skill, int year) {
+    Document2 job = w.newDocument();
+    job.addAtom("skill", skill);
+    job.addInt("year", year);
     return job;
   }
 
   // ... has multiple qualifications
-  private Document makeQualification(String qualification, int year) {
-    Document job = new Document();
-    job.add(newStringField("qualification", qualification, Field.Store.YES));
-    job.add(new IntField("year", year, Field.Store.NO));
+  private Document2 makeQualification(IndexWriter w, String qualification, int year) {
+    Document2 job = w.newDocument();
+    job.addAtom("qualification", qualification);
+    job.addInt("year", year);
     return job;
   }
   
@@ -69,23 +68,23 @@ public class TestBlockJoin extends Lucen
     // we don't want to merge - since we rely on certain segment setup
     final IndexWriter w = new IndexWriter(dir, config);
 
-    final List<Document> docs = new ArrayList<>();
+    final List<Document2> docs = new ArrayList<>();
 
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeJob(w, "java", 2007));
+    docs.add(makeJob(w, "python", 2010));
+    docs.add(makeResume(w, "Lisa", "United Kingdom"));
     w.addDocuments(docs);
 
     docs.clear();
-    docs.add(makeJob("ruby", 2005));
-    docs.add(makeJob("java", 2006));
-    docs.add(makeResume("Frank", "United States"));
+    docs.add(makeJob(w, "ruby", 2005));
+    docs.add(makeJob(w, "java", 2006));
+    docs.add(makeResume(w, "Frank", "United States"));
     w.addDocuments(docs);
     w.commit();
     int num = atLeast(10); // produce a segment that doesn't have a value in the docType field
     for (int i = 0; i < num; i++) {
       docs.clear();
-      docs.add(makeJob("java", 2007));
+      docs.add(makeJob(w, "java", 2007));
       w.addDocuments(docs);
     }
     
@@ -93,11 +92,13 @@ public class TestBlockJoin extends Lucen
     w.close();
     assertTrue(r.leaves().size() > 1);
     IndexSearcher s = new IndexSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
 
     BooleanQuery childQuery = new BooleanQuery();
     childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     ToParentBlockJoinQuery childJoinQuery = new ToParentBlockJoinQuery(childQuery, parentsFilter, ScoreMode.Avg);
 
@@ -127,22 +128,23 @@ public class TestBlockJoin extends Lucen
     final Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    final List<Document> docs = new ArrayList<>();
+    final List<Document2> docs = new ArrayList<>();
 
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeJob(w.w, "java", 2007));
+    docs.add(makeJob(w.w, "python", 2010));
+    docs.add(makeResume(w.w, "Lisa", "United Kingdom"));
     w.addDocuments(docs);
 
     docs.clear();
-    docs.add(makeJob("ruby", 2005));
-    docs.add(makeJob("java", 2006));
-    docs.add(makeResume("Frank", "United States"));
+    docs.add(makeJob(w.w, "ruby", 2005));
+    docs.add(makeJob(w.w, "java", 2006));
+    docs.add(makeResume(w.w, "Frank", "United States"));
     w.addDocuments(docs);
     
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     // Create a filter that defines "parent" documents in the index - in this case resumes
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
@@ -150,7 +152,7 @@ public class TestBlockJoin extends Lucen
     // Define child document criteria (finds an example of relevant work experience)
     BooleanQuery childQuery = new BooleanQuery();
     childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     // Define parent document criteria (find a resident in the UK)
     Query parentQuery = new TermQuery(new Term("country", "United Kingdom"));
@@ -217,21 +219,22 @@ public class TestBlockJoin extends Lucen
     final Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    final List<Document> docs = new ArrayList<>();
+    final List<Document2> docs = new ArrayList<>();
 
     for (int i=0;i<10;i++) {
       docs.clear();
-      docs.add(makeJob("ruby", i));
-      docs.add(makeJob("java", 2007));
-      docs.add(makeResume("Frank", "United States"));
+      docs.add(makeJob(w.w, "ruby", i));
+      docs.add(makeJob(w.w, "java", 2007));
+      docs.add(makeResume(w.w, "Frank", "United States"));
       w.addDocuments(docs);
     }
 
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
-    MultiTermQuery qc = NumericRangeQuery.newIntRange("year", 2007, 2007, true, true);
+    MultiTermQuery qc = new TermRangeQuery("year", Document2.intToBytes(2007), Document2.intToBytes(2007), true, true);
     // Hacky: this causes the query to need 2 rewrite
     // iterations: 
     qc.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
@@ -270,17 +273,17 @@ public class TestBlockJoin extends Lucen
     final Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    final List<Document> docs = new ArrayList<>();
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
+    final List<Document2> docs = new ArrayList<>();
+    docs.add(makeJob(w.w, "java", 2007));
+    docs.add(makeJob(w.w, "python", 2010));
     Collections.shuffle(docs, random());
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeResume(w.w, "Lisa", "United Kingdom"));
 
-    final List<Document> docs2 = new ArrayList<>();
-    docs2.add(makeJob("ruby", 2005));
-    docs2.add(makeJob("java", 2006));
+    final List<Document2> docs2 = new ArrayList<>();
+    docs2.add(makeJob(w.w, "ruby", 2005));
+    docs2.add(makeJob(w.w, "java", 2006));
     Collections.shuffle(docs2, random());
-    docs2.add(makeResume("Frank", "United States"));
+    docs2.add(makeResume(w.w, "Frank", "United States"));
     
     addSkillless(w);
     boolean turn = random().nextBoolean();
@@ -295,6 +298,7 @@ public class TestBlockJoin extends Lucen
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     // Create a filter that defines "parent" documents in the index - in this case resumes
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
@@ -302,7 +306,7 @@ public class TestBlockJoin extends Lucen
     // Define child document criteria (finds an example of relevant work experience)
     BooleanQuery childQuery = new BooleanQuery();
     childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     // Define parent document criteria (find a resident in the UK)
     Query parentQuery = new TermQuery(new Term("country", "United Kingdom"));
@@ -351,7 +355,7 @@ public class TestBlockJoin extends Lucen
 
   private void addSkillless(final RandomIndexWriter w) throws IOException {
     if (random().nextBoolean()) {
-      w.addDocument(makeResume("Skillless", random().nextBoolean() ? "United Kingdom":"United States"));
+      w.addDocument(makeResume(w.w, "Skillless", random().nextBoolean() ? "United Kingdom":"United States"));
     }
   }
   
@@ -391,24 +395,25 @@ public class TestBlockJoin extends Lucen
     // Cannot assert this since we use NoMergePolicy:
     w.setDoRandomForceMergeAssert(false);
 
-    List<Document> docs = new ArrayList<>();
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    List<Document2> docs = new ArrayList<>();
+    docs.add(makeJob(w.w, "java", 2007));
+    docs.add(makeJob(w.w, "python", 2010));
+    docs.add(makeResume(w.w, "Lisa", "United Kingdom"));
     w.addDocuments(docs);
 
     docs.clear();
-    docs.add(makeJob("c", 1999));
-    docs.add(makeJob("ruby", 2005));
-    docs.add(makeJob("java", 2006));
-    docs.add(makeResume("Frank", "United States"));
+    docs.add(makeJob(w.w, "c", 1999));
+    docs.add(makeJob(w.w, "ruby", 2005));
+    docs.add(makeJob(w.w, "java", 2006));
+    docs.add(makeResume(w.w, "Frank", "United States"));
     w.addDocuments(docs);
 
     w.commit();
     IndexSearcher s = newSearcher(DirectoryReader.open(dir));
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(
-        NumericRangeQuery.newIntRange("year", 1990, 2010, true, true),
+        new ConstantScoreQuery(fieldTypes.newRangeFilter("year", 1990, true, 2010, true)),
         new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume")))),
         ScoreMode.Total
     );
@@ -504,41 +509,43 @@ public class TestBlockJoin extends Lucen
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     final RandomIndexWriter joinW = new RandomIndexWriter(random(), joinDir);
     for(int parentDocID=0;parentDocID<numParentDocs;parentDocID++) {
-      Document parentDoc = new Document();
-      Document parentJoinDoc = new Document();
-      Field id = new IntField("parentID", parentDocID, Field.Store.YES);
-      parentDoc.add(id);
-      parentJoinDoc.add(id);
-      parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
-      id = new NumericDocValuesField("parentID", parentDocID);
-      parentDoc.add(id);
-      parentJoinDoc.add(id);
-      parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
+      Document2 parentDoc = w.newDocument();
+      Document2 parentJoinDoc = joinW.newDocument();
+      parentDoc.addInt("parentID", parentDocID);
+      parentJoinDoc.addInt("parentID", parentDocID);
+      parentJoinDoc.addAtom("isParent", "x");
+
+      String[] randomFields = new String[parentFields.length];
       for(int field=0;field<parentFields.length;field++) {
+        String s;
         if (random().nextDouble() < 0.9) {
-          String s = parentFields[field][random().nextInt(parentFields[field].length)];
-          Field f = newStringField("parent" + field, s, Field.Store.NO);
-          parentDoc.add(f);
-          parentJoinDoc.add(f);
-
-          f = new SortedDocValuesField("parent" + field, new BytesRef(s));
-          parentDoc.add(f);
-          parentJoinDoc.add(f);
+          s = parentFields[field][random().nextInt(parentFields[field].length)];
+        } else {
+          s = null;
+        }
+        randomFields[field] = s;
+      }
+
+      for(int i=0;i<randomFields.length;i++) {
+        String s = randomFields[i];
+        if (s != null) {
+          parentDoc.addAtom("parent" + i, s);
+          parentJoinDoc.addAtom("parent" + i, s);
         }
       }
 
       if (doDeletes) {
-        parentDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
-        parentJoinDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
+        parentDoc.addInt("blockID", parentDocID);
+        parentJoinDoc.addInt("blockID", parentDocID);
       }
 
-      final List<Document> joinDocs = new ArrayList<>();
+      final List<Document2> joinDocs = new ArrayList<>();
 
       if (VERBOSE) {
         StringBuilder sb = new StringBuilder();
         sb.append("parentID=").append(parentDoc.get("parentID"));
         for(int fieldID=0;fieldID<parentFields.length;fieldID++) {
-          String s = parentDoc.get("parent" + fieldID);
+          String s = parentDoc.getString("parent" + fieldID);
           if (s != null) {
             sb.append(" parent" + fieldID + "=" + s);
           }
@@ -549,27 +556,30 @@ public class TestBlockJoin extends Lucen
       final int numChildDocs = TestUtil.nextInt(random(), 1, 20);
       for(int childDocID=0;childDocID<numChildDocs;childDocID++) {
         // Denormalize: copy all parent fields into child doc:
-        Document childDoc = TestUtil.cloneDocument(parentDoc);
-        Document joinChildDoc = new Document();
+        Document2 childDoc = w.newDocument();
+        childDoc.addInt("parentID", parentDocID);
+        for(int i=0;i<randomFields.length;i++) {
+          String s = randomFields[i];
+          if (s != null) {
+            childDoc.addAtom("parent" + i, s);
+          }
+        }
+        if (doDeletes) {
+          childDoc.addInt("blockID", parentDocID);
+        }
+
+        Document2 joinChildDoc = joinW.newDocument();
         joinDocs.add(joinChildDoc);
 
-        Field childID = new IntField("childID", childDocID, Field.Store.YES);
-        childDoc.add(childID);
-        joinChildDoc.add(childID);
-        childID = new NumericDocValuesField("childID", childDocID);
-        childDoc.add(childID);
-        joinChildDoc.add(childID);
+        childDoc.addInt("childID", childDocID);
+        joinChildDoc.addInt("childID", childDocID);
 
         for(int childFieldID=0;childFieldID<childFields.length;childFieldID++) {
           if (random().nextDouble() < 0.9) {
             String s = childFields[childFieldID][random().nextInt(childFields[childFieldID].length)];
-            Field f = newStringField("child" + childFieldID, s, Field.Store.NO);
-            childDoc.add(f);
-            joinChildDoc.add(f);
-
-            f = new SortedDocValuesField("child" + childFieldID, new BytesRef(s));
-            childDoc.add(f);
-            joinChildDoc.add(f);
+
+            childDoc.addAtom("child" + childFieldID, s);
+            joinChildDoc.addAtom("child" + childFieldID, s);
           }
         }
 
@@ -577,7 +587,7 @@ public class TestBlockJoin extends Lucen
           StringBuilder sb = new StringBuilder();
           sb.append("childID=").append(joinChildDoc.get("childID"));
           for(int fieldID=0;fieldID<childFields.length;fieldID++) {
-            String s = joinChildDoc.get("child" + fieldID);
+            String s = joinChildDoc.getString("child" + fieldID);
             if (s != null) {
               sb.append(" child" + fieldID + "=" + s);
             }
@@ -586,7 +596,7 @@ public class TestBlockJoin extends Lucen
         }
 
         if (doDeletes) {
-          joinChildDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
+          joinChildDoc.addInt("blockID", parentDocID);
         }
 
         w.addDocument(childDoc);
@@ -606,9 +616,8 @@ public class TestBlockJoin extends Lucen
       if (VERBOSE) {
         System.out.println("DELETE parentID=" + deleteID);
       }
-      NumericUtils.intToPrefixCodedBytes(deleteID, 0, term);
-      w.deleteDocuments(new Term("blockID", term.toBytesRef()));
-      joinW.deleteDocuments(new Term("blockID", term.toBytesRef()));
+      w.deleteDocuments(w.getFieldTypes().newIntTerm("blockID", deleteID));
+      joinW.deleteDocuments(joinW.getFieldTypes().newIntTerm("blockID", deleteID));
     }
 
     final IndexReader r = w.getReader();
@@ -825,7 +834,7 @@ public class TestBlockJoin extends Lucen
         for (ScoreDoc hit : b.scoreDocs) {
           Explanation explanation = joinS.explain(childJoinQuery, hit.doc);
           Document2 document = joinS.doc(hit.doc - 1);
-          int childId = Integer.parseInt(document.getString("childID"));
+          int childId = document.getInt("childID");
           //System.out.println("  hit docID=" + hit.doc + " childId=" + childId + " parentId=" + document.get("parentID"));
           assertTrue(explanation.isMatch());
           assertEquals(hit.score, explanation.getValue(), 0.0f);
@@ -991,7 +1000,7 @@ public class TestBlockJoin extends Lucen
       Document2 doc1 = r.document(hit.doc);
       Document2 doc2 = joinR.document(joinHit.doc);
       assertEquals("hit " + hitCount + " differs",
-                   doc1.getString("childID"), doc2.getString("childID"));
+                   doc1.getInt("childID"), doc2.getInt("childID"));
       // don't compare scores -- they are expected to differ
 
 
@@ -1017,7 +1026,7 @@ public class TestBlockJoin extends Lucen
       final ScoreDoc[] groupHits = group.scoreDocs;
       assertNotNull(group.groupValue);
       final Document2 parentDoc = joinR.document(group.groupValue);
-      final String parentID = parentDoc.getString("parentID");
+      final String parentID = Integer.toString(parentDoc.getInt("parentID"));
       //System.out.println("GROUP groupDoc=" + group.groupDoc + " parent=" + parentDoc);
       assertNotNull(parentID);
       assertTrue(groupHits.length > 0);
@@ -1025,9 +1034,9 @@ public class TestBlockJoin extends Lucen
         final Document2 nonJoinHit = r.document(hits[resultUpto++].doc);
         final Document2 joinHit = joinR.document(groupHits[hitIDX].doc);
         assertEquals(parentID,
-                     nonJoinHit.getString("parentID"));
-        assertEquals(joinHit.getString("childID"),
-                     nonJoinHit.getString("childID"));
+                     Integer.toString(nonJoinHit.getInt("parentID")));
+        assertEquals(joinHit.getInt("childID"),
+                     nonJoinHit.getInt("childID"));
       }
 
       if (joinGroupUpto < groupDocs.length) {
@@ -1035,7 +1044,7 @@ public class TestBlockJoin extends Lucen
         //System.out.println("  next joingroupUpto=" + joinGroupUpto + " gd.length=" + groupDocs.length + " parentID=" + parentID);
         while(true) {
           assertTrue(resultUpto < hits.length);
-          if (!parentID.equals(r.document(hits[resultUpto].doc).getString("parentID"))) {
+          if (!parentID.equals(Integer.toString(r.document(hits[resultUpto].doc).getInt("parentID")))) {
             break;
           }
           resultUpto++;
@@ -1049,17 +1058,18 @@ public class TestBlockJoin extends Lucen
     final Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    final List<Document> docs = new ArrayList<>();
+    final List<Document2> docs = new ArrayList<>();
 
-    docs.add(makeJob("java", 2007));
-    docs.add(makeJob("python", 2010));
-    docs.add(makeQualification("maths", 1999));
-    docs.add(makeResume("Lisa", "United Kingdom"));
+    docs.add(makeJob(w.w, "java", 2007));
+    docs.add(makeJob(w.w, "python", 2010));
+    docs.add(makeQualification(w.w, "maths", 1999));
+    docs.add(makeResume(w.w, "Lisa", "United Kingdom"));
     w.addDocuments(docs);
 
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     // Create a filter that defines "parent" documents in the index - in this case resumes
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
@@ -1067,11 +1077,11 @@ public class TestBlockJoin extends Lucen
     // Define child document criteria (finds an example of relevant work experience)
     BooleanQuery childJobQuery = new BooleanQuery();
     childJobQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childJobQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childJobQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     BooleanQuery childQualificationQuery = new BooleanQuery();
     childQualificationQuery.add(new BooleanClause(new TermQuery(new Term("qualification", "maths")), Occur.MUST));
-    childQualificationQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 1980, 2000, true, true), Occur.MUST));
+    childQualificationQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newRangeFilter("year", 1980, true, 2000, true)), Occur.MUST));
 
 
     // Define parent document criteria (find a resident in the UK)
@@ -1193,13 +1203,13 @@ public class TestBlockJoin extends Lucen
     final Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
 
-    final List<Document> docs = new ArrayList<>();
-    docs.add(makeJob("ruby", 2005));
-    docs.add(makeJob("java", 2006));
-    docs.add(makeJob("java", 2010));
-    docs.add(makeJob("java", 2012));
+    final List<Document2> docs = new ArrayList<>();
+    docs.add(makeJob(w.w, "ruby", 2005));
+    docs.add(makeJob(w.w, "java", 2006));
+    docs.add(makeJob(w.w, "java", 2010));
+    docs.add(makeJob(w.w, "java", 2012));
     Collections.shuffle(docs, random());
-    docs.add(makeResume("Frank", "United States"));
+    docs.add(makeResume(w.w, "Frank", "United States"));
 
     addSkillless(w);
     w.addDocuments(docs);
@@ -1208,6 +1218,7 @@ public class TestBlockJoin extends Lucen
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = new IndexSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
 
     // Create a filter that defines "parent" documents in the index - in this case resumes
     BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));
@@ -1215,7 +1226,7 @@ public class TestBlockJoin extends Lucen
     // Define child document criteria (finds an example of relevant work experience)
     BooleanQuery childQuery = new BooleanQuery();
     childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
-    childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
+    childQuery.add(new BooleanClause(new ConstantScoreQuery(fieldTypes.newRangeFilter("year", 2006, true, 2011, true)), Occur.MUST));
 
     // Wrap the child document query to 'join' any matches
     // up to corresponding parent:
@@ -1247,7 +1258,7 @@ public class TestBlockJoin extends Lucen
       for (ScoreDoc scoreDoc : group.scoreDocs) {
         Document2 childDoc = s.doc(scoreDoc.doc);
         assertEquals("java", childDoc.getString("skill"));
-        int year = Integer.parseInt(childDoc.getString("year"));
+        int year = childDoc.getInt("year");
         assertTrue(year >= 2006 && year <= 2011);
       }
     }
@@ -1270,7 +1281,7 @@ public class TestBlockJoin extends Lucen
     for (ScoreDoc scoreDoc : group.scoreDocs) {
       Document2 childDoc = s.doc(scoreDoc.doc);
       assertEquals("java", childDoc.getString("skill"));
-      int year = Integer.parseInt(childDoc.getString("year"));
+      int year = childDoc.getInt("year");
       assertTrue(year >= 2006 && year <= 2011);
     }
 
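
Throughout TestBlockJoin the commit replaces NumericRangeQuery over the legacy trie encoding with a range filter obtained from the searcher's FieldTypes, which knows how addInt("year", ...) encoded the terms; because a filter does not score, it is wrapped in ConstantScoreQuery so it can still serve as a BooleanClause. The recurring pattern, condensed from the hunks above (branch-specific API):

    // old: the caller restates the numeric encoding
    Query q = NumericRangeQuery.newIntRange("year", 2006, 2011, true, true);

    // new: the schema supplies the encoding
    FieldTypes fieldTypes = s.getFieldTypes();
    Query q2 = new ConstantScoreQuery(
        fieldTypes.newRangeFilter("year", 2006, true, 2011, true));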

Modified: lucene/dev/branches/lucene6005/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java?rev=1642110&r1=1642109&r2=1642110&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java (original)
+++ lucene/dev/branches/lucene6005/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java Thu Nov 27 11:34:43 2014
@@ -36,12 +36,14 @@ import org.apache.lucene.analysis.TokenF
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.CompositeReader;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
@@ -422,7 +424,6 @@ public class TestMemoryIndexAgainstRAMDi
   }
   
   public void testDuellMemIndex() throws IOException {
-    LineFileDocs lineFileDocs = new LineFileDocs(random());
     int numDocs = atLeast(10);
     MemoryIndex memory = new MemoryIndex(random().nextBoolean(),  random().nextInt(50) * 1024 * 1024);
     for (int i = 0; i < numDocs; i++) {
@@ -430,21 +431,23 @@ public class TestMemoryIndexAgainstRAMDi
       MockAnalyzer mockAnalyzer = new MockAnalyzer(random());
       mockAnalyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), mockAnalyzer));
-      Document nextDoc = lineFileDocs.nextDoc();
+      LineFileDocs lineFileDocs = new LineFileDocs(writer, random());
+      Document2 nextDoc = lineFileDocs.nextDoc();
       Document doc = new Document();
       for (IndexableField field : nextDoc.getFields()) {
-        if (field.fieldType().indexOptions() != IndexOptions.NONE) {
+        if (field.fieldType().indexOptions() != IndexOptions.NONE && field.stringValue() != null && field.fieldType().docValuesType() == DocValuesType.NONE) {
           doc.add(field);
           if (random().nextInt(3) == 0) {
             doc.add(field);  // randomly add the same field twice
           }
         }
       }
+      lineFileDocs.close();
       
       writer.addDocument(doc);
       writer.close();
       for (IndexableField field : doc) {
-        memory.addField(field.name(), ((Field)field).stringValue(), mockAnalyzer);  
+        memory.addField(field.name(), field.stringValue(), mockAnalyzer);  
       }
       DirectoryReader competitor = DirectoryReader.open(dir);
       LeafReader memIndexReader= (LeafReader) memory.createSearcher().getIndexReader();
@@ -453,7 +456,6 @@ public class TestMemoryIndexAgainstRAMDi
       memory.reset();
       dir.close();
     }
-    lineFileDocs.close();
   }
   
   // LUCENE-4880

Modified: lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java?rev=1642110&r1=1642109&r2=1642110&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCache.java Thu Nov 27 11:34:43 2014
@@ -22,14 +22,11 @@ import java.io.PrintStream;
 import java.util.Collections;
 
 import org.apache.lucene.analysis.NumericTokenStream;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.IndexReader; // javadocs
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
@@ -114,6 +111,24 @@ interface FieldCache {
     }
   };
 
+  // nocommit rename
+  public static final Parser DOCUMENT2_INT_PARSER = new Parser() {
+    @Override
+    public long parseValue(BytesRef term) {
+      return Document2.bytesToInt(term);
+    }
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return terms.iterator(null);
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".DOCUMENT2_INT_PARSER"; 
+    }
+  };
+
   /**
    * A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
    * via {@link FloatField}/{@link NumericTokenStream}.
@@ -137,6 +152,24 @@ interface FieldCache {
     }
   };
 
+  // nocommit rename
+  public static final Parser DOCUMENT2_FLOAT_PARSER = new Parser() {
+    @Override
+    public long parseValue(BytesRef term) {
+      return Document2.sortableFloatBits(Document2.bytesToInt(term));
+    }
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return terms.iterator(null);
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".DOCUMENT2_FLOAT_PARSER"; 
+    }
+  };
+
   /**
    * A parser instance for long values encoded by {@link NumericUtils}, e.g. when indexed
    * via {@link LongField}/{@link NumericTokenStream}.
@@ -157,6 +190,24 @@ interface FieldCache {
     }
   };
 
+  // nocommit rename
+  public static final Parser DOCUMENT2_LONG_PARSER = new Parser() {
+    @Override
+    public long parseValue(BytesRef term) {
+      return Document2.bytesToLong(term);
+    }
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return terms.iterator(null);
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".DOCUMENT2_LONG_PARSER"; 
+    }
+  };
+
   /**
    * A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
    * via {@link DoubleField}/{@link NumericTokenStream}.
@@ -179,6 +230,24 @@ interface FieldCache {
     }
   };
   
+  // nocommit rename
+  public static final Parser DOCUMENT2_DOUBLE_PARSER = new Parser() {
+    @Override
+    public long parseValue(BytesRef term) {
+      return Document2.sortableDoubleBits(Document2.bytesToLong(term));
+    }
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return terms.iterator(null);
+    }
+    
+    @Override
+    public String toString() { 
+      return FieldCache.class.getName()+".DOCUMENT2_DOUBLE_PARSER"; 
+    }
+  };
+
   /** Checks the internal cache for an appropriate entry, and if none is found,
    *  reads the terms in <code>field</code> and returns a bit set at the size of
    *  <code>reader.maxDoc()</code>, with turned on bits for each docid that 
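
The four DOCUMENT2_* parsers above decode terms written with the branch's fixed-width encodings (Document2.bytesToInt/bytesToLong) instead of the legacy trie encoding. The float/double variants additionally apply a sortable-bits transform, since raw IEEE-754 bit patterns do not sort numerically: negative values compare above positive ones. As a sketch, this is how Lucene's NumericUtils defines the transform; the diff suggests Document2.sortableDoubleBits plays the same role on this branch, but that equivalence is an assumption:

    // For negatives (sign bit set) flip every bit except the sign bit;
    // positives pass through unchanged. Signed long order then matches
    // numeric double order, e.g. -2.0 < -1.0 < 0.0 < 1.0 < 2.0.
    static long sortableDoubleBits(long bits) {
      return bits ^ ((bits >> 63) & 0x7fffffffffffffffL);
    }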

Modified: lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java?rev=1642110&r1=1642109&r2=1642110&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/UninvertingReader.java Thu Nov 27 11:34:43 2014
@@ -22,10 +22,6 @@ import java.util.ArrayList;
 import java.util.Map;
 
 import org.apache.lucene.document.BinaryDocValuesField; // javadocs
-import org.apache.lucene.document.DoubleField; // javadocs
-import org.apache.lucene.document.FloatField; // javadocs
-import org.apache.lucene.document.IntField; // javadocs
-import org.apache.lucene.document.LongField; // javadocs
 import org.apache.lucene.document.NumericDocValuesField; // javadocs
 import org.apache.lucene.document.SortedDocValuesField; // javadocs
 import org.apache.lucene.document.SortedSetDocValuesField; // javadocs
@@ -225,15 +221,28 @@ public class UninvertingReader extends F
     return fieldInfos;
   }
 
+  private boolean hasFieldType(String field) {
+    try {
+      getFieldTypes().getIndexableFieldType(field);
+      return true;
+    } catch (IllegalArgumentException iae) {
+      return false;
+    }
+  }
+
   @Override
   public NumericDocValues getNumericDocValues(String field) throws IOException {
     Type v = getType(field);
     if (v != null) {
       switch (v) {
-        case INTEGER: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_INT_PARSER, true);
-        case FLOAT: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
-        case LONG: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
-        case DOUBLE: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+        case INTEGER:
+          return FieldCache.DEFAULT.getNumerics(in, field, hasFieldType(field) ? FieldCache.DOCUMENT2_INT_PARSER : FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+        case FLOAT:
+          return FieldCache.DEFAULT.getNumerics(in, field, hasFieldType(field) ? FieldCache.DOCUMENT2_FLOAT_PARSER : FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
+        case LONG:
+          return FieldCache.DEFAULT.getNumerics(in, field, hasFieldType(field) ? FieldCache.DOCUMENT2_LONG_PARSER : FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
+        case DOUBLE:
+          return FieldCache.DEFAULT.getNumerics(in, field, hasFieldType(field) ? FieldCache.DOCUMENT2_DOUBLE_PARSER : FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
       }
     }
     return super.getNumericDocValues(field);
@@ -264,14 +273,14 @@ public class UninvertingReader extends F
     Type v = getType(field);
     if (v != null) {
       switch (v) {
-        case SORTED_SET_INTEGER:
-        case SORTED_SET_FLOAT: 
-          return FieldCache.DEFAULT.getDocTermOrds(in, field, FieldCache.INT32_TERM_PREFIX);
-        case SORTED_SET_LONG:
-        case SORTED_SET_DOUBLE:
-          return FieldCache.DEFAULT.getDocTermOrds(in, field, FieldCache.INT64_TERM_PREFIX);
-        case SORTED_SET_BINARY:
-          return FieldCache.DEFAULT.getDocTermOrds(in, field, null);
+      case SORTED_SET_INTEGER:
+      case SORTED_SET_FLOAT: 
+        return FieldCache.DEFAULT.getDocTermOrds(in, field, hasFieldType(field) ? null : FieldCache.INT32_TERM_PREFIX);
+      case SORTED_SET_LONG:
+      case SORTED_SET_DOUBLE:
+        return FieldCache.DEFAULT.getDocTermOrds(in, field, hasFieldType(field) ? null : FieldCache.INT64_TERM_PREFIX);
+      case SORTED_SET_BINARY:
+        return FieldCache.DEFAULT.getDocTermOrds(in, field, null);
       }
     }
     return in.getSortedSetDocValues(field);

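The parser switch stays invisible to UninvertingReader's callers, who still only declare the per-field uninversion type; each getter then routes to the Document2 parser when hasFieldType(field) is true and to the legacy NumericUtils parser otherwise. A hedged caller-side sketch ("dir" and the "id" field are hypothetical):

    // Sketch only: standard UninvertingReader.wrap() usage, unchanged by
    // this patch; the DOCUMENT2_INT_PARSER vs NUMERIC_UTILS_INT_PARSER
    // choice is made internally per field.
    Map<String,UninvertingReader.Type> mapping = new HashMap<>();
    mapping.put("id", UninvertingReader.Type.INTEGER);
    DirectoryReader uninverted =
        UninvertingReader.wrap(DirectoryReader.open(dir), mapping);
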
Modified: lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java?rev=1642110&r1=1642109&r2=1642110&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/uninverting/TestDocTermOrds.java Thu Nov 27 11:34:43 2014
@@ -18,27 +18,27 @@ package org.apache.lucene.uninverting;
  */
 
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -46,8 +46,8 @@ import org.apache.lucene.index.SlowCompo
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -133,14 +133,17 @@ public class TestDocTermOrds extends Luc
     }
     
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("id");
+    fieldTypes.disableSorting("field");
+    fieldTypes.setMultiValued("field");
 
     final int[][] idToOrds = new int[NUM_DOCS][];
     final Set<Integer> ordsForDocSet = new HashSet<>();
 
     for(int id=0;id<NUM_DOCS;id++) {
-      Document doc = new Document();
-
-      doc.add(new IntField("id", id, Field.Store.YES));
+      Document2 doc = w.newDocument();
+      doc.addInt("id", id);
       
       final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
       while(ordsForDocSet.size() < termCount) {
@@ -153,11 +156,10 @@ public class TestDocTermOrds extends Luc
       }
       for(int ord : ordsForDocSet) {
         ordsForDoc[upto++] = ord;
-        Field field = newStringField("field", termsArray[ord].utf8ToString(), Field.Store.NO);
         if (VERBOSE) {
           System.out.println("  f=" + termsArray[ord].utf8ToString());
         }
-        doc.add(field);
+        doc.addAtom("field", termsArray[ord].utf8ToString());
       }
       ordsForDocSet.clear();
       Arrays.sort(ordsForDoc);
@@ -230,14 +232,18 @@ public class TestDocTermOrds extends Luc
     }
     
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("id");
+    fieldTypes.disableSorting("field");
+    fieldTypes.setMultiValued("field");
 
     final int[][] idToOrds = new int[NUM_DOCS][];
     final Set<Integer> ordsForDocSet = new HashSet<>();
 
     for(int id=0;id<NUM_DOCS;id++) {
-      Document doc = new Document();
+      Document2 doc = w.newDocument();
 
-      doc.add(new IntField("id", id, Field.Store.YES));
+      doc.addInt("id", id);
       
       final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
       while(ordsForDocSet.size() < termCount) {
@@ -250,11 +256,10 @@ public class TestDocTermOrds extends Luc
       }
       for(int ord : ordsForDocSet) {
         ordsForDoc[upto++] = ord;
-        Field field = newStringField("field", termsArray[ord].utf8ToString(), Field.Store.NO);
         if (VERBOSE) {
           System.out.println("  f=" + termsArray[ord].utf8ToString());
         }
-        doc.add(field);
+        doc.addAtom("field", termsArray[ord].utf8ToString());
       }
       ordsForDocSet.clear();
       Arrays.sort(ordsForDoc);
@@ -321,7 +326,7 @@ public class TestDocTermOrds extends Luc
                                             TestUtil.nextInt(random(), 2, 10));
                                             
 
-    final NumericDocValues docIDToID = FieldCache.DEFAULT.getNumerics(r, "id", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    final NumericDocValues docIDToID = FieldCache.DEFAULT.getNumerics(r, "id", FieldCache.DOCUMENT2_INT_PARSER, false);
     /*
       for(int docID=0;docID<subR.maxDoc();docID++) {
       System.out.println("  docID=" + docID + " id=" + docIDToID[docID]);
@@ -427,14 +432,17 @@ public class TestDocTermOrds extends Luc
   public void testNumericEncoded32() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    
-    Document doc = new Document();
-    doc.add(new IntField("foo", 5, Field.Store.NO));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    fieldTypes.setMultiValued("foo");
+
+    Document2 doc = iw.newDocument();
+    doc.addInt("foo", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new IntField("foo", 5, Field.Store.NO));
-    doc.add(new IntField("foo", -3, Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addInt("foo", 5);
+    doc.addInt("foo", -3);
     iw.addDocument(doc);
     
     iw.forceMerge(1);
@@ -443,7 +451,7 @@ public class TestDocTermOrds extends Luc
     DirectoryReader ir = DirectoryReader.open(dir);
     LeafReader ar = getOnlySegmentReader(ir);
     
-    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT32_TERM_PREFIX);
+    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", null);
     assertEquals(2, v.getValueCount());
     
     v.setDocument(0);
@@ -456,10 +464,10 @@ public class TestDocTermOrds extends Luc
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
     
     BytesRef value = v.lookupOrd(0);
-    assertEquals(-3, NumericUtils.prefixCodedToInt(value));
+    assertEquals(-3, Document2.bytesToInt(value));
     
     value = v.lookupOrd(1);
-    assertEquals(5, NumericUtils.prefixCodedToInt(value));
+    assertEquals(5, Document2.bytesToInt(value));
     
     ir.close();
     dir.close();
@@ -468,14 +476,17 @@ public class TestDocTermOrds extends Luc
   public void testNumericEncoded64() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("foo");
+    fieldTypes.setMultiValued("foo");
     
-    Document doc = new Document();
-    doc.add(new LongField("foo", 5, Field.Store.NO));
+    Document2 doc = iw.newDocument();
+    doc.addLong("foo", 5);
     iw.addDocument(doc);
     
-    doc = new Document();
-    doc.add(new LongField("foo", 5, Field.Store.NO));
-    doc.add(new LongField("foo", -3, Field.Store.NO));
+    doc = iw.newDocument();
+    doc.addLong("foo", 5);
+    doc.addLong("foo", -3);
     iw.addDocument(doc);
     
     iw.forceMerge(1);
@@ -484,7 +495,7 @@ public class TestDocTermOrds extends Luc
     DirectoryReader ir = DirectoryReader.open(dir);
     LeafReader ar = getOnlySegmentReader(ir);
     
-    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT64_TERM_PREFIX);
+    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", null);
     assertEquals(2, v.getValueCount());
     
     v.setDocument(0);
@@ -497,10 +508,10 @@ public class TestDocTermOrds extends Luc
     assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
     
     BytesRef value = v.lookupOrd(0);
-    assertEquals(-3, NumericUtils.prefixCodedToLong(value));
+    assertEquals(-3, Document2.bytesToLong(value));
     
     value = v.lookupOrd(1);
-    assertEquals(5, NumericUtils.prefixCodedToLong(value));
+    assertEquals(5, Document2.bytesToLong(value));
     
     ir.close();
     dir.close();

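Condensed from testNumericEncoded32 above, the new multi-valued flow: FieldTypes must be told up front that "foo" is multi-valued (and unsortable), and because the new terms are plain fixed-width bytes that already sort numerically, getDocTermOrds() takes a null prefix and values decode with Document2.bytesToInt():

    // Sketch condensed from the test; iw is an IndexWriter, ar a LeafReader
    // over the single merged segment.
    FieldTypes fieldTypes = iw.getFieldTypes();
    fieldTypes.disableSorting("foo");
    fieldTypes.setMultiValued("foo");
    Document2 doc = iw.newDocument();
    doc.addInt("foo", 5);
    doc.addInt("foo", -3);
    iw.addDocument(doc);
    // ... forceMerge, close writer, open reader ...
    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", null);
    assertEquals(-3, Document2.bytesToInt(v.lookupOrd(0)));  // ords sort numerically
    assertEquals(5, Document2.bytesToInt(v.lookupOrd(1)));
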
Modified: lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java?rev=1642110&r1=1642109&r2=1642110&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/uninverting/TestFieldCache.java Thu Nov 27 11:34:43 2014
@@ -29,23 +29,21 @@ import java.util.concurrent.atomic.Atomi
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.BinaryDocValuesField;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoubleField;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.FloatField;
-import org.apache.lucene.document.IntField;
-import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StoredField;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -77,6 +75,19 @@ public class TestFieldCache extends Luce
     NUM_ORDS = atLeast(2);
     directory = newDirectory();
     RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    for(String field : new String[] {"theDouble",
+                                     "theLong",
+                                     "theInt",
+                                     "theFloat",
+                                     "sparse",
+                                     "numInt",
+                                     "theRandomUnicodeString",
+                                     "theRandomUnicodeMultiValuedField"}) {
+      fieldTypes.disableSorting(field);
+    }
+    fieldTypes.setMultiValued("theRandomUnicodeMultiValuedField");
+
     long theLong = Long.MAX_VALUE;
     double theDouble = Double.MAX_VALUE;
     int theInt = Integer.MAX_VALUE;
@@ -87,23 +98,23 @@ public class TestFieldCache extends Luce
       System.out.println("TEST: setUp");
     }
     for (int i = 0; i < NUM_DOCS; i++){
-      Document doc = new Document();
-      doc.add(new LongField("theLong", theLong--, Field.Store.NO));
-      doc.add(new DoubleField("theDouble", theDouble--, Field.Store.NO));
-      doc.add(new IntField("theInt", theInt--, Field.Store.NO));
-      doc.add(new FloatField("theFloat", theFloat--, Field.Store.NO));
+      Document2 doc = writer.newDocument();
+      doc.addLong("theLong", theLong--);
+      doc.addDouble("theDouble", theDouble--);
+      doc.addInt("theInt", theInt--);
+      doc.addFloat("theFloat", theFloat--);
       if (i%2 == 0) {
-        doc.add(new IntField("sparse", i, Field.Store.NO));
+        doc.addInt("sparse", i);
       }
 
       if (i%2 == 0) {
-        doc.add(new IntField("numInt", i, Field.Store.NO));
+        doc.addInt("numInt", i);
       }
 
       // sometimes skip the field:
       if (random().nextInt(40) != 17) {
         unicodeStrings[i] = generateString(i);
-        doc.add(newStringField("theRandomUnicodeString", unicodeStrings[i], Field.Store.YES));
+        doc.addAtom("theRandomUnicodeString", unicodeStrings[i]);
       }
 
       // sometimes skip the field:
@@ -111,7 +122,7 @@ public class TestFieldCache extends Luce
         for (int j = 0; j < NUM_ORDS; j++) {
           String newValue = generateString(i);
           multiValued[i][j] = new BytesRef(newValue);
-          doc.add(newStringField("theRandomUnicodeMultiValuedField", newValue, Field.Store.YES));
+          doc.addAtom("theRandomUnicodeMultiValuedField", newValue);
         }
         Arrays.sort(multiValued[i]);
       }
@@ -137,17 +148,15 @@ public class TestFieldCache extends Luce
       FieldCache cache = FieldCache.DEFAULT;
       ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
       cache.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8));
-      cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+      cache.getNumerics(reader, "theDouble", FieldCache.DOCUMENT2_DOUBLE_PARSER, false);
       cache.getNumerics(reader, "theDouble", new FieldCache.Parser() {
         @Override
         public TermsEnum termsEnum(Terms terms) throws IOException {
-          return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
+          return terms.iterator(null);
         }
         @Override
         public long parseValue(BytesRef term) {
-          int val = (int) NumericUtils.prefixCodedToLong(term);
-          if (val<0) val ^= 0x7fffffff;
-          return val;
+          return Document2.sortableDoubleBits(Document2.bytesToLong(term));
         }
       }, false);
       assertTrue(bos.toString(IOUtils.UTF_8).indexOf("WARNING") != -1);
@@ -159,26 +168,26 @@ public class TestFieldCache extends Luce
 
   public void test() throws IOException {
     FieldCache cache = FieldCache.DEFAULT;
-    NumericDocValues doubles = cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean());
-    assertSame("Second request to cache return same array", doubles, cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean()));
+    NumericDocValues doubles = cache.getNumerics(reader, "theDouble", FieldCache.DOCUMENT2_DOUBLE_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", doubles, cache.getNumerics(reader, "theDouble", FieldCache.DOCUMENT2_DOUBLE_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertEquals(Double.doubleToLongBits(Double.MAX_VALUE - i), doubles.get(i));
     }
     
-    NumericDocValues longs = cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean());
-    assertSame("Second request to cache return same array", longs, cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean()));
+    NumericDocValues longs = cache.getNumerics(reader, "theLong", FieldCache.DOCUMENT2_LONG_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", longs, cache.getNumerics(reader, "theLong", FieldCache.DOCUMENT2_LONG_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertEquals(Long.MAX_VALUE - i, longs.get(i));
     }
 
-    NumericDocValues ints = cache.getNumerics(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean());
-    assertSame("Second request to cache return same array", ints, cache.getNumerics(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean()));
+    NumericDocValues ints = cache.getNumerics(reader, "theInt", FieldCache.DOCUMENT2_INT_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", ints, cache.getNumerics(reader, "theInt", FieldCache.DOCUMENT2_INT_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertEquals(Integer.MAX_VALUE - i, ints.get(i));
     }
     
-    NumericDocValues floats = cache.getNumerics(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean());
-    assertSame("Second request to cache return same array", floats, cache.getNumerics(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean()));
+    NumericDocValues floats = cache.getNumerics(reader, "theFloat", FieldCache.DOCUMENT2_FLOAT_PARSER, random().nextBoolean());
+    assertSame("Second request to cache return same array", floats, cache.getNumerics(reader, "theFloat", FieldCache.DOCUMENT2_FLOAT_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertEquals(Float.floatToIntBits(Float.MAX_VALUE - i), floats.get(i));
     }
@@ -320,7 +329,7 @@ public class TestFieldCache extends Luce
     FieldCache cache = FieldCache.DEFAULT;
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
-    cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+    cache.getNumerics(reader, "theDouble", FieldCache.DOCUMENT2_DOUBLE_PARSER, true);
 
     // The double[] takes one slot, and docsWithField should also
     // have been populated:
@@ -331,7 +340,7 @@ public class TestFieldCache extends Luce
     assertEquals(2, cache.getCacheEntries().length);
     assertTrue(bits instanceof Bits.MatchAllBits);
 
-    NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+    NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.DOCUMENT2_INT_PARSER, true);
     assertEquals(4, cache.getCacheEntries().length);
     Bits docsWithField = cache.getDocsWithField(reader, "sparse");
     assertEquals(4, cache.getCacheEntries().length);
@@ -344,7 +353,7 @@ public class TestFieldCache extends Luce
       }
     }
 
-    NumericDocValues numInts = cache.getNumerics(reader, "numInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean());
+    NumericDocValues numInts = cache.getNumerics(reader, "numInt", FieldCache.DOCUMENT2_INT_PARSER, random().nextBoolean());
     docsWithField = cache.getDocsWithField(reader, "numInt");
     for (int i = 0; i < docsWithField.length(); i++) {
       if (i%2 == 0) {
@@ -394,7 +403,7 @@ public class TestFieldCache extends Luce
                     assertEquals(i%2 == 0, docsWithField.get(i));
                   }
                 } else {
-                  NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+                  NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.DOCUMENT2_INT_PARSER, true);
                   Bits docsWithField = cache.getDocsWithField(reader, "sparse");
                   for (int i = 0; i < docsWithField.length(); i++) {
                     if (i%2 == 0) {
@@ -426,12 +435,15 @@ public class TestFieldCache extends Luce
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(null);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(new BinaryDocValuesField("binary", new BytesRef("binary value")));
-    doc.add(new SortedDocValuesField("sorted", new BytesRef("sorted value")));
-    doc.add(new NumericDocValuesField("numeric", 42));
-    doc.add(new SortedSetDocValuesField("sortedset", new BytesRef("sortedset value1")));
-    doc.add(new SortedSetDocValuesField("sortedset", new BytesRef("sortedset value2")));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setMultiValued("sortedset");
+    fieldTypes.disableSorting("binary");
+    Document2 doc = iw.newDocument();
+    doc.addBinary("binary", new BytesRef("binary value"));
+    doc.addBinary("sorted", new BytesRef("sorted value"));
+    doc.addInt("numeric", -42);
+    doc.addAtom("sortedset", new BytesRef("sortedset value1"));
+    doc.addAtom("sortedset", new BytesRef("sortedset value2"));
     iw.addDocument(doc);
     DirectoryReader ir = iw.getReader();
     iw.close();
@@ -439,7 +451,7 @@ public class TestFieldCache extends Luce
     
     // Binary type: can be retrieved via getTerms()
     try {
-      FieldCache.DEFAULT.getNumerics(ar, "binary", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+      FieldCache.DEFAULT.getNumerics(ar, "binary", FieldCache.DOCUMENT2_INT_PARSER, false);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -467,7 +479,7 @@ public class TestFieldCache extends Luce
     
     // Sorted type: can be retrieved via getTerms(), getTermsIndex(), getDocTermOrds()
     try {
-      FieldCache.DEFAULT.getNumerics(ar, "sorted", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+      FieldCache.DEFAULT.getNumerics(ar, "sorted", FieldCache.DOCUMENT2_INT_PARSER, false);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -496,8 +508,8 @@ public class TestFieldCache extends Luce
     assertTrue(bits.get(0));
     
     // Numeric type: can be retrieved via getInts() and so on
-    NumericDocValues numeric = FieldCache.DEFAULT.getNumerics(ar, "numeric", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
-    assertEquals(42, numeric.get(0));
+    NumericDocValues numeric = FieldCache.DEFAULT.getNumerics(ar, "numeric", FieldCache.DOCUMENT2_INT_PARSER, false);
+    assertEquals(-42, numeric.get(0));
     
     try {
       FieldCache.DEFAULT.getTerms(ar, "numeric", true);
@@ -524,7 +536,7 @@ public class TestFieldCache extends Luce
     
     // SortedSet type: can be retrieved via getDocTermOrds() 
     try {
-      FieldCache.DEFAULT.getNumerics(ar, "sortedset", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+      FieldCache.DEFAULT.getNumerics(ar, "sortedset", FieldCache.DOCUMENT2_INT_PARSER, false);
       fail();
     } catch (IllegalStateException expected) {}
     
@@ -571,16 +583,16 @@ public class TestFieldCache extends Luce
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
     
-    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.DOCUMENT2_INT_PARSER, true);
     assertEquals(0, ints.get(0));
     
-    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
+    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.DOCUMENT2_LONG_PARSER, true);
     assertEquals(0, longs.get(0));
     
-    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
+    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.DOCUMENT2_FLOAT_PARSER, true);
     assertEquals(0, floats.get(0));
     
-    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.DOCUMENT2_DOUBLE_PARSER, true);
     assertEquals(0, doubles.get(0));
     
     BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
@@ -608,17 +620,17 @@ public class TestFieldCache extends Luce
   public void testNonIndexedFields() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new StoredField("bogusbytes", "bogus"));
-    doc.add(new StoredField("bogusshorts", "bogus"));
-    doc.add(new StoredField("bogusints", "bogus"));
-    doc.add(new StoredField("boguslongs", "bogus"));
-    doc.add(new StoredField("bogusfloats", "bogus"));
-    doc.add(new StoredField("bogusdoubles", "bogus"));
-    doc.add(new StoredField("bogusterms", "bogus"));
-    doc.add(new StoredField("bogustermsindex", "bogus"));
-    doc.add(new StoredField("bogusmultivalued", "bogus"));
-    doc.add(new StoredField("bogusbits", "bogus"));
+    Document2 doc = iw.newDocument();
+    doc.addStored("bogusbytes", "bogus");
+    doc.addStored("bogusshorts", "bogus");
+    doc.addStored("bogusints", "bogus");
+    doc.addStored("boguslongs", "bogus");
+    doc.addStored("bogusfloats", "bogus");
+    doc.addStored("bogusdoubles", "bogus");
+    doc.addStored("bogusterms", "bogus");
+    doc.addStored("bogustermsindex", "bogus");
+    doc.addStored("bogusmultivalued", "bogus");
+    doc.addStored("bogusbits", "bogus");
     iw.addDocument(doc);
     DirectoryReader ir = iw.getReader();
     iw.close();
@@ -629,16 +641,16 @@ public class TestFieldCache extends Luce
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
     
-    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
+    NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.DOCUMENT2_INT_PARSER, true);
     assertEquals(0, ints.get(0));
     
-    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
+    NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.DOCUMENT2_LONG_PARSER, true);
     assertEquals(0, longs.get(0));
     
-    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
+    NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.DOCUMENT2_FLOAT_PARSER, true);
     assertEquals(0, floats.get(0));
     
-    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
+    NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.DOCUMENT2_DOUBLE_PARSER, true);
     assertEquals(0, doubles.get(0));
     
     BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
@@ -669,9 +681,9 @@ public class TestFieldCache extends Luce
     IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
     cfg.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
-    Document doc = new Document();
-    LongField field = new LongField("f", 0L, Store.YES);
-    doc.add(field);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("f");
+
     final long[] values = new long[TestUtil.nextInt(random(), 1, 10)];
     for (int i = 0; i < values.length; ++i) {
       final long v;
@@ -690,17 +702,17 @@ public class TestFieldCache extends Luce
           break;
       }
       values[i] = v;
+      Document2 doc = iw.newDocument();
       if (v == 0 && random().nextBoolean()) {
         // missing
-        iw.addDocument(new Document());
       } else {
-        field.setLongValue(v);
-        iw.addDocument(doc);
+        doc.addLong("f", v);
       }
+      iw.addDocument(doc);
     }
     iw.forceMerge(1);
     final DirectoryReader reader = iw.getReader();
-    final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
+    final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.DOCUMENT2_LONG_PARSER, false);
     for (int i = 0; i < values.length; ++i) {
       assertEquals(values[i], longs.get(i));
     }
@@ -715,9 +727,9 @@ public class TestFieldCache extends Luce
     IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
     cfg.setMergePolicy(newLogMergePolicy());
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
-    Document doc = new Document();
-    IntField field = new IntField("f", 0, Store.YES);
-    doc.add(field);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.disableSorting("f");
+
     final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
     for (int i = 0; i < values.length; ++i) {
       final int v;
@@ -736,17 +748,17 @@ public class TestFieldCache extends Luce
           break;
       }
       values[i] = v;
+      Document2 doc = iw.newDocument();
       if (v == 0 && random().nextBoolean()) {
         // missing
-        iw.addDocument(new Document());
       } else {
-        field.setIntValue(v);
-        iw.addDocument(doc);
+        doc.addInt("f", v);
       }
+      iw.addDocument(doc);
     }
     iw.forceMerge(1);
     final DirectoryReader reader = iw.getReader();
-    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
+    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.DOCUMENT2_INT_PARSER, false);
     for (int i = 0; i < values.length; ++i) {
       assertEquals(values[i], ints.get(i));
     }