Posted to commits@lucene.apache.org by mi...@apache.org on 2015/01/02 13:02:33 UTC

svn commit: r1649007 [9/10] - in /lucene/dev/branches/lucene6005/lucene: analysis/common/src/test/org/apache/lucene/analysis/core/ analysis/common/src/test/org/apache/lucene/collation/ benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ benchm...

Copied: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestUniqueFields.java (from r1644463, lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestUniqueAtom.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestUniqueFields.java?p2=lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestUniqueFields.java&p1=lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestUniqueAtom.java&r1=1644463&r2=1649007&rev=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestUniqueAtom.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestUniqueFields.java Fri Jan  2 12:02:31 2015
@@ -17,22 +17,29 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.document.LowSchemaField;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ReferenceManager;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class TestUniqueAtom extends LuceneTestCase {
+public class TestUniqueFields extends LuceneTestCase {
 
   public void testBasic1() throws Exception {
     Directory dir = newDirectory();
@@ -49,12 +56,39 @@ public class TestUniqueAtom extends Luce
       assertEquals("field \"field\" must be unique, but value=[6f 6e 65] appears more than once", nue.getMessage());
     }
     DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    assertEquals(1, hitCount(s, fieldTypes.newBinaryTermQuery("field", new BytesRef("one"))));
     assertEquals(1, r.numDocs());
     r.close();
     w.close();
     dir.close();
   }
 
+  public void testBasic1Int() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (NotUniqueException nue) {
+      // expected
+      assertEquals("field \"field\" must be unique, but value=[80 0 0 11] appears more than once", nue.getMessage());
+    }
+    DirectoryReader r = DirectoryReader.open(w, true);
+    assertEquals(1, r.numDocs());
+    IndexSearcher s = newSearcher(r);
+    FieldTypes fieldTypes = s.getFieldTypes();
+    assertEquals(1, hitCount(s, fieldTypes.newIntTermQuery("field", 17)));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
   public void testBasic2() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
@@ -96,6 +130,151 @@ public class TestUniqueAtom extends Luce
     dir.close();
   }
 
+  public void testBasic2Int() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (NotUniqueException nue) {
+      // expected
+      assertEquals("field \"field\" must be unique, but value=[80 0 0 11] appears more than once", nue.getMessage());
+    }
+
+    doc = w.newDocument();
+    doc.addUniqueInt("field", 22);
+    w.addDocument(doc);
+
+    mgr.maybeRefresh();
+
+    r = mgr.acquire();
+    try {
+      assertEquals(2, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidChange1() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addAtom("field", new BytesRef("one"));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addUniqueAtom("field", new BytesRef("two")),
+               "field \"field\": cannot change isUnique from false to true");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidChange1Int() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addUniqueInt("field", 22),
+               "field \"field\": cannot change isUnique from false to true");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidChange2() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", new BytesRef("one"));
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addAtom("field", new BytesRef("two")),
+               "field \"field\": cannot change isUnique from true to false");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidChange2Int() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addInt("field", 22),
+               "field \"field\": cannot change isUnique from true to false");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidChange3() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addAtom("field", "one");
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addUniqueAtom("field", "two"),
+               "field \"field\": cannot change isUnique from false to true");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidChange3Int() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addInt("field", 17);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addUniqueInt("field", 22),
+               "field \"field\": cannot change isUnique from false to true");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidChange4() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", "one");
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addAtom("field", "two"),
+               "field \"field\": cannot change isUnique from true to false");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidChange4Int() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    final Document doc2 = w.newDocument();
+    shouldFail(() -> doc2.addInt("field", 22),
+               "field \"field\": cannot change isUnique from true to false");
+    w.close();
+    dir.close();
+  }
+
   public void testDeletes() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
@@ -130,6 +309,41 @@ public class TestUniqueAtom extends Luce
     dir.close();
   }
 
+  public void testDeletesInt() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.deleteDocuments(fieldTypes.newIntTerm("field", 17));
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.forceMerge(1);
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+    dir.close();
+  }
+
   public void testUpdates() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
@@ -164,13 +378,46 @@ public class TestUniqueAtom extends Luce
     dir.close();
   }
 
+  public void testUpdatesInt() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
+
+    Document doc = w.newDocument();
+    doc.addUniqueInt("field", 17);
+    w.addDocument(doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.updateDocument(fieldTypes.newIntTerm("field", 17), doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.updateDocument(fieldTypes.newIntTerm("field", 17), doc);
+    if (random().nextBoolean()) {
+      mgr.maybeRefresh();
+    }
+    w.forceMerge(1);
+    mgr.maybeRefresh();
+
+    DirectoryReader r = mgr.acquire();
+    try {
+      assertEquals(1, r.numDocs());
+    } finally {
+      mgr.release(r);
+    }
+
+    w.close();
+    dir.close();
+  }
+
   public void testRandom() throws Exception {
     Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     FieldTypes fieldTypes = w.getFieldTypes();
 
-    // nocommit add in deletes/updateDocument here:
-
     Set<BytesRef> terms = new HashSet<>();
     final int numTerms = atLeast(1000);
     while (terms.size() < numTerms) {
@@ -188,8 +435,27 @@ public class TestUniqueAtom extends Luce
 
               // First add randomly for a while:
               for(int iter=0;iter<3*numTerms;iter++) {
-                Document doc = w.newDocument();
                 BytesRef term = termsList.get(random().nextInt(termsList.size()));
+                if (random().nextInt(4) == 1) {
+                  w.deleteDocuments(new Term("field", term));
+                } else {
+                  Document doc = w.newDocument();
+                  doc.addUniqueAtom("field", term);
+                  if (random().nextBoolean()) {
+                    w.updateDocument(new Term("field", term), doc);
+                  } else {
+                    try {
+                      w.addDocument(doc);
+                    } catch (NotUniqueException nue) {
+                      // OK
+                    }
+                  }
+                }
+              }
+
+              // Then add every single term, so we know all will be added:
+              for(BytesRef term : termsList) {
+                Document doc = w.newDocument();
                 doc.addUniqueAtom("field", term);
                 if (random().nextBoolean()) {
                   w.updateDocument(new Term("field", term), doc);
@@ -201,13 +467,73 @@ public class TestUniqueAtom extends Luce
                   }
                 }
               }
+            } catch (Exception e) {
+              throw new RuntimeException(e);
+            }
+          }
+        };
+      threads[i].start();
+    }
+    startingGun.countDown();
+    for(Thread thread : threads) {
+      thread.join();
+    }
+    w.forceMerge(1);
+    IndexReader r = w.getReader();
+    assertEquals(terms.size(), r.maxDoc());
+    assertEquals(terms.size(), MultiFields.getTerms(r, "field").size());
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRandomInt() throws Exception {
+    Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    final FieldTypes fieldTypes = w.getFieldTypes();
+
+    Set<Integer> terms = new HashSet<>();
+    final int numTerms = atLeast(1000);
+    while (terms.size() < numTerms) {
+      terms.add(random().nextInt());
+    }
+
+    final List<Integer> termsList = new ArrayList<>(terms);
+    final CountDownLatch startingGun = new CountDownLatch(1);
+    Thread[] threads = new Thread[TestUtil.nextInt(random(), 2, 5)];
+    for(int i=0;i<threads.length;i++) {
+      threads[i] = new Thread() {
+          @Override
+          public void run() {
+            try {
+              startingGun.await();
+
+              // First add randomly for a while:
+              for(int iter=0;iter<3*numTerms;iter++) {
+                Integer term = termsList.get(random().nextInt(termsList.size()));
+                if (iter > 0 && random().nextInt(4) == 1) {
+                  w.deleteDocuments(fieldTypes.newIntTerm("field", term.intValue()));
+                } else {
+                  Document doc = w.newDocument();
+                  doc.addUniqueInt("field", term.intValue());
+                  if (random().nextBoolean()) {
+                    w.updateDocument(fieldTypes.newIntTerm("field", term.intValue()), doc);
+                  } else {
+                    try {
+                      w.addDocument(doc);
+                    } catch (NotUniqueException nue) {
+                      // OK
+                    }
+                  }
+                }
+              }
 
               // Then add every single term, so we know all will be added:
-              for(BytesRef term : termsList) {
+              for(Integer term : termsList) {
                 Document doc = w.newDocument();
-                doc.addUniqueAtom("field", term);
+                doc.addUniqueInt("field", term.intValue());
                 if (random().nextBoolean()) {
-                  w.updateDocument(new Term("field", term), doc);
+                  w.updateDocument(fieldTypes.newIntTerm("field", term.intValue()), doc);
                 } else {
                   try {
                     w.addDocument(doc);
@@ -236,48 +562,62 @@ public class TestUniqueAtom extends Luce
     dir.close();
   }
 
-  /** Make sure CheckIndex detects violation of unique constraint. */
+  /** Make sure CheckIndex detects violation of unique constraint, and -exorcise properly repairs it. */
   public void testExcCheckIndex() throws Exception {
-    MockDirectoryWrapper dir = newMockDirectory();
-    // we intentionally create a corrupt index:
-    dir.setCheckIndexOnClose(false);
-
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
-
-    Document doc = w.newDocument();
-    doc.addUniqueAtom("field", new BytesRef("one"));
-    w.close();
-    SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
-    String fieldTypesBytes = infos.getUserData().get(FieldTypes.FIELD_TYPES_KEY);
-    assertNotNull(fieldTypesBytes);
-    w.close();
-
-    w = new IndexWriter(dir, newIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
     ReferenceManager<DirectoryReader> mgr = w.getReaderManager();
-    doc = w.newDocument();
-    doc.addAtom("field", new BytesRef("one"));
+    Document doc2 = w.newDocument();
+    doc2.addUniqueAtom("field", "one");
 
-    w.addDocument(doc);
-    if (random().nextBoolean()) {
-      mgr.maybeRefresh();
-    }
-
-    w.addDocument(doc);
-    w.close();
-    infos = SegmentInfos.readLatestCommit(dir);
-    // nocommit this is evil, we need to close this workaround and find a different way to test:
-    infos.getUserData().put(FieldTypes.FIELD_TYPES_KEY, fieldTypesBytes);
-    infos.prepareCommit(dir);
-    infos.finishCommit(dir);
+    w.addDocument(doc2);
+    mgr.maybeRefresh();
 
     try {
-      TestUtil.checkIndex(dir, true, true);
+      w.addDocument(doc2);
       fail("did not hit exception");
-    } catch (RuntimeException re) {
+    } catch (NotUniqueException nue) {
       // expected
-      assertEquals("field=\"field\" is supposed to be unique, but isn't: e.g. term=[6f 6e 65] matches both docID=0 and docID=1", re.getMessage());
     }
+    IndexReader r = mgr.acquire();
+    w.addIndexes(new IndexReader[] {r});
+    r.close();
+    w.close();
+
+    try (CheckIndex checker = new CheckIndex(dir)) {
+        checker.setCrossCheckTermVectors(true);
+        ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+        checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false);
+        CheckIndex.Status status = checker.checkIndex(null);
+        assertFalse(status.clean);
+        assertEquals(1, status.nonUniqueCount);
+        checker.exorciseIndex(status);
+        assertTrue(bos.toString(IOUtils.UTF_8).contains("field=\"field\" is supposed to be unique, but isn't: e.g. term=[6f 6e 65] matches both docID=0 and docID=2; total 1 non-unique documents would be deleted"));
+      }
     
+    r = DirectoryReader.open(dir);
+    assertEquals(1, r.numDocs());
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, hitCount(s, new TermQuery(new Term("field", "one"))));
+    r.close();
+  }
+
+  public void testMultiValuedUnique() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("field");
+    Document doc = w.newDocument();
+    doc.addUniqueAtom("field", "foo");
+    doc.addUniqueAtom("field", "bar");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, s.search(fieldTypes.newStringTermQuery("field", "foo"), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newStringTermQuery("field", "bar"), 1).totalHits);
+    r.close();
+    w.close();
     dir.close();
   }
 }

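Taken together, the tests above show the unique-field contract on this branch: the first addUniqueAtom/addUniqueInt call marks the field as unique in the FieldTypes schema, a later document carrying an already-indexed value is rejected with NotUniqueException, and deleteDocuments/updateDocument by the unique term behave as expected across refreshes and merges. A minimal sketch of the pattern, assuming the branch APIs exactly as the tests use them (IndexWriter.newDocument, NotUniqueException, FieldTypes.newIntTerm); the class name and index path below are hypothetical:

    import java.nio.file.Paths;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.NotUniqueException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class UniqueFieldSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(Paths.get("/tmp/unique-demo")); // hypothetical path
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

        Document doc = w.newDocument();   // branch API: the writer hands out schema-aware documents
        doc.addUniqueInt("id", 17);       // first use marks "id" unique in the FieldTypes schema
        w.addDocument(doc);

        try {
          w.addDocument(doc);             // same unique value again
        } catch (NotUniqueException nue) {
          // the writer rejects the duplicate instead of silently indexing it
        }

        // Updating by the unique term replaces the existing document, so the
        // index still holds exactly one document for id=17 afterwards:
        FieldTypes fieldTypes = w.getFieldTypes();
        w.updateDocument(fieldTypes.newIntTerm("id", 17), doc);

        w.close();
        dir.close();
      }
    }

The rewritten testExcCheckIndex also shows the repair path: addIndexes can slip a duplicate past the writer-level check, after which CheckIndex reports a non-zero nonUniqueCount and exorciseIndex deletes the extra documents.
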
Copied: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TermFilterTest.java (from r1643659, lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TermFilterTest.java?p2=lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TermFilterTest.java&p1=lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java&r1=1643659&r2=1649007&rev=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TermFilterTest.java Fri Jan  2 12:02:31 2015
@@ -1,4 +1,4 @@
-package org.apache.lucene.queries;
+package org.apache.lucene.search;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -37,6 +37,7 @@ import org.apache.lucene.search.ScoreDoc
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -178,4 +179,18 @@ public class TermFilterTest extends Luce
     return new TermFilter(term);
   }
 
+  public void testAllMatches() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = newRandomIndexWriter(dir);
+    int numDocs = atLeast(10000);
+    for(int i=0;i<numDocs;i++) {
+      Document doc = w.newDocument();
+      doc.addAtom("field", "foo");
+      w.addDocument(doc);
+    }
+    IndexReader r = w.getReader();
+    IndexSearcher s = newSearcher(r);
+    assertEquals(numDocs, s.search(new ConstantScoreQuery(new TermFilter(new Term("field", "foo"))), 1).totalHits);
+    IOUtils.close(r, w, dir);
+  }
 }

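The new testAllMatches runs TermFilter, relocated from the queries module into org.apache.lucene.search, through a ConstantScoreQuery so it can be executed as a query. A minimal sketch of that counting pattern, assuming the relocated TermFilter and an already-open IndexSearcher (the class and helper names below are hypothetical):

    import java.io.IOException;

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermFilter; // branch location, per the diff above

    public class TermFilterCountSketch {
      /** Counts documents matching field:value via a constant-scored TermFilter. */
      public static int countTerm(IndexSearcher searcher, String field, String value) throws IOException {
        // ConstantScoreQuery gives every match the same score; totalHits then
        // reports how many documents carry the term.
        Query q = new ConstantScoreQuery(new TermFilter(new Term(field, value)));
        return searcher.search(q, 1).totalHits;
      }
    }
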
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java Fri Jan  2 12:02:31 2015
@@ -200,9 +200,6 @@ public class TestAutomatonQuery extends
     Automaton pfx = Automata.makeString("do");
     Automaton prefixAutomaton = Operations.concatenate(pfx, Automata.makeAnyString());
     AutomatonQuery aq = new AutomatonQuery(newTerm("bogus"), prefixAutomaton);
-    // nocommit not true anymore
-    //Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN);
-    //assertTrue(aq.getTermsEnum(terms) instanceof PrefixTermsEnum);
     assertEquals(3, automatonQueryNrHits(aq));
   }
   

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java Fri Jan  2 12:02:31 2015
@@ -262,7 +262,7 @@ public class TestCachingWrapperFilter ex
     // returns default empty docidset, always cacheable:
     assertDocIdSetCacheable(reader, fieldTypes.newIntRangeFilter("test", Integer.valueOf(10000), true, Integer.valueOf(-10000), true), true);
     // is cacheable:
-    assertDocIdSetCacheable(reader, fieldTypes.newDocValuesRangeFilter("test", Integer.valueOf(10), true, Integer.valueOf(20), true), false);
+    assertDocIdSetCacheable(reader, fieldTypes.newIntDocValuesRangeFilter("test", Integer.valueOf(10), true, Integer.valueOf(20), true), false);
     // a fixedbitset filter is always cacheable
     assertDocIdSetCacheable(reader, new Filter() {
       @Override

Copied: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestDocValuesRangeFilter.java (from r1648667, lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestDocValuesRangeFilter.java?p2=lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestDocValuesRangeFilter.java&p1=lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java&r1=1648667&r2=1649007&rev=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestDocValuesRangeFilter.java Fri Jan  2 12:02:31 2015
@@ -30,7 +30,7 @@ import org.apache.lucene.store.Directory
 import org.junit.Test;
 
 /**
- * A basic 'positive' Unit test class for the FieldCacheRangeFilter class.
+ * A basic 'positive' Unit test class for the DocValues range filters.
  *
  * <p>
  * NOTE: at the moment, this class only tests for 'positive' results,
@@ -38,7 +38,7 @@ import org.junit.Test;
  * nor does it adequately test 'negative' results.  It also does not test
  * that garbage in results in an Exception.
  */
-public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
+public class TestDocValuesRangeFilter extends BaseTestRangeFilter {
 
   @Test
   public void testRangeFilterId() throws IOException {
@@ -61,67 +61,67 @@ public class TestFieldCacheRangeFilter e
     FieldTypes fieldTypes = search.getFieldTypes();
 
     // test id, bounded on both ends
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,T,maxIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,maxIP,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,T,maxIP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,maxIP,F), numDocs).scoreDocs;
     assertEquals("all but last", numDocs-1, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,F,maxIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,maxIP,T), numDocs).scoreDocs;
     assertEquals("all but first", numDocs-1, result.length);
         
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,F,maxIP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,maxIP,F), numDocs).scoreDocs;
     assertEquals("all but ends", numDocs-2, result.length);
     
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",medIP,T,maxIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",medIP,T,maxIP,T), numDocs).scoreDocs;
     assertEquals("med and up", 1+ maxId-medId, result.length);
         
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,T,medIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,medIP,T), numDocs).scoreDocs;
     assertEquals("up to med", 1+ medId-minId, result.length);
 
     // unbounded id
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",(String)null,T,null,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",(String)null,T,null,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,T,null,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,null,F), numDocs).scoreDocs;
     assertEquals("min and up", numDocs, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",null,F,maxIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",null,F,maxIP,T), numDocs).scoreDocs;
     assertEquals("max and down", numDocs, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,F,null,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,null,F), numDocs).scoreDocs;
     assertEquals("not min, but up", numDocs-1, result.length);
         
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",null,F,maxIP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",null,F,maxIP,F), numDocs).scoreDocs;
     assertEquals("not max, but down", numDocs-1, result.length);
         
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",medIP,T,maxIP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",medIP,T,maxIP,F), numDocs).scoreDocs;
     assertEquals("med and up, not max", maxId-medId, result.length);
         
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,F,medIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,medIP,T), numDocs).scoreDocs;
     assertEquals("not min, up to med", medId-minId, result.length);
 
     // very small sets
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,F,minIP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,F,minIP,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",medIP,F,medIP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",medIP,F,medIP,F), numDocs).scoreDocs;
     assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",maxIP,F,maxIP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",maxIP,F,maxIP,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",minIP,T,minIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",minIP,T,minIP,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",null,F,minIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",null,F,minIP,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",maxIP,T,maxIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",maxIP,T,maxIP,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",maxIP,T,null,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",maxIP,T,null,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("id",medIP,T,medIP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("id",medIP,T,medIP,T), numDocs).scoreDocs;
     assertEquals("med,med,T,T", 1, result.length);
   }
 
@@ -144,47 +144,47 @@ public class TestFieldCacheRangeFilter e
     // test extremes, bounded on both ends
         
     FieldTypes fieldTypes = search.getFieldTypes();
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",minRP,T,maxRP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,T,maxRP,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",minRP,T,maxRP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,T,maxRP,F), numDocs).scoreDocs;
     assertEquals("all but biggest", numDocs-1, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",minRP,F,maxRP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,F,maxRP,T), numDocs).scoreDocs;
     assertEquals("all but smallest", numDocs-1, result.length);
         
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",minRP,F,maxRP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,F,maxRP,F), numDocs).scoreDocs;
     assertEquals("all but extremes", numDocs-2, result.length);
     
     // unbounded
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",minRP,T,null,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,T,null,F), numDocs).scoreDocs;
     assertEquals("smallest and up", numDocs, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",null,F,maxRP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",null,F,maxRP,T), numDocs).scoreDocs;
     assertEquals("biggest and down", numDocs, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",minRP,F,null,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,F,null,F), numDocs).scoreDocs;
     assertEquals("not smallest, but up", numDocs-1, result.length);
         
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",null,F,maxRP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",null,F,maxRP,F), numDocs).scoreDocs;
     assertEquals("not biggest, but down", numDocs-1, result.length);
         
     // very small sets
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",minRP,F,minRP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,F,minRP,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",maxRP,F,maxRP,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",maxRP,F,maxRP,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",minRP,T,minRP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",minRP,T,minRP,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",null,F,minRP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",null,F,minRP,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",maxRP,T,maxRP,T), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",maxRP,T,maxRP,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q, fieldTypes.newDocValuesRangeFilter("rand",maxRP,T,null,F), numDocs).scoreDocs;
+    result = search.search(q, fieldTypes.newStringDocValuesRangeFilter("rand",maxRP,T,null,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
   }
   
@@ -208,75 +208,75 @@ public class TestFieldCacheRangeFilter e
     // test id, bounded on both ends
         
     FieldTypes fieldTypes = search.getFieldTypes();
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,T,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,maxIdO,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,T,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,maxIdO,F), numDocs).scoreDocs;
     assertEquals("all but last", numDocs-1, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,F,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,maxIdO,T), numDocs).scoreDocs;
     assertEquals("all but first", numDocs-1, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,F,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,maxIdO,F), numDocs).scoreDocs;
     assertEquals("all but ends", numDocs-2, result.length);
     
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",medIdO,T,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",medIdO,T,maxIdO,T), numDocs).scoreDocs;
     assertEquals("med and up", 1+ maxId-medId, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,T,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,medIdO,T), numDocs).scoreDocs;
     assertEquals("up to med", 1+ medId-minId, result.length);
     
     // unbounded id
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",(Integer) null,T,null,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",(Integer) null,T,null,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,T,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,null,F), numDocs).scoreDocs;
     assertEquals("min and up", numDocs, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",null,F,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",null,F,maxIdO,T), numDocs).scoreDocs;
     assertEquals("max and down", numDocs, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,F,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,null,F), numDocs).scoreDocs;
     assertEquals("not min, but up", numDocs-1, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",null,F,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",null,F,maxIdO,F), numDocs).scoreDocs;
     assertEquals("not max, but down", numDocs-1, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",medIdO,T,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",medIdO,T,maxIdO,F), numDocs).scoreDocs;
     assertEquals("med and up, not max", maxId-medId, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,F,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,medIdO,T), numDocs).scoreDocs;
     assertEquals("not min, up to med", medId-minId, result.length);
 
     // very small sets
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,F,minIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,F,minIdO,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",medIdO,F,medIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",medIdO,F,medIdO,F), numDocs).scoreDocs;
     assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",maxIdO,F,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",maxIdO,F,maxIdO,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",minIdO,T,minIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",minIdO,T,minIdO,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",null,F,minIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",null,F,minIdO,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",maxIdO,T,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",maxIdO,T,maxIdO,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",maxIdO,T,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",maxIdO,T,null,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",medIdO,T,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",medIdO,T,medIdO,T), numDocs).scoreDocs;
     assertEquals("med,med,T,T", 1, result.length);
     
     // special cases
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",Integer.valueOf(Integer.MAX_VALUE),F,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",Integer.valueOf(Integer.MAX_VALUE),F,null,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",null,F,Integer.valueOf(Integer.MIN_VALUE),F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",null,F,Integer.valueOf(Integer.MIN_VALUE),F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",maxIdO,T,minIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",maxIdO,T,minIdO,T), numDocs).scoreDocs;
     assertEquals("inverse range", 0, result.length);
   }
   
@@ -300,75 +300,75 @@ public class TestFieldCacheRangeFilter e
     // test id, bounded on both ends
         
     FieldTypes fieldTypes = search.getFieldTypes();
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,T,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,maxIdO,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,T,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,maxIdO,F), numDocs).scoreDocs;
     assertEquals("all but last", numDocs-1, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,F,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,maxIdO,T), numDocs).scoreDocs;
     assertEquals("all but first", numDocs-1, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,F,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,maxIdO,F), numDocs).scoreDocs;
     assertEquals("all but ends", numDocs-2, result.length);
     
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",medIdO,T,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",medIdO,T,maxIdO,T), numDocs).scoreDocs;
     assertEquals("med and up", 1+ maxId-medId, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,T,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,medIdO,T), numDocs).scoreDocs;
     assertEquals("up to med", 1+ medId-minId, result.length);
     
     // unbounded id
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",(Long) null,T,null,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",(Long) null,T,null,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,T,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,null,F), numDocs).scoreDocs;
     assertEquals("min and up", numDocs, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",null,F,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",null,F,maxIdO,T), numDocs).scoreDocs;
     assertEquals("max and down", numDocs, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,F,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,null,F), numDocs).scoreDocs;
     assertEquals("not min, but up", numDocs-1, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",null,F,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",null,F,maxIdO,F), numDocs).scoreDocs;
     assertEquals("not max, but down", numDocs-1, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",medIdO,T,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",medIdO,T,maxIdO,F), numDocs).scoreDocs;
     assertEquals("med and up, not max", maxId-medId, result.length);
         
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,F,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,medIdO,T), numDocs).scoreDocs;
     assertEquals("not min, up to med", medId-minId, result.length);
 
     // very small sets
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,F,minIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,F,minIdO,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",medIdO,F,medIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",medIdO,F,medIdO,F), numDocs).scoreDocs;
     assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",maxIdO,F,maxIdO,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",maxIdO,F,maxIdO,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",minIdO,T,minIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",minIdO,T,minIdO,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",null,F,minIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",null,F,minIdO,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",maxIdO,T,maxIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",maxIdO,T,maxIdO,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",maxIdO,T,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",maxIdO,T,null,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",medIdO,T,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",medIdO,T,medIdO,T), numDocs).scoreDocs;
     assertEquals("med,med,T,T", 1, result.length);
     
     // special cases
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",Long.valueOf(Long.MAX_VALUE),F,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",Long.valueOf(Long.MAX_VALUE),F,null,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",null,F,Long.valueOf(Long.MIN_VALUE),F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",null,F,Long.valueOf(Long.MIN_VALUE),F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_long",maxIdO,T,minIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newLongDocValuesRangeFilter("id_long",maxIdO,T,minIdO,T), numDocs).scoreDocs;
     assertEquals("inverse range", 0, result.length);
   }
   
@@ -388,19 +388,19 @@ public class TestFieldCacheRangeFilter e
     Query q = new TermQuery(new Term("body","body"));
 
     FieldTypes fieldTypes = search.getFieldTypes();
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_float",minIdO,T,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",minIdO,T,medIdO,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs/2, result.length);
     int count = 0;
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_float",null,F,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",null,F,medIdO,T), numDocs).scoreDocs;
     count += result.length;
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_float",medIdO,F,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",medIdO,F,null,F), numDocs).scoreDocs;
     count += result.length;
     assertEquals("sum of two concenatted ranges", numDocs, count);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_float",(Float) null,T,null,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",(Float) null,T,null,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_float",Float.valueOf(Float.POSITIVE_INFINITY),F,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",Float.valueOf(Float.POSITIVE_INFINITY),F,null,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_float",null,F,Float.valueOf(Float.NEGATIVE_INFINITY),F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newFloatDocValuesRangeFilter("id_float",null,F,Float.valueOf(Float.NEGATIVE_INFINITY),F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
   }
   
@@ -418,19 +418,19 @@ public class TestFieldCacheRangeFilter e
     Query q = new TermQuery(new Term("body","body"));
 
     FieldTypes fieldTypes = search.getFieldTypes();
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_double",minIdO,T,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",minIdO,T,medIdO,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs/2, result.length);
     int count = 0;
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_double",null,F,medIdO,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",null,F,medIdO,T), numDocs).scoreDocs;
     count += result.length;
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_double",medIdO,F,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",medIdO,F,null,F), numDocs).scoreDocs;
     count += result.length;
     assertEquals("sum of two concenatted ranges", numDocs, count);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_double",(Double) null,T,null,T), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",(Double) null,T,null,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_double",Double.valueOf(Double.POSITIVE_INFINITY),F,null,F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",Double.valueOf(Double.POSITIVE_INFINITY),F,null,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_double",null,F,Double.valueOf(Double.NEGATIVE_INFINITY),F), numDocs).scoreDocs;
+    result = search.search(q,fieldTypes.newDoubleDocValuesRangeFilter("id_double",null,F,Double.valueOf(Double.NEGATIVE_INFINITY),F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
   }
   
@@ -459,19 +459,19 @@ public class TestFieldCacheRangeFilter e
     ScoreDoc[] result;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",-20,T,20,T), 100).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",-20,T,20,T), 100).scoreDocs;
     assertEquals("find all", 40, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",0,T,20,T), 100).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",0,T,20,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",-20,T,0,T), 100).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",-20,T,0,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",10,T,20,T), 100).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",10,T,20,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
 
-    result = search.search(q,fieldTypes.newDocValuesRangeFilter("id_int",-20,T,-10,T), 100).scoreDocs;
+    result = search.search(q,fieldTypes.newIntDocValuesRangeFilter("id_int",-20,T,-10,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
     reader.close();
     dir.close();

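The bulk of the changes above are a mechanical rename: the untyped FieldTypes.newDocValuesRangeFilter becomes per-type factories (newStringDocValuesRangeFilter, newIntDocValuesRangeFilter, newLongDocValuesRangeFilter, newFloatDocValuesRangeFilter, newDoubleDocValuesRangeFilter), each keeping the (field, min, minInclusive, max, maxInclusive) argument shape, where a null bound leaves that end of the range open. A statement-level sketch of the renamed calls, assuming an open IndexSearcher over an index with the field names used in the tests above (the bound values are hypothetical):

    // Per-type DocValues range filters, following the call shapes in the diff;
    // a null bound means "unbounded on that side".
    FieldTypes fieldTypes = searcher.getFieldTypes();
    Filter ints    = fieldTypes.newIntDocValuesRangeFilter("id_int", -20, true, 20, true);
    Filter longs   = fieldTypes.newLongDocValuesRangeFilter("id_long", null, false, 100L, true);
    Filter doubles = fieldTypes.newDoubleDocValuesRangeFilter("id_double", 0.5, true, null, false);
    Filter strings = fieldTypes.newStringDocValuesRangeFilter("id", "a", true, "m", false);
    TopDocs hits   = searcher.search(new TermQuery(new Term("body", "body")), ints, 100);
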
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestSort.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestSort.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestSort.java Fri Jan  2 12:02:31 2015
@@ -18,11 +18,14 @@ package org.apache.lucene.search;
  */
 
 import java.io.IOException;
+import java.util.Locale;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;

Added: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestSortLocale.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestSortLocale.java?rev=1649007&view=auto
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestSortLocale.java (added)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestSortLocale.java Fri Jan  2 12:02:31 2015
@@ -0,0 +1,121 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.text.Collator;
+import java.util.Locale;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldTypes;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestSortLocale extends LuceneTestCase {
+
+  public void testBasic() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = newIndexWriter(dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setSortLocale("collated", Locale.ENGLISH);
+
+    Document doc = w.newDocument();
+    doc.addAtom("field", "ABC");
+    doc.addAtom("collated", "ABC");
+    w.addDocument(doc);
+
+    doc = w.newDocument();
+    doc.addAtom("field", "abc");
+    doc.addAtom("collated", "abc");
+    w.addDocument(doc);
+
+    DirectoryReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs td = s.search(new MatchAllDocsQuery(), 5, fieldTypes.newSort("collated"));
+    assertEquals("abc", r.document(td.scoreDocs[0].doc).get("field"));
+    assertEquals("ABC", r.document(td.scoreDocs[1].doc).get("field"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testRanges() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.setSortLocale("collated", Locale.getDefault()); // uses -Dtests.locale
+
+    Collator collator = Collator.getInstance(Locale.getDefault());
+    if (random().nextBoolean()) {
+      // nocommit FieldTypes must expose this?
+      // collator.setStrength(Collator.PRIMARY);
+    }
+    
+    int numDocs = atLeast(500);
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = iw.newDocument();
+      String value = TestUtil.randomSimpleString(random());
+      doc.addAtom("field", value);
+      doc.addAtom("collated", value);
+      iw.addDocument(doc);
+    }
+    
+    IndexReader ir = iw.getReader();
+    iw.close();
+    IndexSearcher is = newSearcher(ir);
+    
+    int numChecks = atLeast(100);
+    for (int i = 0; i < numChecks; i++) {
+      String start = TestUtil.randomSimpleString(random());
+      String end = TestUtil.randomSimpleString(random());
+      Query query = new ConstantScoreQuery(fieldTypes.newStringDocValuesRangeFilter("collated", start, true, end, true));
+      doTestRanges(is, start, end, query, collator);
+    }
+    
+    ir.close();
+    dir.close();
+  }
+  
+  private void doTestRanges(IndexSearcher is, String startPoint, String endPoint, Query query, Collator collator) throws Exception { 
+    QueryUtils.check(query);
+    
+    // positive test
+    TopDocs docs = is.search(query, is.getIndexReader().maxDoc());
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = is.doc(doc.doc).getString("field");
+      assertTrue(collator.compare(value, startPoint) >= 0);
+      assertTrue(collator.compare(value, endPoint) <= 0);
+    }
+    
+    // negative test
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD);
+    bq.add(query, BooleanClause.Occur.MUST_NOT);
+    docs = is.search(bq, is.getIndexReader().maxDoc());
+    for (ScoreDoc doc : docs.scoreDocs) {
+      String value = is.doc(doc.doc).getString("field");
+      assertTrue(collator.compare(value, startPoint) < 0 || collator.compare(value, endPoint) > 0);
+    }
+  }
+}
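
[Editorial note, not part of the commit: as a contrast to testBasic above, a short sketch of the expected order when no sort locale is set. It assumes that atom fields then sort by their raw term bytes on this branch, so the uppercase value comes first ('A' is 0x41, 'a' is 0x61); that default is hedged, not verified by this diff.]

  // Sketch only: assumes atom fields sort in binary term order when
  // setSortLocale is not called (assumed branch default).
  public void testBinaryOrderSketch() throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = newIndexWriter(dir);
    FieldTypes fieldTypes = w.getFieldTypes();

    Document doc = w.newDocument();
    doc.addAtom("field", "ABC");
    w.addDocument(doc);

    doc = w.newDocument();
    doc.addAtom("field", "abc");
    w.addDocument(doc);

    DirectoryReader r = DirectoryReader.open(w, true);
    IndexSearcher s = newSearcher(r);
    TopDocs td = s.search(new MatchAllDocsQuery(), 5, fieldTypes.newSort("field"));
    assertEquals("ABC", r.document(td.scoreDocs[0].doc).get("field"));  // byte order, unlike the collated test above
    assertEquals("abc", r.document(td.scoreDocs[1].doc).get("field"));
    r.close();
    w.close();
    dir.close();
  }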

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java Fri Jan  2 12:02:31 2015
@@ -23,8 +23,10 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -123,14 +125,9 @@ public class TestWildcard
 
     MultiTermQuery wq = new WildcardQuery(new Term("field", "prefix*"));
     assertMatches(searcher, wq, 2);
-    // nocommit not true anymore ... how can we re-assert?
-    //Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "field");
-    // assertTrue(wq.getTermsEnum(terms) instanceof PrefixTermsEnum);
     
     wq = new WildcardQuery(new Term("field", "*"));
     assertMatches(searcher, wq, 2);
-    //assertFalse(wq.getTermsEnum(terms) instanceof PrefixTermsEnum);
-    //assertFalse(wq.getTermsEnum(terms).getClass().getSimpleName().contains("AutomatonTermsEnum"));
     reader.close();
     indexStore.close();
   }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java Fri Jan  2 12:02:31 2015
@@ -107,7 +107,7 @@ public class TestSimilarityBase extends
 
     for (int i = 0; i < docs.length; i++) {
       Document d = writer.newDocument();
-      d.addStored(FIELD_ID, Integer.toString(i));
+      d.addStoredString(FIELD_ID, Integer.toString(i));
       d.addLargeText(FIELD_BODY, docs[i]);
       writer.addDocument(d);
     }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestLeaveFilesIfTestFails.java Fri Jan  2 12:02:31 2015
@@ -39,6 +39,7 @@ public class TestLeaveFilesIfTestFails e
   }
   
   public static class Nested1 extends WithNestedTests.AbstractNestedTest {
+
     static Path file;
     public void testDummy() {
       file = createTempDir("leftover");

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestSetupTeardownChaining.java Fri Jan  2 12:02:31 2015
@@ -65,7 +65,7 @@ public class TestSetupTeardownChaining e
     Assert.assertEquals(1, result.getFailureCount());
     Failure failure = result.getFailures().get(0);
     Assert.assertTrue(failure.getMessage()
-        .contains("One of the overrides of setUp does not propagate the call."));
+                      .contains("One of the overrides of setUp does not propagate the call."));
   }
   
   /**

Modified: lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java (original)
+++ lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java Fri Jan  2 12:02:31 2015
@@ -383,9 +383,6 @@ public class FacetsConfig {
 
         // For drill down:
         doc.addAtom(drillDownFieldName, fullPath);
-
-        // nocommit why were we doing this...?
-        //doc.add(new StringField(indexFieldName, facetField.dim, Field.Store.NO));
       }
     }
   }

Modified: lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java (original)
+++ lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java Fri Jan  2 12:02:31 2015
@@ -48,21 +48,21 @@ public class AllGroupsCollectorTest exte
     Document doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "random text");
-    doc.addStored("id", "1");
+    doc.addStoredString("id", "1");
     w.addDocument(doc);
 
     // 1
     doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "some more random text blob");
-    doc.addStored("id", "2");
+    doc.addStoredString("id", "2");
     w.addDocument(doc);
 
     // 2
     doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "some more random textual data");
-    doc.addStored("id", "3");
+    doc.addStoredString("id", "3");
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
@@ -70,27 +70,27 @@ public class AllGroupsCollectorTest exte
     doc = w.newDocument();
     addGroupField(doc, groupField, "author2");
     doc.addLargeText("content", "some random text");
-    doc.addStored("id", "4");
+    doc.addStoredString("id", "4");
     w.addDocument(doc);
 
     // 4
     doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
     doc.addLargeText("content", "some more random text");
-    doc.addStored("id", "5");
+    doc.addStoredString("id", "5");
     w.addDocument(doc);
 
     // 5
     doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
     doc.addLargeText("content", "random blob");
-    doc.addStored("id", "6");
+    doc.addStoredString("id", "6");
     w.addDocument(doc);
 
     // 6 -- no author field
     doc = w.newDocument();
     doc.addLargeText("content", "random word stuck in alot of other text");
-    doc.addStored("id", "6");
+    doc.addStoredString("id", "6");
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = newSearcher(w.getReader());

Modified: lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java (original)
+++ lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupingSearchTest.java Fri Jan  2 12:02:31 2015
@@ -56,21 +56,21 @@ public class GroupingSearchTest extends
     Document doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "random text");
-    doc.addStored("id", "1");
+    doc.addStoredString("id", "1");
     documents.add(doc);
 
     // 1
     doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "some more random text");
-    doc.addStored("id", "2");
+    doc.addStoredString("id", "2");
     documents.add(doc);
 
     // 2
     doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "some more random textual data");
-    doc.addStored("id", "3");
+    doc.addStoredString("id", "3");
     doc.addAtom("groupend", "x");
     documents.add(doc);
     w.addDocuments(documents);
@@ -80,7 +80,7 @@ public class GroupingSearchTest extends
     doc = w.newDocument();
     addGroupField(doc, groupField, "author2");
     doc.addLargeText("content", "some random text");
-    doc.addStored("id", "4");
+    doc.addStoredString("id", "4");
     doc.addAtom("groupend", "x");
     w.addDocument(doc);
 
@@ -88,14 +88,14 @@ public class GroupingSearchTest extends
     doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
     doc.addLargeText("content", "some more random text");
-    doc.addStored("id", "5");
+    doc.addStoredString("id", "5");
     documents.add(doc);
 
     // 5
     doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
     doc.addLargeText("content", "random");
-    doc.addStored("id", "6");
+    doc.addStoredString("id", "6");
     doc.addAtom("groupend", "x");
     documents.add(doc);
     w.addDocuments(documents);
@@ -104,7 +104,7 @@ public class GroupingSearchTest extends
     // 6 -- no author field
     doc = w.newDocument();
     doc.addLargeText("content", "random word stuck in alot of other text");
-    doc.addStored("id", "6");
+    doc.addStoredString("id", "6");
     doc.addAtom("groupend", "x");
 
     w.addDocument(doc);

Modified: lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (original)
+++ lucene/dev/branches/lucene6005/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java Fri Jan  2 12:02:31 2015
@@ -68,48 +68,48 @@ public class TestGrouping extends Lucene
     Document doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "random text");
-    doc.addStored("id", "1");
+    doc.addStoredString("id", "1");
     w.addDocument(doc);
 
     // 1
     doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "some more random text");
-    doc.addStored("id", "2");
+    doc.addStoredString("id", "2");
     w.addDocument(doc);
 
     // 2
     doc = w.newDocument();
     addGroupField(doc, groupField, "author1");
     doc.addLargeText("content", "some more random textual data");
-    doc.addStored("id", "3");
+    doc.addStoredString("id", "3");
     w.addDocument(doc);
 
     // 3
     doc = w.newDocument();
     addGroupField(doc, groupField, "author2");
     doc.addLargeText("content", "some random text");
-    doc.addStored("id", "4");
+    doc.addStoredString("id", "4");
     w.addDocument(doc);
 
     // 4
     doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
     doc.addLargeText("content", "some more random text");
-    doc.addStored("id", "5");
+    doc.addStoredString("id", "5");
     w.addDocument(doc);
 
     // 5
     doc = w.newDocument();
     addGroupField(doc, groupField, "author3");
     doc.addLargeText("content", "random");
-    doc.addStored("id", "6");
+    doc.addStoredString("id", "6");
     w.addDocument(doc);
 
     // 6 -- no author field
     doc = w.newDocument();
     doc.addLargeText("content", "random word stuck in alot of other text");
-    doc.addStored("id", "6");
+    doc.addStoredString("id", "6");
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = newSearcher(w.getReader());

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java Fri Jan  2 12:02:31 2015
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.Iterator;
 
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
@@ -84,6 +85,12 @@ public class TermVectorLeafReader extend
   }
 
   @Override
+  public FieldTypes getFieldTypes() {
+    // nocommit todo
+    return null;
+  }
+
+  @Override
   public void addCoreClosedListener(CoreClosedListener listener) {
     addCoreClosedListenerAsReaderClosedListener(this, listener);
   }

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java Fri Jan  2 12:02:31 2015
@@ -31,6 +31,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
@@ -77,12 +78,9 @@ import org.apache.lucene.util.automaton.
  * <b>WARNING</b>: The code is very new and probably still has some exciting bugs!
  * <p>
  * Example usage:
-// nocommit fixme
  * <pre class="prettyprint">
- *   // configure field with offsets at index time
- *   FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
- *   offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
- *   Field body = new Field("body", "foobar", offsetsType);
+ *   // large text and short text fields are indexed with offsets by default:
+ *   doc.addLargeText("body", "foobar");
  *
  *   // retrieve highlights at query time 
  *   PostingsHighlighter highlighter = new PostingsHighlighter();
@@ -116,26 +114,45 @@ public class PostingsHighlighter {
   /** Set the first time {@link #getScorer} is called,
    *  and then reused. */
   private PassageScorer defaultScorer;
+
+  private final FieldTypes fieldTypes;
   
   /**
    * Creates a new highlighter with {@link #DEFAULT_MAX_LENGTH}.
    */
   public PostingsHighlighter() {
-    this(DEFAULT_MAX_LENGTH);
+    this(null, DEFAULT_MAX_LENGTH);
+  }
+
+  /**
+   * Creates a new highlighter with {@link #DEFAULT_MAX_LENGTH} and the given {@link FieldTypes}.
+   */
+  public PostingsHighlighter(FieldTypes fieldTypes) {
+    this(fieldTypes, DEFAULT_MAX_LENGTH);
+  }
+
+  /**
+   * Creates a new highlighter, specifying maximum content length.
+   */
+  public PostingsHighlighter(int maxLength) {
+    this(null, maxLength);
   }
   
   /**
    * Creates a new highlighter, specifying maximum content length.
+   * @param fieldTypes {@link FieldTypes} (null is allowed).  If non-null, the highlighter defaults to a WholeBreakIterator when highlighting short text
+   *   and atom fields.
    * @param maxLength maximum content size to process.
    * @throws IllegalArgumentException if <code>maxLength</code> is negative or <code>Integer.MAX_VALUE</code>
    */
-  public PostingsHighlighter(int maxLength) {
+  public PostingsHighlighter(FieldTypes fieldTypes, int maxLength) {
     if (maxLength < 0 || maxLength == Integer.MAX_VALUE) {
       // two reasons: no overflow problems in BreakIterator.preceding(offset+1),
       // our sentinel in the offsets queue uses this value to terminate.
       throw new IllegalArgumentException("maxLength must be < Integer.MAX_VALUE");
     }
     this.maxLength = maxLength;
+    this.fieldTypes = fieldTypes;
   }
   
   /** Returns the {@link BreakIterator} to use for
@@ -143,7 +160,16 @@ public class PostingsHighlighter {
    *  {@link BreakIterator#getSentenceInstance(Locale)} by default;
    *  subclasses can override to customize. */
   protected BreakIterator getBreakIterator(String field) {
-    return BreakIterator.getSentenceInstance(Locale.ROOT);
+    if (fieldTypes != null) {
+      FieldTypes.ValueType valueType = fieldTypes.getValueType(field);
+      if (valueType == FieldTypes.ValueType.TEXT) {
+        return BreakIterator.getSentenceInstance(Locale.ROOT);
+      } else {
+        return new WholeBreakIterator();
+      }
+    } else {
+      return BreakIterator.getSentenceInstance(Locale.ROOT);
+    }
   }
 
   /** Returns the {@link PassageFormatter} to use for
@@ -388,6 +414,7 @@ public class PostingsHighlighter {
     Map<String,Object[]> highlights = new HashMap<>();
     for (int i = 0; i < fields.length; i++) {
       String field = fields[i];
+      checkField(field);
       int numPassages = maxPassages[i];
       Term floor = new Term(field, "");
       Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
@@ -411,6 +438,12 @@ public class PostingsHighlighter {
     return highlights;
   }
 
+  private void checkField(String fieldName) {
+    if (fieldTypes != null && fieldTypes.getHighlighted(fieldName) == false) {
+      throw new IllegalArgumentException("field=\"" + fieldName + "\" was not indexed with FieldTypes.enableHighlighting");
+    }
+  }
+
   /** Loads the String values for each field X docID to be
    *  highlighted.  By default this loads from stored
    *  fields, but a subclass can change the source.  This
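
[Editorial note, not part of the commit: a condensed usage sketch of the new FieldTypes-aware API above, mirroring the test added below. Passing FieldTypes lets the highlighter choose a WholeBreakIterator for short text and atom fields, while large text fields still get sentence snippets; the contains() assertion is deliberately loose about surrounding whitespace.]

  // Sketch: FieldTypes-aware highlighting, in the style of the new test below.
  public void testFieldTypesHighlighterSketch() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
        newIndexWriterConfig(new MockAnalyzer(random())));
    Document doc = iw.newDocument();
    doc.addLargeText("body", "This is a test. Just a test highlighting from postings.");
    iw.addDocument(doc);
    IndexReader ir = iw.getReader();
    iw.close();

    IndexSearcher searcher = newSearcher(ir);
    // the searcher's FieldTypes drive per-field BreakIterator selection:
    PostingsHighlighter highlighter = new PostingsHighlighter(searcher.getFieldTypes());
    Query query = new TermQuery(new Term("body", "highlighting"));
    TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
    String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
    assertTrue(snippets[0].contains("<b>highlighting</b>"));
    ir.close();
    dir.close();
  }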

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java Fri Jan  2 12:02:31 2015
@@ -1005,4 +1005,79 @@ public class TestPostingsHighlighter ext
     ir.close();
     dir.close();
   }
+
+  public void testWithFieldTypes() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableHighlighting("yes");
+    fieldTypes.disableFastRanges("yes");
+    
+    Document doc = iw.newDocument();
+    doc.addShortText("title", "highlighting on this title field should.  Be entire.");
+    doc.addLargeText("body", "This is a test. Just a test highlighting from postings. Feel free to ignore.");
+    doc.addAtom("not", "no");
+    doc.addAtom("yes", "highlighting");
+    iw.addDocument(doc);
+
+    doc = iw.newDocument();
+    doc.addShortText("title", "Highlighting the first term. Hope it works.");
+    doc.addLargeText("body", "Highlighting the first term. Hope it works.");
+    doc.addAtom("not", "no");
+    doc.addAtom("yes", "highlighting");
+    iw.addDocument(doc);
+    
+    IndexReader ir = iw.getReader();
+    iw.close();
+    
+    // body field should be snippets:
+    IndexSearcher searcher = newSearcher(ir);
+    PostingsHighlighter highlighter = new PostingsHighlighter(searcher.getFieldTypes());
+    Query query = new TermQuery(new Term("body", "highlighting"));
+    TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(2, topDocs.totalHits);
+    String[] snippets = highlighter.highlight("body", query, searcher, topDocs);
+    assertEquals(2, snippets.length);
+    assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
+    assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
+
+    // title field should be "whole" highlighted:
+    query = new TermQuery(new Term("title", "highlighting"));
+    topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(2, topDocs.totalHits);
+    snippets = highlighter.highlight("title", query, searcher, topDocs);
+    assertEquals(2, snippets.length);
+    assertEquals("<b>highlighting</b> on this title field should.  Be entire.", snippets[0]);
+    assertEquals("<b>Highlighting</b> the first term. Hope it works.", snippets[1]);
+
+    // this field doesn't exist
+    try {
+      highlighter.highlight("nofield", query, searcher, topDocs);
+      fail("did not hit exception");
+    } catch (Exception e) {
+      assertEquals(e.getMessage(), "field \"nofield\" is not recognized");
+    }
+
+    // we didn't enable highlighting for this atom field
+    try {
+      highlighter.highlight("not", query, searcher, topDocs);
+      fail("did not hit exception");
+    } catch (Exception e) {
+      assertEquals(e.getMessage(), "field=\"not\" was indexed with FieldTypes.enableHighlighting");
+    }
+    
+    // we did enable highlighting for this atom field:
+    query = new TermQuery(new Term("yes", "highlighting"));
+    topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(2, topDocs.totalHits);
+    snippets = highlighter.highlight("yes", query, searcher, topDocs);
+    assertEquals(2, snippets.length);
+    assertEquals("<b>highlighting</b>", snippets[0]);
+    assertEquals("<b>highlighting</b>", snippets[1]);
+
+    ir.close();
+    dir.close();
+  }
 }

Modified: lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java?rev=1649007&r1=1649006&r2=1649007&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java (original)
+++ lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java Fri Jan  2 12:02:31 2015
@@ -1302,7 +1302,7 @@ public class TestBlockJoin extends Lucen
 
     Document child = w.newDocument();
     docs.add(child);
-    child.addStored("childID", "0");
+    child.addStoredString("childID", "0");
     child.addLargeText("childText", "text");
 
     // parent last:
@@ -1366,7 +1366,7 @@ public class TestBlockJoin extends Lucen
 
     Document child = w.newDocument();
     docs.add(child);
-    child.addStored("childID", "0");
+    child.addStoredString("childID", "0");
     child.addLargeText("childText", "text");
 
     // parent last:
@@ -1424,7 +1424,7 @@ public class TestBlockJoin extends Lucen
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
     Document parent = w.newDocument();
-    parent.addStored("parentID", "0");
+    parent.addStoredString("parentID", "0");
     parent.addLargeText("parentText", "text");
     parent.addAtom("isParent", "yes");
 
@@ -1432,7 +1432,7 @@ public class TestBlockJoin extends Lucen
 
     Document child = w.newDocument();
     docs.add(child);
-    child.addStored("childID", "0");
+    child.addStoredString("childID", "0");
     child.addLargeText("childText", "text");
 
     // parent last:
@@ -1444,7 +1444,7 @@ public class TestBlockJoin extends Lucen
     parent = w.newDocument();
     parent.addLargeText("parentText", "text");
     parent.addAtom("isParent", "yes");
-    parent.addStored("parentID", "1");
+    parent.addStoredString("parentID", "1");
 
     // parent last:
     docs.add(parent);