You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rm...@apache.org on 2010/07/13 18:12:24 UTC
svn commit: r963780 [4/6] - in /lucene/dev/branches/branch_3x: ./ lucene/
lucene/backwards/src/ lucene/backwards/src/test/org/apache/lucene/analysis/
lucene/backwards/src/test/org/apache/lucene/document/
lucene/backwards/src/test/org/apache/lucene/inde...
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java Tue Jul 13 16:12:21 2010
@@ -23,9 +23,11 @@ import org.apache.lucene.analysis.Whites
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCaseJ4;
import org.apache.lucene.util.NumericUtils;
@@ -45,12 +47,15 @@ public class TestNumericRangeQuery32 ext
private static final int noDocs = 10000*_TestUtil.getRandomMultiplier();
private static RAMDirectory directory = null;
+ private static IndexReader reader = null;
private static IndexSearcher searcher = null;
-
+
@BeforeClass
public static void beforeClass() throws Exception {
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+ Random random = newStaticRandom(TestNumericRangeQuery32.class);
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
NumericField
field8 = new NumericField("field8", 8, Field.Store.YES, true),
@@ -82,15 +87,17 @@ public class TestNumericRangeQuery32 ext
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
+ searcher=new IndexSearcher(reader);
writer.close();
- searcher=new IndexSearcher(directory, true);
}
@AfterClass
public static void afterClass() throws Exception {
searcher.close();
searcher = null;
+ reader.close();
+ reader = null;
directory.close();
directory = null;
}
@@ -146,7 +153,7 @@ public class TestNumericRangeQuery32 ext
assertEquals("First doc"+type, 2*distance+startOffset, Integer.parseInt(doc.get(field)) );
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc"+type, (1+count)*distance+startOffset, Integer.parseInt(doc.get(field)) );
- if (i>0) {
+ if (i>0 && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Distinct term number is equal for all query types", lastTerms, terms);
}
lastTerms = terms;
@@ -365,7 +372,7 @@ public class TestNumericRangeQuery32 ext
termCountT += tq.getTotalNumberOfTerms();
termCountC += cq.getTotalNumberOfTerms();
}
- if (precisionStep == Integer.MAX_VALUE) {
+ if (precisionStep == Integer.MAX_VALUE && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC);
} else if (VERBOSE) {
System.out.println("Average number of terms during random search on '" + field + "':");
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java Tue Jul 13 16:12:21 2010
@@ -23,8 +23,10 @@ import org.apache.lucene.analysis.Whites
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCaseJ4;
import org.apache.lucene.util.NumericUtils;
@@ -44,12 +46,15 @@ public class TestNumericRangeQuery64 ext
private static final int noDocs = 10000*_TestUtil.getRandomMultiplier();
private static RAMDirectory directory = null;
+ private static IndexReader reader = null;
private static IndexSearcher searcher = null;
@BeforeClass
public static void beforeClass() throws Exception {
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+ Random random = newStaticRandom(TestNumericRangeQuery64.class);
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
NumericField
field8 = new NumericField("field8", 8, Field.Store.YES, true),
@@ -85,15 +90,17 @@ public class TestNumericRangeQuery64 ext
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
+ searcher=new IndexSearcher(reader);
writer.close();
- searcher=new IndexSearcher(directory, true);
}
@AfterClass
public static void afterClass() throws Exception {
searcher.close();
searcher = null;
+ reader.close();
+ reader = null;
directory.close();
directory = null;
}
@@ -149,7 +156,7 @@ public class TestNumericRangeQuery64 ext
assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) );
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) );
- if (i>0) {
+ if (i>0 && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Distinct term number is equal for all query types", lastTerms, terms);
}
lastTerms = terms;
@@ -384,7 +391,7 @@ public class TestNumericRangeQuery64 ext
termCountT += tq.getTotalNumberOfTerms();
termCountC += cq.getTotalNumberOfTerms();
}
- if (precisionStep == Integer.MAX_VALUE) {
+ if (precisionStep == Integer.MAX_VALUE && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC);
} else if (VERBOSE) {
System.out.println("Average number of terms during random search on '" + field + "':");
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java Tue Jul 13 16:12:21 2010
@@ -21,8 +21,8 @@ import org.apache.lucene.util.LuceneTest
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
@@ -38,63 +38,69 @@ public class TestPhrasePrefixQuery exten
public TestPhrasePrefixQuery(String name) {
super(name);
}
-
- /**
+
+ /**
*
*/
- public void testPhrasePrefix()
- throws IOException
- {
- RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
- Document doc1 = new Document();
- Document doc2 = new Document();
- Document doc3 = new Document();
- Document doc4 = new Document();
- Document doc5 = new Document();
- doc1.add(new Field("body", "blueberry pie", Field.Store.YES, Field.Index.ANALYZED));
- doc2.add(new Field("body", "blueberry strudel", Field.Store.YES, Field.Index.ANALYZED));
- doc3.add(new Field("body", "blueberry pizza", Field.Store.YES, Field.Index.ANALYZED));
- doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES, Field.Index.ANALYZED));
- doc5.add(new Field("body", "piccadilly circus", Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc1);
- writer.addDocument(doc2);
- writer.addDocument(doc3);
- writer.addDocument(doc4);
- writer.addDocument(doc5);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
- //PhrasePrefixQuery query1 = new PhrasePrefixQuery();
- MultiPhraseQuery query1 = new MultiPhraseQuery();
- //PhrasePrefixQuery query2 = new PhrasePrefixQuery();
- MultiPhraseQuery query2 = new MultiPhraseQuery();
- query1.add(new Term("body", "blueberry"));
- query2.add(new Term("body", "strawberry"));
-
- LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
- IndexReader ir = IndexReader.open(indexStore, true);
-
- // this TermEnum gives "piccadilly", "pie" and "pizza".
- String prefix = "pi";
- TermEnum te = ir.terms(new Term("body", prefix + "*"));
- do {
- if (te.term().text().startsWith(prefix))
- {
- termsWithPrefix.add(te.term());
- }
- } while (te.next());
-
- query1.add(termsWithPrefix.toArray(new Term[0]));
- query2.add(termsWithPrefix.toArray(new Term[0]));
-
- ScoreDoc[] result;
- result = searcher.search(query1, null, 1000).scoreDocs;
- assertEquals(2, result.length);
-
- result = searcher.search(query2, null, 1000).scoreDocs;
- assertEquals(0, result.length);
- }
+ public void testPhrasePrefix() throws IOException {
+ RAMDirectory indexStore = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+ Document doc1 = new Document();
+ Document doc2 = new Document();
+ Document doc3 = new Document();
+ Document doc4 = new Document();
+ Document doc5 = new Document();
+ doc1.add(new Field("body", "blueberry pie", Field.Store.YES,
+ Field.Index.ANALYZED));
+ doc2.add(new Field("body", "blueberry strudel", Field.Store.YES,
+ Field.Index.ANALYZED));
+ doc3.add(new Field("body", "blueberry pizza", Field.Store.YES,
+ Field.Index.ANALYZED));
+ doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES,
+ Field.Index.ANALYZED));
+ doc5.add(new Field("body", "piccadilly circus", Field.Store.YES,
+ Field.Index.ANALYZED));
+ writer.addDocument(doc1);
+ writer.addDocument(doc2);
+ writer.addDocument(doc3);
+ writer.addDocument(doc4);
+ writer.addDocument(doc5);
+ IndexReader reader = writer.getReader();
+ writer.close();
+
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ // PhrasePrefixQuery query1 = new PhrasePrefixQuery();
+ MultiPhraseQuery query1 = new MultiPhraseQuery();
+ // PhrasePrefixQuery query2 = new PhrasePrefixQuery();
+ MultiPhraseQuery query2 = new MultiPhraseQuery();
+ query1.add(new Term("body", "blueberry"));
+ query2.add(new Term("body", "strawberry"));
+
+ LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
+
+ // this TermEnum gives "piccadilly", "pie" and "pizza".
+ String prefix = "pi";
+ TermEnum te = reader.terms(new Term("body", prefix + "*"));
+ do {
+ if (te.term().text().startsWith(prefix))
+ {
+ termsWithPrefix.add(te.term());
+ }
+ } while (te.next());
+
+ query1.add(termsWithPrefix.toArray(new Term[0]));
+ query2.add(termsWithPrefix.toArray(new Term[0]));
+
+ ScoreDoc[] result;
+ result = searcher.search(query1, null, 1000).scoreDocs;
+ assertEquals(2, result.length);
+
+ result = searcher.search(query2, null, 1000).scoreDocs;
+ assertEquals(0, result.length);
+ searcher.close();
+ reader.close();
+ indexStore.close();
+ }
}
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java Tue Jul 13 16:12:21 2010
@@ -46,12 +46,15 @@ public class TestPhraseQuery extends Luc
public static final float SCORE_COMP_THRESH = 1e-6f;
private IndexSearcher searcher;
+ private IndexReader reader;
private PhraseQuery query;
private RAMDirectory directory;
+ private Random random;
@Override
public void setUp() throws Exception {
super.setUp();
+ random = newRandom();
directory = new RAMDirectory();
Analyzer analyzer = new Analyzer() {
@Override
@@ -64,7 +67,8 @@ public class TestPhraseQuery extends Luc
return 100;
}
};
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
doc.add(new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
@@ -82,16 +86,17 @@ public class TestPhraseQuery extends Luc
doc.add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
query = new PhraseQuery();
}
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
directory.close();
super.tearDown();
}
@@ -211,14 +216,15 @@ public class TestPhraseQuery extends Luc
public void testPhraseQueryWithStopAnalyzer() throws Exception {
RAMDirectory directory = new RAMDirectory();
StopAnalyzer stopAnalyzer = new StopAnalyzer(Version.LUCENE_24);
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- Version.LUCENE_24, stopAnalyzer));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(Version.LUCENE_24, stopAnalyzer));
Document doc = new Document();
doc.add(new Field("field", "the stop words are here", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
+ IndexReader reader = writer.getReader();
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
// valid exact phrase query
PhraseQuery query = new PhraseQuery();
@@ -239,11 +245,14 @@ public class TestPhraseQuery extends Luc
searcher.close();
+ reader.close();
+ directory.close();
}
public void testPhraseQueryInConjunctionScorer() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
Document doc = new Document();
doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
@@ -254,10 +263,10 @@ public class TestPhraseQuery extends Luc
doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term("source", "marketing"));
@@ -277,8 +286,10 @@ public class TestPhraseQuery extends Luc
searcher.close();
+ reader.close();
- writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE));
+ writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE));
doc = new Document();
doc.add(new Field("contents", "map entry woo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
@@ -291,10 +302,10 @@ public class TestPhraseQuery extends Luc
doc.add(new Field("contents", "map foobarword entry woo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
termQuery = new TermQuery(new Term("contents","woo"));
phraseQuery = new PhraseQuery();
@@ -322,12 +333,14 @@ public class TestPhraseQuery extends Luc
searcher.close();
+ reader.close();
directory.close();
}
public void testSlopScoring() throws IOException {
Directory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
Document doc = new Document();
doc.add(new Field("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.ANALYZED));
@@ -341,10 +354,10 @@ public class TestPhraseQuery extends Luc
doc3.add(new Field("field", "foo firstname zzz yyy lastname foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc3);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher = new IndexSearcher(directory, true);
+ Searcher searcher = new IndexSearcher(reader);
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "firstname"));
query.add(new Term("field", "lastname"));
@@ -359,7 +372,10 @@ public class TestPhraseQuery extends Luc
assertEquals(1, hits[1].doc);
assertEquals(0.31, hits[2].score, 0.01);
assertEquals(2, hits[2].doc);
- QueryUtils.check(query,searcher);
+ QueryUtils.check(query,searcher);
+ searcher.close();
+ reader.close();
+ directory.close();
}
public void testToString() throws Exception {
@@ -587,13 +603,14 @@ public class TestPhraseQuery extends Luc
Directory dir = new MockRAMDirectory();
Analyzer analyzer = new WhitespaceAnalyzer();
- IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter w = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = new Field("f", "", Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
- Random r = newRandom();
+ Random r = random;
int NUM_DOCS = 10*_TestUtil.getRandomMultiplier();
for(int i=0;i<NUM_DOCS;i++) {
@@ -668,7 +685,7 @@ public class TestPhraseQuery extends Luc
}
reader.close();
- searcher.close();
+ s.close();
dir.close();
}
}
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java Tue Jul 13 16:12:21 2010
@@ -34,9 +34,9 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.queryParser.QueryParser;
@@ -89,15 +89,16 @@ public class TestPositionIncrement exten
}
};
Directory store = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document d = new Document();
d.add(new Field("field", "bogus", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- IndexSearcher searcher = new IndexSearcher(store, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
TermPositions pos = searcher.getIndexReader().termPositions(new Term("field", "1"));
pos.next();
@@ -221,6 +222,10 @@ public class TestPositionIncrement exten
q = (PhraseQuery) qp.parse("\"1 stop 2\"");
hits = searcher.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
+
+ searcher.close();
+ reader.close();
+ store.close();
}
private static class StopWhitespaceAnalyzer extends Analyzer {
@@ -239,8 +244,8 @@ public class TestPositionIncrement exten
public void testPayloadsPos0() throws Exception {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new TestPayloadAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new TestPayloadAnalyzer()));
Document doc = new Document();
doc.add(new Field("content",
new StringReader("a a b c d e a f g h i j a b k k")));
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java Tue Jul 13 16:12:21 2010
@@ -19,8 +19,9 @@ package org.apache.lucene.search;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -38,18 +39,19 @@ public class TestPrefixFilter extends Lu
"/Computers/Mac/One",
"/Computers/Mac/Two",
"/Computers/Windows"};
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
- writer.close();
+ IndexReader reader = writer.getReader();
// PrefixFilter combined with ConstantScoreQuery
PrefixFilter filter = new PrefixFilter(new Term("category", "/Computers"));
Query query = new ConstantScoreQuery(filter);
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(4, hits.length);
@@ -100,5 +102,10 @@ public class TestPrefixFilter extends Lu
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(0, hits.length);
+
+ writer.close();
+ searcher.close();
+ reader.close();
+ directory.close();
}
}
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java Tue Jul 13 16:12:21 2010
@@ -21,8 +21,9 @@ import org.apache.lucene.util.LuceneTest
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
@@ -40,12 +41,15 @@ public class TestPrefixInBooleanQuery ex
private static final String FIELD = "name";
private RAMDirectory directory = new RAMDirectory();
+ private IndexReader reader;
+ private IndexSearcher searcher;
@Override
protected void setUp() throws Exception {
super.setUp();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
for (int i = 0; i < 5137; ++i) {
Document doc = new Document();
@@ -73,40 +77,46 @@ public class TestPrefixInBooleanQuery ex
writer.addDocument(doc);
}
+ reader = writer.getReader();
+ searcher = new IndexSearcher(reader);
writer.close();
}
+ @Override
+ public void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
public void testPrefixQuery() throws Exception {
- IndexSearcher indexSearcher = new IndexSearcher(directory, true);
Query query = new PrefixQuery(new Term(FIELD, "tang"));
assertEquals("Number of matched documents", 2,
- indexSearcher.search(query, null, 1000).totalHits);
+ searcher.search(query, null, 1000).totalHits);
}
public void testTermQuery() throws Exception {
- IndexSearcher indexSearcher = new IndexSearcher(directory, true);
Query query = new TermQuery(new Term(FIELD, "tangfulin"));
assertEquals("Number of matched documents", 2,
- indexSearcher.search(query, null, 1000).totalHits);
+ searcher.search(query, null, 1000).totalHits);
}
public void testTermBooleanQuery() throws Exception {
- IndexSearcher indexSearcher = new IndexSearcher(directory, true);
BooleanQuery query = new BooleanQuery();
query.add(new TermQuery(new Term(FIELD, "tangfulin")),
BooleanClause.Occur.SHOULD);
query.add(new TermQuery(new Term(FIELD, "notexistnames")),
BooleanClause.Occur.SHOULD);
assertEquals("Number of matched documents", 2,
- indexSearcher.search(query, null, 1000).totalHits);
+ searcher.search(query, null, 1000).totalHits);
}
public void testPrefixBooleanQuery() throws Exception {
- IndexSearcher indexSearcher = new IndexSearcher(directory, true);
BooleanQuery query = new BooleanQuery();
query.add(new PrefixQuery(new Term(FIELD, "tang")),
BooleanClause.Occur.SHOULD);
query.add(new TermQuery(new Term(FIELD, "notexistnames")),
BooleanClause.Occur.SHOULD);
assertEquals("Number of matched documents", 2,
- indexSearcher.search(query, null, 1000).totalHits);
+ searcher.search(query, null, 1000).totalHits);
}
}
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java Tue Jul 13 16:12:21 2010
@@ -19,8 +19,9 @@ package org.apache.lucene.search;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -37,21 +38,26 @@ public class TestPrefixQuery extends Luc
String[] categories = new String[] {"/Computers",
"/Computers/Mac",
"/Computers/Windows"};
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
- writer.close();
+ IndexReader reader = writer.getReader();
PrefixQuery query = new PrefixQuery(new Term("category", "/Computers"));
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("All documents in /Computers category and below", 3, hits.length);
query = new PrefixQuery(new Term("category", "/Computers/Mac"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("One in /Computers/Mac", 1, hits.length);
+ writer.close();
+ searcher.close();
+ reader.close();
+ directory.close();
}
}
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java Tue Jul 13 16:12:21 2010
@@ -22,8 +22,9 @@ import org.apache.lucene.document.Docume
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
@@ -34,10 +35,12 @@ public class TestQueryWrapperFilter exte
public void testBasic() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
Document doc = new Document();
doc.add(new Field("field", "value", Store.NO, Index.ANALYZED));
writer.addDocument(doc);
+ IndexReader reader = writer.getReader();
writer.close();
TermQuery termQuery = new TermQuery(new Term("field", "value"));
@@ -45,7 +48,7 @@ public class TestQueryWrapperFilter exte
// should not throw exception with primitive query
QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery);
- IndexSearcher searcher = new IndexSearcher(dir, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
assertEquals(1, hits.totalHits);
hits = searcher.search(new MatchAllDocsQuery(), new CachingWrapperFilter(qwf), 10);
@@ -79,5 +82,8 @@ public class TestQueryWrapperFilter exte
assertEquals(0, hits.totalHits);
hits = searcher.search(new MatchAllDocsQuery(), new CachingWrapperFilter(qwf), 10);
assertEquals(0, hits.totalHits);
+ searcher.close();
+ reader.close();
+ dir.close();
}
}
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSimilarity.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSimilarity.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSimilarity.java Tue Jul 13 16:12:21 2010
@@ -23,6 +23,7 @@ import java.util.Collection;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
@@ -64,8 +65,9 @@ public class TestSimilarity extends Luce
public void testSimilarity() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)).setSimilarity(new SimpleSimilarity()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
+ .setSimilarity(new SimpleSimilarity()));
Document d1 = new Document();
d1.add(new Field("field", "a c", Field.Store.YES, Field.Index.ANALYZED));
@@ -75,10 +77,10 @@ public class TestSimilarity extends Luce
writer.addDocument(d1);
writer.addDocument(d2);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher = new IndexSearcher(store, true);
+ Searcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("field", "a");
@@ -173,5 +175,9 @@ public class TestSimilarity extends Luce
return true;
}
});
+
+ searcher.close();
+ reader.close();
+ store.close();
}
}
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java Tue Jul 13 16:12:21 2010
@@ -17,12 +17,16 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.util.Random;
+
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
@@ -45,6 +49,13 @@ public class TestSloppyPhraseQuery exten
private static final PhraseQuery QUERY_2 = makePhraseQuery( S_2 );
private static final PhraseQuery QUERY_4 = makePhraseQuery( "X A A");
+ private Random random;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ random = newRandom();
+ }
/**
* Test DOC_4 and QUERY_4.
@@ -116,18 +127,21 @@ public class TestSloppyPhraseQuery exten
query.setSlop(slop);
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+ RandomIndexWriter writer = new RandomIndexWriter(random, ramDir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
writer.addDocument(doc);
- writer.close();
- IndexSearcher searcher = new IndexSearcher(ramDir, true);
+ IndexReader reader = writer.getReader();
+
+ IndexSearcher searcher = new IndexSearcher(reader);
TopDocs td = searcher.search(query,null,10);
//System.out.println("slop: "+slop+" query: "+query+" doc: "+doc+" Expecting number of hits: "+expectedNumResults+" maxScore="+td.getMaxScore());
assertEquals("slop: "+slop+" query: "+query+" doc: "+doc+" Wrong number of hits", expectedNumResults, td.totalHits);
//QueryUtils.check(query,searcher);
-
+ writer.close();
searcher.close();
+ reader.close();
ramDir.close();
return td.getMaxScore();
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSort.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSort.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSort.java Tue Jul 13 16:12:21 2010
@@ -36,6 +36,7 @@ import org.apache.lucene.index.IndexRead
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogMergePolicy;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.BooleanClause.Occur;
@@ -68,6 +69,7 @@ public class TestSort extends LuceneTest
private Query queryG;
private Sort sort;
+ private Random random = newRandom();
public TestSort (String name) {
super (name);
@@ -107,10 +109,9 @@ public class TestSort extends LuceneTest
private Searcher getIndex (boolean even, boolean odd)
throws IOException {
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new SimpleAnalyzer(
- TEST_VERSION_CURRENT)).setMaxBufferedDocs(2));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(1000);
+ RandomIndexWriter writer = new RandomIndexWriter(random, indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+
for (int i=0; i<data.length; ++i) {
if (((i%2)==0 && even) || ((i%2)==1 && odd)) {
Document doc = new Document();
@@ -130,9 +131,9 @@ public class TestSort extends LuceneTest
writer.addDocument (doc);
}
}
- //writer.optimize ();
+ IndexReader reader = writer.getReader();
writer.close ();
- IndexSearcher s = new IndexSearcher (indexStore, true);
+ IndexSearcher s = new IndexSearcher (reader);
s.setDefaultFieldSortScoring(true, true);
return s;
}
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java Tue Jul 13 16:12:21 2010
@@ -22,8 +22,8 @@ import org.apache.lucene.analysis.Simple
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
@@ -40,17 +40,18 @@ public class TestSpanQueryFilter extends
public void testFilterWorks() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+
for (int i = 0; i < 500; i++) {
Document document = new Document();
document.add(new Field("field", English.intToEnglish(i) + " equals " + English.intToEnglish(i),
Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(document);
}
+ IndexReader reader = writer.getReader();
writer.close();
- IndexReader reader = IndexReader.open(dir, true);
-
SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(10).trim()));
SpanQueryFilter filter = new SpanQueryFilter(query);
SpanFilterResult result = filter.bitSpans(reader);
@@ -69,6 +70,7 @@ public class TestSpanQueryFilter extends
assertTrue("info.getPositions() Size: " + info.getPositions().size() + " is not: " + 2, info.getPositions().size() == 2);
}
reader.close();
+ dir.close();
}
int getDocIdSetSize(DocIdSet docIdSet) throws Exception {
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java Tue Jul 13 16:12:21 2010
@@ -23,8 +23,8 @@ import java.util.Locale;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -32,387 +32,448 @@ import org.apache.lucene.store.RAMDirect
/**
* A basic 'positive' Unit test class for the TermRangeFilter class.
- *
+ *
* <p>
- * NOTE: at the moment, this class only tests for 'positive' results,
- * it does not verify the results to ensure there are no 'false positives',
- * nor does it adequately test 'negative' results. It also does not test
- * that garbage in results in an Exception.
+ * NOTE: at the moment, this class only tests for 'positive' results, it does
+ * not verify the results to ensure there are no 'false positives', nor does it
+ * adequately test 'negative' results. It also does not test that garbage in
+ * results in an Exception.
*/
public class TestTermRangeFilter extends BaseTestRangeFilter {
-
- public TestTermRangeFilter(String name) {
- super(name);
- }
- public TestTermRangeFilter() {
- super();
- }
-
- public void testRangeFilterId() throws IOException {
-
- IndexReader reader = IndexReader.open(signedIndex.index, true);
- IndexSearcher search = new IndexSearcher(reader);
-
- int medId = ((maxId - minId) / 2);
-
- String minIP = pad(minId);
- String maxIP = pad(maxId);
- String medIP = pad(medId);
-
- int numDocs = reader.numDocs();
-
- assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
- ScoreDoc[] result;
- Query q = new TermQuery(new Term("body","body"));
-
- // test id, bounded on both ends
-
- result = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,T), numDocs).scoreDocs;
- assertEquals("find all", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,F), numDocs).scoreDocs;
- assertEquals("all but last", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,T), numDocs).scoreDocs;
- assertEquals("all but first", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,F), numDocs).scoreDocs;
- assertEquals("all but ends", numDocs-2, result.length);
-
- result = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,T), numDocs).scoreDocs;
- assertEquals("med and up", 1+ maxId-medId, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,medIP,T,T), numDocs).scoreDocs;
- assertEquals("up to med", 1+ medId-minId, result.length);
-
- // unbounded id
-
- result = search.search(q,new TermRangeFilter("id",minIP,null,T,F), numDocs).scoreDocs;
- assertEquals("min and up", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("id",null,maxIP,F,T), numDocs).scoreDocs;
- assertEquals("max and down", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,null,F,F), numDocs).scoreDocs;
- assertEquals("not min, but up", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",null,maxIP,F,F), numDocs).scoreDocs;
- assertEquals("not max, but down", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,F), numDocs).scoreDocs;
- assertEquals("med and up, not max", maxId-medId, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,medIP,F,T), numDocs).scoreDocs;
- assertEquals("not min, up to med", medId-minId, result.length);
-
- // very small sets
-
- result = search.search(q,new TermRangeFilter("id",minIP,minIP,F,F), numDocs).scoreDocs;
- assertEquals("min,min,F,F", 0, result.length);
- result = search.search(q,new TermRangeFilter("id",medIP,medIP,F,F), numDocs).scoreDocs;
- assertEquals("med,med,F,F", 0, result.length);
- result = search.search(q,new TermRangeFilter("id",maxIP,maxIP,F,F), numDocs).scoreDocs;
- assertEquals("max,max,F,F", 0, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,minIP,T,T), numDocs).scoreDocs;
- assertEquals("min,min,T,T", 1, result.length);
- result = search.search(q,new TermRangeFilter("id",null,minIP,F,T), numDocs).scoreDocs;
- assertEquals("nul,min,F,T", 1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",maxIP,maxIP,T,T), numDocs).scoreDocs;
- assertEquals("max,max,T,T", 1, result.length);
- result = search.search(q,new TermRangeFilter("id",maxIP,null,T,F), numDocs).scoreDocs;
- assertEquals("max,nul,T,T", 1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",medIP,medIP,T,T), numDocs).scoreDocs;
- assertEquals("med,med,T,T", 1, result.length);
-
- }
-
- public void testRangeFilterIdCollating() throws IOException {
-
- IndexReader reader = IndexReader.open(signedIndex.index, true);
- IndexSearcher search = new IndexSearcher(reader);
-
- Collator c = Collator.getInstance(Locale.ENGLISH);
-
- int medId = ((maxId - minId) / 2);
-
- String minIP = pad(minId);
- String maxIP = pad(maxId);
- String medIP = pad(medId);
-
- int numDocs = reader.numDocs();
-
- assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
- Query q = new TermQuery(new Term("body","body"));
-
- // test id, bounded on both ends
- int numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,T,c), 1000).totalHits;
- assertEquals("find all", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,F,c), 1000).totalHits;
- assertEquals("all but last", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,T,c), 1000).totalHits;
- assertEquals("all but first", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,F,c), 1000).totalHits;
- assertEquals("all but ends", numDocs-2, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,T,c), 1000).totalHits;
- assertEquals("med and up", 1+ maxId-medId, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,medIP,T,T,c), 1000).totalHits;
- assertEquals("up to med", 1+ medId-minId, numHits);
-
- // unbounded id
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,null,T,F,c), 1000).totalHits;
- assertEquals("min and up", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",null,maxIP,F,T,c), 1000).totalHits;
- assertEquals("max and down", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,null,F,F,c), 1000).totalHits;
- assertEquals("not min, but up", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",null,maxIP,F,F,c), 1000).totalHits;
- assertEquals("not max, but down", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,F,c), 1000).totalHits;
- assertEquals("med and up, not max", maxId-medId, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,medIP,F,T,c), 1000).totalHits;
- assertEquals("not min, up to med", medId-minId, numHits);
-
- // very small sets
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,minIP,F,F,c), 1000).totalHits;
- assertEquals("min,min,F,F", 0, numHits);
- numHits = search.search(q,new TermRangeFilter("id",medIP,medIP,F,F,c), 1000).totalHits;
- assertEquals("med,med,F,F", 0, numHits);
- numHits = search.search(q,new TermRangeFilter("id",maxIP,maxIP,F,F,c), 1000).totalHits;
- assertEquals("max,max,F,F", 0, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,minIP,T,T,c), 1000).totalHits;
- assertEquals("min,min,T,T", 1, numHits);
- numHits = search.search(q,new TermRangeFilter("id",null,minIP,F,T,c), 1000).totalHits;
- assertEquals("nul,min,F,T", 1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",maxIP,maxIP,T,T,c), 1000).totalHits;
- assertEquals("max,max,T,T", 1, numHits);
- numHits = search.search(q,new TermRangeFilter("id",maxIP,null,T,F,c), 1000).totalHits;
- assertEquals("max,nul,T,T", 1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",medIP,medIP,T,T,c), 1000).totalHits;
- assertEquals("med,med,T,T", 1, numHits);
- }
-
- public void testRangeFilterRand() throws IOException {
-
- IndexReader reader = IndexReader.open(signedIndex.index, true);
- IndexSearcher search = new IndexSearcher(reader);
-
- String minRP = pad(signedIndex.minR);
- String maxRP = pad(signedIndex.maxR);
+
+ public void testRangeFilterId() throws IOException {
- int numDocs = reader.numDocs();
-
- assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
- ScoreDoc[] result;
- Query q = new TermQuery(new Term("body","body"));
-
- // test extremes, bounded on both ends
-
- result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,T), numDocs).scoreDocs;
- assertEquals("find all", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,F), numDocs).scoreDocs;
- assertEquals("all but biggest", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,T), numDocs).scoreDocs;
- assertEquals("all but smallest", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,F), numDocs).scoreDocs;
- assertEquals("all but extremes", numDocs-2, result.length);
+ IndexReader reader = signedIndexReader;
+ IndexSearcher search = new IndexSearcher(reader);
- // unbounded
-
- result = search.search(q,new TermRangeFilter("rand",minRP,null,T,F), numDocs).scoreDocs;
- assertEquals("smallest and up", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",null,maxRP,F,T), numDocs).scoreDocs;
- assertEquals("biggest and down", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,null,F,F), numDocs).scoreDocs;
- assertEquals("not smallest, but up", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",null,maxRP,F,F), numDocs).scoreDocs;
- assertEquals("not biggest, but down", numDocs-1, result.length);
-
- // very small sets
-
- result = search.search(q,new TermRangeFilter("rand",minRP,minRP,F,F), numDocs).scoreDocs;
- assertEquals("min,min,F,F", 0, result.length);
- result = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,F,F), numDocs).scoreDocs;
- assertEquals("max,max,F,F", 0, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,minRP,T,T), numDocs).scoreDocs;
- assertEquals("min,min,T,T", 1, result.length);
- result = search.search(q,new TermRangeFilter("rand",null,minRP,F,T), numDocs).scoreDocs;
- assertEquals("nul,min,F,T", 1, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,T,T), numDocs).scoreDocs;
- assertEquals("max,max,T,T", 1, result.length);
- result = search.search(q,new TermRangeFilter("rand",maxRP,null,T,F), numDocs).scoreDocs;
- assertEquals("max,nul,T,T", 1, result.length);
-
- }
-
- public void testRangeFilterRandCollating() throws IOException {
-
- // using the unsigned index because collation seems to ignore hyphens
- IndexReader reader = IndexReader.open(unsignedIndex.index, true);
- IndexSearcher search = new IndexSearcher(reader);
-
- Collator c = Collator.getInstance(Locale.ENGLISH);
-
- String minRP = pad(unsignedIndex.minR);
- String maxRP = pad(unsignedIndex.maxR);
-
- int numDocs = reader.numDocs();
-
- assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
- Query q = new TermQuery(new Term("body","body"));
-
- // test extremes, bounded on both ends
-
- int numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,T,c), 1000).totalHits;
- assertEquals("find all", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,F,c), 1000).totalHits;
- assertEquals("all but biggest", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,T,c), 1000).totalHits;
- assertEquals("all but smallest", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,F,c), 1000).totalHits;
- assertEquals("all but extremes", numDocs-2, numHits);
-
- // unbounded
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,null,T,F,c), 1000).totalHits;
- assertEquals("smallest and up", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",null,maxRP,F,T,c), 1000).totalHits;
- assertEquals("biggest and down", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,null,F,F,c), 1000).totalHits;
- assertEquals("not smallest, but up", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",null,maxRP,F,F,c), 1000).totalHits;
- assertEquals("not biggest, but down", numDocs-1, numHits);
-
- // very small sets
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,minRP,F,F,c), 1000).totalHits;
- assertEquals("min,min,F,F", 0, numHits);
- numHits = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,F,F,c), 1000).totalHits;
- assertEquals("max,max,F,F", 0, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,minRP,T,T,c), 1000).totalHits;
- assertEquals("min,min,T,T", 1, numHits);
- numHits = search.search(q,new TermRangeFilter("rand",null,minRP,F,T,c), 1000).totalHits;
- assertEquals("nul,min,F,T", 1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,T,T,c), 1000).totalHits;
- assertEquals("max,max,T,T", 1, numHits);
- numHits = search.search(q,new TermRangeFilter("rand",maxRP,null,T,F,c), 1000).totalHits;
- assertEquals("max,nul,T,T", 1, numHits);
- }
+ int medId = ((maxId - minId) / 2);
- public void testFarsi() throws Exception {
-
- /* build an index */
- RAMDirectory farsiIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new SimpleAnalyzer(
- TEST_VERSION_CURRENT)));
- Document doc = new Document();
- doc.add(new Field("content","\u0633\u0627\u0628",
- Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field("body", "body",
- Field.Store.YES, Field.Index.NOT_ANALYZED));
- writer.addDocument(doc);
-
- writer.optimize();
- writer.close();
-
- IndexReader reader = IndexReader.open(farsiIndex, true);
- IndexSearcher search = new IndexSearcher(reader);
- Query q = new TermQuery(new Term("body","body"));
-
- // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
- // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
- // characters properly.
- Collator collator = Collator.getInstance(new Locale("ar"));
-
- // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
- // orders the U+0698 character before the U+0633 character, so the single
- // index Term below should NOT be returned by a TermRangeFilter with a Farsi
- // Collator (or an Arabic one for the case when Farsi is not supported).
- int numHits = search.search
- (q, new TermRangeFilter("content", "\u062F", "\u0698", T, T, collator), 1000).totalHits;
- assertEquals("The index Term should not be included.", 0, numHits);
-
- numHits = search.search
- (q, new TermRangeFilter("content", "\u0633", "\u0638", T, T, collator), 1000).totalHits;
- assertEquals("The index Term should be included.", 1, numHits);
- search.close();
- }
-
- public void testDanish() throws Exception {
-
- /* build an index */
- RAMDirectory danishIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(danishIndex, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new SimpleAnalyzer(
- TEST_VERSION_CURRENT)));
- // Danish collation orders the words below in the given order
- // (example taken from TestSort.testInternationalSort() ).
- String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
- for (int docnum = 0 ; docnum < words.length ; ++docnum) {
- Document doc = new Document();
- doc.add(new Field("content", words[docnum],
- Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field("body", "body",
- Field.Store.YES, Field.Index.NOT_ANALYZED));
- writer.addDocument(doc);
- }
- writer.optimize();
- writer.close();
-
- IndexReader reader = IndexReader.open(danishIndex, true);
- IndexSearcher search = new IndexSearcher(reader);
- Query q = new TermQuery(new Term("body","body"));
-
- Collator collator = Collator.getInstance(new Locale("da", "dk"));
-
- // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
- // but Danish collation does.
- int numHits = search.search
- (q, new TermRangeFilter("content", "H\u00D8T", "MAND", F, F, collator), 1000).totalHits;
- assertEquals("The index Term should be included.", 1, numHits);
-
- numHits = search.search
- (q, new TermRangeFilter("content", "H\u00C5T", "MAND", F, F, collator), 1000).totalHits;
- assertEquals
- ("The index Term should not be included.", 0, numHits);
- search.close();
+ String minIP = pad(minId);
+ String maxIP = pad(maxId);
+ String medIP = pad(medId);
+
+ int numDocs = reader.numDocs();
+
+ assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+ ScoreDoc[] result;
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // test id, bounded on both ends
+
+ result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("find all", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, F),
+ numDocs).scoreDocs;
+ assertEquals("all but last", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("all but first", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("all but ends", numDocs - 2, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("med and up", 1 + maxId - medId, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, medIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("up to med", 1 + medId - minId, result.length);
+
+ // unbounded id
+
+ result = search.search(q, new TermRangeFilter("id", minIP, null, T, F),
+ numDocs).scoreDocs;
+ assertEquals("min and up", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", null, maxIP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("max and down", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, null, F, F),
+ numDocs).scoreDocs;
+ assertEquals("not min, but up", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", null, maxIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("not max, but down", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, F),
+ numDocs).scoreDocs;
+ assertEquals("med and up, not max", maxId - medId, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, medIP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("not min, up to med", medId - minId, result.length);
+
+ // very small sets
+
+ result = search.search(q, new TermRangeFilter("id", minIP, minIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("min,min,F,F", 0, result.length);
+ result = search.search(q, new TermRangeFilter("id", medIP, medIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("med,med,F,F", 0, result.length);
+ result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("max,max,F,F", 0, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, minIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("min,min,T,T", 1, result.length);
+ result = search.search(q, new TermRangeFilter("id", null, minIP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("nul,min,F,T", 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("max,max,T,T", 1, result.length);
+ result = search.search(q, new TermRangeFilter("id", maxIP, null, T, F),
+ numDocs).scoreDocs;
+ assertEquals("max,nul,T,T", 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", medIP, medIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("med,med,T,T", 1, result.length);
+
+ }
+
+ /**
+  * Exercises the full matrix of id-range filters (bounded on both ends,
+  * unbounded, and degenerate single-term / empty ranges) with an English
+  * Collator supplied to TermRangeFilter, so terms are compared by
+  * collation order rather than raw binary order.
+  */
+ public void testRangeFilterIdCollating() throws IOException {
+
+ IndexReader reader = signedIndexReader;
+ IndexSearcher search = new IndexSearcher(reader);
+
+ Collator c = Collator.getInstance(Locale.ENGLISH);
+
+ int medId = ((maxId - minId) / 2);
+
+ String minIP = pad(minId);
+ String maxIP = pad(maxId);
+ String medIP = pad(medId);
+
+ int numDocs = reader.numDocs();
+
+ // sanity check: ids are dense, so the doc count is derivable from the bounds
+ assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // test id, bounded on both ends
+ int numHits = search.search(q, new TermRangeFilter("id", minIP, maxIP, T,
+ T, c), 1000).totalHits;
+ assertEquals("find all", numDocs, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, maxIP, T, F, c), 1000).totalHits;
+ assertEquals("all but last", numDocs - 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, maxIP, F, T, c), 1000).totalHits;
+ assertEquals("all but first", numDocs - 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, maxIP, F, F, c), 1000).totalHits;
+ assertEquals("all but ends", numDocs - 2, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", medIP, maxIP, T, T, c), 1000).totalHits;
+ assertEquals("med and up", 1 + maxId - medId, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, medIP, T, T, c), 1000).totalHits;
+ assertEquals("up to med", 1 + medId - minId, numHits);
+
+ // unbounded id
+
+ numHits = search.search(q, new TermRangeFilter("id", minIP, null, T, F, c),
+ 1000).totalHits;
+ assertEquals("min and up", numDocs, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, T, c),
+ 1000).totalHits;
+ assertEquals("max and down", numDocs, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("id", minIP, null, F, F, c),
+ 1000).totalHits;
+ assertEquals("not min, but up", numDocs - 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, F, c),
+ 1000).totalHits;
+ assertEquals("not max, but down", numDocs - 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", medIP, maxIP, T, F, c), 1000).totalHits;
+ assertEquals("med and up, not max", maxId - medId, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, medIP, F, T, c), 1000).totalHits;
+ assertEquals("not min, up to med", medId - minId, numHits);
+
+ // very small sets
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, minIP, F, F, c), 1000).totalHits;
+ assertEquals("min,min,F,F", 0, numHits);
+ numHits = search.search(q,
+ new TermRangeFilter("id", medIP, medIP, F, F, c), 1000).totalHits;
+ assertEquals("med,med,F,F", 0, numHits);
+ numHits = search.search(q,
+ new TermRangeFilter("id", maxIP, maxIP, F, F, c), 1000).totalHits;
+ assertEquals("max,max,F,F", 0, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, minIP, T, T, c), 1000).totalHits;
+ assertEquals("min,min,T,T", 1, numHits);
+ numHits = search.search(q, new TermRangeFilter("id", null, minIP, F, T, c),
+ 1000).totalHits;
+ assertEquals("nul,min,F,T", 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", maxIP, maxIP, T, T, c), 1000).totalHits;
+ assertEquals("max,max,T,T", 1, numHits);
+ numHits = search.search(q, new TermRangeFilter("id", maxIP, null, T, F, c),
+ 1000).totalHits;
+ // label corrected: the filter above is (maxIP, null, T, F), not T,T
+ assertEquals("max,nul,T,F", 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", medIP, medIP, T, T, c), 1000).totalHits;
+ assertEquals("med,med,T,T", 1, numHits);
+ }
+
+ /**
+  * Same range-filter matrix as the id tests, but over the "rand" field,
+  * whose min/max terms come from the shared signed test index. Uses plain
+  * (binary) term comparison — no Collator.
+  */
+ public void testRangeFilterRand() throws IOException {
+
+ IndexReader reader = signedIndexReader;
+ IndexSearcher search = new IndexSearcher(reader);
+
+ String minRP = pad(signedIndexDir.minR);
+ String maxRP = pad(signedIndexDir.maxR);
+
+ int numDocs = reader.numDocs();
+
+ assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+ ScoreDoc[] result;
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // test extremes, bounded on both ends
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("find all", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F),
+ numDocs).scoreDocs;
+ assertEquals("all but biggest", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("all but smallest", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("all but extremes", numDocs - 2, result.length);
+
+ // unbounded
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, null, T, F),
+ numDocs).scoreDocs;
+ assertEquals("smallest and up", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("biggest and down", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, null, F, F),
+ numDocs).scoreDocs;
+ assertEquals("not smallest, but up", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("not biggest, but down", numDocs - 1, result.length);
+
+ // very small sets
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("min,min,F,F", 0, result.length);
+ result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("max,max,F,F", 0, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("min,min,T,T", 1, result.length);
+ result = search.search(q, new TermRangeFilter("rand", null, minRP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("nul,min,F,T", 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("max,max,T,T", 1, result.length);
+ result = search.search(q, new TermRangeFilter("rand", maxRP, null, T, F),
+ numDocs).scoreDocs;
+ // label corrected: the filter above is (maxRP, null, T, F), not T,T
+ assertEquals("max,nul,T,F", 1, result.length);
+
+ }
+
+ /**
+  * Collating variant of testRangeFilterRand: runs the "rand" field range
+  * matrix with an English Collator. Uses the unsigned index because, per
+  * the original note, collation seems to ignore hyphens (the sign of the
+  * padded values).
+  */
+ public void testRangeFilterRandCollating() throws IOException {
+
+ // using the unsigned index because collation seems to ignore hyphens
+ IndexReader reader = unsignedIndexReader;
+ IndexSearcher search = new IndexSearcher(reader);
+
+ Collator c = Collator.getInstance(Locale.ENGLISH);
+
+ String minRP = pad(unsignedIndexDir.minR);
+ String maxRP = pad(unsignedIndexDir.maxR);
+
+ int numDocs = reader.numDocs();
+
+ assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // test extremes, bounded on both ends
+
+ int numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T,
+ T, c), 1000).totalHits;
+ assertEquals("find all", numDocs, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F,
+ c), 1000).totalHits;
+ assertEquals("all but biggest", numDocs - 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T,
+ c), 1000).totalHits;
+ assertEquals("all but smallest", numDocs - 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F,
+ c), 1000).totalHits;
+ assertEquals("all but extremes", numDocs - 2, numHits);
+
+ // unbounded
+
+ numHits = search.search(q,
+ new TermRangeFilter("rand", minRP, null, T, F, c), 1000).totalHits;
+ assertEquals("smallest and up", numDocs, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("rand", null, maxRP, F, T, c), 1000).totalHits;
+ assertEquals("biggest and down", numDocs, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("rand", minRP, null, F, F, c), 1000).totalHits;
+ assertEquals("not smallest, but up", numDocs - 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("rand", null, maxRP, F, F, c), 1000).totalHits;
+ assertEquals("not biggest, but down", numDocs - 1, numHits);
+
+ // very small sets
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F,
+ c), 1000).totalHits;
+ assertEquals("min,min,F,F", 0, numHits);
+ numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F,
+ c), 1000).totalHits;
+ assertEquals("max,max,F,F", 0, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T,
+ c), 1000).totalHits;
+ assertEquals("min,min,T,T", 1, numHits);
+ numHits = search.search(q,
+ new TermRangeFilter("rand", null, minRP, F, T, c), 1000).totalHits;
+ assertEquals("nul,min,F,T", 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T,
+ c), 1000).totalHits;
+ assertEquals("max,max,T,T", 1, numHits);
+ numHits = search.search(q,
+ new TermRangeFilter("rand", maxRP, null, T, F, c), 1000).totalHits;
+ // label corrected: the filter above is (maxRP, null, T, F), not T,T
+ assertEquals("max,nul,T,F", 1, numHits);
+ }
+
+ /**
+  * Collation-aware range filtering for Farsi text: with an Arabic
+  * collator the single indexed term U+0633 U+0627 U+0628 must fall
+  * outside [U+062F, U+0698], even though plain Unicode code-point order
+  * would place it inside that range.
+  */
+ public void testFarsi() throws Exception {
+
+ /* build a one-document index */
+ RAMDirectory index = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(rand, index,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+ Document d = new Document();
+ d.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ d.add(new Field("body", "body", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ writer.addDocument(d);
+
+ IndexReader r = writer.getReader();
+ writer.close();
+
+ IndexSearcher searcher = new IndexSearcher(r);
+ Query bodyQuery = new TermQuery(new Term("body", "body"));
+
+ // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+ // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
+ // characters properly.
+ Collator collator = Collator.getInstance(new Locale("ar"));
+
+ // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+ // orders the U+0698 character before the U+0633 character, so the single
+ // index Term below should NOT be returned by a TermRangeFilter with a Farsi
+ // Collator (or an Arabic one for the case when Farsi is not supported).
+ int hits = searcher.search(bodyQuery, new TermRangeFilter("content",
+ "\u062F", "\u0698", T, T, collator), 1000).totalHits;
+ assertEquals("The index Term should not be included.", 0, hits);
+
+ hits = searcher.search(bodyQuery, new TermRangeFilter("content",
+ "\u0633", "\u0638", T, T, collator), 1000).totalHits;
+ assertEquals("The index Term should be included.", 1, hits);
+ searcher.close();
+ r.close();
+ index.close();
+ }
+
+ /**
+  * Collation-aware range filtering for Danish: "H\u00C5T" sorts inside
+  * ("H\u00D8T", "MAND") under Danish collation, but "H\u00D8T" does not
+  * sort inside ("H\u00C5T", "MAND") — the opposite of plain Unicode
+  * code-point order for the first check.
+  */
+ public void testDanish() throws Exception {
+
+ /* build an index */
+ RAMDirectory danishIndex = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+ // Danish collation orders the words below in the given order
+ // (example taken from TestSort.testInternationalSort() ).
+ String[] words = {"H\u00D8T", "H\u00C5T", "MAND"};
+ for (int docnum = 0; docnum < words.length; ++docnum) {
+ Document doc = new Document();
+ doc.add(new Field("content", words[docnum], Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ // every doc also matches the "body" query used below
+ doc.add(new Field("body", "body", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ writer.addDocument(doc);
 }
+ // all docs added; open a reader and release the writer
+ IndexReader reader = writer.getReader();
+ writer.close();
+
+ IndexSearcher search = new IndexSearcher(reader);
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // collator for the Danish locale ("da", "dk")
+ Collator collator = Collator.getInstance(new Locale("da", "dk"));
+
+ // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
+ // but Danish collation does.
+ int numHits = search.search(q, new TermRangeFilter("content", "H\u00D8T",
+ "MAND", F, F, collator), 1000).totalHits;
+ assertEquals("The index Term should be included.", 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("content", "H\u00C5T",
+ "MAND", F, F, collator), 1000).totalHits;
+ assertEquals("The index Term should not be included.", 0, numHits);
+ search.close();
+ reader.close();
+ danishIndex.close();
+ }
}