You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ry...@apache.org on 2012/03/06 19:13:50 UTC
svn commit: r1297628 [5/13] - in /lucene/dev/branches/solr_3159_jetty8: ./
dev-tools/maven/ dev-tools/maven/lucene/ dev-tools/maven/lucene/contrib/demo/
dev-tools/maven/lucene/contrib/highlighter/
dev-tools/maven/lucene/contrib/memory/ dev-tools/maven/...
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java Tue Mar 6 18:13:38 2012
@@ -34,6 +34,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.*;
@@ -336,7 +337,7 @@ public class TestStressIndexing2 extends
DocsEnum docs = null;
while(termsEnum.next() != null) {
docs = _TestUtil.docs(random, termsEnum, null, docs, false);
- while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
fail("r1 is not empty but r2 is");
}
}
@@ -362,18 +363,18 @@ public class TestStressIndexing2 extends
termDocs2 = null;
}
- if (termDocs1.nextDoc() == DocsEnum.NO_MORE_DOCS) {
+ if (termDocs1.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
// This doc is deleted and wasn't replaced
- assertTrue(termDocs2 == null || termDocs2.nextDoc() == DocsEnum.NO_MORE_DOCS);
+ assertTrue(termDocs2 == null || termDocs2.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
continue;
}
int id1 = termDocs1.docID();
- assertEquals(DocsEnum.NO_MORE_DOCS, termDocs1.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, termDocs1.nextDoc());
- assertTrue(termDocs2.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(termDocs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
int id2 = termDocs2.docID();
- assertEquals(DocsEnum.NO_MORE_DOCS, termDocs2.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, termDocs2.nextDoc());
r2r1[id2] = id1;
@@ -409,7 +410,7 @@ public class TestStressIndexing2 extends
System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
dpEnum = termsEnum3.docsAndPositions(null, dpEnum, false);
if (dpEnum != null) {
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dpEnum.freq();
System.out.println(" doc=" + dpEnum.docID() + " freq=" + freq);
for(int posUpto=0;posUpto<freq;posUpto++) {
@@ -418,7 +419,7 @@ public class TestStressIndexing2 extends
} else {
dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true);
assertNotNull(dEnum);
- assertTrue(dEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dEnum.freq();
System.out.println(" doc=" + dEnum.docID() + " freq=" + freq);
}
@@ -443,7 +444,7 @@ public class TestStressIndexing2 extends
System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
dpEnum = termsEnum3.docsAndPositions(null, dpEnum, false);
if (dpEnum != null) {
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dpEnum.freq();
System.out.println(" doc=" + dpEnum.docID() + " freq=" + freq);
for(int posUpto=0;posUpto<freq;posUpto++) {
@@ -452,7 +453,7 @@ public class TestStressIndexing2 extends
} else {
dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true);
assertNotNull(dEnum);
- assertTrue(dEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dEnum.freq();
System.out.println(" doc=" + dEnum.docID() + " freq=" + freq);
}
@@ -508,7 +509,7 @@ public class TestStressIndexing2 extends
//System.out.println("TEST: term1=" + term1);
docs1 = _TestUtil.docs(random, termsEnum1, liveDocs1, docs1, true);
- while (docs1.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ while (docs1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int d = docs1.docID();
int f = docs1.freq();
info1[len1] = (((long)d)<<32) | f;
@@ -542,7 +543,7 @@ public class TestStressIndexing2 extends
//System.out.println("TEST: term1=" + term1);
docs2 = _TestUtil.docs(random, termsEnum2, liveDocs2, docs2, true);
- while (docs2.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ while (docs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int d = r2r1[docs2.docID()];
int f = docs2.freq();
info2[len2] = (((long)d)<<32) | f;
@@ -640,7 +641,7 @@ public class TestStressIndexing2 extends
// docIDs are not supposed to be equal
//int docID2 = dpEnum2.nextDoc();
//assertEquals(docID1, docID2);
- assertTrue(docID1 != DocsEnum.NO_MORE_DOCS);
+ assertTrue(docID1 != DocIdSetIterator.NO_MORE_DOCS);
int freq1 = dpEnum1.freq();
int freq2 = dpEnum2.freq();
@@ -665,8 +666,8 @@ public class TestStressIndexing2 extends
offsetAtt2.endOffset());
}
}
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum1.nextDoc());
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum2.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum1.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum2.nextDoc());
} else {
dEnum1 = _TestUtil.docs(random, termsEnum1, null, dEnum1, true);
dEnum2 = _TestUtil.docs(random, termsEnum2, null, dEnum2, true);
@@ -677,12 +678,12 @@ public class TestStressIndexing2 extends
// docIDs are not supposed to be equal
//int docID2 = dEnum2.nextDoc();
//assertEquals(docID1, docID2);
- assertTrue(docID1 != DocsEnum.NO_MORE_DOCS);
+ assertTrue(docID1 != DocIdSetIterator.NO_MORE_DOCS);
int freq1 = dEnum1.freq();
int freq2 = dEnum2.freq();
assertEquals(freq1, freq2);
- assertEquals(DocsEnum.NO_MORE_DOCS, dEnum1.nextDoc());
- assertEquals(DocsEnum.NO_MORE_DOCS, dEnum2.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dEnum1.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dEnum2.nextDoc());
}
}
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java Tue Mar 6 18:13:38 2012
@@ -237,7 +237,7 @@ public class TestTermVectorsReader exten
assertNotNull(docsEnum);
int doc = docsEnum.docID();
assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
}
assertNull(termsEnum.next());
@@ -264,17 +264,17 @@ public class TestTermVectorsReader exten
assertNotNull(dpEnum);
int doc = dpEnum.docID();
assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(dpEnum.freq(), positions[i].length);
for (int j = 0; j < positions[i].length; j++) {
assertEquals(positions[i][j], dpEnum.nextPosition());
}
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
doc = dpEnum.docID();
assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertNotNull(dpEnum);
assertEquals(dpEnum.freq(), positions[i].length);
for (int j = 0; j < positions[i].length; j++) {
@@ -282,7 +282,7 @@ public class TestTermVectorsReader exten
assertEquals(j*10, dpEnum.startOffset());
assertEquals(j*10 + testTerms[i].length(), dpEnum.endOffset());
}
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
}
Terms freqVector = reader.get(0).terms(testFields[1]); //no pos, no offset
@@ -316,15 +316,15 @@ public class TestTermVectorsReader exten
dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
assertNotNull(dpEnum);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(dpEnum.freq(), positions[i].length);
for (int j = 0; j < positions[i].length; j++) {
assertEquals(positions[i][j], dpEnum.nextPosition());
}
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertNotNull(dpEnum);
assertEquals(dpEnum.freq(), positions[i].length);
for (int j = 0; j < positions[i].length; j++) {
@@ -332,7 +332,7 @@ public class TestTermVectorsReader exten
assertEquals(j*10, dpEnum.startOffset());
assertEquals(j*10 + testTerms[i].length(), dpEnum.endOffset());
}
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
}
reader.close();
}
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java Tue Mar 6 18:13:38 2012
@@ -31,6 +31,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
@@ -69,18 +70,18 @@ public class TestTermVectorsWriter exten
assertEquals(1, termsEnum.totalTermFreq());
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
assertEquals(8, dpEnum.endOffset());
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
// Token "abcd" occurred three times
assertEquals(new BytesRef("abcd"), termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertEquals(3, termsEnum.totalTermFreq());
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -93,7 +94,7 @@ public class TestTermVectorsWriter exten
assertEquals(8, dpEnum.startOffset());
assertEquals(12, dpEnum.endOffset());
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
assertNull(termsEnum.next());
r.close();
dir.close();
@@ -120,7 +121,7 @@ public class TestTermVectorsWriter exten
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(2, termsEnum.totalTermFreq());
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -128,7 +129,7 @@ public class TestTermVectorsWriter exten
dpEnum.nextPosition();
assertEquals(5, dpEnum.startOffset());
assertEquals(9, dpEnum.endOffset());
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
r.close();
dir.close();
@@ -155,7 +156,7 @@ public class TestTermVectorsWriter exten
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(2, termsEnum.totalTermFreq());
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -163,7 +164,7 @@ public class TestTermVectorsWriter exten
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
assertEquals(12, dpEnum.endOffset());
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
r.close();
dir.close();
@@ -194,7 +195,7 @@ public class TestTermVectorsWriter exten
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(2, termsEnum.totalTermFreq());
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -202,7 +203,7 @@ public class TestTermVectorsWriter exten
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
assertEquals(12, dpEnum.endOffset());
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
r.close();
dir.close();
@@ -230,7 +231,7 @@ public class TestTermVectorsWriter exten
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(2, termsEnum.totalTermFreq());
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -238,7 +239,7 @@ public class TestTermVectorsWriter exten
dpEnum.nextPosition();
assertEquals(9, dpEnum.startOffset());
assertEquals(13, dpEnum.endOffset());
- assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
r.close();
dir.close();
@@ -266,21 +267,21 @@ public class TestTermVectorsWriter exten
assertNotNull(termsEnum.next());
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
assertNotNull(termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(11, dpEnum.startOffset());
assertEquals(17, dpEnum.endOffset());
assertNotNull(termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(18, dpEnum.startOffset());
assertEquals(21, dpEnum.endOffset());
@@ -312,14 +313,14 @@ public class TestTermVectorsWriter exten
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(1, (int) termsEnum.totalTermFreq());
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(1, dpEnum.startOffset());
assertEquals(7, dpEnum.endOffset());
assertNotNull(termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
assertEquals(11, dpEnum.endOffset());
@@ -355,14 +356,14 @@ public class TestTermVectorsWriter exten
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(1, (int) termsEnum.totalTermFreq());
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
assertNotNull(termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(6, dpEnum.startOffset());
assertEquals(12, dpEnum.endOffset());
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java Tue Mar 6 18:13:38 2012
@@ -27,6 +27,7 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.document.Document;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
@@ -123,7 +124,7 @@ public class TestTermdocPerf extends Luc
for (int i=0; i<iter; i++) {
tenum.seekCeil(new BytesRef("val"));
tdocs = _TestUtil.docs(random, tenum, MultiFields.getLiveDocs(reader), tdocs, false);
- while (tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ while (tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
ret += tdocs.docID();
}
}
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java Tue Mar 6 18:13:38 2012
@@ -35,6 +35,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@@ -51,7 +52,7 @@ import org.apache.lucene.util.automaton.
public class TestTermsEnum extends LuceneTestCase {
public void test() throws Exception {
- final LineFileDocs docs = new LineFileDocs(random);
+ final LineFileDocs docs = new LineFileDocs(random, defaultCodecSupportsDocValues());
final Directory d = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random, d);
final int numDocs = atLeast(10);
@@ -333,7 +334,7 @@ public class TestTermsEnum extends Lucen
assertEquals(1, te.docFreq());
docsEnum = _TestUtil.docs(random, te, null, docsEnum, false);
final int docID = docsEnum.nextDoc();
- assertTrue(docID != DocsEnum.NO_MORE_DOCS);
+ assertTrue(docID != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(docIDToID[docID], termToID.get(expected).intValue());
do {
loc++;
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java Tue Mar 6 18:13:38 2012
@@ -97,9 +97,8 @@ public class TestTypePromotion extends L
writer.addIndexes(dir_2);
} else {
// do a real merge here
- IndexReader open = IndexReader.open(dir_2);
- // TODO: wrap in a better way
- writer.addIndexes(newSearcher(open).getIndexReader());
+ IndexReader open = maybeWrapReader(IndexReader.open(dir_2));
+ writer.addIndexes(open);
open.close();
}
dir_2.close();
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java Tue Mar 6 18:13:38 2012
@@ -129,7 +129,7 @@ final class JustCompileSearch {
}
@Override
- public FieldComparator setNextReader(AtomicReaderContext context)
+ public FieldComparator<Object> setNextReader(AtomicReaderContext context)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@@ -144,7 +144,7 @@ final class JustCompileSearch {
static final class JustCompileFieldComparatorSource extends FieldComparatorSource {
@Override
- public FieldComparator newComparator(String fieldname, int numHits,
+ public FieldComparator<?> newComparator(String fieldname, int numHits,
int sortPos, boolean reversed) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java Tue Mar 6 18:13:38 2012
@@ -51,7 +51,7 @@ public class TestCustomSearcherSort exte
INDEX_SIZE = atLeast(2000);
index = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, index);
- RandomGen random = new RandomGen(this.random);
+ RandomGen random = new RandomGen(LuceneTestCase.random);
for (int i = 0; i < INDEX_SIZE; ++i) { // don't decrease; if to low the
// problem doesn't show up
Document doc = new Document();
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java Tue Mar 6 18:13:38 2012
@@ -139,7 +139,7 @@ class ElevationComparatorSource extends
}
@Override
- public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
+ public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
return new FieldComparator<Integer>() {
FieldCache.DocTermsIndex idIndex;
@@ -179,7 +179,7 @@ class ElevationComparatorSource extends
}
@Override
- public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+ public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
idIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), fieldname);
return this;
}
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java Tue Mar 6 18:13:38 2012
@@ -218,7 +218,7 @@ public class TestPositionIncrement exten
false);
int count = 0;
- assertTrue(tp.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS);
+ assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
// "a" occurs 4 times
assertEquals(4, tp.freq());
int expected = 0;
@@ -228,7 +228,7 @@ public class TestPositionIncrement exten
assertEquals(6, tp.nextPosition());
// only one doc has "a"
- assertEquals(DocsAndPositionsEnum.NO_MORE_DOCS, tp.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, tp.nextDoc());
IndexSearcher is = newSearcher(readerFromWriter);
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery2.java Tue Mar 6 18:13:38 2012
@@ -21,10 +21,12 @@ import java.util.Random;
import org.apache.lucene.index.Term;
import org.apache.lucene.util._TestUtil;
+import org.junit.Ignore;
/**
* random sloppy phrase query tests
*/
+@Ignore("Put this back when we fix LUCENE-3821")
public class TestSloppyPhraseQuery2 extends SearchEquivalenceTestBase {
/** "A B"~N ⊆ "A B"~N+1 */
public void testIncreasingSloppiness() throws Exception {
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSort.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSort.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSort.java Tue Mar 6 18:13:38 2012
@@ -20,27 +20,32 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.ArrayList;
import java.util.BitSet;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.DocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.DocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocValues;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.FieldValueHitQueue.Entry;
import org.apache.lucene.store.Directory;
@@ -48,6 +53,7 @@ import org.apache.lucene.store.LockObtai
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.DocIdBitSet;
+import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.junit.BeforeClass;
@@ -693,7 +699,7 @@ public class TestSort extends LuceneTest
};
@Override
- public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+ public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
docValues = FieldCache.DEFAULT.getInts(context.reader(), "parser", testIntParser, false);
return this;
}
@@ -706,7 +712,7 @@ public class TestSort extends LuceneTest
static class MyFieldComparatorSource extends FieldComparatorSource {
@Override
- public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
+ public FieldComparator<Integer> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
return new MyFieldComparator(numHits);
}
}
@@ -1294,4 +1300,155 @@ public class TestSort extends LuceneTest
reader.close();
indexStore.close();
}
+
+ private static class RandomFilter extends Filter {
+ private final Random random;
+ private float density;
+ private final List<BytesRef> docValues;
+ public final List<BytesRef> matchValues = Collections.synchronizedList(new ArrayList<BytesRef>());
+
+ // density should be 0.0 ... 1.0
+ public RandomFilter(Random random, float density, List<BytesRef> docValues) {
+ this.random = random;
+ this.density = density;
+ this.docValues = docValues;
+ }
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ final int maxDoc = context.reader().maxDoc();
+ final DocValues.Source idSource = context.reader().docValues("id").getSource();
+ assertNotNull(idSource);
+ final FixedBitSet bits = new FixedBitSet(maxDoc);
+ for(int docID=0;docID<maxDoc;docID++) {
+ if (random.nextFloat() <= density && (acceptDocs == null || acceptDocs.get(docID))) {
+ bits.set(docID);
+ //System.out.println(" acc id=" + idSource.getInt(docID) + " docID=" + docID);
+ matchValues.add(docValues.get((int) idSource.getInt(docID)));
+ }
+ }
+
+ return bits;
+ }
+ }
+
+ public void testRandomStringSort() throws Exception {
+ assumeTrue("cannot work with Lucene3x codec",
+ defaultCodecSupportsDocValues());
+
+ final int NUM_DOCS = atLeast(100);
+ final Directory dir = newDirectory();
+ final RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+ final boolean allowDups = random.nextBoolean();
+ final Set<String> seen = new HashSet<String>();
+ final int maxLength = _TestUtil.nextInt(random, 5, 100);
+ if (VERBOSE) {
+ System.out.println("TEST: NUM_DOCS=" + NUM_DOCS + " maxLength=" + maxLength + " allowDups=" + allowDups);
+ }
+
+ int numDocs = 0;
+ final List<BytesRef> docValues = new ArrayList<BytesRef>();
+ // TODO: deletions
+ while (numDocs < NUM_DOCS) {
+ final String s;
+ if (random.nextBoolean()) {
+ s = _TestUtil.randomSimpleString(random, maxLength);
+ } else {
+ s = _TestUtil.randomUnicodeString(random, maxLength);
+ }
+ final BytesRef br = new BytesRef(s);
+
+ if (!allowDups) {
+ if (seen.contains(s)) {
+ continue;
+ }
+ seen.add(s);
+ }
+
+ if (VERBOSE) {
+ System.out.println(" " + numDocs + ": s=" + s);
+ }
+
+ final Document doc = new Document();
+ doc.add(new DocValuesField("stringdv", br, DocValues.Type.BYTES_VAR_SORTED));
+ doc.add(newField("string", s, StringField.TYPE_UNSTORED));
+ doc.add(new DocValuesField("id", numDocs, DocValues.Type.VAR_INTS));
+ docValues.add(br);
+ writer.addDocument(doc);
+ numDocs++;
+
+ if (random.nextInt(40) == 17) {
+ // force flush
+ writer.getReader().close();
+ }
+ }
+
+ final IndexReader r = writer.getReader();
+ writer.close();
+ if (VERBOSE) {
+ System.out.println(" reader=" + r);
+ }
+
+ final IndexSearcher s = newSearcher(r, false);
+ final int ITERS = atLeast(100);
+ for(int iter=0;iter<ITERS;iter++) {
+ final boolean reverse = random.nextBoolean();
+ final TopFieldDocs hits;
+ final SortField sf;
+ if (random.nextBoolean()) {
+ sf = new SortField("stringdv", SortField.Type.STRING, reverse);
+ sf.setUseIndexValues(true);
+ } else {
+ sf = new SortField("string", SortField.Type.STRING, reverse);
+ }
+ final Sort sort = new Sort(sf);
+ final int hitCount = _TestUtil.nextInt(random, 1, r.maxDoc() + 20);
+ final RandomFilter f = new RandomFilter(random, random.nextFloat(), docValues);
+ if (random.nextBoolean()) {
+ hits = s.search(new ConstantScoreQuery(f),
+ hitCount,
+ sort);
+ } else {
+ hits = s.search(new MatchAllDocsQuery(),
+ f,
+ hitCount,
+ sort);
+ }
+
+ if (VERBOSE) {
+ System.out.println("\nTEST: iter=" + iter + " " + hits.totalHits + " hits; topN=" + hitCount + "; reverse=" + reverse);
+ }
+
+ // Compute expected results:
+ Collections.sort(f.matchValues);
+ if (reverse) {
+ Collections.reverse(f.matchValues);
+ }
+ final List<BytesRef> expected = f.matchValues;
+ if (VERBOSE) {
+ System.out.println(" expected:");
+ for(int idx=0;idx<expected.size();idx++) {
+ System.out.println(" " + idx + ": " + expected.get(idx).utf8ToString());
+ if (idx == hitCount-1) {
+ break;
+ }
+ }
+ }
+
+ if (VERBOSE) {
+ System.out.println(" actual:");
+ for(int hitIDX=0;hitIDX<hits.scoreDocs.length;hitIDX++) {
+ final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX];
+ System.out.println(" " + hitIDX + ": " + ((BytesRef) fd.fields[0]).utf8ToString());
+ }
+ }
+ for(int hitIDX=0;hitIDX<hits.scoreDocs.length;hitIDX++) {
+ final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX];
+ assertEquals(expected.get(hitIDX), (BytesRef) fd.fields[0]);
+ }
+ }
+
+ r.close();
+ dir.close();
+ }
}
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java Tue Mar 6 18:13:38 2012
@@ -161,7 +161,7 @@ public class TestSubScorerFreqs extends
query.add(inner, Occur.MUST);
query.add(aQuery, Occur.MUST);
query.add(dQuery, Occur.MUST);
- Set<String>[] occurList = new Set[] {
+ @SuppressWarnings({"rawtypes","unchecked"}) Set<String>[] occurList = new Set[] {
Collections.singleton(Occur.MUST.toString()),
new HashSet<String>(Arrays.asList(Occur.MUST.toString(), Occur.SHOULD.toString()))
};
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java Tue Mar 6 18:13:38 2012
@@ -135,19 +135,19 @@ public class TestTermVectors extends Luc
TermsEnum termsEnum = terms.iterator(null);
assertEquals("content", termsEnum.next().utf8ToString());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(expectedPositions[0], dpEnum.nextPosition());
assertEquals("here", termsEnum.next().utf8ToString());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(expectedPositions[1], dpEnum.nextPosition());
assertEquals("some", termsEnum.next().utf8ToString());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(expectedPositions[2], dpEnum.nextPosition());
@@ -178,7 +178,7 @@ public class TestTermVectors extends Luc
while(true) {
dpEnum = termsEnum.docsAndPositions(null, dpEnum, shouldBeOffVector);
assertNotNull(dpEnum);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
@@ -263,7 +263,7 @@ public class TestTermVectors extends Luc
String text = termsEnum.term().utf8ToString();
docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(knownSearcher.reader), docs, true);
- while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int docId = docs.docID();
int freq = docs.freq();
//System.out.println("Doc Id: " + docId + " freq " + freq);
@@ -428,7 +428,7 @@ public class TestTermVectors extends Luc
assertEquals(5, termsEnum.totalTermFreq());
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, false);
assertNotNull(dpEnum);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(5, dpEnum.freq());
for(int i=0;i<5;i++) {
assertEquals(i, dpEnum.nextPosition());
@@ -436,7 +436,7 @@ public class TestTermVectors extends Luc
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertNotNull(dpEnum);
- assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+ assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(5, dpEnum.freq());
for(int i=0;i<5;i++) {
dpEnum.nextPosition();
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java Tue Mar 6 18:13:38 2012
@@ -28,6 +28,7 @@ import org.apache.lucene.index.AtomicRea
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.TermContext;
@@ -121,7 +122,7 @@ public class MultiSpansWrapper extends S
@Override
public int doc() {
if (current == null) {
- return DocsEnum.NO_MORE_DOCS;
+ return DocIdSetIterator.NO_MORE_DOCS;
}
return current.doc() + leaves[leafOrd].docBase;
}
@@ -129,7 +130,7 @@ public class MultiSpansWrapper extends S
@Override
public int start() {
if (current == null) {
- return DocsEnum.NO_MORE_DOCS;
+ return DocIdSetIterator.NO_MORE_DOCS;
}
return current.start();
}
@@ -137,7 +138,7 @@ public class MultiSpansWrapper extends S
@Override
public int end() {
if (current == null) {
- return DocsEnum.NO_MORE_DOCS;
+ return DocIdSetIterator.NO_MORE_DOCS;
}
return current.end();
}
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java Tue Mar 6 18:13:38 2012
@@ -48,7 +48,8 @@ public class TestNRTCachingDirectory ext
NRTCachingDirectory cachedDir = new NRTCachingDirectory(dir, 2.0, 25.0);
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
RandomIndexWriter w = new RandomIndexWriter(random, cachedDir, conf);
- final LineFileDocs docs = new LineFileDocs(random);
+ final LineFileDocs docs = new LineFileDocs(random,
+ defaultCodecSupportsDocValues());
final int numDocs = _TestUtil.nextInt(random, 100, 400);
if (VERBOSE) {
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/TestAttributeSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/TestAttributeSource.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/TestAttributeSource.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/TestAttributeSource.java Tue Mar 6 18:13:38 2012
@@ -126,7 +126,7 @@ public class TestAttributeSource extends
src.addAttribute(TypeAttribute.class) instanceof TypeAttributeImpl);
}
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"rawtypes","unchecked"})
public void testInvalidArguments() throws Exception {
try {
AttributeSource src = new AttributeSource();
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java Tue Mar 6 18:13:38 2012
@@ -69,7 +69,7 @@ public class TestVirtualMethod extends L
assertEquals(0, VirtualMethod.compareImplementationDistance(TestClass5.class, publicTestMethod, protectedTestMethod));
}
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"rawtypes","unchecked"})
public void testExceptions() {
try {
// cast to Class to remove generics:
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java Tue Mar 6 18:13:38 2012
@@ -57,8 +57,10 @@ import org.apache.lucene.util.LuceneTest
import org.apache.lucene.util.LuceneTestCase.UseNoMemoryExpensiveCodec;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.fst.BytesRefFSTEnum.InputOutput;
import org.apache.lucene.util.fst.FST.Arc;
import org.apache.lucene.util.fst.FST.BytesReader;
+import org.apache.lucene.util.fst.PairOutputs.Pair;
@UseNoMemoryExpensiveCodec
public class TestFSTs extends LuceneTestCase {
@@ -493,7 +495,7 @@ public class TestFSTs extends LuceneTest
if (random.nextBoolean() && fst != null && !willRewrite) {
TestFSTs t = new TestFSTs();
- IOContext context = t.newIOContext(random);
+ IOContext context = LuceneTestCase.newIOContext(random);
IndexOutput out = dir.createOutput("fst.bin", context);
fst.save(out);
out.close();
@@ -983,7 +985,7 @@ public class TestFSTs extends LuceneTest
if (VERBOSE) {
System.out.println(" fstEnum.next prefix=" + inputToString(inputMode, current.input, false) + " output=" + outputs.outputToString(current.output));
}
- final CountMinOutput cmo = prefixes.get(current.input);
+ final CountMinOutput<T> cmo = prefixes.get(current.input);
assertNotNull(cmo);
assertTrue(cmo.isLeaf || cmo.isFinal);
//if (cmo.isFinal && !cmo.isLeaf) {
@@ -1094,7 +1096,7 @@ public class TestFSTs extends LuceneTest
Codec.setDefault(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()));
}
- final LineFileDocs docs = new LineFileDocs(random);
+ final LineFileDocs docs = new LineFileDocs(random, defaultCodecSupportsDocValues());
final int RUN_TIME_MSEC = atLeast(500);
final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64);
final File tempDir = _TestUtil.getTempDir("fstlines");
@@ -1182,7 +1184,7 @@ public class TestFSTs extends LuceneTest
}
final TermsEnum.SeekStatus seekResult = termsEnum.seekCeil(randomTerm);
- final BytesRefFSTEnum.InputOutput fstSeekResult = fstEnum.seekCeil(randomTerm);
+ final InputOutput<Long> fstSeekResult = fstEnum.seekCeil(randomTerm);
if (seekResult == TermsEnum.SeekStatus.END) {
assertNull("got " + (fstSeekResult == null ? "null" : fstSeekResult.input.utf8ToString()) + " but expected null", fstSeekResult);
@@ -1223,7 +1225,7 @@ public class TestFSTs extends LuceneTest
dir.close();
}
- private void assertSame(TermsEnum termsEnum, BytesRefFSTEnum fstEnum, boolean storeOrd) throws Exception {
+ private void assertSame(TermsEnum termsEnum, BytesRefFSTEnum<?> fstEnum, boolean storeOrd) throws Exception {
if (termsEnum.term() == null) {
assertNull(fstEnum.current());
} else {
@@ -1828,7 +1830,7 @@ public class TestFSTs extends LuceneTest
public int verifyStateAndBelow(FST<Object> fst, Arc<Object> arc, int depth)
throws IOException {
- if (fst.targetHasArcs(arc)) {
+ if (FST.targetHasArcs(arc)) {
int childCount = 0;
for (arc = fst.readFirstTargetArc(arc, arc);;
arc = fst.readNextArc(arc), childCount++)
@@ -1975,6 +1977,12 @@ public class TestFSTs extends LuceneTest
assertFalse(arc.isFinal());
assertEquals(42, arc.output.longValue());
}
+
+ static final Comparator<Long> minLongComparator = new Comparator<Long> () {
+ public int compare(Long left, Long right) {
+ return left.compareTo(right);
+ }
+ };
public void testShortestPaths() throws Exception {
final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
@@ -1989,19 +1997,65 @@ public class TestFSTs extends LuceneTest
//Util.toDot(fst, w, false, false);
//w.close();
- Util.MinResult[] r = Util.shortestPaths(fst,
+ Util.MinResult<Long>[] r = Util.shortestPaths(fst,
fst.getFirstArc(new FST.Arc<Long>()),
+ minLongComparator,
3);
assertEquals(3, r.length);
assertEquals(Util.toIntsRef(new BytesRef("aac"), scratch), r[0].input);
- assertEquals(7, r[0].output);
+ assertEquals(7L, r[0].output.longValue());
assertEquals(Util.toIntsRef(new BytesRef("ax"), scratch), r[1].input);
- assertEquals(17, r[1].output);
+ assertEquals(17L, r[1].output.longValue());
assertEquals(Util.toIntsRef(new BytesRef("aab"), scratch), r[2].input);
- assertEquals(22, r[2].output);
+ assertEquals(22L, r[2].output.longValue());
+ }
+
+ // compares just the weight side of the pair
+ static final Comparator<Pair<Long,Long>> minPairWeightComparator = new Comparator<Pair<Long,Long>> () {
+ public int compare(Pair<Long,Long> left, Pair<Long,Long> right) {
+ return left.output1.compareTo(right.output1);
+ }
+ };
+
+ /** like testShortestPaths, but uses pairoutputs so we have both a weight and an output */
+ public void testShortestPathsWFST() throws Exception {
+
+ PairOutputs<Long,Long> outputs = new PairOutputs<Long,Long>(
+ PositiveIntOutputs.getSingleton(true), // weight
+ PositiveIntOutputs.getSingleton(true) // output
+ );
+
+ final Builder<Pair<Long,Long>> builder = new Builder<Pair<Long,Long>>(FST.INPUT_TYPE.BYTE1, outputs);
+
+ final IntsRef scratch = new IntsRef();
+ builder.add(Util.toIntsRef(new BytesRef("aab"), scratch), outputs.newPair(22L, 57L));
+ builder.add(Util.toIntsRef(new BytesRef("aac"), scratch), outputs.newPair(7L, 36L));
+ builder.add(Util.toIntsRef(new BytesRef("ax"), scratch), outputs.newPair(17L, 85L));
+ final FST<Pair<Long,Long>> fst = builder.finish();
+ //Writer w = new OutputStreamWriter(new FileOutputStream("out.dot"));
+ //Util.toDot(fst, w, false, false);
+ //w.close();
+
+ Util.MinResult<Pair<Long,Long>>[] r = Util.shortestPaths(fst,
+ fst.getFirstArc(new FST.Arc<Pair<Long,Long>>()),
+ minPairWeightComparator,
+ 3);
+ assertEquals(3, r.length);
+
+ assertEquals(Util.toIntsRef(new BytesRef("aac"), scratch), r[0].input);
+ assertEquals(7L, r[0].output.output1.longValue()); // weight
+ assertEquals(36L, r[0].output.output2.longValue()); // output
+
+ assertEquals(Util.toIntsRef(new BytesRef("ax"), scratch), r[1].input);
+ assertEquals(17L, r[1].output.output1.longValue()); // weight
+ assertEquals(85L, r[1].output.output2.longValue()); // output
+
+ assertEquals(Util.toIntsRef(new BytesRef("aab"), scratch), r[2].input);
+ assertEquals(22L, r[2].output.output1.longValue()); // weight
+ assertEquals(57L, r[2].output.output2.longValue()); // output
}
public void testShortestPathsRandom() throws Exception {
@@ -2059,17 +2113,121 @@ public class TestFSTs extends LuceneTest
final int topN = _TestUtil.nextInt(random, 1, 10);
- Util.MinResult[] r = Util.shortestPaths(fst, arc, topN);
+ Util.MinResult<Long>[] r = Util.shortestPaths(fst, arc, minLongComparator, topN);
// 2. go thru whole treemap (slowCompletor) and check its actually the best suggestion
- final List<Util.MinResult> matches = new ArrayList<Util.MinResult>();
+ final List<Util.MinResult<Long>> matches = new ArrayList<Util.MinResult<Long>>();
// TODO: could be faster... but its slowCompletor for a reason
for (Map.Entry<String,Long> e : slowCompletor.entrySet()) {
if (e.getKey().startsWith(prefix)) {
//System.out.println(" consider " + e.getKey());
- matches.add(new Util.MinResult(Util.toIntsRef(new BytesRef(e.getKey().substring(prefix.length())), new IntsRef()),
- e.getValue() - prefixOutput));
+ matches.add(new Util.MinResult<Long>(Util.toIntsRef(new BytesRef(e.getKey().substring(prefix.length())), new IntsRef()),
+ e.getValue() - prefixOutput, minLongComparator));
+ }
+ }
+
+ assertTrue(matches.size() > 0);
+ Collections.sort(matches);
+ if (matches.size() > topN) {
+ matches.subList(topN, matches.size()).clear();
+ }
+
+ assertEquals(matches.size(), r.length);
+
+ for(int hit=0;hit<r.length;hit++) {
+ //System.out.println(" check hit " + hit);
+ assertEquals(matches.get(hit).input, r[hit].input);
+ assertEquals(matches.get(hit).output, r[hit].output);
+ }
+ }
+ }
+
+ // used by slowcompletor
+ class TwoLongs {
+ long a;
+ long b;
+
+ TwoLongs(long a, long b) {
+ this.a = a;
+ this.b = b;
+ }
+ }
+
+ /** like testShortestPathsRandom, but uses pairoutputs so we have both a weight and an output */
+ public void testShortestPathsWFSTRandom() throws Exception {
+ int numWords = atLeast(1000);
+
+ final TreeMap<String,TwoLongs> slowCompletor = new TreeMap<String,TwoLongs>();
+ final TreeSet<String> allPrefixes = new TreeSet<String>();
+
+ PairOutputs<Long,Long> outputs = new PairOutputs<Long,Long>(
+ PositiveIntOutputs.getSingleton(true), // weight
+ PositiveIntOutputs.getSingleton(true) // output
+ );
+ final Builder<Pair<Long,Long>> builder = new Builder<Pair<Long,Long>>(FST.INPUT_TYPE.BYTE1, outputs);
+ final IntsRef scratch = new IntsRef();
+
+ for (int i = 0; i < numWords; i++) {
+ String s;
+ while (true) {
+ s = _TestUtil.randomSimpleString(random);
+ if (!slowCompletor.containsKey(s)) {
+ break;
+ }
+ }
+
+ for (int j = 1; j < s.length(); j++) {
+ allPrefixes.add(s.substring(0, j));
+ }
+ int weight = _TestUtil.nextInt(random, 1, 100); // weights 1..100
+ int output = _TestUtil.nextInt(random, 0, 500); // outputs 0..500
+ slowCompletor.put(s, new TwoLongs(weight, output));
+ }
+
+ for (Map.Entry<String,TwoLongs> e : slowCompletor.entrySet()) {
+ //System.out.println("add: " + e);
+ long weight = e.getValue().a;
+ long output = e.getValue().b;
+ builder.add(Util.toIntsRef(new BytesRef(e.getKey()), scratch), outputs.newPair(weight, output));
+ }
+
+ final FST<Pair<Long,Long>> fst = builder.finish();
+ //System.out.println("SAVE out.dot");
+ //Writer w = new OutputStreamWriter(new FileOutputStream("out.dot"));
+ //Util.toDot(fst, w, false, false);
+ //w.close();
+
+ BytesReader reader = fst.getBytesReader(0);
+
+ //System.out.println("testing: " + allPrefixes.size() + " prefixes");
+ for (String prefix : allPrefixes) {
+ // 1. run prefix against fst, then complete by value
+ //System.out.println("TEST: " + prefix);
+
+ Pair<Long,Long> prefixOutput = outputs.getNoOutput();
+ FST.Arc<Pair<Long,Long>> arc = fst.getFirstArc(new FST.Arc<Pair<Long,Long>>());
+ for(int idx=0;idx<prefix.length();idx++) {
+ if (fst.findTargetArc((int) prefix.charAt(idx), arc, arc, reader) == null) {
+ fail();
+ }
+ prefixOutput = outputs.add(prefixOutput, arc.output);
+ }
+
+ final int topN = _TestUtil.nextInt(random, 1, 10);
+
+ Util.MinResult<Pair<Long,Long>>[] r = Util.shortestPaths(fst, arc, minPairWeightComparator, topN);
+
+ // 2. go thru whole treemap (slowCompletor) and check its actually the best suggestion
+ final List<Util.MinResult<Pair<Long,Long>>> matches = new ArrayList<Util.MinResult<Pair<Long,Long>>>();
+
+ // TODO: could be faster... but its slowCompletor for a reason
+ for (Map.Entry<String,TwoLongs> e : slowCompletor.entrySet()) {
+ if (e.getKey().startsWith(prefix)) {
+ //System.out.println(" consider " + e.getKey());
+ matches.add(new Util.MinResult<Pair<Long,Long>>(Util.toIntsRef(new BytesRef(e.getKey().substring(prefix.length())), new IntsRef()),
+ outputs.newPair(e.getValue().a - prefixOutput.output1, e.getValue().b - prefixOutput.output2),
+ minPairWeightComparator));
}
}
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java Tue Mar 6 18:13:38 2012
@@ -17,13 +17,18 @@ package org.apache.lucene.analysis;
* limitations under the License.
*/
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
import java.io.Reader;
import java.io.StringReader;
-import java.io.IOException;
+import java.io.StringWriter;
+import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
-
+
import org.apache.lucene.analysis.tokenattributes.*;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeImpl;
@@ -83,7 +88,7 @@ public abstract class BaseTokenStreamTes
}
}
- public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], Integer finalOffset) throws IOException {
+ public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[], Integer finalOffset) throws IOException {
assertNotNull(output);
CheckClearAttributesAttribute checkClearAtt = ts.addAttribute(CheckClearAttributesAttribute.class);
@@ -107,6 +112,12 @@ public abstract class BaseTokenStreamTes
assertTrue("has no PositionIncrementAttribute", ts.hasAttribute(PositionIncrementAttribute.class));
posIncrAtt = ts.getAttribute(PositionIncrementAttribute.class);
}
+
+ PositionLengthAttribute posLengthAtt = null;
+ if (posLengths != null) {
+ assertTrue("has no PositionLengthAttribute", ts.hasAttribute(PositionLengthAttribute.class));
+ posLengthAtt = ts.getAttribute(PositionLengthAttribute.class);
+ }
ts.reset();
for (int i = 0; i < output.length; i++) {
@@ -116,6 +127,7 @@ public abstract class BaseTokenStreamTes
if (offsetAtt != null) offsetAtt.setOffset(14584724,24683243);
if (typeAtt != null) typeAtt.setType("bogusType");
if (posIncrAtt != null) posIncrAtt.setPositionIncrement(45987657);
+ if (posLengthAtt != null) posLengthAtt.setPositionLength(45987653);
checkClearAtt.getAndResetClearCalled(); // reset it, because we called clearAttribute() before
assertTrue("token "+i+" does not exist", ts.incrementToken());
@@ -130,6 +142,8 @@ public abstract class BaseTokenStreamTes
assertEquals("type "+i, types[i], typeAtt.type());
if (posIncrements != null)
assertEquals("posIncrement "+i, posIncrements[i], posIncrAtt.getPositionIncrement());
+ if (posLengths != null)
+ assertEquals("posLength "+i, posLengths[i], posLengthAtt.getPositionLength());
// we can enforce some basic things about a few attributes even if the caller doesn't check:
if (offsetAtt != null) {
@@ -138,14 +152,18 @@ public abstract class BaseTokenStreamTes
assertTrue("endOffset must be >= startOffset", offsetAtt.endOffset() >= offsetAtt.startOffset());
if (finalOffset != null) {
assertTrue("startOffset must be <= finalOffset", offsetAtt.startOffset() <= finalOffset.intValue());
- assertTrue("endOffset must be <= finalOffset", offsetAtt.endOffset() <= finalOffset.intValue());
+ assertTrue("endOffset must be <= finalOffset: got endOffset=" + offsetAtt.endOffset() + " vs finalOffset=" + finalOffset.intValue(),
+ offsetAtt.endOffset() <= finalOffset.intValue());
}
}
if (posIncrAtt != null) {
assertTrue("posIncrement must be >= 0", posIncrAtt.getPositionIncrement() >= 0);
}
+ if (posLengthAtt != null) {
+ assertTrue("posLength must be >= 1", posLengthAtt.getPositionLength() >= 1);
+ }
}
- assertFalse("end of stream", ts.incrementToken());
+ assertFalse("TokenStream has more tokens than expected", ts.incrementToken());
ts.end();
if (finalOffset != null)
assertEquals("finalOffset ", finalOffset.intValue(), offsetAtt.endOffset());
@@ -155,65 +173,81 @@ public abstract class BaseTokenStreamTes
ts.close();
}
+ public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], Integer finalOffset) throws IOException {
+ assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, null, finalOffset);
+ }
+
public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
- assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, null);
+ assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, null, null);
}
public static void assertTokenStreamContents(TokenStream ts, String[] output) throws IOException {
- assertTokenStreamContents(ts, output, null, null, null, null, null);
+ assertTokenStreamContents(ts, output, null, null, null, null, null, null);
}
public static void assertTokenStreamContents(TokenStream ts, String[] output, String[] types) throws IOException {
- assertTokenStreamContents(ts, output, null, null, types, null, null);
+ assertTokenStreamContents(ts, output, null, null, types, null, null, null);
}
public static void assertTokenStreamContents(TokenStream ts, String[] output, int[] posIncrements) throws IOException {
- assertTokenStreamContents(ts, output, null, null, null, posIncrements, null);
+ assertTokenStreamContents(ts, output, null, null, null, posIncrements, null, null);
}
public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[]) throws IOException {
- assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null);
+ assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null, null);
}
public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], Integer finalOffset) throws IOException {
- assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, finalOffset);
+ assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null, finalOffset);
}
public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) throws IOException {
- assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, null);
+ assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, null, null);
}
public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements, Integer finalOffset) throws IOException {
- assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, finalOffset);
+ assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, null, finalOffset);
+ }
+
+ public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements, int[] posLengths, Integer finalOffset) throws IOException {
+ assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, posLengths, finalOffset);
}
public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
- assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, input.length());
+ assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, null, input.length());
+ }
+
+ public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[]) throws IOException {
+ assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.length());
}
public static void assertAnalyzesTo(Analyzer a, String input, String[] output) throws IOException {
- assertAnalyzesTo(a, input, output, null, null, null, null);
+ assertAnalyzesTo(a, input, output, null, null, null, null, null);
}
public static void assertAnalyzesTo(Analyzer a, String input, String[] output, String[] types) throws IOException {
- assertAnalyzesTo(a, input, output, null, null, types, null);
+ assertAnalyzesTo(a, input, output, null, null, types, null, null);
}
public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int[] posIncrements) throws IOException {
- assertAnalyzesTo(a, input, output, null, null, null, posIncrements);
+ assertAnalyzesTo(a, input, output, null, null, null, posIncrements, null);
+ }
+
+ public static void assertAnalyzesToPositions(Analyzer a, String input, String[] output, int[] posIncrements, int[] posLengths) throws IOException {
+ assertAnalyzesTo(a, input, output, null, null, null, posIncrements, posLengths);
}
public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[]) throws IOException {
- assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, null);
+ assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, null, null);
}
public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) throws IOException {
- assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, posIncrements);
+ assertAnalyzesTo(a, input, output, startOffsets, endOffsets, null, posIncrements, null);
}
public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
- assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, input.length());
+ assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, null, input.length());
}
public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output) throws IOException {
@@ -246,15 +280,22 @@ public abstract class BaseTokenStreamTes
assertAnalyzesToReuse(a, input, new String[]{expected});
}
- // simple utility method for blasting tokenstreams with data to make sure they don't do anything crazy
- // TODO: add a MockCharStream, and use it here too, to ensure that correctOffset etc is being done by tokenizers.
+ /** utility method for blasting tokenstreams with data to make sure they don't do anything crazy */
public static void checkRandomData(Random random, Analyzer a, int iterations) throws IOException {
- checkRandomData(random, a, iterations, 20);
+ checkRandomData(random, a, iterations, false);
+ }
+
+ /**
+ * utility method for blasting tokenstreams with data to make sure they don't do anything crazy
+ * @param simple true if only ascii strings will be used (try to avoid)
+ */
+ public static void checkRandomData(Random random, Analyzer a, int iterations, boolean simple) throws IOException {
+ checkRandomData(random, a, iterations, 20, simple);
// now test with multiple threads
int numThreads = _TestUtil.nextInt(random, 4, 8);
Thread threads[] = new Thread[numThreads];
for (int i = 0; i < threads.length; i++) {
- threads[i] = new AnalysisThread(new Random(random.nextLong()), a, iterations);
+ threads[i] = new AnalysisThread(new Random(random.nextLong()), a, iterations, simple);
}
for (int i = 0; i < threads.length; i++) {
threads[i].start();
@@ -272,11 +313,13 @@ public abstract class BaseTokenStreamTes
final int iterations;
final Random random;
final Analyzer a;
+ final boolean simple;
- AnalysisThread(Random random, Analyzer a, int iterations) {
+ AnalysisThread(Random random, Analyzer a, int iterations, boolean simple) {
this.random = random;
this.a = a;
this.iterations = iterations;
+ this.simple = simple;
}
@Override
@@ -284,36 +327,40 @@ public abstract class BaseTokenStreamTes
try {
// see the part in checkRandomData where it replays the same text again
// to verify reproducability/reuse: hopefully this would catch thread hazards.
- checkRandomData(random, a, iterations, 20);
+ checkRandomData(random, a, iterations, 20, simple);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
};
- public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength) throws IOException {
- checkRandomData(random, a, iterations, maxWordLength, random.nextBoolean());
+ public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength, boolean simple) throws IOException {
+ checkRandomData(random, a, iterations, maxWordLength, random.nextBoolean(), simple);
}
- public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength, boolean useCharFilter) throws IOException {
+ public static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength, boolean useCharFilter, boolean simple) throws IOException {
for (int i = 0; i < iterations; i++) {
String text;
- switch(_TestUtil.nextInt(random, 0, 4)) {
- case 0:
- text = _TestUtil.randomSimpleString(random);
- break;
- case 1:
- text = _TestUtil.randomRealisticUnicodeString(random, maxWordLength);
- break;
- case 2:
- text = _TestUtil.randomHtmlishString(random, maxWordLength);
- break;
- default:
- text = _TestUtil.randomUnicodeString(random, maxWordLength);
+ if (simple) {
+ text = random.nextBoolean() ? _TestUtil.randomSimpleString(random) : _TestUtil.randomHtmlishString(random, maxWordLength);
+ } else {
+ switch(_TestUtil.nextInt(random, 0, 4)) {
+ case 0:
+ text = _TestUtil.randomSimpleString(random);
+ break;
+ case 1:
+ text = _TestUtil.randomRealisticUnicodeString(random, maxWordLength);
+ break;
+ case 2:
+ text = _TestUtil.randomHtmlishString(random, maxWordLength);
+ break;
+ default:
+ text = _TestUtil.randomUnicodeString(random, maxWordLength);
+ }
}
if (VERBOSE) {
- System.out.println("NOTE: BaseTokenStreamTestCase: get first token stream now text=" + text);
+ System.out.println(Thread.currentThread().getName() + ": NOTE: BaseTokenStreamTestCase: get first token stream now text=" + text);
}
int remainder = random.nextInt(10);
@@ -323,10 +370,12 @@ public abstract class BaseTokenStreamTes
CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = ts.hasAttribute(OffsetAttribute.class) ? ts.getAttribute(OffsetAttribute.class) : null;
PositionIncrementAttribute posIncAtt = ts.hasAttribute(PositionIncrementAttribute.class) ? ts.getAttribute(PositionIncrementAttribute.class) : null;
+ PositionLengthAttribute posLengthAtt = ts.hasAttribute(PositionLengthAttribute.class) ? ts.getAttribute(PositionLengthAttribute.class) : null;
TypeAttribute typeAtt = ts.hasAttribute(TypeAttribute.class) ? ts.getAttribute(TypeAttribute.class) : null;
List<String> tokens = new ArrayList<String>();
List<String> types = new ArrayList<String>();
List<Integer> positions = new ArrayList<Integer>();
+ List<Integer> positionLengths = new ArrayList<Integer>();
List<Integer> startOffsets = new ArrayList<Integer>();
List<Integer> endOffsets = new ArrayList<Integer>();
ts.reset();
@@ -334,6 +383,7 @@ public abstract class BaseTokenStreamTes
tokens.add(termAtt.toString());
if (typeAtt != null) types.add(typeAtt.type());
if (posIncAtt != null) positions.add(posIncAtt.getPositionIncrement());
+ if (posLengthAtt != null) positionLengths.add(posLengthAtt.getPositionLength());
if (offsetAtt != null) {
startOffsets.add(offsetAtt.startOffset());
endOffsets.add(offsetAtt.endOffset());
@@ -344,11 +394,21 @@ public abstract class BaseTokenStreamTes
// verify reusing is "reproducable" and also get the normal tokenstream sanity checks
if (!tokens.isEmpty()) {
if (VERBOSE) {
- System.out.println("NOTE: BaseTokenStreamTestCase: re-run analysis");
+ System.out.println(Thread.currentThread().getName() + ": NOTE: BaseTokenStreamTestCase: re-run analysis; " + tokens.size() + " tokens");
}
reader = new StringReader(text);
ts = a.tokenStream("dummy", useCharFilter ? new MockCharFilter(reader, remainder) : reader);
- if (typeAtt != null && posIncAtt != null && offsetAtt != null) {
+ if (typeAtt != null && posIncAtt != null && posLengthAtt != null && offsetAtt != null) {
+ // offset + pos + posLength + type
+ assertTokenStreamContents(ts,
+ tokens.toArray(new String[tokens.size()]),
+ toIntArray(startOffsets),
+ toIntArray(endOffsets),
+ types.toArray(new String[types.size()]),
+ toIntArray(positions),
+ toIntArray(positionLengths),
+ text.length());
+ } else if (typeAtt != null && posIncAtt != null && offsetAtt != null) {
// offset + pos + type
assertTokenStreamContents(ts,
tokens.toArray(new String[tokens.size()]),
@@ -356,7 +416,18 @@ public abstract class BaseTokenStreamTes
toIntArray(endOffsets),
types.toArray(new String[types.size()]),
toIntArray(positions),
+ null,
text.length());
+ } else if (posIncAtt != null && posLengthAtt != null && offsetAtt != null) {
+ // offset + pos + posLength
+ assertTokenStreamContents(ts,
+ tokens.toArray(new String[tokens.size()]),
+ toIntArray(startOffsets),
+ toIntArray(endOffsets),
+ null,
+ toIntArray(positions),
+ toIntArray(positionLengths),
+ text.length());
} else if (posIncAtt != null && offsetAtt != null) {
// offset + pos
assertTokenStreamContents(ts,
@@ -365,6 +436,7 @@ public abstract class BaseTokenStreamTes
toIntArray(endOffsets),
null,
toIntArray(positions),
+ null,
text.length());
} else if (offsetAtt != null) {
// offset
@@ -374,6 +446,7 @@ public abstract class BaseTokenStreamTes
toIntArray(endOffsets),
null,
null,
+ null,
text.length());
} else {
// terms only
@@ -383,6 +456,22 @@ public abstract class BaseTokenStreamTes
}
}
}
+
+ protected String toDot(Analyzer a, String inputText) throws IOException {
+ final StringWriter sw = new StringWriter();
+ final TokenStream ts = a.tokenStream("field", new StringReader(inputText));
+ ts.reset();
+ new TokenStreamToDot(inputText, ts, new PrintWriter(sw)).toDot();
+ return sw.toString();
+ }
+
+ protected void toDotFile(Analyzer a, String inputText, String localFileName) throws IOException {
+ Writer w = new OutputStreamWriter(new FileOutputStream(localFileName), "UTF-8");
+ final TokenStream ts = a.tokenStream("field", new StringReader(inputText));
+ ts.reset();
+ new TokenStreamToDot(inputText, ts, new PrintWriter(w)).toDot();
+ w.close();
+ }
static int[] toIntArray(List<Integer> list) {
int ret[] = new int[list.size()];
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/TermInfosWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/TermInfosWriter.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/TermInfosWriter.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/TermInfosWriter.java Tue Mar 6 18:13:38 2012
@@ -210,9 +210,9 @@ final class TermInfosWriter implements C
assert ti.freqPointer >= lastTi.freqPointer: "freqPointer out of order (" + ti.freqPointer + " < " + lastTi.freqPointer + ")";
assert ti.proxPointer >= lastTi.proxPointer: "proxPointer out of order (" + ti.proxPointer + " < " + lastTi.proxPointer + ")";
- if (!isIndex && size % indexInterval == 0)
+ if (!isIndex && size % indexInterval == 0) {
other.add(lastFieldNumber, lastTerm, lastTi); // add an index term
-
+ }
writeTerm(fieldNumber, term); // write term
output.writeVInt(ti.docFreq); // write doc freq
Modified: lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java?rev=1297628&r1=1297627&r2=1297628&view=diff
==============================================================================
--- lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (original)
+++ lucene/dev/branches/solr_3159_jetty8/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java Tue Mar 6 18:13:38 2012
@@ -356,37 +356,29 @@ public abstract class ThreadedIndexingAn
shift = 0;
trigger = 1;
} else {
- trigger = totTermCount.get()/10;
+ trigger = totTermCount.get()/30;
shift = random.nextInt(trigger);
}
- BytesRef term = termsEnum.next();
- if (term == null) {
- if (seenTermCount == 0) {
+ while (true) {
+ BytesRef term = termsEnum.next();
+ if (term == null) {
+ if (seenTermCount == 0) {
+ break;
+ }
+ totTermCount.set(seenTermCount);
break;
}
- totTermCount.set(seenTermCount);
- seenTermCount = 0;
- if (totTermCount.get() < 10) {
- shift = 0;
+ seenTermCount++;
+ // search 30 terms
+ if (trigger == 0) {
trigger = 1;
- } else {
- trigger = totTermCount.get()/10;
- //System.out.println("trigger " + trigger);
- shift = random.nextInt(trigger);
}
- termsEnum.seekCeil(new BytesRef(""));
- continue;
- }
- seenTermCount++;
- // search 10 terms
- if (trigger == 0) {
- trigger = 1;
- }
- if ((seenTermCount + shift) % trigger == 0) {
- //if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + " now search body:" + term.utf8ToString());
- //}
- totHits.addAndGet(runQuery(s, new TermQuery(new Term("body", term))));
+ if ((seenTermCount + shift) % trigger == 0) {
+ //if (VERBOSE) {
+ //System.out.println(Thread.currentThread().getName() + " now search body:" + term.utf8ToString());
+ //}
+ totHits.addAndGet(runQuery(s, new TermQuery(new Term("body", term))));
+ }
}
//if (VERBOSE) {
//System.out.println(Thread.currentThread().getName() + ": search done");
@@ -432,7 +424,7 @@ public abstract class ThreadedIndexingAn
final long t0 = System.currentTimeMillis();
- final LineFileDocs docs = new LineFileDocs(random);
+ final LineFileDocs docs = new LineFileDocs(random, defaultCodecSupportsDocValues());
final File tempDir = _TestUtil.getTempDir(testName);
dir = newFSDirectory(tempDir);
((MockDirectoryWrapper) dir).setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
@@ -636,7 +628,14 @@ public abstract class ThreadedIndexingAn
private int runQuery(IndexSearcher s, Query q) throws Exception {
s.search(q, 10);
- return s.search(q, null, 10, new Sort(new SortField("title", SortField.Type.STRING))).totalHits;
+ int hitCount = s.search(q, null, 10, new Sort(new SortField("title", SortField.Type.STRING))).totalHits;
+ if (defaultCodecSupportsDocValues()) {
+ final Sort dvSort = new Sort(new SortField("title", SortField.Type.STRING));
+ dvSort.getSort()[0].setUseIndexValues(true);
+ int hitCount2 = s.search(q, null, 10, dvSort).totalHits;
+ assertEquals(hitCount, hitCount2);
+ }
+ return hitCount;
}
protected void smokeTestSearcher(IndexSearcher s) throws Exception {