You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rm...@apache.org on 2011/10/01 05:05:07 UTC
svn commit: r1177888 [7/16] - in /lucene/dev/branches/lucene2621: ./
dev-tools/eclipse/ dev-tools/idea/lucene/contrib/ dev-tools/maven/ lucene/
lucene/contrib/ lucene/contrib/demo/src/java/org/apache/lucene/demo/
lucene/contrib/demo/src/java/org/apache...
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java Sat Oct 1 03:04:53 2011
@@ -17,329 +17,26 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.codecs.CodecProvider;
+
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.MockDirectoryWrapper;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LineFileDocs;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.NamedThreadFactory;
-import org.apache.lucene.util._TestUtil;
-import org.junit.Test;
// TODO
// - mix in optimize, addIndexes
// - randomly mix in non-congruent docs
-public class TestNRTThreads extends LuceneTestCase {
-
- private static class SubDocs {
- public final String packID;
- public final List<String> subIDs;
- public boolean deleted;
-
- public SubDocs(String packID, List<String> subIDs) {
- this.packID = packID;
- this.subIDs = subIDs;
- }
- }
-
- @Test
- public void testNRTThreads() throws Exception {
-
- final long t0 = System.currentTimeMillis();
-
- final String defaultCodec = CodecProvider.getDefault().getDefaultFieldCodec();
- if (defaultCodec.equals("SimpleText") || defaultCodec.equals("Memory")) {
- // no
- CodecProvider.getDefault().setDefaultFieldCodec("Standard");
- }
-
- final LineFileDocs docs = new LineFileDocs(random);
- final File tempDir = _TestUtil.getTempDir("nrtopenfiles");
- final MockDirectoryWrapper dir = newFSDirectory(tempDir);
- dir.setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
- final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
-
- if (LuceneTestCase.TEST_NIGHTLY) {
- // newIWConfig makes smallish max seg size, which
- // results in tons and tons of segments for this test
- // when run nightly:
- MergePolicy mp = conf.getMergePolicy();
- if (mp instanceof TieredMergePolicy) {
- ((TieredMergePolicy) mp).setMaxMergedSegmentMB(5000.);
- } else if (mp instanceof LogByteSizeMergePolicy) {
- ((LogByteSizeMergePolicy) mp).setMaxMergeMB(1000.);
- } else if (mp instanceof LogMergePolicy) {
- ((LogMergePolicy) mp).setMaxMergeDocs(100000);
- }
- }
-
- conf.setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
- @Override
- public void warm(IndexReader reader) throws IOException {
- if (VERBOSE) {
- System.out.println("TEST: now warm merged reader=" + reader);
- }
- final int maxDoc = reader.maxDoc();
- final Bits liveDocs = reader.getLiveDocs();
- int sum = 0;
- final int inc = Math.max(1, maxDoc/50);
- for(int docID=0;docID<maxDoc;docID += inc) {
- if (liveDocs == null || liveDocs.get(docID)) {
- final Document doc = reader.document(docID);
- sum += doc.getFields().size();
- }
- }
+public class TestNRTThreads extends ThreadedIndexingAndSearchingTestCase {
+
+ @Override
+ protected void doSearching(ExecutorService es, long stopTime) throws Exception {
- IndexSearcher searcher = newSearcher(reader);
- sum += searcher.search(new TermQuery(new Term("body", "united")), 10).totalHits;
- searcher.close();
-
- if (VERBOSE) {
- System.out.println("TEST: warm visited " + sum + " fields");
- }
- }
- });
-
- final IndexWriter writer = new IndexWriter(dir, conf);
- if (VERBOSE) {
- writer.setInfoStream(System.out);
- }
- _TestUtil.reduceOpenFiles(writer);
-
- final int NUM_INDEX_THREADS = 2;
- final int NUM_SEARCH_THREADS = 3;
-
- final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : RANDOM_MULTIPLIER;
-
- final AtomicBoolean failed = new AtomicBoolean();
- final AtomicInteger addCount = new AtomicInteger();
- final AtomicInteger delCount = new AtomicInteger();
- final AtomicInteger packCount = new AtomicInteger();
-
- final Set<String> delIDs = Collections.synchronizedSet(new HashSet<String>());
- final List<SubDocs> allSubDocs = Collections.synchronizedList(new ArrayList<SubDocs>());
-
- final long stopTime = System.currentTimeMillis() + RUN_TIME_SEC*1000;
- Thread[] threads = new Thread[NUM_INDEX_THREADS];
- for(int thread=0;thread<NUM_INDEX_THREADS;thread++) {
- threads[thread] = new Thread() {
- @Override
- public void run() {
- // TODO: would be better if this were cross thread, so that we make sure one thread deleting anothers added docs works:
- final List<String> toDeleteIDs = new ArrayList<String>();
- final List<SubDocs> toDeleteSubDocs = new ArrayList<SubDocs>();
- while(System.currentTimeMillis() < stopTime && !failed.get()) {
- try {
- Document doc = docs.nextDoc();
- if (doc == null) {
- break;
- }
- final String addedField;
- if (random.nextBoolean()) {
- addedField = "extra" + random.nextInt(10);
- doc.add(new TextField(addedField, "a random field"));
- } else {
- addedField = null;
- }
- if (random.nextBoolean()) {
- if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": add doc id:" + doc.get("docid"));
- }
-
- if (random.nextBoolean()) {
- // Add a pack of adjacent sub-docs
- final String packID;
- final SubDocs delSubDocs;
- if (toDeleteSubDocs.size() > 0 && random.nextBoolean()) {
- delSubDocs = toDeleteSubDocs.get(random.nextInt(toDeleteSubDocs.size()));
- assert !delSubDocs.deleted;
- toDeleteSubDocs.remove(delSubDocs);
- // reuse prior packID
- packID = delSubDocs.packID;
- } else {
- delSubDocs = null;
- // make new packID
- packID = packCount.getAndIncrement() + "";
- }
-
- final Field packIDField = newField("packID", packID, StringField.TYPE_STORED);
- final List<String> docIDs = new ArrayList<String>();
- final SubDocs subDocs = new SubDocs(packID, docIDs);
- final List<Document> docsList = new ArrayList<Document>();
-
- allSubDocs.add(subDocs);
- doc.add(packIDField);
- docsList.add(_TestUtil.cloneDocument(doc));
- docIDs.add(doc.get("docid"));
-
- final int maxDocCount = _TestUtil.nextInt(random, 1, 10);
- while(docsList.size() < maxDocCount) {
- doc = docs.nextDoc();
- if (doc == null) {
- break;
- }
- docsList.add(_TestUtil.cloneDocument(doc));
- docIDs.add(doc.get("docid"));
- }
- addCount.addAndGet(docsList.size());
-
- if (delSubDocs != null) {
- delSubDocs.deleted = true;
- delIDs.addAll(delSubDocs.subIDs);
- delCount.addAndGet(delSubDocs.subIDs.size());
- if (VERBOSE) {
- System.out.println("TEST: update pack packID=" + delSubDocs.packID + " count=" + docsList.size() + " docs=" + docIDs);
- }
- writer.updateDocuments(new Term("packID", delSubDocs.packID), docsList);
- /*
- // non-atomic:
- writer.deleteDocuments(new Term("packID", delSubDocs.packID));
- for(Document subDoc : docsList) {
- writer.addDocument(subDoc);
- }
- */
- } else {
- if (VERBOSE) {
- System.out.println("TEST: add pack packID=" + packID + " count=" + docsList.size() + " docs=" + docIDs);
- }
- writer.addDocuments(docsList);
-
- /*
- // non-atomic:
- for(Document subDoc : docsList) {
- writer.addDocument(subDoc);
- }
- */
- }
- doc.removeField("packID");
-
- if (random.nextInt(5) == 2) {
- if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": buffer del id:" + packID);
- }
- toDeleteSubDocs.add(subDocs);
- }
-
- } else {
- writer.addDocument(doc);
- addCount.getAndIncrement();
-
- if (random.nextInt(5) == 3) {
- if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid"));
- }
- toDeleteIDs.add(doc.get("docid"));
- }
- }
- } else {
- // we use update but it never replaces a
- // prior doc
- if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": update doc id:" + doc.get("docid"));
- }
- writer.updateDocument(new Term("docid", doc.get("docid")), doc);
- addCount.getAndIncrement();
-
- if (random.nextInt(5) == 3) {
- if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid"));
- }
- toDeleteIDs.add(doc.get("docid"));
- }
- }
-
- if (random.nextInt(30) == 17) {
- if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": apply " + toDeleteIDs.size() + " deletes");
- }
- for(String id : toDeleteIDs) {
- if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": del term=id:" + id);
- }
- writer.deleteDocuments(new Term("docid", id));
- }
- final int count = delCount.addAndGet(toDeleteIDs.size());
- if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": tot " + count + " deletes");
- }
- delIDs.addAll(toDeleteIDs);
- toDeleteIDs.clear();
-
- for(SubDocs subDocs : toDeleteSubDocs) {
- assert !subDocs.deleted;
- writer.deleteDocuments(new Term("packID", subDocs.packID));
- subDocs.deleted = true;
- if (VERBOSE) {
- System.out.println(" del subs: " + subDocs.subIDs + " packID=" + subDocs.packID);
- }
- delIDs.addAll(subDocs.subIDs);
- delCount.addAndGet(subDocs.subIDs.size());
- }
- toDeleteSubDocs.clear();
- }
- if (addedField != null) {
- doc.removeField(addedField);
- }
- } catch (Throwable t) {
- System.out.println(Thread.currentThread().getName() + ": hit exc");
- t.printStackTrace();
- failed.set(true);
- throw new RuntimeException(t);
- }
- }
- if (VERBOSE) {
- System.out.println(Thread.currentThread().getName() + ": indexing done");
- }
- }
- };
- threads[thread].setDaemon(true);
- threads[thread].start();
- }
-
- if (VERBOSE) {
- System.out.println("TEST: DONE start indexing threads [" + (System.currentTimeMillis()-t0) + " ms]");
- }
-
- // let index build up a bit
- Thread.sleep(100);
+ boolean anyOpenDelFiles = false;
IndexReader r = IndexReader.open(writer, true);
- boolean any = false;
- // silly starting guess:
- final AtomicInteger totTermCount = new AtomicInteger(100);
-
- final ExecutorService es = Executors.newCachedThreadPool(new NamedThreadFactory("NRT search threads"));
-
- while(System.currentTimeMillis() < stopTime && !failed.get()) {
+ while (System.currentTimeMillis() < stopTime && !failed.get()) {
if (random.nextBoolean()) {
if (VERBOSE) {
System.out.println("TEST: now reopen r=" + r);
@@ -355,11 +52,11 @@ public class TestNRTThreads extends Luce
}
r.close();
writer.commit();
- final Set<String> openDeletedFiles = dir.getOpenDeletedFiles();
+ final Set<String> openDeletedFiles = ((MockDirectoryWrapper) dir).getOpenDeletedFiles();
if (openDeletedFiles.size() > 0) {
System.out.println("OBD files: " + openDeletedFiles);
}
- any |= openDeletedFiles.size() > 0;
+ anyOpenDelFiles |= openDeletedFiles.size() > 0;
//assertEquals("open but deleted: " + openDeletedFiles, 0, openDeletedFiles.size());
if (VERBOSE) {
System.out.println("TEST: now open");
@@ -372,203 +69,52 @@ public class TestNRTThreads extends Luce
//System.out.println("numDocs=" + r.numDocs() + "
//openDelFileCount=" + dir.openDeleteFileCount());
- smokeTestReader(r);
-
if (r.numDocs() > 0) {
-
- final IndexSearcher s = new IndexSearcher(r, es);
-
- // run search threads
- final long searchStopTime = System.currentTimeMillis() + 500;
- final Thread[] searchThreads = new Thread[NUM_SEARCH_THREADS];
- final AtomicInteger totHits = new AtomicInteger();
- for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
- searchThreads[thread] = new Thread() {
- @Override
- public void run() {
- try {
- TermsEnum termsEnum = MultiFields.getTerms(s.getIndexReader(), "body").iterator();
- int seenTermCount = 0;
- int shift;
- int trigger;
- if (totTermCount.get() < 10) {
- shift = 0;
- trigger = 1;
- } else {
- trigger = totTermCount.get()/10;
- shift = random.nextInt(trigger);
- }
- while(System.currentTimeMillis() < searchStopTime) {
- BytesRef term = termsEnum.next();
- if (term == null) {
- if (seenTermCount < 10) {
- break;
- }
- totTermCount.set(seenTermCount);
- seenTermCount = 0;
- trigger = totTermCount.get()/10;
- //System.out.println("trigger " + trigger);
- shift = random.nextInt(trigger);
- termsEnum.seekCeil(new BytesRef(""));
- continue;
- }
- seenTermCount++;
- // search 10 terms
- if (trigger == 0) {
- trigger = 1;
- }
- if ((seenTermCount + shift) % trigger == 0) {
- //if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + " now search body:" + term.utf8ToString());
- //}
- totHits.addAndGet(runQuery(s, new TermQuery(new Term("body", term))));
- }
- }
- if (VERBOSE) {
- System.out.println(Thread.currentThread().getName() + ": search done");
- }
- } catch (Throwable t) {
- System.out.println(Thread.currentThread().getName() + ": hit exc");
- failed.set(true);
- t.printStackTrace(System.out);
- throw new RuntimeException(t);
- }
- }
- };
- searchThreads[thread].setDaemon(true);
- searchThreads[thread].start();
- }
-
- for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
- searchThreads[thread].join();
- }
-
- if (VERBOSE) {
- System.out.println("TEST: DONE search: totHits=" + totHits);
- }
- } else {
- Thread.sleep(100);
+ fixedSearcher = new IndexSearcher(r, es);
+ smokeTestSearcher(fixedSearcher);
+ runSearchThreads(System.currentTimeMillis() + 500);
}
}
-
- es.shutdown();
- es.awaitTermination(1, TimeUnit.SECONDS);
-
- if (VERBOSE) {
- System.out.println("TEST: all searching done [" + (System.currentTimeMillis()-t0) + " ms]");
- }
+ r.close();
//System.out.println("numDocs=" + r.numDocs() + " openDelFileCount=" + dir.openDeleteFileCount());
- r.close();
- final Set<String> openDeletedFiles = dir.getOpenDeletedFiles();
+ final Set<String> openDeletedFiles = ((MockDirectoryWrapper) dir).getOpenDeletedFiles();
if (openDeletedFiles.size() > 0) {
System.out.println("OBD files: " + openDeletedFiles);
}
- any |= openDeletedFiles.size() > 0;
+ anyOpenDelFiles |= openDeletedFiles.size() > 0;
- assertFalse("saw non-zero open-but-deleted count", any);
- if (VERBOSE) {
- System.out.println("TEST: now join");
- }
- for(int thread=0;thread<NUM_INDEX_THREADS;thread++) {
- threads[thread].join();
- }
- if (VERBOSE) {
- System.out.println("TEST: done join [" + (System.currentTimeMillis()-t0) + " ms]; addCount=" + addCount + " delCount=" + delCount);
- }
+ assertFalse("saw non-zero open-but-deleted count", anyOpenDelFiles);
+ }
- final IndexReader r2 = writer.getReader();
- final IndexSearcher s = newSearcher(r2);
- boolean doFail = false;
- for(String id : delIDs) {
- final TopDocs hits = s.search(new TermQuery(new Term("docid", id)), 1);
- if (hits.totalHits != 0) {
- System.out.println("doc id=" + id + " is supposed to be deleted, but got docID=" + hits.scoreDocs[0].doc);
- doFail = true;
- }
- }
+ private IndexSearcher fixedSearcher;
- // Make sure each group of sub-docs are still in docID order:
- for(SubDocs subDocs : allSubDocs) {
- if (!subDocs.deleted) {
- // We sort by relevance but the scores should be identical so sort falls back to by docID:
- TopDocs hits = s.search(new TermQuery(new Term("packID", subDocs.packID)), 20);
- assertEquals(subDocs.subIDs.size(), hits.totalHits);
- int lastDocID = -1;
- int startDocID = -1;
- for(ScoreDoc scoreDoc : hits.scoreDocs) {
- final int docID = scoreDoc.doc;
- if (lastDocID != -1) {
- assertEquals(1+lastDocID, docID);
- } else {
- startDocID = docID;
- }
- lastDocID = docID;
- final Document doc = s.doc(docID);
- assertEquals(subDocs.packID, doc.get("packID"));
- }
+ protected IndexSearcher getCurrentSearcher() throws Exception {
+ return fixedSearcher;
+ }
- lastDocID = startDocID - 1;
- for(String subID : subDocs.subIDs) {
- hits = s.search(new TermQuery(new Term("docid", subID)), 1);
- assertEquals(1, hits.totalHits);
- final int docID = hits.scoreDocs[0].doc;
- if (lastDocID != -1) {
- assertEquals(1+lastDocID, docID);
- }
- lastDocID = docID;
- }
- } else {
- for(String subID : subDocs.subIDs) {
- assertEquals(0, s.search(new TermQuery(new Term("docid", subID)), 1).totalHits);
- }
- }
- }
-
- final int endID = Integer.parseInt(docs.nextDoc().get("docid"));
- for(int id=0;id<endID;id++) {
- String stringID = ""+id;
- if (!delIDs.contains(stringID)) {
- final TopDocs hits = s.search(new TermQuery(new Term("docid", stringID)), 1);
- if (hits.totalHits != 1) {
- System.out.println("doc id=" + stringID + " is not supposed to be deleted, but got hitCount=" + hits.totalHits);
- doFail = true;
- }
- }
- }
- assertFalse(doFail);
-
- assertEquals("index=" + writer.segString() + " addCount=" + addCount + " delCount=" + delCount, addCount.get() - delCount.get(), r2.numDocs());
- r2.close();
-
- writer.commit();
- assertEquals("index=" + writer.segString() + " addCount=" + addCount + " delCount=" + delCount, addCount.get() - delCount.get(), writer.numDocs());
-
- assertFalse(writer.anyNonBulkMerges);
- writer.close(false);
- _TestUtil.checkIndex(dir);
- s.close();
- dir.close();
- _TestUtil.rmDir(tempDir);
- docs.close();
- if (VERBOSE) {
- System.out.println("TEST: done [" + (System.currentTimeMillis()-t0) + " ms]");
+ @Override
+ protected void releaseSearcher(IndexSearcher s) throws Exception {
+ if (s != fixedSearcher) {
+ // Final searcher:
+ s.getIndexReader().close();
+ s.close();
}
}
- private int runQuery(IndexSearcher s, Query q) throws Exception {
- s.search(q, 10);
- return s.search(q, null, 10, new Sort(new SortField("title", SortField.Type.STRING))).totalHits;
+ @Override
+ protected IndexSearcher getFinalSearcher() throws Exception {
+ final IndexReader r2;
+ if (random.nextBoolean()) {
+ r2 = writer.getReader();
+ } else {
+ writer.commit();
+ r2 = IndexReader.open(dir);
+ }
+ return newSearcher(r2);
}
- private void smokeTestReader(IndexReader r) throws Exception {
- IndexSearcher s = newSearcher(r);
- runQuery(s, new TermQuery(new Term("body", "united")));
- runQuery(s, new TermQuery(new Term("titleTokenized", "states")));
- PhraseQuery pq = new PhraseQuery();
- pq.add(new Term("body", "united"));
- pq.add(new Term("body", "states"));
- runQuery(s, pq);
- s.close();
+ public void testNRTThreads() throws Exception {
+ runTest("TestNRTThreads");
}
}
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java Sat Oct 1 03:04:53 2011
@@ -227,22 +227,22 @@ public class TestOmitNorms extends Lucen
public void testOmitNormsCombos() throws IOException {
// indexed with norms
FieldType customType = new FieldType(TextField.TYPE_STORED);
- Field norms = new Field("foo", customType, "a");
+ Field norms = new Field("foo", "a", customType);
// indexed without norms
FieldType customType1 = new FieldType(TextField.TYPE_STORED);
customType1.setOmitNorms(true);
- Field noNorms = new Field("foo", customType1, "a");
+ Field noNorms = new Field("foo", "a", customType1);
// not indexed, but stored
FieldType customType2 = new FieldType();
customType2.setStored(true);
- Field noIndex = new Field("foo", customType2, "a");
+ Field noIndex = new Field("foo", "a", customType2);
// not indexed but stored, omitNorms is set
FieldType customType3 = new FieldType();
customType3.setStored(true);
customType3.setOmitNorms(true);
- Field noNormsNoIndex = new Field("foo", customType3, "a");
+ Field noNormsNoIndex = new Field("foo", "a", customType3);
// not indexed nor stored (doesn't exist at all, we index a different field instead)
- Field emptyNorms = new Field("bar", customType, "a");
+ Field emptyNorms = new Field("bar", "a", customType);
assertNotNull(getNorms("foo", norms, norms));
assertNull(getNorms("foo", norms, noNorms));
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestPayloads.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestPayloads.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestPayloads.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestPayloads.java Sat Oct 1 03:04:53 2011
@@ -405,7 +405,7 @@ public class TestPayloads extends Lucene
/**
* This Analyzer uses an WhitespaceTokenizer and PayloadFilter.
*/
- private static class PayloadAnalyzer extends ReusableAnalyzerBase {
+ private static class PayloadAnalyzer extends Analyzer {
Map<String,PayloadData> fieldToData = new HashMap<String,PayloadData>();
public PayloadAnalyzer() {
@@ -611,14 +611,14 @@ public class TestPayloads extends Lucene
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
new MockAnalyzer(random, MockTokenizer.WHITESPACE, true));
Document doc = new Document();
- doc.add(new Field("hasMaybepayload", TextField.TYPE_STORED, "here we go"));
+ doc.add(new Field("hasMaybepayload", "here we go", TextField.TYPE_STORED));
writer.addDocument(doc);
writer.close();
writer = new RandomIndexWriter(random, dir,
new MockAnalyzer(random, MockTokenizer.WHITESPACE, true));
doc = new Document();
- doc.add(new Field("hasMaybepayload2", TextField.TYPE_STORED, "here we go"));
+ doc.add(new Field("hasMaybepayload2", "here we go", TextField.TYPE_STORED));
writer.addDocument(doc);
writer.addDocument(doc);
writer.optimize();
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java Sat Oct 1 03:04:53 2011
@@ -20,7 +20,7 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.io.Reader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase;
+import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
@@ -41,7 +41,7 @@ public class TestSameTokenSamePosition e
Directory dir = newDirectory();
RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer()));
Document doc = new Document();
- doc.add(new Field("eng", TextField.TYPE_STORED, "Six drunken" /*This shouldn't matter. */));
+ doc.add(new Field("eng", "Six drunken", TextField.TYPE_STORED /*This shouldn't matter. */));
riw.addDocument(doc);
riw.close();
dir.close();
@@ -54,7 +54,7 @@ public class TestSameTokenSamePosition e
Directory dir = newDirectory();
RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer()));
Document doc = new Document();
- doc.add(new Field("eng", TextField.TYPE_STORED, "Six drunken" /*This shouldn't matter. */));
+ doc.add(new Field("eng", "Six drunken", TextField.TYPE_STORED /*This shouldn't matter. */));
for (int i = 0; i < 100; i++) {
riw.addDocument(doc);
}
@@ -63,7 +63,7 @@ public class TestSameTokenSamePosition e
}
}
-final class BugReproAnalyzer extends ReusableAnalyzerBase {
+final class BugReproAnalyzer extends Analyzer {
@Override
public TokenStreamComponents createComponents(String arg0, Reader arg1) {
return new TokenStreamComponents(new BugReproAnalyzerTokenizer());
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java Sat Oct 1 03:04:53 2011
@@ -32,7 +32,7 @@ public class TestSegmentInfo extends Luc
IndexWriter writer = new IndexWriter(dir, conf);
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
- doc.add(new Field("a", TextField.TYPE_STORED, "value"));
+ doc.add(new Field("a", "value", TextField.TYPE_STORED));
writer.addDocument(doc);
writer.close();
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java Sat Oct 1 03:04:53 2011
@@ -117,7 +117,7 @@ public class TestTermVectorsReader exten
else {
customType.setStoreTermVectors(true);
}
- doc.add(new Field(testFields[i], customType, ""));
+ doc.add(new Field(testFields[i], "", customType));
}
//Create 5 documents for testing, they all have the same
@@ -175,7 +175,7 @@ public class TestTermVectorsReader exten
}
}
- private class MyAnalyzer extends ReusableAnalyzerBase {
+ private class MyAnalyzer extends Analyzer {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
return new TokenStreamComponents(new MyTokenStream());
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java Sat Oct 1 03:04:53 2011
@@ -143,7 +143,7 @@ public class TestTermVectorsWriter exten
customType.setStoreTermVectors(true);
customType.setStoreTermVectorPositions(true);
customType.setStoreTermVectorOffsets(true);
- Field f = new Field("field", customType, stream);
+ Field f = new Field("field", stream, customType);
doc.add(f);
doc.add(f);
w.addDocument(doc);
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java Sat Oct 1 03:04:53 2011
@@ -22,7 +22,6 @@ import java.io.Reader;
import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
@@ -77,7 +76,7 @@ public class TestTermdocPerf extends Luc
void addDocs(final Random random, Directory dir, final int ndocs, String field, final String val, final int maxTF, final float percentDocs) throws IOException {
final RepeatingTokenStream ts = new RepeatingTokenStream(val, random, percentDocs, maxTF);
- Analyzer analyzer = new ReusableAnalyzerBase() {
+ Analyzer analyzer = new Analyzer() {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
return new TokenStreamComponents(ts);
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/values/TestDocValues.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/values/TestDocValues.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/values/TestDocValues.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/values/TestDocValues.java Sat Oct 1 03:04:53 2011
@@ -188,7 +188,7 @@ public class TestDocValues extends Lucen
w.add(1, minMax[i][1]);
w.finish(2);
assertEquals(0, trackBytes.get());
- IndexDocValues r = Ints.getValues(dir, "test", 2, newIOContext(random));
+ IndexDocValues r = Ints.getValues(dir, "test", 2, ValueType.VAR_INTS, newIOContext(random));
Source source = getSource(r);
assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
expectedTypes[i], source.type());
@@ -229,7 +229,7 @@ public class TestDocValues extends Lucen
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
- IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length, newIOContext(random));
+ IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length, ValueType.FIXED_INTS_8, newIOContext(random));
Source source = r.getSource();
assertTrue(source.hasArray());
byte[] loaded = ((byte[])source.getArray());
@@ -250,7 +250,7 @@ public class TestDocValues extends Lucen
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
- IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length, newIOContext(random));
+ IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length, ValueType.FIXED_INTS_16, newIOContext(random));
Source source = r.getSource();
assertTrue(source.hasArray());
short[] loaded = ((short[])source.getArray());
@@ -271,7 +271,7 @@ public class TestDocValues extends Lucen
w.add(i, sourceArray[i]);
}
w.finish(sourceArray.length);
- IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length, newIOContext(random));
+ IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length, ValueType.FIXED_INTS_64, newIOContext(random));
Source source = r.getSource();
assertTrue(source.hasArray());
long[] loaded = ((long[])source.getArray());
@@ -292,7 +292,7 @@ public class TestDocValues extends Lucen
w.add(i, (long) sourceArray[i]);
}
w.finish(sourceArray.length);
- IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length, newIOContext(random));
+ IndexDocValues r = Ints.getValues(dir, "test", sourceArray.length, ValueType.FIXED_INTS_32, newIOContext(random));
Source source = r.getSource();
assertTrue(source.hasArray());
int[] loaded = ((int[])source.getArray());
@@ -363,7 +363,7 @@ public class TestDocValues extends Lucen
w.finish(NUM_VALUES + additionalDocs);
assertEquals(0, trackBytes.get());
- IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs, newIOContext(random));
+ IndexDocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs, type, newIOContext(random));
for (int iter = 0; iter < 2; iter++) {
Source s = getSource(r);
assertEquals(type, s.type());
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java Sat Oct 1 03:04:53 2011
@@ -25,9 +25,9 @@ import java.util.EnumSet;
import java.util.List;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.IndexDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IndexDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.CorruptIndexException;
@@ -37,6 +37,7 @@ import org.apache.lucene.index.IndexWrit
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.MultiPerDocValues;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.index.codecs.PerDocValues;
@@ -45,10 +46,10 @@ import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.FloatsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util._TestUtil;
import org.junit.Before;
@@ -535,7 +536,7 @@ public class TestDocValuesIndexing exten
}
}
doc.removeFields("id");
- doc.add(new Field("id", StringField.TYPE_STORED, idBase + i));
+ doc.add(new Field("id", idBase + i, StringField.TYPE_STORED));
w.addDocument(doc);
if (i % 7 == 0) {
@@ -562,4 +563,64 @@ public class TestDocValuesIndexing exten
}
return deleted;
}
+
+ public void testMultiValuedIndexDocValuesField() throws Exception {
+ assumeFalse("cannot work with preflex codec", CodecProvider.getDefault().getDefaultFieldCodec().equals("PreFlex"));
+ Directory d = newDirectory();
+ RandomIndexWriter w = new RandomIndexWriter(random, d);
+ Document doc = new Document();
+ IndexDocValuesField f = new IndexDocValuesField("field");
+ f.setInt(17);
+ // Index doc values are single-valued so we should not
+ // be able to add same field more than once:
+ doc.add(f);
+ doc.add(f);
+ try {
+ w.addDocument(doc);
+ fail("didn't hit expected exception");
+ } catch (IllegalArgumentException iae) {
+ // expected
+ }
+
+ doc = new Document();
+ doc.add(f);
+ w.addDocument(doc);
+ w.optimize();
+ IndexReader r = w.getReader();
+ w.close();
+ assertEquals(17, r.getSequentialSubReaders()[0].perDocValues().docValues("field").load().getInt(0));
+ r.close();
+ d.close();
+ }
+
+ public void testDifferentTypedDocValuesField() throws Exception {
+ assumeFalse("cannot work with preflex codec", CodecProvider.getDefault().getDefaultFieldCodec().equals("PreFlex"));
+ Directory d = newDirectory();
+ RandomIndexWriter w = new RandomIndexWriter(random, d);
+ Document doc = new Document();
+ IndexDocValuesField f = new IndexDocValuesField("field");
+ f.setInt(17);
+ // Index doc values are single-valued so we should not
+ // be able to add same field more than once:
+ doc.add(f);
+ IndexDocValuesField f2 = new IndexDocValuesField("field");
+ f2.setFloat(22.0);
+ doc.add(f2);
+ try {
+ w.addDocument(doc);
+ fail("didn't hit expected exception");
+ } catch (IllegalArgumentException iae) {
+ // expected
+ }
+
+ doc = new Document();
+ doc.add(f);
+ w.addDocument(doc);
+ w.optimize();
+ IndexReader r = w.getReader();
+ w.close();
+ assertEquals(17, r.getSequentialSubReaders()[0].perDocValues().docValues("field").load().getInt(0));
+ r.close();
+ d.close();
+ }
}
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java Sat Oct 1 03:04:53 2011
@@ -25,6 +25,7 @@ import org.apache.lucene.search.similari
import org.apache.lucene.search.similarities.Similarity.ExactDocScorer;
import org.apache.lucene.search.similarities.Similarity.SloppyDocScorer;
import org.apache.lucene.search.similarities.Similarity.Stats;
+import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.TermContext;
import org.apache.lucene.index.FieldInvertState;
@@ -364,7 +365,8 @@ final class JustCompileSearch {
}
@Override
- public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext)
+ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
+ boolean topScorer, Bits acceptDocs)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java Sat Oct 1 03:04:53 2011
@@ -28,7 +28,6 @@ import org.apache.lucene.index.SlowMulti
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Weight.ScorerContext;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
import org.apache.lucene.search.similarities.Similarity;
@@ -178,7 +177,8 @@ public class TestDisjunctionMaxQuery ext
QueryUtils.check(random, dq, s);
assertTrue(s.getTopReaderContext().isAtomic);
final Weight dw = s.createNormalizedWeight(dq);
- final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
+ AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext();
+ final Scorer ds = dw.scorer(context, true, false, context.reader.getLiveDocs());
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
if (skipOk) {
fail("firsttime skipTo found a match? ... "
@@ -193,7 +193,8 @@ public class TestDisjunctionMaxQuery ext
assertTrue(s.getTopReaderContext().isAtomic);
QueryUtils.check(random, dq, s);
final Weight dw = s.createNormalizedWeight(dq);
- final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
+ AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext();
+ final Scorer ds = dw.scorer(context, true, false, context.reader.getLiveDocs());
assertTrue("firsttime skipTo found no match",
ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java Sat Oct 1 03:04:53 2011
@@ -17,7 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.analysis.ReusableAnalyzerBase;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
@@ -346,7 +345,7 @@ public class TestMultiPhraseQuery extend
}
}
- private static class CannedAnalyzer extends ReusableAnalyzerBase {
+ private static class CannedAnalyzer extends Analyzer {
private final TokenAndPos[] tokens;
public CannedAnalyzer(TokenAndPos[] tokens) {
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java Sat Oct 1 03:04:53 2011
@@ -55,7 +55,7 @@ public class TestPhraseQuery extends Luc
@BeforeClass
public static void beforeClass() throws Exception {
directory = newDirectory();
- Analyzer analyzer = new ReusableAnalyzerBase() {
+ Analyzer analyzer = new Analyzer() {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
@@ -636,7 +636,7 @@ public class TestPhraseQuery extends Luc
break;
}
}
- TokenStream ts = analyzer.reusableTokenStream("ignore", new StringReader(term));
+ TokenStream ts = analyzer.tokenStream("ignore", new StringReader(term));
CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while(ts.incrementToken()) {
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java Sat Oct 1 03:04:53 2011
@@ -56,7 +56,7 @@ public class TestPositionIncrement exten
final static boolean VERBOSE = false;
public void testSetPosition() throws Exception {
- Analyzer analyzer = new ReusableAnalyzerBase() {
+ Analyzer analyzer = new Analyzer() {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
return new TokenStreamComponents(new Tokenizer() {
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java Sat Oct 1 03:04:53 2011
@@ -16,14 +16,19 @@ package org.apache.lucene.search;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+import java.util.HashSet;
+import java.util.Set;
import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
public class TestQueryWrapperFilter extends LuceneTestCase {
@@ -80,4 +85,71 @@ public class TestQueryWrapperFilter exte
reader.close();
dir.close();
}
+
+ public void testRandom() throws Exception {
+ final Directory d = newDirectory();
+ final RandomIndexWriter w = new RandomIndexWriter(random, d);
+ w.w.getConfig().setMaxBufferedDocs(17);
+ final int numDocs = atLeast(100);
+ final Set<String> aDocs = new HashSet<String>();
+ for(int i=0;i<numDocs;i++) {
+ final Document doc = new Document();
+ final String v;
+ if (random.nextInt(5) == 4) {
+ v = "a";
+ aDocs.add(""+i);
+ } else {
+ v = "b";
+ }
+ final Field f = newField("field", v, StringField.TYPE_UNSTORED);
+ doc.add(f);
+ doc.add(newField("id", ""+i, StringField.TYPE_STORED));
+ w.addDocument(doc);
+ }
+
+ final int numDelDocs = atLeast(10);
+ for(int i=0;i<numDelDocs;i++) {
+ final String delID = ""+random.nextInt(numDocs);
+ w.deleteDocuments(new Term("id", delID));
+ aDocs.remove(delID);
+ }
+
+ final IndexReader r = w.getReader();
+ w.close();
+ final TopDocs hits = new IndexSearcher(r).search(new MatchAllDocsQuery(),
+ new QueryWrapperFilter(new TermQuery(new Term("field", "a"))),
+ numDocs);
+ assertEquals(aDocs.size(), hits.totalHits);
+ for(ScoreDoc sd: hits.scoreDocs) {
+ assertTrue(aDocs.contains(r.document(sd.doc).get("id")));
+ }
+ r.close();
+ d.close();
+ }
+
+ public void testThousandDocuments() throws Exception {
+ Directory dir = newDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+ for (int i = 0; i < 1000; i++) {
+ Document doc = new Document();
+ doc.add(newField("field", English.intToEnglish(i), StringField.TYPE_UNSTORED));
+ writer.addDocument(doc);
+ }
+
+ IndexReader reader = writer.getReader();
+ writer.close();
+
+ IndexSearcher searcher = newSearcher(reader);
+
+ for (int i = 0; i < 1000; i++) {
+ TermQuery termQuery = new TermQuery(new Term("field", English.intToEnglish(i)));
+ QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery);
+ TopDocs td = searcher.search(new MatchAllDocsQuery(), qwf, 10);
+ assertEquals(1, td.totalHits);
+ }
+
+ searcher.close();
+ reader.close();
+ dir.close();
+ }
}
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSearchAfter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSearchAfter.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSearchAfter.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSearchAfter.java Sat Oct 1 03:04:53 2011
@@ -18,7 +18,7 @@ package org.apache.lucene.search;
*/
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
@@ -43,8 +43,8 @@ public class TestSearchAfter extends Luc
int numDocs = atLeast(200);
for (int i = 0; i < numDocs; i++) {
Document document = new Document();
- document.add(newField("english", English.intToEnglish(i), StringField.TYPE_UNSTORED));
- document.add(newField("oddeven", (i % 2 == 0) ? "even" : "odd", StringField.TYPE_UNSTORED));
+ document.add(newField("english", English.intToEnglish(i), TextField.TYPE_UNSTORED));
+ document.add(newField("oddeven", (i % 2 == 0) ? "even" : "odd", TextField.TYPE_UNSTORED));
iw.addDocument(document);
}
reader = iw.getReader();
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java Sat Oct 1 03:04:53 2011
@@ -162,7 +162,7 @@ public class TestSloppyPhraseQuery exten
Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
customType.setOmitNorms(true);
- Field f = new Field("f", customType, docText);
+ Field f = new Field("f", docText, customType);
doc.add(f);
return doc;
}
@@ -237,7 +237,7 @@ public class TestSloppyPhraseQuery exten
RandomIndexWriter iw = new RandomIndexWriter(random, dir);
FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
customType.setOmitNorms(true);
- Field f = new Field("lyrics", customType, "");
+ Field f = new Field("lyrics", "", customType);
Document doc = new Document();
doc.add(f);
f.setValue("drug drug");
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSort.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSort.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestSort.java Sat Oct 1 03:04:53 2011
@@ -132,7 +132,7 @@ public class TestSort extends LuceneTest
for (int i=0; i<data.length; ++i) {
if (((i%2)==0 && even) || ((i%2)==1 && odd)) {
Document doc = new Document();
- doc.add (new Field ("tracer", ft1, data[i][0]));
+ doc.add (new Field ("tracer", data[i][0], ft1));
doc.add (new TextField ("contents", data[i][1]));
if (data[i][2] != null) {
Field f = new StringField ("int", data[i][2]);
@@ -196,12 +196,12 @@ public class TestSort extends LuceneTest
for (int i=0; i<NUM_STRINGS; i++) {
Document doc = new Document();
String num = getRandomCharString(getRandomNumber(2, 8), 48, 52);
- doc.add (new Field ("tracer", customType, num));
+ doc.add (new Field ("tracer", num, customType));
//doc.add (new Field ("contents", Integer.toString(i), Field.Store.NO, Field.Index.ANALYZED));
doc.add (new StringField ("string", num));
String num2 = getRandomCharString(getRandomNumber(1, 4), 48, 50);
doc.add (new StringField ("string2", num2));
- doc.add (new Field ("tracer2", customType, num2));
+ doc.add (new Field ("tracer2", num2, customType));
for(IndexableField f : doc.getFields()) {
((Field) f).setBoost(2.0f);
}
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java Sat Oct 1 03:04:53 2011
@@ -190,7 +190,7 @@ public class TestTermRangeQuery extends
assertFalse("queries with different inclusive are not equal", query.equals(other));
}
- private static class SingleCharAnalyzer extends ReusableAnalyzerBase {
+ private static class SingleCharAnalyzer extends Analyzer {
private static class SingleCharTokenizer extends Tokenizer {
char[] buffer = new char[1];
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermScorer.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermScorer.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermScorer.java Sat Oct 1 03:04:53 2011
@@ -29,7 +29,6 @@ import org.apache.lucene.index.IndexRead
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Weight.ScorerContext;
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@@ -79,7 +78,8 @@ public class TestTermScorer extends Luce
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
- Scorer ts = weight.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
+ AtomicReaderContext context = (AtomicReaderContext)indexSearcher.getTopReaderContext();
+ Scorer ts = weight.scorer(context, true, true, context.reader.getLiveDocs());
// we have 2 documents with the term all in them, one document for all the
// other values
final List<TestHit> docs = new ArrayList<TestHit>();
@@ -140,7 +140,8 @@ public class TestTermScorer extends Luce
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
- Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
+ AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
+ Scorer ts = weight.scorer(context, true, true, context.reader.getLiveDocs());
assertTrue("next did not return a doc",
ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue("score is not correct", ts.score() == 1.6931472f);
@@ -158,8 +159,8 @@ public class TestTermScorer extends Luce
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
-
- Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
+ AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
+ Scorer ts = weight.scorer(context, true, true, context.reader.getLiveDocs());
assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
// The next doc should be doc 5
assertTrue("doc should be number 5", ts.docID() == 5);
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermVectors.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermVectors.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/TestTermVectors.java Sat Oct 1 03:04:53 2011
@@ -67,9 +67,9 @@ public class TestTermVectors extends Luc
} else {
ft.setStoreTermVectors(true);
}
- doc.add(new Field("field", ft, English.intToEnglish(i)));
+ doc.add(new Field("field", English.intToEnglish(i), ft));
//test no term vectors too
- doc.add(new Field("noTV", TextField.TYPE_STORED, English.intToEnglish(i)));
+ doc.add(new Field("noTV", English.intToEnglish(i), TextField.TYPE_STORED));
writer.addDocument(doc);
}
reader = writer.getReader();
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java Sat Oct 1 03:04:53 2011
@@ -55,7 +55,7 @@ public class PayloadHelper {
public IndexReader reader;
- public final class PayloadAnalyzer extends ReusableAnalyzerBase {
+ public final class PayloadAnalyzer extends Analyzer {
public PayloadAnalyzer() {
super(new PerFieldReuseStrategy());
@@ -125,9 +125,9 @@ public class PayloadHelper {
// writer.infoStream = System.out;
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
- doc.add(new Field(FIELD, TextField.TYPE_STORED, English.intToEnglish(i)));
- doc.add(new Field(MULTI_FIELD, TextField.TYPE_STORED, English.intToEnglish(i) + " " + English.intToEnglish(i)));
- doc.add(new Field(NO_PAYLOAD_FIELD, TextField.TYPE_STORED, English.intToEnglish(i)));
+ doc.add(new Field(FIELD, English.intToEnglish(i), TextField.TYPE_STORED));
+ doc.add(new Field(MULTI_FIELD, English.intToEnglish(i) + " " + English.intToEnglish(i), TextField.TYPE_STORED));
+ doc.add(new Field(NO_PAYLOAD_FIELD, English.intToEnglish(i), TextField.TYPE_STORED));
writer.addDocument(doc);
}
reader = IndexReader.open(writer, true);
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java Sat Oct 1 03:04:53 2011
@@ -55,7 +55,7 @@ public class TestPayloadNearQuery extend
private static byte[] payload2 = new byte[]{2};
private static byte[] payload4 = new byte[]{4};
- private static class PayloadAnalyzer extends ReusableAnalyzerBase {
+ private static class PayloadAnalyzer extends Analyzer {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
Tokenizer result = new MockTokenizer(reader, MockTokenizer.SIMPLE, true);
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java Sat Oct 1 03:04:53 2011
@@ -64,7 +64,7 @@ public class TestPayloadTermQuery extend
private static final byte[] payloadMultiField2 = new byte[]{4};
protected static Directory directory;
- private static class PayloadAnalyzer extends ReusableAnalyzerBase {
+ private static class PayloadAnalyzer extends Analyzer {
private PayloadAnalyzer() {
super(new PerFieldReuseStrategy());
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java Sat Oct 1 03:04:53 2011
@@ -23,6 +23,7 @@ import java.util.Collection;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.Bits;
/**
* Holds all implementations of classes in the o.a.l.s.spans package as a
@@ -82,7 +83,7 @@ final class JustCompileSearchSpans {
}
@Override
- public Spans getSpans(AtomicReaderContext context) throws IOException {
+ public Spans getSpans(AtomicReaderContext context, Bits acceptDocs) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java Sat Oct 1 03:04:53 2011
@@ -49,7 +49,7 @@ public class MultiSpansWrapper extends S
public static Spans wrap(ReaderContext topLevelReaderContext, SpanQuery query) throws IOException {
AtomicReaderContext[] leaves = ReaderUtil.leaves(topLevelReaderContext);
if(leaves.length == 1) {
- return query.getSpans(leaves[0]);
+ return query.getSpans(leaves[0], leaves[0].reader.getLiveDocs());
}
return new MultiSpansWrapper(leaves, query);
}
@@ -60,14 +60,14 @@ public class MultiSpansWrapper extends S
return false;
}
if (current == null) {
- current = query.getSpans(leaves[leafOrd]);
+ current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs());
}
while(true) {
if (current.next()) {
return true;
}
if (++leafOrd < leaves.length) {
- current = query.getSpans(leaves[leafOrd]);
+ current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs());
} else {
current = null;
break;
@@ -85,17 +85,17 @@ public class MultiSpansWrapper extends S
int subIndex = ReaderUtil.subIndex(target, leaves);
assert subIndex >= leafOrd;
if (subIndex != leafOrd) {
- current = query.getSpans(leaves[subIndex]);
+ current = query.getSpans(leaves[subIndex], leaves[subIndex].reader.getLiveDocs());
leafOrd = subIndex;
} else if (current == null) {
- current = query.getSpans(leaves[leafOrd]);
+ current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs());
}
while (true) {
if (current.skipTo(target - leaves[leafOrd].docBase)) {
return true;
}
if (++leafOrd < leaves.length) {
- current = query.getSpans(leaves[leafOrd]);
+ current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs());
} else {
current = null;
break;
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java Sat Oct 1 03:04:53 2011
@@ -96,7 +96,7 @@ public class TestBasics extends LuceneTe
}
}
- static final Analyzer simplePayloadAnalyzer = new ReusableAnalyzerBase() {
+ static final Analyzer simplePayloadAnalyzer = new Analyzer() {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java Sat Oct 1 03:04:53 2011
@@ -30,7 +30,6 @@ import org.apache.lucene.search.Explanat
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight.ScorerContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;
@@ -170,7 +169,7 @@ public class TestNearSpansOrdered extend
Weight w = searcher.createNormalizedWeight(q);
ReaderContext topReaderContext = searcher.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
- Scorer s = w.scorer(leaves[0], ScorerContext.def());
+ Scorer s = w.scorer(leaves[0], true, false, leaves[0].reader.getLiveDocs());
assertEquals(1, s.advance(1));
}
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java Sat Oct 1 03:04:53 2011
@@ -476,7 +476,7 @@ public class TestPayloadSpans extends Lu
assertEquals(numSpans, cnt);
}
- final class PayloadAnalyzer extends ReusableAnalyzerBase {
+ final class PayloadAnalyzer extends Analyzer {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
@@ -530,7 +530,7 @@ public class TestPayloadSpans extends Lu
}
}
- public final class TestPayloadAnalyzer extends ReusableAnalyzerBase {
+ public final class TestPayloadAnalyzer extends Analyzer {
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
Modified: lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (original)
+++ lucene/dev/branches/lucene2621/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java Sat Oct 1 03:04:53 2011
@@ -23,7 +23,6 @@ import org.apache.lucene.search.CheckHit
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Weight.ScorerContext;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
import org.apache.lucene.search.similarities.Similarity;
@@ -435,7 +434,7 @@ public class TestSpans extends LuceneTes
slop,
ordered);
- spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], ScorerContext.def());
+ spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], true, false, leaves[i].reader.getLiveDocs());
} finally {
searcher.setSimilarityProvider(oldSim);
}
Modified: lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-apollo.js
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-apollo.js?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-apollo.js (original)
+++ lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-apollo.js Sat Oct 1 03:04:53 2011
@@ -1 +1,2 @@
-PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_COMMENT,/^#[^\r\n]*/,null,'#'],[PR.PR_PLAIN,/^[\t\n\r \xA0]+/,null,' \n\r \xa0'],[PR.PR_STRING,/^\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)/,null,'\"']],[[PR.PR_KEYWORD,/^(?:ADS|AD|AUG|BZF|BZMF|CAE|CAF|CA|CCS|COM|CS|DAS|DCA|DCOM|DCS|DDOUBL|DIM|DOUBLE|DTCB|DTCF|DV|DXCH|EDRUPT|EXTEND|INCR|INDEX|NDX|INHINT|LXCH|MASK|MSK|MP|MSU|NOOP|OVSK|QXCH|RAND|READ|RELINT|RESUME|RETURN|ROR|RXOR|SQUARE|SU|TCR|TCAA|OVSK|TCF|TC|TS|WAND|WOR|WRITE|XCH|XLQ|XXALQ|ZL|ZQ|ADD|ADZ|SUB|SUZ|MPY|MPR|MPZ|DVP|COM|ABS|CLA|CLZ|LDQ|STO|STQ|ALS|LLS|LRS|TRA|TSQ|TMI|TOV|AXT|TIX|DLY|INP|OUT)\s/,null],[PR.PR_TYPE,/^(?:-?GENADR|=MINUS|2BCADR|VN|BOF|MM|-?2CADR|-?[1-6]DNADR|ADRES|BBCON|[SE]?BANK\=?|BLOCK|BNKSUM|E?CADR|COUNT\*?|2?DEC\*?|-?DNCHAN|-?DNPTR|EQUALS|ERASE|MEMORY|2?OCT|REMADR|SETLOC|SUBRO|ORG|BSS|BES|SYN|EQU|DEFINE|END)\s/,null],[PR.PR_LITERAL,/^\'(?:-*(?:\w|\\[\x21-\x7e])(?:[\w-]*|\\[\x21-\x7e])[=!?]?)?/],[PR.PR_PLAIN,/^-*(?:[!-z_]|\\[\x21-\x7e])(?:[\w-]*|\\[\x21-\x7e
])[=!?]?/i],[PR.PR_PUNCTUATION,/^[^\w\t\n\r \xA0()\"\\\';]+/]]),['apollo','agc','aea'])
\ No newline at end of file
+PR.registerLangHandler(PR.createSimpleLexer([["com",/^#[^\n\r]*/,null,"#"],["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r Â\xa0"],["str",/^"(?:[^"\\]|\\[\S\s])*(?:"|$)/,null,'"']],[["kwd",/^(?:ADS|AD|AUG|BZF|BZMF|CAE|CAF|CA|CCS|COM|CS|DAS|DCA|DCOM|DCS|DDOUBL|DIM|DOUBLE|DTCB|DTCF|DV|DXCH|EDRUPT|EXTEND|INCR|INDEX|NDX|INHINT|LXCH|MASK|MSK|MP|MSU|NOOP|OVSK|QXCH|RAND|READ|RELINT|RESUME|RETURN|ROR|RXOR|SQUARE|SU|TCR|TCAA|OVSK|TCF|TC|TS|WAND|WOR|WRITE|XCH|XLQ|XXALQ|ZL|ZQ|ADD|ADZ|SUB|SUZ|MPY|MPR|MPZ|DVP|COM|ABS|CLA|CLZ|LDQ|STO|STQ|ALS|LLS|LRS|TRA|TSQ|TMI|TOV|AXT|TIX|DLY|INP|OUT)\s/,
+null],["typ",/^(?:-?GENADR|=MINUS|2BCADR|VN|BOF|MM|-?2CADR|-?[1-6]DNADR|ADRES|BBCON|[ES]?BANK=?|BLOCK|BNKSUM|E?CADR|COUNT\*?|2?DEC\*?|-?DNCHAN|-?DNPTR|EQUALS|ERASE|MEMORY|2?OCT|REMADR|SETLOC|SUBRO|ORG|BSS|BES|SYN|EQU|DEFINE|END)\s/,null],["lit",/^'(?:-*(?:\w|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?)?/],["pln",/^-*(?:[!-z]|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?/],["pun",/^[^\w\t\n\r "'-);\\\xa0]+/]]),["apollo","agc","aea"]);
Modified: lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-css.js
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-css.js?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-css.js (original)
+++ lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-css.js Sat Oct 1 03:04:53 2011
@@ -1 +1,2 @@
-PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[ \t\r\n\f]+/,null,' \r\n']],[[PR.PR_STRING,/^\"(?:[^\n\r\f\\\"]|\\(?:\r\n?|\n|\f)|\\[\s\S])*\"/,null],[PR.PR_STRING,/^\'(?:[^\n\r\f\\\']|\\(?:\r\n?|\n|\f)|\\[\s\S])*\'/,null],['lang-css-str',/^url\(([^\)\"\']*)\)/i],[PR.PR_KEYWORD,/^(?:url|rgb|\!important|@import|@page|@media|@charset|inherit)(?=[^\-\w]|$)/i,null],['lang-css-kw',/^(-?(?:[_a-z]|(?:\\[0-9a-f]+ ?))(?:[_a-z0-9\-]|\\(?:\\[0-9a-f]+ ?))*)\s*:/i],[PR.PR_COMMENT,/^\/\*[^*]*\*+(?:[^\/*][^*]*\*+)*\//],[PR.PR_COMMENT,/^(?:<!--|-->)/],[PR.PR_LITERAL,/^(?:\d+|\d*\.\d+)(?:%|[a-z]+)?/i],[PR.PR_LITERAL,/^#(?:[0-9a-f]{3}){1,2}/i],[PR.PR_PLAIN,/^-?(?:[_a-z]|(?:\\[\da-f]+ ?))(?:[_a-z\d\-]|\\(?:\\[\da-f]+ ?))*/i],[PR.PR_PUNCTUATION,/^[^\s\w\'\"]+/]]),['css']),PR.registerLangHandler(PR.createSimpleLexer([],[[PR.PR_KEYWORD,/^-?(?:[_a-z]|(?:\\[\da-f]+ ?))(?:[_a-z\d\-]|\\(?:\\[\da-f]+ ?))*/i]]),['css-kw']),PR.registerLangHandler(PR.createSimpleLexer([],[[PR.PR_STRING,/^[
^\)\"\']+/]]),['css-str'])
\ No newline at end of file
+PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\f\r ]+/,null," \t\r\n"]],[["str",/^"(?:[^\n\f\r"\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*"/,null],["str",/^'(?:[^\n\f\r'\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*'/,null],["lang-css-str",/^url\(([^"')]*)\)/i],["kwd",/^(?:url|rgb|!important|@import|@page|@media|@charset|inherit)(?=[^\w-]|$)/i,null],["lang-css-kw",/^(-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*)\s*:/i],["com",/^\/\*[^*]*\*+(?:[^*/][^*]*\*+)*\//],["com",
+/^(?:<\!--|--\>)/],["lit",/^(?:\d+|\d*\.\d+)(?:%|[a-z]+)?/i],["lit",/^#[\da-f]{3,6}/i],["pln",/^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i],["pun",/^[^\s\w"']+/]]),["css"]);PR.registerLangHandler(PR.createSimpleLexer([],[["kwd",/^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i]]),["css-kw"]);PR.registerLangHandler(PR.createSimpleLexer([],[["str",/^[^"')]+/]]),["css-str"]);
Modified: lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-hs.js
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-hs.js?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-hs.js (original)
+++ lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-hs.js Sat Oct 1 03:04:53 2011
@@ -1 +1,2 @@
-PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[\t\n\x0B\x0C\r ]+/,null,' \n\r '],[PR.PR_STRING,/^\"(?:[^\"\\\n\x0C\r]|\\[\s\S])*(?:\"|$)/,null,'\"'],[PR.PR_STRING,/^\'(?:[^\'\\\n\x0C\r]|\\[^&])\'?/,null,'\''],[PR.PR_LITERAL,/^(?:0o[0-7]+|0x[\da-f]+|\d+(?:\.\d+)?(?:e[+\-]?\d+)?)/i,null,'0123456789']],[[PR.PR_COMMENT,/^(?:(?:--+(?:[^\r\n\x0C]*)?)|(?:\{-(?:[^-]|-+[^-\}])*-\}))/],[PR.PR_KEYWORD,/^(?:case|class|data|default|deriving|do|else|if|import|in|infix|infixl|infixr|instance|let|module|newtype|of|then|type|where|_)(?=[^a-zA-Z0-9\']|$)/,null],[PR.PR_PLAIN,/^(?:[A-Z][\w\']*\.)*[a-zA-Z][\w\']*/],[PR.PR_PUNCTUATION,/^[^\t\n\x0B\x0C\r a-zA-Z0-9\'\"]+/]]),['hs'])
\ No newline at end of file
+PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t-\r ]+/,null,"\t\n\r "],["str",/^"(?:[^\n\f\r"\\]|\\[\S\s])*(?:"|$)/,null,'"'],["str",/^'(?:[^\n\f\r'\\]|\\[^&])'?/,null,"'"],["lit",/^(?:0o[0-7]+|0x[\da-f]+|\d+(?:\.\d+)?(?:e[+-]?\d+)?)/i,null,"0123456789"]],[["com",/^(?:--+[^\n\f\r]*|{-(?:[^-]|-+[^}-])*-})/],["kwd",/^(?:case|class|data|default|deriving|do|else|if|import|in|infix|infixl|infixr|instance|let|module|newtype|of|then|type|where|_)(?=[^\d'A-Za-z]|$)/,
+null],["pln",/^(?:[A-Z][\w']*\.)*[A-Za-z][\w']*/],["pun",/^[^\d\t-\r "'A-Za-z]+/]]),["hs"]);
Modified: lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-lisp.js
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-lisp.js?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-lisp.js (original)
+++ lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-lisp.js Sat Oct 1 03:04:53 2011
@@ -1 +1,3 @@
-PR.registerLangHandler(PR.createSimpleLexer([['opn',/^\(/,null,'('],['clo',/^\)/,null,')'],[PR.PR_COMMENT,/^;[^\r\n]*/,null,';'],[PR.PR_PLAIN,/^[\t\n\r \xA0]+/,null,' \n\r \xa0'],[PR.PR_STRING,/^\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)/,null,'\"']],[[PR.PR_KEYWORD,/^(?:block|c[ad]+r|catch|cons|defun|do|eq|eql|equal|equalp|eval-when|flet|format|go|if|labels|lambda|let|load-time-value|locally|macrolet|multiple-value-call|nil|progn|progv|quote|require|return-from|setq|symbol-macrolet|t|tagbody|the|throw|unwind)\b/,null],[PR.PR_LITERAL,/^[+\-]?(?:0x[0-9a-f]+|\d+\/\d+|(?:\.\d+|\d+(?:\.\d*)?)(?:[ed][+\-]?\d+)?)/i],[PR.PR_LITERAL,/^\'(?:-*(?:\w|\\[\x21-\x7e])(?:[\w-]*|\\[\x21-\x7e])[=!?]?)?/],[PR.PR_PLAIN,/^-*(?:[a-z_]|\\[\x21-\x7e])(?:[\w-]*|\\[\x21-\x7e])[=!?]?/i],[PR.PR_PUNCTUATION,/^[^\w\t\n\r \xA0()\"\\\';]+/]]),['cl','el','lisp','scm'])
\ No newline at end of file
+var a=null;
+PR.registerLangHandler(PR.createSimpleLexer([["opn",/^\(+/,a,"("],["clo",/^\)+/,a,")"],["com",/^;[^\n\r]*/,a,";"],["pln",/^[\t\n\r \xa0]+/,a,"\t\n\r \xa0"],["str",/^"(?:[^"\\]|\\[\S\s])*(?:"|$)/,a,'"']],[["kwd",/^(?:block|c[ad]+r|catch|con[ds]|def(?:ine|un)|do|eq|eql|equal|equalp|eval-when|flet|format|go|if|labels|lambda|let|load-time-value|locally|macrolet|multiple-value-call|nil|progn|progv|quote|require|return-from|setq|symbol-macrolet|t|tagbody|the|throw|unwind)\b/,a],
+["lit",/^[+-]?(?:[#0]x[\da-f]+|\d+\/\d+|(?:\.\d+|\d+(?:\.\d*)?)(?:[de][+-]?\d+)?)/i],["lit",/^'(?:-*(?:\w|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?)?/],["pln",/^-*(?:[_a-z]|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?/i],["pun",/^[^\w\t\n\r "'-);\\\xa0]+/]]),["cl","el","lisp","scm"]);
Modified: lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-lua.js
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-lua.js?rev=1177888&r1=1177887&r2=1177888&view=diff
==============================================================================
--- lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-lua.js (original)
+++ lucene/dev/branches/lucene2621/lucene/src/tools/prettify/lang-lua.js Sat Oct 1 03:04:53 2011
@@ -1 +1,2 @@
-PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[\t\n\r \xA0]+/,null,' \n\r \xa0'],[PR.PR_STRING,/^(?:\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)|\'(?:[^\'\\]|\\[\s\S])*(?:\'|$))/,null,'\"\'']],[[PR.PR_COMMENT,/^--(?:\[(=*)\[[\s\S]*?(?:\]\1\]|$)|[^\r\n]*)/],[PR.PR_STRING,/^\[(=*)\[[\s\S]*?(?:\]\1\]|$)/],[PR.PR_KEYWORD,/^(?:and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b/,null],[PR.PR_LITERAL,/^[+-]?(?:0x[\da-f]+|(?:(?:\.\d+|\d+(?:\.\d*)?)(?:e[+\-]?\d+)?))/i],[PR.PR_PLAIN,/^[a-z_]\w*/i],[PR.PR_PUNCTUATION,/^[^\w\t\n\r \xA0][^\w\t\n\r \xA0\"\'\-\+=]*/]]),['lua'])
\ No newline at end of file
+PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r Â\xa0"],["str",/^(?:"(?:[^"\\]|\\[\S\s])*(?:"|$)|'(?:[^'\\]|\\[\S\s])*(?:'|$))/,null,"\"'"]],[["com",/^--(?:\[(=*)\[[\S\s]*?(?:]\1]|$)|[^\n\r]*)/],["str",/^\[(=*)\[[\S\s]*?(?:]\1]|$)/],["kwd",/^(?:and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b/,null],["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],
+["pln",/^[_a-z]\w*/i],["pun",/^[^\w\t\n\r \xa0][^\w\t\n\r "'+=\xa0-]*/]]),["lua"]);