You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ho...@apache.org on 2016/03/12 01:27:12 UTC
[31/50] [abbrv] lucene-solr git commit: LUCENE-7086: move
SlowCompositeReaderWrapper to misc module,
and throw clear exc if you try to use it with points
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java
index 3f168bb..179b971 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java
@@ -42,7 +42,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.search.spans.MultiSpansWrapper;
import org.apache.lucene.search.spans.SpanCollector;
import org.apache.lucene.search.spans.SpanFirstQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
@@ -75,12 +74,12 @@ public class TestPayloadSpans extends LuceneTestCase {
Spans spans;
stq = new SpanTermQuery(new Term(PayloadHelper.FIELD, "seventy"));
- spans = MultiSpansWrapper.wrap(indexReader, stq, SpanWeight.Postings.PAYLOADS);
+ spans = stq.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 100, 1, 1, 1);
stq = new SpanTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "seventy"));
- spans = MultiSpansWrapper.wrap(indexReader, stq, SpanWeight.Postings.PAYLOADS);
+ spans = stq.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 100, 0, 0, 0);
}
@@ -91,7 +90,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanFirstQuery sfq;
match = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
sfq = new SpanFirstQuery(match, 2);
- Spans spans = MultiSpansWrapper.wrap(indexReader, sfq, SpanWeight.Postings.PAYLOADS);
+ Spans spans = sfq.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
checkSpans(spans, 109, 1, 1, 1);
//Test more complicated subclause
SpanQuery[] clauses = new SpanQuery[2];
@@ -99,11 +98,11 @@ public class TestPayloadSpans extends LuceneTestCase {
clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "hundred"));
match = new SpanNearQuery(clauses, 0, true);
sfq = new SpanFirstQuery(match, 2);
- checkSpans(MultiSpansWrapper.wrap(indexReader, sfq, SpanWeight.Postings.PAYLOADS), 100, 2, 1, 1);
+ checkSpans(sfq.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS), 100, 2, 1, 1);
match = new SpanNearQuery(clauses, 0, false);
sfq = new SpanFirstQuery(match, 2);
- checkSpans(MultiSpansWrapper.wrap(indexReader, sfq, SpanWeight.Postings.PAYLOADS), 100, 2, 1, 1);
+ checkSpans(sfq.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS), 100, 2, 1, 1);
}
@@ -123,10 +122,10 @@ public class TestPayloadSpans extends LuceneTestCase {
Document doc = new Document();
doc.add(newTextField(PayloadHelper.FIELD, "one two three one four three", Field.Store.YES));
writer.addDocument(doc);
- IndexReader reader = writer.getReader();
+ IndexReader reader = getOnlyLeafReader(writer.getReader());
writer.close();
- checkSpans(MultiSpansWrapper.wrap(reader, snq, SpanWeight.Postings.PAYLOADS), 1, new int[]{2});
+ checkSpans(snq.createWeight(newSearcher(reader), false).getSpans(reader.leaves().get(0), SpanWeight.Postings.PAYLOADS), 1, new int[]{2});
reader.close();
directory.close();
}
@@ -137,7 +136,7 @@ public class TestPayloadSpans extends LuceneTestCase {
IndexSearcher searcher = getSearcher();
stq = new SpanTermQuery(new Term(PayloadHelper.FIELD, "mark"));
- spans = MultiSpansWrapper.wrap(searcher.getIndexReader(), stq, SpanWeight.Postings.PAYLOADS);
+ spans = stq.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
assertNull(spans);
SpanQuery[] clauses = new SpanQuery[3];
@@ -146,7 +145,7 @@ public class TestPayloadSpans extends LuceneTestCase {
clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "xx"));
SpanNearQuery spanNearQuery = new SpanNearQuery(clauses, 12, false);
- spans = MultiSpansWrapper.wrap(searcher.getIndexReader(), spanNearQuery, SpanWeight.Postings.PAYLOADS);
+ spans = spanNearQuery.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 2, new int[]{3,3});
@@ -157,7 +156,7 @@ public class TestPayloadSpans extends LuceneTestCase {
spanNearQuery = new SpanNearQuery(clauses, 6, true);
- spans = MultiSpansWrapper.wrap(searcher.getIndexReader(), spanNearQuery, SpanWeight.Postings.PAYLOADS);
+ spans = spanNearQuery.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 1, new int[]{3});
@@ -179,7 +178,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses2, 6, false);
// yy within 6 of xx within 6 of rr
- spans = MultiSpansWrapper.wrap(searcher.getIndexReader(), nestedSpanNearQuery, SpanWeight.Postings.PAYLOADS);
+ spans = nestedSpanNearQuery.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 2, new int[]{3,3});
closeIndexReader.close();
@@ -210,7 +209,7 @@ public class TestPayloadSpans extends LuceneTestCase {
clauses3[1] = snq;
SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses3, 6, false);
- spans = MultiSpansWrapper.wrap(searcher.getIndexReader(), nestedSpanNearQuery, SpanWeight.Postings.PAYLOADS);
+ spans = nestedSpanNearQuery.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 1, new int[]{3});
@@ -248,7 +247,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses3, 6, false);
- spans = MultiSpansWrapper.wrap(searcher.getIndexReader(), nestedSpanNearQuery, SpanWeight.Postings.PAYLOADS);
+ spans = nestedSpanNearQuery.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 2, new int[]{8, 8});
closeIndexReader.close();
@@ -265,7 +264,7 @@ public class TestPayloadSpans extends LuceneTestCase {
writer.addDocument(doc);
IndexReader reader = writer.getReader();
- IndexSearcher is = newSearcher(reader);
+ IndexSearcher is = newSearcher(getOnlyLeafReader(reader));
writer.close();
SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
@@ -273,7 +272,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanQuery[] sqs = { stq1, stq2 };
SpanNearQuery snq = new SpanNearQuery(sqs, 1, true);
VerifyingCollector collector = new VerifyingCollector();
- Spans spans = MultiSpansWrapper.wrap(is.getIndexReader(), snq, SpanWeight.Postings.PAYLOADS);
+ Spans spans = snq.createWeight(is, false).getSpans(is.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
TopDocs topDocs = is.search(snq, 1);
Set<String> payloadSet = new HashSet<>();
@@ -304,7 +303,7 @@ public class TestPayloadSpans extends LuceneTestCase {
doc.add(new TextField("content", new StringReader("a b a d k f a h i k a k")));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
- IndexSearcher is = newSearcher(reader);
+ IndexSearcher is = newSearcher(getOnlyLeafReader(reader));
writer.close();
SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
@@ -312,7 +311,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanQuery[] sqs = { stq1, stq2 };
SpanNearQuery snq = new SpanNearQuery(sqs, 0, true);
VerifyingCollector collector = new VerifyingCollector();
- Spans spans = MultiSpansWrapper.wrap(is.getIndexReader(), snq, SpanWeight.Postings.PAYLOADS);
+ Spans spans = snq.createWeight(is, false).getSpans(is.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
TopDocs topDocs = is.search(snq, 1);
Set<String> payloadSet = new HashSet<>();
@@ -343,14 +342,14 @@ public class TestPayloadSpans extends LuceneTestCase {
doc.add(new TextField("content", new StringReader("j k a l f k k p a t a k l k t a")));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
- IndexSearcher is = newSearcher(reader);
+ IndexSearcher is = newSearcher(getOnlyLeafReader(reader));
writer.close();
SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
SpanQuery[] sqs = { stq1, stq2 };
SpanNearQuery snq = new SpanNearQuery(sqs, 0, true);
- Spans spans = MultiSpansWrapper.wrap(is.getIndexReader(), snq, SpanWeight.Postings.PAYLOADS);
+ Spans spans = snq.createWeight(is, false).getSpans(is.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
TopDocs topDocs = is.search(snq, 1);
Set<String> payloadSet = new HashSet<>();
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadTermQuery.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadTermQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadTermQuery.java
index f453b0d..dfa0191 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadTermQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadTermQuery.java
@@ -38,10 +38,10 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.search.spans.MultiSpansWrapper;
import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.SpanWeight;
+import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.English;
@@ -131,10 +131,11 @@ public class TestPayloadTermQuery extends LuceneTestCase {
doc.add(newTextField("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES));
writer.addDocument(doc);
}
+ writer.forceMerge(1);
reader = writer.getReader();
writer.close();
- searcher = newSearcher(reader);
+ searcher = newSearcher(getOnlyLeafReader(reader));
searcher.setSimilarity(similarity);
}
@@ -163,7 +164,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
assertTrue(doc.score + " does not equal: " + 1, doc.score == 1);
}
CheckHits.checkExplanations(query, PayloadHelper.FIELD, searcher, true);
- Spans spans = MultiSpansWrapper.wrap(searcher.getIndexReader(), query);
+ Spans spans = query.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.POSITIONS);
assertTrue("spans is null and it shouldn't be", spans != null);
/*float score = hits.score(0);
for (int i =1; i < hits.length(); i++)
@@ -214,7 +215,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
}
assertTrue(numTens + " does not equal: " + 10, numTens == 10);
CheckHits.checkExplanations(query, "field", searcher, true);
- Spans spans = MultiSpansWrapper.wrap(searcher.getIndexReader(), query);
+ Spans spans = query.createWeight(searcher, false).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.POSITIONS);
assertTrue("spans is null and it shouldn't be", spans != null);
//should be two matches per document
int count = 0;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
index 5a8a99f..85ac12f 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
@@ -568,7 +568,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- BinaryDocValues dv = getOnlySegmentReader(ireader).getBinaryDocValues("field");
+ BinaryDocValues dv = getOnlyLeafReader(ireader).getBinaryDocValues("field");
assertEquals(new BytesRef(), dv.get(0));
ireader.close();
@@ -743,7 +743,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedDocValues dv = getOnlySegmentReader(ireader).getSortedDocValues("field");
+ SortedDocValues dv = getOnlyLeafReader(ireader).getSortedDocValues("field");
if (codecSupportsDocsWithField()) {
assertEquals(-1, dv.getOrd(0));
assertEquals(0, dv.getValueCount());
@@ -833,7 +833,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedDocValues dv = getOnlySegmentReader(ireader).getSortedDocValues("field");
+ SortedDocValues dv = getOnlyLeafReader(ireader).getSortedDocValues("field");
assertEquals(3, dv.getValueCount());
TermsEnum termsEnum = dv.termsEnum();
@@ -1077,7 +1077,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
TopDocs search = searcher.search(query.build(), 10);
assertEquals(5, search.totalHits);
ScoreDoc[] scoreDocs = search.scoreDocs;
- NumericDocValues docValues = getOnlySegmentReader(reader).getNumericDocValues("docId");
+ NumericDocValues docValues = getOnlyLeafReader(reader).getNumericDocValues("docId");
for (int i = 0; i < scoreDocs.length; i++) {
assertEquals(i, scoreDocs[i].doc);
assertEquals(i, docValues.get(scoreDocs[i].doc));
@@ -1154,12 +1154,11 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
int ord = docValues.lookupTerm(expected);
assertEquals(i, ord);
}
- LeafReader slowR = SlowCompositeReaderWrapper.wrap(reader);
Set<Entry<String, String>> entrySet = docToString.entrySet();
for (Entry<String, String> entry : entrySet) {
// pk lookup
- PostingsEnum termPostingsEnum = slowR.postings(new Term("id", entry.getKey()));
+ PostingsEnum termPostingsEnum = TestUtil.docs(random(), reader, "id", new BytesRef(entry.getKey()), null, 0);
int docId = termPostingsEnum.nextDoc();
expected = new BytesRef(entry.getValue());
final BytesRef actual = docValues.get(docId);
@@ -1516,7 +1515,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
dv.setDocument(0);
assertEquals(0, dv.nextOrd());
@@ -1542,7 +1541,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
dv.setDocument(0);
assertEquals(0, dv.nextOrd());
@@ -1551,7 +1550,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
BytesRef bytes = dv.lookupOrd(0);
assertEquals(new BytesRef("hello"), bytes);
- dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field2");
+ dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field2");
dv.setDocument(0);
assertEquals(0, dv.nextOrd());
@@ -1585,7 +1584,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
assertEquals(2, dv.getValueCount());
dv.setDocument(0);
@@ -1619,7 +1618,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
dv.setDocument(0);
assertEquals(0, dv.nextOrd());
@@ -1649,7 +1648,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
dv.setDocument(0);
assertEquals(0, dv.nextOrd());
@@ -1689,7 +1688,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
assertEquals(3, dv.getValueCount());
dv.setDocument(0);
@@ -1733,7 +1732,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
assertEquals(1, dv.getValueCount());
dv.setDocument(0);
@@ -1767,7 +1766,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
assertEquals(1, dv.getValueCount());
dv.setDocument(0);
@@ -1800,7 +1799,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
assertEquals(1, dv.getValueCount());
dv.setDocument(1);
@@ -1834,7 +1833,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
assertEquals(1, dv.getValueCount());
dv.setDocument(1);
@@ -1870,7 +1869,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
assertEquals(0, dv.getValueCount());
ireader.close();
@@ -1894,7 +1893,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
assertEquals(3, dv.getValueCount());
TermsEnum termsEnum = dv.termsEnum();
@@ -2784,13 +2783,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
IndexReader r = w.getReader();
w.close();
- LeafReader ar = SlowCompositeReaderWrapper.wrap(r);
- BinaryDocValues values = ar.getBinaryDocValues("field");
+ BinaryDocValues values = MultiDocValues.getBinaryValues(r, "field");
for(int j=0;j<5;j++) {
BytesRef result = values.get(0);
assertTrue(result.length == 0 || result.length == 1<<i);
}
- ar.close();
+ r.close();
dir.close();
}
}
@@ -2866,7 +2864,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- NumericDocValues dv = getOnlySegmentReader(ireader).getNumericDocValues("field");
+ NumericDocValues dv = getOnlyLeafReader(ireader).getNumericDocValues("field");
assertEquals(0, dv.get(0));
ireader.close();
@@ -3003,7 +3001,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedNumericDocValues dv = getOnlySegmentReader(ireader).getSortedNumericDocValues("field");
+ SortedNumericDocValues dv = getOnlyLeafReader(ireader).getSortedNumericDocValues("field");
dv.setDocument(0);
assertEquals(0, dv.count());
@@ -3033,7 +3031,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedDocValues dv = getOnlySegmentReader(ireader).getSortedDocValues("field");
+ SortedDocValues dv = getOnlyLeafReader(ireader).getSortedDocValues("field");
doTestSortedSetEnumAdvanceIndependently(DocValues.singleton(dv));
ireader.close();
@@ -3064,7 +3062,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
doTestSortedSetEnumAdvanceIndependently(dv);
ireader.close();
@@ -3170,7 +3168,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedDocValues dv = getOnlySegmentReader(ireader).getSortedDocValues("field");
+ SortedDocValues dv = getOnlyLeafReader(ireader).getSortedDocValues("field");
for (int i = 0; i < numEmptyDocs; ++i) {
assertEquals(-1, dv.getOrd(i));
}
@@ -3202,7 +3200,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedSetDocValues dv = getOnlySegmentReader(ireader).getSortedSetDocValues("field");
+ SortedSetDocValues dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field");
for (int i = 0; i < numEmptyDocs; ++i) {
dv.setDocument(i);
assertEquals(-1L, dv.nextOrd());
@@ -3235,8 +3233,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- NumericDocValues dv = getOnlySegmentReader(ireader).getNumericDocValues("field");
- Bits docsWithField = getOnlySegmentReader(ireader).getDocsWithField("field");
+ NumericDocValues dv = getOnlyLeafReader(ireader).getNumericDocValues("field");
+ Bits docsWithField = getOnlyLeafReader(ireader).getDocsWithField("field");
for (int i = 0; i < numEmptyDocs; ++i) {
assertEquals(0, dv.get(i));
assertFalse(docsWithField.get(i));
@@ -3269,7 +3267,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- SortedNumericDocValues dv = getOnlySegmentReader(ireader).getSortedNumericDocValues("field");
+ SortedNumericDocValues dv = getOnlyLeafReader(ireader).getSortedNumericDocValues("field");
for (int i = 0; i < numEmptyDocs; ++i) {
dv.setDocument(i);
assertEquals(0, dv.count());
@@ -3302,8 +3300,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
DirectoryReader ireader = iwriter.getReader();
iwriter.close();
- BinaryDocValues dv = getOnlySegmentReader(ireader).getBinaryDocValues("field");
- Bits docsWithField = getOnlySegmentReader(ireader).getDocsWithField("field");
+ BinaryDocValues dv = getOnlyLeafReader(ireader).getBinaryDocValues("field");
+ Bits docsWithField = getOnlyLeafReader(ireader).getDocsWithField("field");
for (int i = 0; i < numEmptyDocs; ++i) {
assertEquals(new BytesRef(), dv.get(i));
assertFalse(docsWithField.get(i));
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
index 3433caa..2c6f379 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
@@ -258,14 +258,14 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
if (i == 100) {
w.forceMerge(1);
w.commit();
- reader1 = getOnlySegmentReader(DirectoryReader.open(dir));
+ reader1 = getOnlyLeafReader(DirectoryReader.open(dir));
}
}
w.forceMerge(1);
w.commit();
w.close();
- LeafReader reader2 = getOnlySegmentReader(DirectoryReader.open(dir));
+ LeafReader reader2 = getOnlyLeafReader(DirectoryReader.open(dir));
for (LeafReader reader : Arrays.asList(reader1, reader2)) {
new SimpleMergedSegmentWarmer(InfoStream.NO_OUTPUT).warm(reader);
@@ -295,7 +295,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
oneDoc.add(customField);
oneDoc.add(new NumericDocValuesField("field", 5));
iw.addDocument(oneDoc);
- LeafReader oneDocReader = getOnlySegmentReader(DirectoryReader.open(iw));
+ LeafReader oneDocReader = getOnlyLeafReader(DirectoryReader.open(iw));
iw.close();
// now feed to codec apis manually
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java
index 24753aa..ecb3a61 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePointsFormatTestCase.java
@@ -77,7 +77,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
w.close();
DirectoryReader r = DirectoryReader.open(dir);
- LeafReader sub = getOnlySegmentReader(r);
+ LeafReader sub = getOnlyLeafReader(r);
PointValues values = sub.getPointValues();
// Simple test: make sure intersect can visit every doc:
@@ -119,7 +119,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
w.close();
DirectoryReader r = DirectoryReader.open(dir);
- LeafReader sub = getOnlySegmentReader(r);
+ LeafReader sub = getOnlyLeafReader(r);
PointValues values = sub.getPointValues();
// Simple test: make sure intersect can visit every doc:
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index a5957a2..10ed5b1 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -190,7 +190,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
doc.add(newStringField("", "something", Field.Store.NO));
iw.addDocument(doc);
DirectoryReader ir = iw.getReader();
- LeafReader ar = getOnlySegmentReader(ir);
+ LeafReader ar = getOnlyLeafReader(ir);
Fields fields = ar.fields();
int fieldCount = fields.size();
// -1 is allowed, if the codec doesn't implement fields.size():
@@ -215,7 +215,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
doc.add(newStringField("", "", Field.Store.NO));
iw.addDocument(doc);
DirectoryReader ir = iw.getReader();
- LeafReader ar = getOnlySegmentReader(ir);
+ LeafReader ar = getOnlyLeafReader(ir);
Fields fields = ar.fields();
int fieldCount = fields.size();
// -1 is allowed, if the codec doesn't implement fields.size():
@@ -241,7 +241,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
iw.addDocument(doc);
iw.addDocument(doc);
DirectoryReader ir = iw.getReader();
- LeafReader ar = getOnlySegmentReader(ir);
+ LeafReader ar = getOnlyLeafReader(ir);
TermsEnum termsEnum = ar.terms("field").iterator();
assertTrue(termsEnum.seekExact(new BytesRef("value")));
PostingsEnum docsEnum = termsEnum.postings(null, PostingsEnum.NONE);
@@ -264,7 +264,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
iw.addDocument(doc);
iw.addDocument(doc);
DirectoryReader ir = iw.getReader();
- LeafReader ar = getOnlySegmentReader(ir);
+ LeafReader ar = getOnlyLeafReader(ir);
TermsEnum termsEnum = ar.terms("field").iterator();
assertTrue(termsEnum.seekExact(new BytesRef("value")));
PostingsEnum docsEnum = termsEnum.postings(null, PostingsEnum.POSITIONS);
@@ -293,7 +293,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
iw.deleteDocuments(new Term("ghostField", "something")); // delete the only term for the field
iw.forceMerge(1);
DirectoryReader ir = iw.getReader();
- LeafReader ar = getOnlySegmentReader(ir);
+ LeafReader ar = getOnlyLeafReader(ir);
Fields fields = ar.fields();
// Ghost busting terms dict impls will have
// fields.size() == 0; all others must be == 1:
@@ -567,14 +567,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
DirectoryReader reader = DirectoryReader.open(iw);
// sugar method (FREQS)
- PostingsEnum postings = getOnlySegmentReader(reader).postings(new Term("foo", "bar"));
+ PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar"));
assertEquals(-1, postings.docID());
assertEquals(0, postings.nextDoc());
assertEquals(1, postings.freq());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlySegmentReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -625,14 +625,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
DirectoryReader reader = DirectoryReader.open(iw);
// sugar method (FREQS)
- PostingsEnum postings = getOnlySegmentReader(reader).postings(new Term("foo", "bar"));
+ PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar"));
assertEquals(-1, postings.docID());
assertEquals(0, postings.nextDoc());
assertEquals(2, postings.freq());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlySegmentReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -703,14 +703,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
DirectoryReader reader = DirectoryReader.open(iw);
// sugar method (FREQS)
- PostingsEnum postings = getOnlySegmentReader(reader).postings(new Term("foo", "bar"));
+ PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar"));
assertEquals(-1, postings.docID());
assertEquals(0, postings.nextDoc());
assertEquals(2, postings.freq());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlySegmentReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -740,7 +740,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc());
// asking for positions, ok
- PostingsEnum docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS);
+ PostingsEnum docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
assertEquals(2, docsAndPositionsEnum.freq());
@@ -771,7 +771,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
// payloads, offsets, etc don't cause an error if they aren't there
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS);
assertNotNull(docsAndPositionsEnum);
// but make sure they work
assertEquals(-1, docsAndPositionsEnum.docID());
@@ -802,7 +802,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertNull(docsAndPositionsEnum2.getPayload());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -832,7 +832,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertNull(docsAndPositionsEnum2.getPayload());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -883,14 +883,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
DirectoryReader reader = DirectoryReader.open(iw);
// sugar method (FREQS)
- PostingsEnum postings = getOnlySegmentReader(reader).postings(new Term("foo", "bar"));
+ PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar"));
assertEquals(-1, postings.docID());
assertEquals(0, postings.nextDoc());
assertEquals(2, postings.freq());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlySegmentReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -920,7 +920,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc());
// asking for positions, ok
- PostingsEnum docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS);
+ PostingsEnum docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
assertEquals(2, docsAndPositionsEnum.freq());
@@ -955,7 +955,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
// payloads don't cause an error if they aren't there
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS);
assertNotNull(docsAndPositionsEnum);
// but make sure they work
assertEquals(-1, docsAndPositionsEnum.docID());
@@ -990,7 +990,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertNull(docsAndPositionsEnum2.getPayload());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -1020,7 +1020,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertNull(docsAndPositionsEnum2.getPayload());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -1068,14 +1068,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
DirectoryReader reader = DirectoryReader.open(iw);
// sugar method (FREQS)
- PostingsEnum postings = getOnlySegmentReader(reader).postings(new Term("foo", "bar"));
+ PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar"));
assertEquals(-1, postings.docID());
assertEquals(0, postings.nextDoc());
assertEquals(2, postings.freq());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlySegmentReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -1105,7 +1105,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc());
// asking for positions, ok
- PostingsEnum docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS);
+ PostingsEnum docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
assertEquals(2, docsAndPositionsEnum.freq());
@@ -1140,7 +1140,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
// payloads
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -1170,7 +1170,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -1204,7 +1204,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -1254,14 +1254,14 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
DirectoryReader reader = DirectoryReader.open(iw);
// sugar method (FREQS)
- PostingsEnum postings = getOnlySegmentReader(reader).postings(new Term("foo", "bar"));
+ PostingsEnum postings = getOnlyLeafReader(reader).postings(new Term("foo", "bar"));
assertEquals(-1, postings.docID());
assertEquals(0, postings.nextDoc());
assertEquals(2, postings.freq());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlySegmentReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -1291,7 +1291,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsOnly2.nextDoc());
// asking for positions, ok
- PostingsEnum docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS);
+ PostingsEnum docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.POSITIONS);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
assertEquals(2, docsAndPositionsEnum.freq());
@@ -1330,7 +1330,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
// payloads
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.PAYLOADS);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -1364,7 +1364,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.OFFSETS);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -1398,7 +1398,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assertTrue(docsAndPositionsEnum2.getPayload() == null || new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
- docsAndPositionsEnum = getOnlySegmentReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL);
+ docsAndPositionsEnum = getOnlyLeafReader(reader).postings(new Term("foo", "bar"), PostingsEnum.ALL);
assertNotNull(docsAndPositionsEnum);
assertEquals(-1, docsAndPositionsEnum.docID());
assertEquals(0, docsAndPositionsEnum.nextDoc());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
index c58d56a..adcb0af 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
@@ -789,7 +789,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat
iw.addIndexes(dirs);
iw.forceMerge(1);
- LeafReader ir = getOnlySegmentReader(DirectoryReader.open(iw));
+ LeafReader ir = getOnlyLeafReader(DirectoryReader.open(iw));
for (int i = 0; i < ir.maxDoc(); i++) {
Document doc = ir.document(i);
assertEquals(10, doc.getFields().size());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
index 77a46dd..432a25d 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
@@ -768,7 +768,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlySegmentReader(reader).getTermVector(0, "foo");
+ Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
TermsEnum termsEnum = terms.iterator();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -849,7 +849,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlySegmentReader(reader).getTermVector(0, "foo");
+ Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
TermsEnum termsEnum = terms.iterator();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -1028,7 +1028,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlySegmentReader(reader).getTermVector(0, "foo");
+ Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
TermsEnum termsEnum = terms.iterator();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -1214,7 +1214,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlySegmentReader(reader).getTermVector(0, "foo");
+ Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
TermsEnum termsEnum = terms.iterator();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -1400,7 +1400,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlySegmentReader(reader).getTermVector(0, "foo");
+ Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
TermsEnum termsEnum = terms.iterator();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -1586,7 +1586,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlySegmentReader(reader).getTermVector(0, "foo");
+ Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
TermsEnum termsEnum = terms.iterator();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
index c92adde..b517af0 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
@@ -398,7 +398,7 @@ public class QueryUtils {
// previous reader, hits NO_MORE_DOCS
if (lastReader[0] != null) {
final LeafReader previousReader = lastReader[0];
- IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
+ IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity(true));
Weight w = indexSearcher.createNormalizedWeight(q, true);
LeafReaderContext ctx = (LeafReaderContext)indexSearcher.getTopReaderContext();
@@ -500,7 +500,7 @@ public class QueryUtils {
// previous reader, hits NO_MORE_DOCS
if (lastReader[0] != null) {
final LeafReader previousReader = lastReader[0];
- IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
+ IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity(true));
Weight w = indexSearcher.createNormalizedWeight(q, true);
Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext());
@@ -528,7 +528,7 @@ public class QueryUtils {
// confirm that skipping beyond the last doc, on the
// previous reader, hits NO_MORE_DOCS
final LeafReader previousReader = lastReader[0];
- IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
+ IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity(true));
Weight w = indexSearcher.createNormalizedWeight(q, true);
Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext());
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/search/spans/MultiSpansWrapper.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/spans/MultiSpansWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/search/spans/MultiSpansWrapper.java
deleted file mode 100644
index 9915067..0000000
--- a/lucene/test-framework/src/java/org/apache/lucene/search/spans/MultiSpansWrapper.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search.spans;
-
-import java.io.IOException;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.search.IndexSearcher;
-
-/**
- *
- * A wrapper to perform span operations on a non-leaf reader context
- * <p>
- * NOTE: This should be used for testing purposes only
- * @lucene.internal
- */
-public class MultiSpansWrapper {
-
- public static Spans wrap(IndexReader reader, SpanQuery spanQuery) throws IOException {
- return wrap(reader, spanQuery, SpanWeight.Postings.POSITIONS);
- }
-
- public static Spans wrap(IndexReader reader, SpanQuery spanQuery, SpanWeight.Postings requiredPostings) throws IOException {
-
- LeafReader lr = SlowCompositeReaderWrapper.wrap(reader); // slow, but ok for testing
- LeafReaderContext lrContext = lr.getContext();
- IndexSearcher searcher = new IndexSearcher(lr);
- searcher.setQueryCache(null);
-
- SpanWeight w = spanQuery.createWeight(searcher, false);
-
- return w.getSpans(lrContext, requiredPostings);
- }
-}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index 79eb6280..e5aa7a2 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -764,15 +764,29 @@ public abstract class LuceneTestCase extends Assert {
* Some tests expect the directory to contain a single segment, and want to
* do tests on that segment's reader. This is an utility method to help them.
*/
+ /*
public static SegmentReader getOnlySegmentReader(DirectoryReader reader) {
List<LeafReaderContext> subReaders = reader.leaves();
if (subReaders.size() != 1) {
throw new IllegalArgumentException(reader + " has " + subReaders.size() + " segments instead of exactly one");
}
final LeafReader r = subReaders.get(0).reader();
- assertTrue(r instanceof SegmentReader);
+ assertTrue("expected a SegmentReader but got " + r, r instanceof SegmentReader);
return (SegmentReader) r;
}
+ */
+
+ /**
+ * Some tests expect the directory to contain a single segment, and want to
+ * do tests on that segment's reader. This is an utility method to help them.
+ */
+ public static LeafReader getOnlyLeafReader(IndexReader reader) {
+ List<LeafReaderContext> subReaders = reader.leaves();
+ if (subReaders.size() != 1) {
+ throw new IllegalArgumentException(reader + " has " + subReaders.size() + " segments instead of exactly one");
+ }
+ return subReaders.get(0).reader();
+ }
/**
* Returns true if and only if the calling thread is the primary thread
@@ -1625,25 +1639,11 @@ public abstract class LuceneTestCase extends Assert {
}
public static IndexReader wrapReader(IndexReader r) throws IOException {
- return wrapReader(r, true);
- }
-
- public static IndexReader wrapReader(IndexReader r, boolean allowSlowCompositeReader) throws IOException {
Random random = random();
- // TODO: remove this, and fix those tests to wrap before putting slow around:
- final boolean wasOriginallyAtomic = r instanceof LeafReader;
for (int i = 0, c = random.nextInt(6)+1; i < c; i++) {
- switch(random.nextInt(6)) {
+ switch(random.nextInt(5)) {
case 0:
- if (allowSlowCompositeReader) {
- if (VERBOSE) {
- System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with SlowCompositeReaderWrapper.wrap");
- }
- r = SlowCompositeReaderWrapper.wrap(r);
- }
- break;
- case 1:
// will create no FC insanity in atomic case, as ParallelLeafReader has own cache key:
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with ParallelLeaf/CompositeReader");
@@ -1652,7 +1652,7 @@ public abstract class LuceneTestCase extends Assert {
new ParallelLeafReader((LeafReader) r) :
new ParallelCompositeReader((CompositeReader) r);
break;
- case 2:
+ case 1:
// Häckidy-Hick-Hack: a standard MultiReader will cause FC insanity, so we use
// QueryUtils' reader with a fake cache key, so insanity checker cannot walk
// along our reader:
@@ -1661,9 +1661,9 @@ public abstract class LuceneTestCase extends Assert {
}
r = new FCInvisibleMultiReader(r);
break;
- case 3:
- if (allowSlowCompositeReader) {
- final LeafReader ar = SlowCompositeReaderWrapper.wrap(r);
+ case 2:
+ if (r instanceof LeafReader) {
+ final LeafReader ar = (LeafReader) r;
final List<String> allFields = new ArrayList<>();
for (FieldInfo fi : ar.getFieldInfos()) {
allFields.add(fi.name);
@@ -1673,7 +1673,7 @@ public abstract class LuceneTestCase extends Assert {
final Set<String> fields = new HashSet<>(allFields.subList(0, end));
// will create no FC insanity as ParallelLeafReader has own cache key:
if (VERBOSE) {
- System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with ParallelLeafReader(SlowCompositeReaderWapper)");
+ System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with ParallelLeafReader");
}
r = new ParallelLeafReader(
new FieldFilterLeafReader(ar, fields, false),
@@ -1681,7 +1681,7 @@ public abstract class LuceneTestCase extends Assert {
);
}
break;
- case 4:
+ case 3:
// Häckidy-Hick-Hack: a standard Reader will cause FC insanity, so we use
// QueryUtils' reader with a fake cache key, so insanity checker cannot walk
// along our reader:
@@ -1694,7 +1694,7 @@ public abstract class LuceneTestCase extends Assert {
r = new AssertingDirectoryReader((DirectoryReader)r);
}
break;
- case 5:
+ case 4:
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with MismatchedLeaf/DirectoryReader");
}
@@ -1708,11 +1708,8 @@ public abstract class LuceneTestCase extends Assert {
fail("should not get here");
}
}
- if (wasOriginallyAtomic) {
- if (allowSlowCompositeReader) {
- r = SlowCompositeReaderWrapper.wrap(r);
- }
- } else if ((r instanceof CompositeReader) && !(r instanceof FCInvisibleMultiReader)) {
+
+ if ((r instanceof CompositeReader) && !(r instanceof FCInvisibleMultiReader)) {
// prevent cache insanity caused by e.g. ParallelCompositeReader, to fix we wrap one more time:
r = new FCInvisibleMultiReader(r);
}
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
index 22cd467..5295349 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
@@ -315,7 +315,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
doc.add(new Field("f", "a", ft));
doc.add(new Field("f", "a", ft));
writer.addDocument(doc);
- final LeafReader reader = getOnlySegmentReader(writer.getReader());
+ final LeafReader reader = getOnlyLeafReader(writer.getReader());
final Fields fields = reader.getTermVectors(0);
final Terms terms = fields.terms("f");
final TermsEnum te = terms.iterator();
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java b/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
index d84bed9..c6396ae 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
@@ -21,17 +21,18 @@ import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntPoint;
+import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.BaseStoredFieldsFormatTestCase;
import org.apache.lucene.index.CodecReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.ByteArrayDataInput;
@@ -306,7 +307,7 @@ public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTes
assertNotNull(ir2);
ir.close();
ir = ir2;
- CodecReader sr = getOnlySegmentReader(ir);
+ CodecReader sr = (CodecReader) getOnlyLeafReader(ir);
CompressingStoredFieldsReader reader = (CompressingStoredFieldsReader)sr.getFieldsReader();
// we could get lucky, and have zero, but typically one.
assertTrue(reader.getNumDirtyChunks() <= 1);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java b/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
index 4fa0278..f4858d1 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
@@ -25,18 +25,18 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.BaseTermVectorsFormatTestCase;
import org.apache.lucene.index.CodecReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.BaseTermVectorsFormatTestCase;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@@ -56,7 +56,7 @@ public class TestCompressingTermVectorsFormat extends BaseTermVectorsFormatTestC
ft.setStoreTermVectors(true);
doc.add(new Field("foo", "this is a test", ft));
iw.addDocument(doc);
- LeafReader ir = getOnlySegmentReader(iw.getReader());
+ LeafReader ir = getOnlyLeafReader(iw.getReader());
Terms terms = ir.getTermVector(0, "foo");
assertNotNull(terms);
TermsEnum termsEnum = terms.iterator();
@@ -118,7 +118,7 @@ public class TestCompressingTermVectorsFormat extends BaseTermVectorsFormatTestC
assertNotNull(ir2);
ir.close();
ir = ir2;
- CodecReader sr = getOnlySegmentReader(ir);
+ CodecReader sr = (CodecReader) getOnlyLeafReader(ir);
CompressingTermVectorsReader reader = (CompressingTermVectorsReader)sr.getTermVectorsReader();
// we could get lucky, and have zero, but typically one.
assertTrue(reader.getNumDirtyChunks() <= 1);
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7523ca11/lucene/test-framework/src/test/org/apache/lucene/index/TestAssertingLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/test/org/apache/lucene/index/TestAssertingLeafReader.java b/lucene/test-framework/src/test/org/apache/lucene/index/TestAssertingLeafReader.java
index b572289..5276301 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/index/TestAssertingLeafReader.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/index/TestAssertingLeafReader.java
@@ -54,9 +54,8 @@ public class TestAssertingLeafReader extends LuceneTestCase {
assertEquals(1, r.numDocs());
r = new AssertingDirectoryReader((DirectoryReader) r);
+ final IndexReader r2 = r;
- final IndexReader r2 = SlowCompositeReaderWrapper.wrap(r);
-
Thread thread = new Thread() {
@Override
public void run() {
@@ -68,6 +67,6 @@ public class TestAssertingLeafReader extends LuceneTestCase {
thread.start();
thread.join();
- IOUtils.close(r2, dir);
+ IOUtils.close(r, dir);
}
}