You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ma...@apache.org on 2011/12/15 06:11:30 UTC
svn commit: r1214623 [12/17] - in /lucene/dev/branches/solrcloud: ./
dev-tools/idea/lucene/contrib/ lucene/ lucene/contrib/
lucene/contrib/demo/src/java/org/apache/lucene/demo/
lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighligh...
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java Thu Dec 15 05:11:14 2011
@@ -144,6 +144,7 @@ public abstract class CharTokenizer exte
clearAttributes();
int length = 0;
int start = -1; // this variable is always initialized
+ int end = -1;
char[] buffer = termAtt.buffer();
while (true) {
if (bufferIndex >= dataLen) {
@@ -162,15 +163,18 @@ public abstract class CharTokenizer exte
}
// use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone
final int c = charUtils.codePointAt(ioBuffer.getBuffer(), bufferIndex);
- bufferIndex += Character.charCount(c);
+ final int charCount = Character.charCount(c);
+ bufferIndex += charCount;
if (isTokenChar(c)) { // if it's a token char
if (length == 0) { // start of token
assert start == -1;
- start = offset + bufferIndex - 1;
+ start = offset + bufferIndex - charCount;
+ end = start;
} else if (length >= buffer.length-1) { // check if a supplementary could run out of bounds
buffer = termAtt.resizeBuffer(2+length); // make sure a supplementary fits in the buffer
}
+ end += charCount;
length += Character.toChars(normalize(c), buffer, length); // buffer it, normalized
if (length >= MAX_WORD_LEN) // buffer overflow! make sure to check for >= surrogate pair could break == test
break;
@@ -180,7 +184,7 @@ public abstract class CharTokenizer exte
termAtt.setLength(length);
assert start != -1;
- offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(start+length));
+ offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(end));
return true;
}
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java Thu Dec 15 05:11:14 2011
@@ -270,7 +270,7 @@ public class TestClassicAnalyzer extends
writer.addDocument(doc);
writer.close();
- IndexReader reader = IndexReader.open(dir, true);
+ IndexReader reader = IndexReader.open(dir);
// Make sure all terms < max size were indexed
assertEquals(2, reader.docFreq(new Term("content", "abc")));
@@ -303,7 +303,7 @@ public class TestClassicAnalyzer extends
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, sa));
writer.addDocument(doc);
writer.close();
- reader = IndexReader.open(dir, true);
+ reader = IndexReader.open(dir);
assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
reader.close();
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java Thu Dec 15 05:11:14 2011
@@ -63,7 +63,6 @@ public class TestKeywordAnalyzer extends
@Override
public void tearDown() throws Exception {
- searcher.close();
reader.close();
directory.close();
super.tearDown();
@@ -95,7 +94,7 @@ public class TestKeywordAnalyzer extends
writer.addDocument(doc);
writer.close();
- IndexReader reader = IndexReader.open(dir, true);
+ IndexReader reader = IndexReader.open(dir);
DocsEnum td = _TestUtil.docs(random,
reader,
"partnum",
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java Thu Dec 15 05:11:14 2011
@@ -64,8 +64,8 @@ public class TestKeywordMarkerFilter ext
new KeywordMarkerFilter(
new KeywordMarkerFilter(
new MockTokenizer(new StringReader("Dogs Trees Birds Houses"), MockTokenizer.WHITESPACE, false),
- new HashSet<String>(Arrays.asList(new String[] { "Birds", "Houses" }))),
- new HashSet<String>(Arrays.asList(new String[] { "Dogs", "Trees" }))));
+ new HashSet<String>(Arrays.asList("Birds", "Houses"))),
+ new HashSet<String>(Arrays.asList("Dogs", "Trees"))));
assertTokenStreamContents(ts, new String[] { "Dogs", "Trees", "Birds", "Houses" });
}
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java Thu Dec 15 05:11:14 2011
@@ -61,7 +61,7 @@ public class TestLimitTokenCountAnalyzer
writer.addDocument(doc);
writer.close();
- IndexReader reader = IndexReader.open(dir, true);
+ IndexReader reader = IndexReader.open(dir);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java Thu Dec 15 05:11:14 2011
@@ -17,11 +17,16 @@ package org.apache.lucene.analysis.ngram
* limitations under the License.
*/
+import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
+import java.io.Reader;
import java.io.StringReader;
/**
@@ -104,4 +109,24 @@ public class EdgeNGramTokenFilterTest ex
tokenizer.reset(new StringReader("abcde"));
assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
}
+
+ // LUCENE-3642
+ // EdgeNgram blindly adds term length to offset, but this can take things out of bounds
+ // wrt original text if a previous filter increases the length of the word (in this case æ -> ae)
+ // so in this case we behave like WDF, and preserve any modified offsets
+ public void testInvalidOffsets() throws Exception {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+ TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
+ filters = new EdgeNGramTokenFilter(filters, EdgeNGramTokenFilter.Side.FRONT, 2, 15);
+ return new TokenStreamComponents(tokenizer, filters);
+ }
+ };
+ assertAnalyzesTo(analyzer, "mosfellsbær",
+ new String[] { "mo", "mos", "mosf", "mosfe", "mosfel", "mosfell", "mosfells", "mosfellsb", "mosfellsba", "mosfellsbae", "mosfellsbaer" },
+ new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ new int[] { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11 });
+ }
}
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java Thu Dec 15 05:11:14 2011
@@ -17,11 +17,16 @@ package org.apache.lucene.analysis.ngram
* limitations under the License.
*/
+import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
+import java.io.Reader;
import java.io.StringReader;
/**
@@ -93,4 +98,24 @@ public class NGramTokenFilterTest extend
tokenizer.reset(new StringReader("abcde"));
assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
}
+
+ // LUCENE-3642
+ // EdgeNgram blindly adds term length to offset, but this can take things out of bounds
+ // wrt original text if a previous filter increases the length of the word (in this case æ -> ae)
+ // so in this case we behave like WDF, and preserve any modified offsets
+ public void testInvalidOffsets() throws Exception {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+ TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
+ filters = new NGramTokenFilter(filters, 2, 2);
+ return new TokenStreamComponents(tokenizer, filters);
+ }
+ };
+ assertAnalyzesTo(analyzer, "mosfellsbær",
+ new String[] { "mo", "os", "sf", "fe", "el", "ll", "ls", "sb", "ba", "ae", "er" },
+ new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ new int[] { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11 });
+ }
}
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java Thu Dec 15 05:11:14 2011
@@ -53,7 +53,7 @@ public class QueryAutoStopWordAnalyzerTe
writer.addDocument(doc);
}
writer.close();
- reader = IndexReader.open(dir, true);
+ reader = IndexReader.open(dir);
}
@Override
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Thu Dec 15 05:11:14 2011
@@ -80,7 +80,6 @@ public class ShingleAnalyzerWrapperTest
@Override
public void tearDown() throws Exception {
- searcher.close();
reader.close();
directory.close();
super.tearDown();
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java Thu Dec 15 05:11:14 2011
@@ -104,7 +104,7 @@ public class TestTeeSinkTokenFilter exte
w.addDocument(doc);
w.close();
- IndexReader r = IndexReader.open(dir, true);
+ IndexReader r = IndexReader.open(dir);
Terms vector = r.getTermVectors(0).terms("field");
assertEquals(1, vector.getUniqueTermCount());
TermsEnum termsEnum = vector.iterator(null);
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java Thu Dec 15 05:11:14 2011
@@ -18,11 +18,17 @@ package org.apache.lucene.analysis.util;
*/
import java.io.IOException;
+import java.io.Reader;
import java.io.StringReader;
+import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.core.LowerCaseTokenizer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.util._TestUtil;
/**
@@ -94,4 +100,80 @@ public class TestCharTokenizers extends
Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
}
+
+ // LUCENE-3642: normalize SMP->BMP and check that offsets are correct
+ public void testCrossPlaneNormalization() throws IOException {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = new LetterTokenizer(TEST_VERSION_CURRENT, reader) {
+ @Override
+ protected int normalize(int c) {
+ if (c > 0xffff) {
+ return 'δ';
+ } else {
+ return c;
+ }
+ }
+ };
+ return new TokenStreamComponents(tokenizer, tokenizer);
+ }
+ };
+ int num = 10000 * RANDOM_MULTIPLIER;
+ for (int i = 0; i < num; i++) {
+ String s = _TestUtil.randomUnicodeString(random);
+ TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+ ts.reset();
+ OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
+ while (ts.incrementToken()) {
+ String highlightedText = s.substring(offsetAtt.startOffset(), offsetAtt.endOffset());
+ for (int j = 0, cp = 0; j < highlightedText.length(); j += Character.charCount(cp)) {
+ cp = highlightedText.codePointAt(j);
+ assertTrue("non-letter:" + Integer.toHexString(cp), Character.isLetter(cp));
+ }
+ }
+ ts.end();
+ ts.close();
+ }
+ // just for fun
+ checkRandomData(random, analyzer, num);
+ }
+
+ // LUCENE-3642: normalize BMP->SMP and check that offsets are correct
+ public void testCrossPlaneNormalization2() throws IOException {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = new LetterTokenizer(TEST_VERSION_CURRENT, reader) {
+ @Override
+ protected int normalize(int c) {
+ if (c <= 0xffff) {
+ return 0x1043C;
+ } else {
+ return c;
+ }
+ }
+ };
+ return new TokenStreamComponents(tokenizer, tokenizer);
+ }
+ };
+ int num = 10000 * RANDOM_MULTIPLIER;
+ for (int i = 0; i < num; i++) {
+ String s = _TestUtil.randomUnicodeString(random);
+ TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+ ts.reset();
+ OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
+ while (ts.incrementToken()) {
+ String highlightedText = s.substring(offsetAtt.startOffset(), offsetAtt.endOffset());
+ for (int j = 0, cp = 0; j < highlightedText.length(); j += Character.charCount(cp)) {
+ cp = highlightedText.codePointAt(j);
+ assertTrue("non-letter:" + Integer.toHexString(cp), Character.isLetter(cp));
+ }
+ }
+ ts.end();
+ ts.close();
+ }
+ // just for fun
+ checkRandomData(random, analyzer, num);
+ }
}
Modified: lucene/dev/branches/solrcloud/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/WordTokenFilter.java Thu Dec 15 05:11:14 2011
@@ -43,6 +43,10 @@ public final class WordTokenFilter exten
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+
+ private int tokStart; // only used if the length changed before this filter
+ private int tokEnd; // only used if the length changed before this filter
+ private boolean hasIllegalOffsets; // only if the length changed before this filter
/**
* Construct a new WordTokenizer.
@@ -59,6 +63,11 @@ public final class WordTokenFilter exten
if (tokenIter == null || !tokenIter.hasNext()) {
// there are no remaining tokens from the current sentence... are there more sentences?
if (input.incrementToken()) {
+ tokStart = offsetAtt.startOffset();
+ tokEnd = offsetAtt.endOffset();
+ // if length by start + end offsets doesn't match the term text then assume
+ // this is a synonym and don't adjust the offsets.
+ hasIllegalOffsets = (tokStart + termAtt.length()) != tokEnd;
// a new sentence is available: process it.
tokenBuffer = wordSegmenter.segmentSentence(termAtt.toString(), offsetAtt.startOffset());
tokenIter = tokenBuffer.iterator();
@@ -77,7 +86,11 @@ public final class WordTokenFilter exten
// There are remaining tokens from the current sentence, return the next one.
SegToken nextWord = tokenIter.next();
termAtt.copyBuffer(nextWord.charArray, 0, nextWord.charArray.length);
- offsetAtt.setOffset(nextWord.startOffset, nextWord.endOffset);
+ if (hasIllegalOffsets) {
+ offsetAtt.setOffset(tokStart, tokEnd);
+ } else {
+ offsetAtt.setOffset(nextWord.startOffset, nextWord.endOffset);
+ }
typeAtt.setType("word");
return true;
}
Modified: lucene/dev/branches/solrcloud/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java Thu Dec 15 05:11:14 2011
@@ -17,11 +17,16 @@
package org.apache.lucene.analysis.cn.smart;
+import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
import org.apache.lucene.util.Version;
public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase {
@@ -196,6 +201,24 @@ public class TestSmartChineseAnalyzer ex
}
}
+ // LUCENE-3642
+ public void testInvalidOffset() throws Exception {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+ Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+ TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
+ filters = new WordTokenFilter(filters);
+ return new TokenStreamComponents(tokenizer, filters);
+ }
+ };
+
+ assertAnalyzesTo(analyzer, "mosfellsbær",
+ new String[] { "mosfellsbaer" },
+ new int[] { 0 },
+ new int[] { 11 });
+ }
+
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
checkRandomData(random, new SmartChineseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java Thu Dec 15 05:11:14 2011
@@ -126,7 +126,7 @@ public class PerfRunData implements Clos
}
public void close() throws IOException {
- IOUtils.close(indexWriter, indexReader, indexSearcher, directory,
+ IOUtils.close(indexWriter, indexReader, directory,
taxonomyWriter, taxonomyReader, taxonomyDir,
docMaker, facetSource);
}
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTask.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTask.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTask.java Thu Dec 15 05:11:14 2011
@@ -21,7 +21,6 @@ import java.util.Map;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexReader;
/**
* Commits the IndexWriter.
@@ -51,14 +50,6 @@ public class CommitIndexTask extends Per
IndexWriter iw = getRunData().getIndexWriter();
if (iw != null) {
iw.commit(commitUserData);
- } else {
- IndexReader r = getRunData().getIndexReader();
- if (r != null) {
- r.commit(commitUserData);
- r.decRef();
- } else {
- throw new IllegalStateException("neither IndexWriter nor IndexReader is currently open");
- }
}
return 1;
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenReaderTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenReaderTask.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenReaderTask.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenReaderTask.java Thu Dec 15 05:11:14 2011
@@ -22,20 +22,17 @@ import java.util.Collection;
import java.util.Map;
import org.apache.lucene.benchmark.byTask.PerfRunData;
-import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.index.IndexCommit;
-import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.Directory;
/**
* Open an index reader.
* <br>Other side effects: index reader object in perfRunData is set.
- * <br> Optional params readOnly,commitUserData eg. OpenReader(false,commit1)
+ * <br> Optional params commitUserData eg. OpenReader(false,commit1)
*/
public class OpenReaderTask extends PerfTask {
public static final String USER_DATA = "userData";
- private boolean readOnly = true;
private String commitUserData = null;
public OpenReaderTask(PerfRunData runData) {
@@ -45,22 +42,11 @@ public class OpenReaderTask extends Perf
@Override
public int doLogic() throws IOException {
Directory dir = getRunData().getDirectory();
- Config config = getRunData().getConfig();
IndexReader r = null;
- final IndexDeletionPolicy deletionPolicy;
- if (readOnly) {
- deletionPolicy = null;
- } else {
- deletionPolicy = CreateIndexTask.getIndexDeletionPolicy(config);
- }
if (commitUserData != null) {
- r = IndexReader.open(OpenReaderTask.findIndexCommit(dir, commitUserData),
- deletionPolicy,
- readOnly);
+ r = IndexReader.open(OpenReaderTask.findIndexCommit(dir, commitUserData));
} else {
- r = IndexReader.open(dir,
- deletionPolicy,
- readOnly);
+ r = IndexReader.open(dir);
}
getRunData().setIndexReader(r);
// We transfer reference to the run data
@@ -74,10 +60,7 @@ public class OpenReaderTask extends Perf
if (params != null) {
String[] split = params.split(",");
if (split.length > 0) {
- readOnly = Boolean.valueOf(split[0]).booleanValue();
- }
- if (split.length > 1) {
- commitUserData = split[1];
+ commitUserData = split[0];
}
}
}
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PerfTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PerfTask.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PerfTask.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PerfTask.java Thu Dec 15 05:11:14 2011
@@ -36,7 +36,7 @@ import org.apache.lucene.benchmark.byTas
* logging.
* <li>log.step.[class Task Name] - specifies the same as 'log.step', only for a
* particular task name. For example, log.step.AddDoc will be applied only for
- * {@link AddDocTask}, but not for {@link DeleteDocTask}. It's a way to control
+ * {@link AddDocTask}. It's a way to control
* per task logging settings. If you want to omit logging for any other task,
* include log.step=-1. The syntax is "log.step." together with the Task's
* 'short' name (i.e., without the 'Task' part).
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PrintReaderTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PrintReaderTask.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PrintReaderTask.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/PrintReaderTask.java Thu Dec 15 05:11:14 2011
@@ -44,11 +44,9 @@ public class PrintReaderTask extends Per
Directory dir = getRunData().getDirectory();
IndexReader r = null;
if (userData == null)
- r = IndexReader.open(dir, true);
+ r = IndexReader.open(dir);
else
- r = IndexReader.open(OpenReaderTask.findIndexCommit(dir, userData),
- null,
- true);
+ r = IndexReader.open(OpenReaderTask.findIndexCommit(dir, userData));
System.out.println("--> numDocs:"+r.numDocs()+" dels:"+r.numDeletedDocs());
r.close();
return 1;
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java Thu Dec 15 05:11:14 2011
@@ -84,7 +84,7 @@ public abstract class ReadTask extends P
if (searcher == null) {
// open our own reader
Directory dir = getRunData().getDirectory();
- reader = IndexReader.open(dir, true);
+ reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
closeSearcher = true;
} else {
@@ -179,7 +179,6 @@ public abstract class ReadTask extends P
}
if (closeSearcher) {
- searcher.close();
reader.close();
} else {
// Release our +1 ref from above
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java Thu Dec 15 05:11:14 2011
@@ -88,7 +88,6 @@ public class QueryDriver {
// print an avarage sum of the results
QualityStats avg = QualityStats.average(stats);
avg.log("SUMMARY", 2, logger, " ");
- searcher.close();
reader.close();
dir.close();
}
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java Thu Dec 15 05:11:14 2011
@@ -86,7 +86,7 @@ public class QualityQueriesFinder {
private String [] bestTerms(String field,int numTerms) throws IOException {
PriorityQueue<TermDf> pq = new TermsDfQueue(numTerms);
- IndexReader ir = IndexReader.open(dir, true);
+ IndexReader ir = IndexReader.open(dir);
try {
int threshold = ir.maxDoc() / 10; // ignore words too common.
Terms terms = MultiFields.getTerms(ir, field);
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java Thu Dec 15 05:11:14 2011
@@ -102,7 +102,7 @@ public class TestPerfTasksLogic extends
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND));
iw.close();
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
ir.close();
}
@@ -167,7 +167,7 @@ public class TestPerfTasksLogic extends
"{ AddDoc } : 100",
"ForceMerge(1)",
"CloseIndex",
- "OpenReader(true)",
+ "OpenReader",
"{ CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body]) } : 200",
"CloseReader",
};
@@ -188,7 +188,7 @@ public class TestPerfTasksLogic extends
// now we should be able to open the index for write.
IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
iw.close();
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
assertEquals("100 docs were added to the index, this is what we expect to find!",100,ir.numDocs());
ir.close();
}
@@ -206,7 +206,7 @@ public class TestPerfTasksLogic extends
"{ AddDoc } : 1000",
"ForceMerge(1)",
"CloseIndex",
- "OpenReader(false)",
+ "OpenReader",
"{ CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body]) } : 200",
"CloseReader",
};
@@ -227,7 +227,7 @@ public class TestPerfTasksLogic extends
// now we should be able to open the index for write.
IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
iw.close();
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs());
ir.close();
}
@@ -300,7 +300,7 @@ public class TestPerfTasksLogic extends
// now we should be able to open the index for write.
IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
iw.close();
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
assertEquals("1 docs were added to the index, this is what we expect to find!",1,ir.numDocs());
ir.close();
}
@@ -331,7 +331,7 @@ public class TestPerfTasksLogic extends
// 3. execute the algorithm (required in every "logic" test)
Benchmark benchmark = execBenchmark(algLines);
- IndexReader r = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader r = IndexReader.open(benchmark.getRunData().getDirectory());
DocTermsIndex idx = FieldCache.DEFAULT.getTermsIndex(r, "country");
final int maxDoc = r.maxDoc();
assertEquals(1000, maxDoc);
@@ -367,7 +367,7 @@ public class TestPerfTasksLogic extends
Benchmark benchmark = execBenchmark(algLines);
// 3. test number of docs in the index
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
int ndocsExpected = 20; // first 20 reuters docs.
assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
ir.close();
@@ -432,7 +432,7 @@ public class TestPerfTasksLogic extends
.setOpenMode(OpenMode.APPEND));
iw.close();
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
assertEquals(numLines + " lines were created but " + ir.numDocs() + " docs are in the index", numLines, ir.numDocs());
ir.close();
@@ -476,7 +476,7 @@ public class TestPerfTasksLogic extends
}
// Separately count how many tokens are actually in the index:
- IndexReader reader = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader reader = IndexReader.open(benchmark.getRunData().getDirectory());
assertEquals(NUM_DOCS, reader.numDocs());
int totalTokenCount2 = 0;
@@ -535,7 +535,7 @@ public class TestPerfTasksLogic extends
Benchmark benchmark = execBenchmark(algLines);
// 3. test number of docs in the index
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
int ndocsExpected = 2 * 20; // first 20 reuters docs.
assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
ir.close();
@@ -572,7 +572,7 @@ public class TestPerfTasksLogic extends
Benchmark benchmark = execBenchmark(algLines);
// 3. test number of docs in the index
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
int ndocsExpected = 20; // first 20 reuters docs.
assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
ir.close();
@@ -609,7 +609,7 @@ public class TestPerfTasksLogic extends
Benchmark benchmark = execBenchmark(algLines);
// 3. test number of docs in the index
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
int ndocsExpected = 20; // first 20 reuters docs.
assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
ir.close();
@@ -623,40 +623,6 @@ public class TestPerfTasksLogic extends
}
}
- public void testDeleteByPercent() throws Exception {
- // 1. alg definition (required in every "logic" test)
- String algLines[] = {
- "# ----- properties ",
- "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource",
- "docs.file=" + getReuters20LinesFile(),
- "ram.flush.mb=-1",
- "max.buffered=2",
- "content.source.log.step=3",
- "doc.term.vector=false",
- "content.source.forever=false",
- "directory=RAMDirectory",
- "doc.stored=false",
- "doc.tokenized=false",
- "debug.level=1",
- "# ----- alg ",
- "CreateIndex",
- "{ \"AddDocs\" AddDoc > : * ",
- "CloseIndex()",
- "OpenReader(false)",
- "DeleteByPercent(20)",
- "CloseReader"
- };
-
- // 2. execute the algorithm (required in every "logic" test)
- Benchmark benchmark = execBenchmark(algLines);
-
- // 3. test number of docs in the index
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
- int ndocsExpected = 16; // first 20 reuters docs, minus 20%
- assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
- ir.close();
- }
-
/**
* Test that we can set merge scheduler".
*/
@@ -690,7 +656,7 @@ public class TestPerfTasksLogic extends
benchmark.getRunData().getIndexWriter().close();
// 3. test number of docs in the index
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
int ndocsExpected = 20; // first 20 reuters docs.
assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
ir.close();
@@ -736,7 +702,7 @@ public class TestPerfTasksLogic extends
benchmark.getRunData().getIndexWriter().close();
// 3. test number of docs in the index
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
int ndocsExpected = 20; // first 20 reuters docs.
assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
ir.close();
@@ -780,7 +746,7 @@ public class TestPerfTasksLogic extends
assertFalse(((LogMergePolicy) writer.getConfig().getMergePolicy()).getUseCompoundFile());
writer.close();
Directory dir = benchmark.getRunData().getDirectory();
- IndexReader reader = IndexReader.open(dir, true);
+ IndexReader reader = IndexReader.open(dir);
Fields tfv = reader.getTermVectors(0);
assertNotNull(tfv);
assertTrue(tfv.getUniqueFieldCount() > 0);
@@ -856,7 +822,7 @@ public class TestPerfTasksLogic extends
Benchmark benchmark = execBenchmark(algLines);
// 3. test number of docs in the index
- IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true);
+ IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory());
int ndocsExpected = 20; // first 20 reuters docs.
assertEquals("wrong number of docs in the index!", ndocsExpected, ir.numDocs());
ir.close();
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java Thu Dec 15 05:11:14 2011
@@ -92,7 +92,6 @@ public class DocMakerTest extends Benchm
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs td = searcher.search(new TermQuery(new Term("key", "value")), 10);
assertEquals(numExpectedResults, td.totalHits);
- searcher.close();
reader.close();
}
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java Thu Dec 15 05:11:14 2011
@@ -160,7 +160,7 @@ public class LineDocSourceTest extends B
}
assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField));
} finally {
- IOUtils.close(searcher, reader, runData);
+ IOUtils.close(reader, runData);
}
}
Modified: lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/quality/TestQualityRun.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/quality/TestQualityRun.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/quality/TestQualityRun.java (original)
+++ lucene/dev/branches/solrcloud/modules/benchmark/src/test/org/apache/lucene/benchmark/quality/TestQualityRun.java Thu Dec 15 05:11:14 2011
@@ -135,7 +135,6 @@ public class TestQualityRun extends Benc
assertTrue("avg p_at_"+j+" should be hurt: "+avg.getPrecisionAt(j), 1.0 > avg.getPrecisionAt(j));
}
- searcher.close();
reader.close();
dir.close();
}
Modified: lucene/dev/branches/solrcloud/modules/facet/src/examples/org/apache/lucene/facet/example/simple/SimpleMain.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/examples/org/apache/lucene/facet/example/simple/SimpleMain.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/examples/org/apache/lucene/facet/example/simple/SimpleMain.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/examples/org/apache/lucene/facet/example/simple/SimpleMain.java Thu Dec 15 05:11:14 2011
@@ -57,7 +57,7 @@ public class SimpleMain {
// open readers
TaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);
- IndexReader indexReader = IndexReader.open(indexDir, true);
+ IndexReader indexReader = IndexReader.open(indexDir);
ExampleUtils.log("search the sample documents...");
List<FacetResult> facetRes = SimpleSearcher.searchWithFacets(indexReader, taxo);
@@ -82,7 +82,7 @@ public class SimpleMain {
// open readers
TaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);
- IndexReader indexReader = IndexReader.open(indexDir, true);
+ IndexReader indexReader = IndexReader.open(indexDir);
ExampleUtils.log("search the sample documents...");
List<FacetResult> facetRes = SimpleSearcher.searchWithDrillDown(indexReader, taxo);
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java Thu Dec 15 05:11:14 2011
@@ -231,7 +231,7 @@ public abstract class FacetTestBase exte
/** Close all indexes */
protected void closeAll() throws Exception {
// close and nullify everything
- IOUtils.close(taxoReader, indexReader, searcher);
+ IOUtils.close(taxoReader, indexReader);
taxoReader = null;
indexReader = null;
searcher = null;
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java Thu Dec 15 05:11:14 2011
@@ -138,7 +138,6 @@ public class FacetTestUtils {
public IndexSearcher indexSearcher;
public void close() throws IOException {
- indexSearcher.close();
indexReader.close();
taxReader.close();
}
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java Thu Dec 15 05:11:14 2011
@@ -86,7 +86,7 @@ public abstract class BaseTestTopK exten
if (VERBOSE) {
System.out.println("Adding CP: " + cp.toString());
}
- return Arrays.asList(new CategoryPath[] { cp });
+ return Arrays.asList(cp);
}
protected FacetSearchParams searchParamsWithRequests(int numResults) {
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java Thu Dec 15 05:11:14 2011
@@ -149,8 +149,6 @@ public class DrillDownTest extends Lucen
Query q4 = DrillDown.query(defaultParams, fooQuery, new CategoryPath("b"));
docs = searcher.search(q4, 100);
assertEquals(10, docs.totalHits);
-
- searcher.close();
}
@Test
@@ -172,8 +170,6 @@ public class DrillDownTest extends Lucen
Query q4 = DrillDown.query(fooQuery, new CategoryPath("b"));
docs = searcher.search(q4, 100);
assertEquals(10, docs.totalHits);
-
- searcher.close();
}
@AfterClass
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java Thu Dec 15 05:11:14 2011
@@ -94,7 +94,6 @@ public class TestMultipleCategoryLists e
tr.close();
ir.close();
- searcher.close();
iw.close();
tw.close();
IOUtils.close(dirs[0]);
@@ -135,7 +134,6 @@ public class TestMultipleCategoryLists e
tr.close();
ir.close();
- searcher.close();
iw.close();
tw.close();
IOUtils.close(dirs[0]);
@@ -179,7 +177,6 @@ public class TestMultipleCategoryLists e
tr.close();
ir.close();
- searcher.close();
iw.close();
tw.close();
IOUtils.close(dirs[0]);
@@ -225,7 +222,6 @@ public class TestMultipleCategoryLists e
assertPostingListExists("$composers", "Composers", ir);
tr.close();
ir.close();
- searcher.close();
iw.close();
tw.close();
IOUtils.close(dirs[0]);
@@ -270,7 +266,6 @@ public class TestMultipleCategoryLists e
tr.close();
ir.close();
- searcher.close();
iw.close();
tw.close();
IOUtils.close(dirs[0]);
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java Thu Dec 15 05:11:14 2011
@@ -323,19 +323,6 @@ public class TestTotalFacetCountsCache e
assertTrue("Should be obtained from cache at 8th attempt",totalCounts ==
TFC.getTotalCounts(readers[0].indexReader, readers[0].taxReader, iParams, null));
- // delete a doc from the reader and commit - should recompute
- origReader.close();
- origReader = readers[0].indexReader;
- readers[0].indexReader = IndexReader.open(origReader.directory(),false);
- initCache();
- totalCounts = TFC.getTotalCounts(readers[0].indexReader, readers[0].taxReader, iParams, null);
- prevGen = assertRecomputed(totalCounts, prevGen, "after opening a writable reader - 9th attempt!");
- // now do the delete
- readers[0].indexReader.deleteDocument(1);
- readers[0].indexReader.commit(null);
- totalCounts = TFC.getTotalCounts(readers[0].indexReader, readers[0].taxReader, iParams, null);
- prevGen = assertRecomputed(totalCounts, prevGen, "after deleting docs the index - 10th attempt!");
-
origReader.close();
readers[0].close();
r2.close();
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java Thu Dec 15 05:11:14 2011
@@ -126,7 +126,6 @@ public class AssociationsFacetRequestTes
assertEquals("Wrong count for category 'a'!",200, (int) res.get(0).getFacetResultNode().getValue());
assertEquals("Wrong count for category 'b'!",150, (int) res.get(1).getFacetResultNode().getValue());
- searcher.close();
taxo.close();
}
@@ -152,7 +151,6 @@ public class AssociationsFacetRequestTes
assertEquals("Wrong count for category 'a'!",50f, (float) res.get(0).getFacetResultNode().getValue(), 0.00001);
assertEquals("Wrong count for category 'b'!",10f, (float) res.get(1).getFacetResultNode().getValue(), 0.00001);
- searcher.close();
taxo.close();
}
@@ -182,7 +180,6 @@ public class AssociationsFacetRequestTes
} catch (RuntimeException e) {
// ok - expected
}
- searcher.close();
taxo.close();
}
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestIndexClose.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestIndexClose.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestIndexClose.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestIndexClose.java Thu Dec 15 05:11:14 2011
@@ -147,7 +147,7 @@ public class TestIndexClose extends Luce
}
@Override
protected IndexReader openIndexReader(Directory dir) throws CorruptIndexException, IOException {
- return new InstrumentedIndexReader(IndexReader.open(dir,true));
+ return new InstrumentedIndexReader(IndexReader.open(dir));
}
}
Modified: lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java (original)
+++ lucene/dev/branches/solrcloud/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java Thu Dec 15 05:11:14 2011
@@ -144,7 +144,6 @@ public class TestScoredDocIDsUtils exten
Query q = new TermQuery(new Term(DocumentFactory.field, DocumentFactory.alphaTxt));
IndexSearcher searcher = newSearcher(reader);
searcher.search(q, collector);
- searcher.close();
ScoredDocIDs scoredDocIds = collector.getScoredDocIDs();
OpenBitSet resultSet = new OpenBitSetDISI(scoredDocIds.getDocIDs().iterator(), reader.maxDoc());
@@ -231,14 +230,11 @@ public class TestScoredDocIDsUtils exten
for (int docNum = 0; docNum < nDocs; docNum++) {
writer.addDocument(docFactory.getDoc(docNum));
}
- writer.close();
-
// Delete documents marked for deletion
- IndexReader reader = IndexReader.open(dir, false);
- reader.deleteDocuments(new Term(DocumentFactory.field, DocumentFactory.delTxt));
- reader.close();
+ writer.deleteDocuments(new Term(DocumentFactory.field, DocumentFactory.delTxt));
+ writer.close();
// Open a fresh read-only reader with the deletions in place
- return IndexReader.open(dir, true);
+ return IndexReader.open(dir);
}
}
Modified: lucene/dev/branches/solrcloud/modules/grouping/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/CHANGES.txt?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/CHANGES.txt (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/CHANGES.txt Thu Dec 15 05:11:14 2011
@@ -17,4 +17,4 @@ LUCENE-3483: Move Function grouping coll
New features
-LUCENE-3496: Support grouping by IndexDocValues. (Martijn van Groningen)
\ No newline at end of file
+LUCENE-3496: Support grouping by DocValues. (Martijn van Groningen)
\ No newline at end of file
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVAllGroupHeadsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVAllGroupHeadsCollector.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVAllGroupHeadsCollector.java (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVAllGroupHeadsCollector.java Thu Dec 15 05:11:14 2011
@@ -17,9 +17,9 @@ package org.apache.lucene.search.groupin
* limitations under the License.
*/
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValues.Type; // javadocs
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.values.IndexDocValues;
-import org.apache.lucene.index.values.ValueType;
import org.apache.lucene.search.*;
import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
import org.apache.lucene.util.BytesRef;
@@ -40,13 +40,13 @@ public abstract class DVAllGroupHeadsCol
final String groupField;
final boolean diskResident;
- final ValueType valueType;
+ final DocValues.Type valueType;
final BytesRef scratchBytesRef = new BytesRef();
IndexReader.AtomicReaderContext readerContext;
Scorer scorer;
- DVAllGroupHeadsCollector(String groupField, ValueType valueType, int numberOfSorts, boolean diskResident) {
+ DVAllGroupHeadsCollector(String groupField, DocValues.Type valueType, int numberOfSorts, boolean diskResident) {
super(numberOfSorts);
this.groupField = groupField;
this.valueType = valueType;
@@ -59,12 +59,12 @@ public abstract class DVAllGroupHeadsCol
*
* @param groupField The field to group by
* @param sortWithinGroup The sort within each group
- * @param type The {@link ValueType} which is used to select a concrete implementation.
+ * @param type The {@link Type} which is used to select a concrete implementation.
* @param diskResident Whether the values to group by should be disk resident
* @return an <code>AbstractAllGroupHeadsCollector</code> instance based on the supplied arguments
* @throws IOException If I/O related errors occur
*/
- public static AbstractAllGroupHeadsCollector create(String groupField, Sort sortWithinGroup, ValueType type, boolean diskResident) throws IOException {
+ public static AbstractAllGroupHeadsCollector create(String groupField, Sort sortWithinGroup, DocValues.Type type, boolean diskResident) throws IOException {
switch (type) {
case VAR_INTS:
case FIXED_INTS_8:
@@ -126,8 +126,8 @@ public abstract class DVAllGroupHeadsCol
public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
this.readerContext = readerContext;
- final IndexDocValues dv = readerContext.reader.docValues(groupField);
- final IndexDocValues.Source dvSource;
+ final DocValues dv = readerContext.reader.docValues(groupField);
+ final DocValues.Source dvSource;
if (dv != null) {
dvSource = diskResident ? dv.getDirectSource() : dv.getSource();
} else {
@@ -141,14 +141,14 @@ public abstract class DVAllGroupHeadsCol
*
* @param source The idv source to be used by concrete implementations
*/
- protected abstract void setDocValuesSources(IndexDocValues.Source source);
+ protected abstract void setDocValuesSources(DocValues.Source source);
/**
* @return The default source when no doc values are available.
* @param readerContext The current reader context
*/
- protected IndexDocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
- return IndexDocValues.getDefaultSource(valueType);
+ protected DocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
+ return DocValues.getDefaultSource(valueType);
}
// A general impl that works for any group sort.
@@ -157,7 +157,7 @@ public abstract class DVAllGroupHeadsCol
private final Sort sortWithinGroup;
private final Map<Comparable, GroupHead> groups;
- GeneralAllGroupHeadsCollector(String groupField, ValueType valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
+ GeneralAllGroupHeadsCollector(String groupField, DocValues.Type valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
super(groupField, valueType, sortWithinGroup.getSort().length, diskResident);
this.sortWithinGroup = sortWithinGroup;
groups = new HashMap<Comparable, GroupHead>();
@@ -211,9 +211,9 @@ public abstract class DVAllGroupHeadsCol
static class SortedBR extends GeneralAllGroupHeadsCollector {
- private IndexDocValues.SortedSource source;
+ private DocValues.SortedSource source;
- SortedBR(String groupField, ValueType valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
+ SortedBR(String groupField, DocValues.Type valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
super(groupField, valueType, sortWithinGroup, diskResident);
}
@@ -225,21 +225,21 @@ public abstract class DVAllGroupHeadsCol
return BytesRef.deepCopyOf((BytesRef) value);
}
- protected void setDocValuesSources(IndexDocValues.Source source) {
+ protected void setDocValuesSources(DocValues.Source source) {
this.source = source.asSortedSource();
}
@Override
- protected IndexDocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
- return IndexDocValues.getDefaultSortedSource(valueType, readerContext.reader.maxDoc());
+ protected DocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
+ return DocValues.getDefaultSortedSource(valueType, readerContext.reader.maxDoc());
}
}
static class BR extends GeneralAllGroupHeadsCollector {
- private IndexDocValues.Source source;
+ private DocValues.Source source;
- BR(String groupField, ValueType valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
+ BR(String groupField, DocValues.Type valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
super(groupField, valueType, sortWithinGroup, diskResident);
}
@@ -251,7 +251,7 @@ public abstract class DVAllGroupHeadsCol
return BytesRef.deepCopyOf((BytesRef) value);
}
- protected void setDocValuesSources(IndexDocValues.Source source) {
+ protected void setDocValuesSources(DocValues.Source source) {
this.source = source;
}
@@ -259,9 +259,9 @@ public abstract class DVAllGroupHeadsCol
static class Lng extends GeneralAllGroupHeadsCollector {
- private IndexDocValues.Source source;
+ private DocValues.Source source;
- Lng(String groupField, ValueType valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
+ Lng(String groupField, DocValues.Type valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
super(groupField, valueType, sortWithinGroup, diskResident);
}
@@ -273,16 +273,16 @@ public abstract class DVAllGroupHeadsCol
return value;
}
- protected void setDocValuesSources(IndexDocValues.Source source) {
+ protected void setDocValuesSources(DocValues.Source source) {
this.source = source;
}
}
static class Dbl extends GeneralAllGroupHeadsCollector {
- private IndexDocValues.Source source;
+ private DocValues.Source source;
- Dbl(String groupField, ValueType valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
+ Dbl(String groupField, DocValues.Type valueType, Sort sortWithinGroup, boolean diskResident) throws IOException {
super(groupField, valueType, sortWithinGroup, diskResident);
}
@@ -294,7 +294,7 @@ public abstract class DVAllGroupHeadsCol
return value;
}
- protected void setDocValuesSources(IndexDocValues.Source source) {
+ protected void setDocValuesSources(DocValues.Source source) {
this.source = source;
}
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVAllGroupsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVAllGroupsCollector.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVAllGroupsCollector.java (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVAllGroupsCollector.java Thu Dec 15 05:11:14 2011
@@ -17,9 +17,9 @@ package org.apache.lucene.search.groupin
* limitations under the License.
*/
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValues.Type; // javadocs
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.values.IndexDocValues;
-import org.apache.lucene.index.values.ValueType;
import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
import org.apache.lucene.search.grouping.SentinelIntSet;
import org.apache.lucene.util.BytesRef;
@@ -29,7 +29,7 @@ import java.util.*;
/**
* Implementation of {@link AbstractAllGroupsCollector} that groups documents based on
- * {@link IndexDocValues} fields.
+ * {@link DocValues} fields.
*
* @lucene.experimental
*/
@@ -39,20 +39,20 @@ public abstract class DVAllGroupsCollect
/**
* Expert: Constructs a {@link DVAllGroupsCollector}.
- * Selects and constructs the most optimal all groups collector implementation for grouping by {@link IndexDocValues}.
+ * Selects and constructs the most optimal all groups collector implementation for grouping by {@link DocValues}.
*
*
* @param groupField The field to group by
- * @param type The {@link ValueType} which is used to select a concrete implementation.
+ * @param type The {@link Type} which is used to select a concrete implementation.
* @param diskResident Whether the values to group by should be disk resident
* @param initialSize The initial allocation size of the
* internal int set and group list
* which should roughly match the total
* number of expected unique groups. Be aware that the
 * heap usage is 4 bytes * initialSize. Not all concrete implementations use this!
- * @return the most optimal all groups collector implementation for grouping by {@link IndexDocValues}
+ * @return the most optimal all groups collector implementation for grouping by {@link DocValues}
*/
- public static DVAllGroupsCollector create(String groupField, ValueType type, boolean diskResident, int initialSize) {
+ public static DVAllGroupsCollector create(String groupField, DocValues.Type type, boolean diskResident, int initialSize) {
switch (type) {
case VAR_INTS:
case FIXED_INTS_8:
@@ -78,25 +78,25 @@ public abstract class DVAllGroupsCollect
/**
* Constructs a {@link DVAllGroupsCollector}.
- * Selects and constructs the most optimal all groups collector implementation for grouping by {@link IndexDocValues}.
+ * Selects and constructs the most optimal all groups collector implementation for grouping by {@link DocValues}.
* If implementations require an initial allocation size then this will be set to 128.
*
*
* @param groupField The field to group by
- * @param type The {@link ValueType} which is used to select a concrete implementation.
+ * @param type The {@link Type} which is used to select a concrete implementation.
 * @param diskResident Whether the values to group by should be disk resident
- * @return the most optimal all groups collector implementation for grouping by {@link IndexDocValues}
+ * @return the most optimal all groups collector implementation for grouping by {@link DocValues}
*/
- public static DVAllGroupsCollector create(String groupField, ValueType type, boolean diskResident) {
+ public static DVAllGroupsCollector create(String groupField, DocValues.Type type, boolean diskResident) {
return create(groupField, type, diskResident, DEFAULT_INITIAL_SIZE);
}
final String groupField;
- final ValueType valueType;
+ final DocValues.Type valueType;
final boolean diskResident;
final Collection<GROUP_VALUE_TYPE> groups;
- DVAllGroupsCollector(String groupField, ValueType valueType, boolean diskResident, Collection<GROUP_VALUE_TYPE> groups) {
+ DVAllGroupsCollector(String groupField, DocValues.Type valueType, boolean diskResident, Collection<GROUP_VALUE_TYPE> groups) {
this.groupField = groupField;
this.valueType = valueType;
this.diskResident = diskResident;
@@ -105,8 +105,8 @@ public abstract class DVAllGroupsCollect
@Override
public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
- final IndexDocValues dv = readerContext.reader.docValues(groupField);
- final IndexDocValues.Source dvSource;
+ final DocValues dv = readerContext.reader.docValues(groupField);
+ final DocValues.Source dvSource;
if (dv != null) {
dvSource = diskResident ? dv.getDirectSource() : dv.getSource();
} else {
@@ -121,21 +121,21 @@ public abstract class DVAllGroupsCollect
* @param source The idv source to be used by concrete implementations
* @param readerContext The current reader context
*/
- protected abstract void setDocValuesSources(IndexDocValues.Source source, IndexReader.AtomicReaderContext readerContext);
+ protected abstract void setDocValuesSources(DocValues.Source source, IndexReader.AtomicReaderContext readerContext);
/**
* @return The default source when no doc values are available.
* @param readerContext The current reader context
*/
- protected IndexDocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
- return IndexDocValues.getDefaultSource(valueType);
+ protected DocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
+ return DocValues.getDefaultSource(valueType);
}
static class Lng extends DVAllGroupsCollector<Long> {
- private IndexDocValues.Source source;
+ private DocValues.Source source;
- Lng(String groupField, ValueType valueType, boolean diskResident) {
+ Lng(String groupField, DocValues.Type valueType, boolean diskResident) {
super(groupField, valueType, diskResident, new TreeSet<Long>());
}
@@ -150,7 +150,7 @@ public abstract class DVAllGroupsCollect
return groups;
}
- protected void setDocValuesSources(IndexDocValues.Source source, IndexReader.AtomicReaderContext readerContext) {
+ protected void setDocValuesSources(DocValues.Source source, IndexReader.AtomicReaderContext readerContext) {
this.source = source;
}
@@ -158,9 +158,9 @@ public abstract class DVAllGroupsCollect
static class Dbl extends DVAllGroupsCollector<Double> {
- private IndexDocValues.Source source;
+ private DocValues.Source source;
- Dbl(String groupField, ValueType valueType, boolean diskResident) {
+ Dbl(String groupField, DocValues.Type valueType, boolean diskResident) {
super(groupField, valueType, diskResident, new TreeSet<Double>());
}
@@ -175,7 +175,7 @@ public abstract class DVAllGroupsCollect
return groups;
}
- protected void setDocValuesSources(IndexDocValues.Source source, IndexReader.AtomicReaderContext readerContext) {
+ protected void setDocValuesSources(DocValues.Source source, IndexReader.AtomicReaderContext readerContext) {
this.source = source;
}
@@ -185,9 +185,9 @@ public abstract class DVAllGroupsCollect
private final BytesRef spare = new BytesRef();
- private IndexDocValues.Source source;
+ private DocValues.Source source;
- BR(String groupField, ValueType valueType, boolean diskResident) {
+ BR(String groupField, DocValues.Type valueType, boolean diskResident) {
super(groupField, valueType, diskResident, new TreeSet<BytesRef>());
}
@@ -202,7 +202,7 @@ public abstract class DVAllGroupsCollect
return groups;
}
- protected void setDocValuesSources(IndexDocValues.Source source, IndexReader.AtomicReaderContext readerContext) {
+ protected void setDocValuesSources(DocValues.Source source, IndexReader.AtomicReaderContext readerContext) {
this.source = source;
}
@@ -213,9 +213,9 @@ public abstract class DVAllGroupsCollect
private final SentinelIntSet ordSet;
private final BytesRef spare = new BytesRef();
- private IndexDocValues.SortedSource source;
+ private DocValues.SortedSource source;
- SortedBR(String groupField, ValueType valueType, boolean diskResident, int initialSize) {
+ SortedBR(String groupField, DocValues.Type valueType, boolean diskResident, int initialSize) {
super(groupField, valueType, diskResident, new ArrayList<BytesRef>(initialSize));
ordSet = new SentinelIntSet(initialSize, -1);
}
@@ -233,7 +233,7 @@ public abstract class DVAllGroupsCollect
return groups;
}
- protected void setDocValuesSources(IndexDocValues.Source source, IndexReader.AtomicReaderContext readerContext) {
+ protected void setDocValuesSources(DocValues.Source source, IndexReader.AtomicReaderContext readerContext) {
this.source = source.asSortedSource();
ordSet.clear();
@@ -246,8 +246,8 @@ public abstract class DVAllGroupsCollect
}
@Override
- protected IndexDocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
- return IndexDocValues.getDefaultSortedSource(valueType, readerContext.reader.maxDoc());
+ protected DocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
+ return DocValues.getDefaultSortedSource(valueType, readerContext.reader.maxDoc());
}
}
Modified: lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVFirstPassGroupingCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVFirstPassGroupingCollector.java?rev=1214623&r1=1214622&r2=1214623&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVFirstPassGroupingCollector.java (original)
+++ lucene/dev/branches/solrcloud/modules/grouping/src/java/org/apache/lucene/search/grouping/dv/DVFirstPassGroupingCollector.java Thu Dec 15 05:11:14 2011
@@ -17,9 +17,9 @@ package org.apache.lucene.search.groupin
* limitations under the License.
*/
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValues.Type; // javadocs
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.values.IndexDocValues;
-import org.apache.lucene.index.values.ValueType;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
import org.apache.lucene.util.BytesRef;
@@ -35,9 +35,9 @@ public abstract class DVFirstPassGroupin
final String groupField;
final boolean diskResident;
- final ValueType valueType;
+ final DocValues.Type valueType;
- public static DVFirstPassGroupingCollector create(Sort groupSort, int topNGroups, String groupField, ValueType type, boolean diskResident) throws IOException {
+ public static DVFirstPassGroupingCollector create(Sort groupSort, int topNGroups, String groupField, DocValues.Type type, boolean diskResident) throws IOException {
switch (type) {
case VAR_INTS:
case FIXED_INTS_8:
@@ -61,7 +61,7 @@ public abstract class DVFirstPassGroupin
}
}
- DVFirstPassGroupingCollector(Sort groupSort, int topNGroups, String groupField, boolean diskResident, ValueType valueType) throws IOException {
+ DVFirstPassGroupingCollector(Sort groupSort, int topNGroups, String groupField, boolean diskResident, DocValues.Type valueType) throws IOException {
super(groupSort, topNGroups);
this.groupField = groupField;
this.diskResident = diskResident;
@@ -72,8 +72,8 @@ public abstract class DVFirstPassGroupin
public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
super.setNextReader(readerContext);
- final IndexDocValues dv = readerContext.reader.docValues(groupField);
- final IndexDocValues.Source dvSource;
+ final DocValues dv = readerContext.reader.docValues(groupField);
+ final DocValues.Source dvSource;
if (dv != null) {
dvSource = diskResident ? dv.getDirectSource() : dv.getSource();
} else {
@@ -87,21 +87,21 @@ public abstract class DVFirstPassGroupin
*
* @param source The idv source to be used by concrete implementations
*/
- protected abstract void setDocValuesSources(IndexDocValues.Source source);
+ protected abstract void setDocValuesSources(DocValues.Source source);
/**
* @return The default source when no doc values are available.
* @param readerContext The current reader context
*/
- protected IndexDocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
- return IndexDocValues.getDefaultSource(valueType);
+ protected DocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
+ return DocValues.getDefaultSource(valueType);
}
static class Lng extends DVFirstPassGroupingCollector<Long> {
- private IndexDocValues.Source source;
+ private DocValues.Source source;
- Lng(Sort groupSort, int topNGroups, String groupField, boolean diskResident, ValueType type) throws IOException {
+ Lng(Sort groupSort, int topNGroups, String groupField, boolean diskResident, DocValues.Type type) throws IOException {
super(groupSort, topNGroups, groupField, diskResident, type);
}
@@ -113,16 +113,16 @@ public abstract class DVFirstPassGroupin
return groupValue;
}
- protected void setDocValuesSources(IndexDocValues.Source source) {
+ protected void setDocValuesSources(DocValues.Source source) {
this.source = source;
}
}
static class Dbl extends DVFirstPassGroupingCollector<Double> {
- private IndexDocValues.Source source;
+ private DocValues.Source source;
- Dbl(Sort groupSort, int topNGroups, String groupField, boolean diskResident, ValueType type) throws IOException {
+ Dbl(Sort groupSort, int topNGroups, String groupField, boolean diskResident, DocValues.Type type) throws IOException {
super(groupSort, topNGroups, groupField, diskResident, type);
}
@@ -134,17 +134,17 @@ public abstract class DVFirstPassGroupin
return groupValue;
}
- protected void setDocValuesSources(IndexDocValues.Source source) {
+ protected void setDocValuesSources(DocValues.Source source) {
this.source = source;
}
}
static class BR extends DVFirstPassGroupingCollector<BytesRef> {
- private IndexDocValues.Source source;
+ private DocValues.Source source;
private final BytesRef spare = new BytesRef();
- BR(Sort groupSort, int topNGroups, String groupField, boolean diskResident, ValueType type) throws IOException {
+ BR(Sort groupSort, int topNGroups, String groupField, boolean diskResident, DocValues.Type type) throws IOException {
super(groupSort, topNGroups, groupField, diskResident, type);
}
@@ -162,17 +162,17 @@ public abstract class DVFirstPassGroupin
}
@Override
- protected void setDocValuesSources(IndexDocValues.Source source) {
+ protected void setDocValuesSources(DocValues.Source source) {
this.source = source;
}
}
static class SortedBR extends DVFirstPassGroupingCollector<BytesRef> {
- private IndexDocValues.SortedSource sortedSource;
+ private DocValues.SortedSource sortedSource;
private final BytesRef spare = new BytesRef();
- SortedBR(Sort groupSort, int topNGroups, String groupField, boolean diskResident, ValueType type) throws IOException {
+ SortedBR(Sort groupSort, int topNGroups, String groupField, boolean diskResident, DocValues.Type type) throws IOException {
super(groupSort, topNGroups, groupField, diskResident, type);
}
@@ -192,13 +192,13 @@ public abstract class DVFirstPassGroupin
}
@Override
- protected void setDocValuesSources(IndexDocValues.Source source) {
+ protected void setDocValuesSources(DocValues.Source source) {
this.sortedSource = source.asSortedSource();
}
@Override
- protected IndexDocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
- return IndexDocValues.getDefaultSortedSource(valueType, readerContext.reader.maxDoc());
+ protected DocValues.Source getDefaultSource(IndexReader.AtomicReaderContext readerContext) {
+ return DocValues.getDefaultSortedSource(valueType, readerContext.reader.maxDoc());
}
}