You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ha...@apache.org on 2013/08/13 06:06:27 UTC
svn commit: r1513336 [10/11] - in /lucene/dev/branches/lucene3069/lucene: ./
analysis/ analysis/common/
analysis/common/src/java/org/apache/lucene/analysis/charfilter/
analysis/common/src/java/org/apache/lucene/analysis/hunspell/
analysis/common/src/ja...
Modified: lucene/dev/branches/lucene3069/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java (original)
+++ lucene/dev/branches/lucene3069/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java Tue Aug 13 04:06:18 2013
@@ -159,7 +159,7 @@ public class AnalyzingSuggester extends
/** Represents the separation between tokens, if
* PRESERVE_SEP was specified */
- private static final int SEP_LABEL = 0xff;
+ private static final int SEP_LABEL = '\u001F';
/** Marks end of the analyzed input and start of dedup
* byte. */
@@ -306,44 +306,14 @@ public class AnalyzingSuggester extends
}
}
- /** Just escapes the 0xff byte (which we still use for SEP). */
- private static final class EscapingTokenStreamToAutomaton extends TokenStreamToAutomaton {
-
- final BytesRef spare = new BytesRef();
-
- @Override
- protected BytesRef changeToken(BytesRef in) {
- int upto = 0;
- for(int i=0;i<in.length;i++) {
- byte b = in.bytes[in.offset+i];
- if (b == (byte) 0xff) {
- if (spare.bytes.length == upto) {
- spare.grow(upto+2);
- }
- spare.bytes[upto++] = (byte) 0xff;
- spare.bytes[upto++] = b;
- } else {
- if (spare.bytes.length == upto) {
- spare.grow(upto+1);
- }
- spare.bytes[upto++] = b;
- }
- }
- spare.offset = 0;
- spare.length = upto;
- return spare;
- }
+ /** Used by subclass to change the lookup automaton, if
+ * necessary. */
+ protected Automaton convertAutomaton(Automaton a) {
+ return a;
}
-
+
TokenStreamToAutomaton getTokenStreamToAutomaton() {
- final TokenStreamToAutomaton tsta;
- if (preserveSep) {
- tsta = new EscapingTokenStreamToAutomaton();
- } else {
- // When we're not preserving sep, we don't steal 0xff
- // byte, so we don't need to do any escaping:
- tsta = new TokenStreamToAutomaton();
- }
+ final TokenStreamToAutomaton tsta = new TokenStreamToAutomaton();
tsta.setPreservePositionIncrements(preservePositionIncrements);
return tsta;
}
@@ -379,11 +349,14 @@ public class AnalyzingSuggester extends
if (cmp != 0) {
return cmp;
}
+ readerA.skipBytes(scratchA.length);
+ readerB.skipBytes(scratchB.length);
// Next by cost:
long aCost = readerA.readInt();
long bCost = readerB.readInt();
-
+ assert decodeWeight(aCost) >= 0;
+ assert decodeWeight(bCost) >= 0;
if (aCost < bCost) {
return -1;
} else if (aCost > bCost) {
@@ -392,27 +365,20 @@ public class AnalyzingSuggester extends
// Finally by surface form:
if (hasPayloads) {
- readerA.setPosition(readerA.getPosition() + scratchA.length);
scratchA.length = readerA.readShort();
- scratchA.offset = readerA.getPosition();
- readerB.setPosition(readerB.getPosition() + scratchB.length);
scratchB.length = readerB.readShort();
+ scratchA.offset = readerA.getPosition();
scratchB.offset = readerB.getPosition();
} else {
scratchA.offset = readerA.getPosition();
- scratchA.length = a.length - scratchA.offset;
scratchB.offset = readerB.getPosition();
+ scratchA.length = a.length - scratchA.offset;
scratchB.length = b.length - scratchB.offset;
}
-
- cmp = scratchA.compareTo(scratchB);
- if (cmp != 0) {
- return cmp;
- }
-
- return 0;
+
+ return scratchA.compareTo(scratchB);
}
- };
+ }
@Override
public void build(TermFreqIterator iterator) throws IOException {
@@ -654,9 +620,8 @@ public class AnalyzingSuggester extends
}
assert sepIndex != -1;
spare.grow(sepIndex);
- int payloadLen = output2.length - sepIndex - 1;
- output2.length = sepIndex;
- UnicodeUtil.UTF8toUTF16(output2, spare);
+ final int payloadLen = output2.length - sepIndex - 1;
+ UnicodeUtil.UTF8toUTF16(output2.bytes, output2.offset, sepIndex, spare);
BytesRef payload = new BytesRef(payloadLen);
System.arraycopy(output2.bytes, sepIndex+1, payload.bytes, 0, payloadLen);
payload.length = payloadLen;
@@ -699,6 +664,14 @@ public class AnalyzingSuggester extends
}
//System.out.println("lookup key=" + key + " num=" + num);
+ for (int i = 0; i < key.length(); i++) {
+ if (key.charAt(i) == 0x1E) {
+ throw new IllegalArgumentException("lookup key cannot contain HOLE character U+001E; this character is reserved");
+ }
+ if (key.charAt(i) == 0x1F) {
+ throw new IllegalArgumentException("lookup key cannot contain unit separator character U+001F; this character is reserved");
+ }
+ }
final BytesRef utf8Key = new BytesRef(key);
try {
@@ -720,7 +693,7 @@ public class AnalyzingSuggester extends
final List<LookupResult> results = new ArrayList<LookupResult>();
- List<FSTUtil.Path<Pair<Long,BytesRef>>> prefixPaths = FSTUtil.intersectPrefixPaths(lookupAutomaton, fst);
+ List<FSTUtil.Path<Pair<Long,BytesRef>>> prefixPaths = FSTUtil.intersectPrefixPaths(convertAutomaton(lookupAutomaton), fst);
if (exactFirst) {
@@ -864,6 +837,7 @@ public class AnalyzingSuggester extends
ts.close();
replaceSep(automaton);
+ automaton = convertAutomaton(automaton);
assert SpecialOperations.isFinite(automaton);
Modified: lucene/dev/branches/lucene3069/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java (original)
+++ lucene/dev/branches/lucene3069/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java Tue Aug 13 04:06:18 2013
@@ -15,16 +15,15 @@ package org.apache.lucene.search.suggest
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-import java.io.FileOutputStream;
+
import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.TokenStreamToAutomaton;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; // javadocs
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRef;
@@ -33,6 +32,7 @@ import org.apache.lucene.util.automaton.
import org.apache.lucene.util.automaton.BasicOperations;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
import org.apache.lucene.util.automaton.SpecialOperations;
+import org.apache.lucene.util.automaton.UTF32ToUTF8;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.PairOutputs.Pair;
@@ -54,6 +54,9 @@ import org.apache.lucene.util.fst.PairOu
* #DEFAULT_NON_FUZZY_PREFIX} byte is not allowed to be
* edited. We allow up to 1 (@link
* #DEFAULT_MAX_EDITS} edit.
+ * If {@link #unicodeAware} parameter in the constructor is set to true, maxEdits,
+ * minFuzzyLength, transpositions and nonFuzzyPrefix are measured in Unicode code
+ * points (actual letters) instead of bytes.
*
* <p>
* NOTE: This suggester does not boost suggestions that
@@ -66,12 +69,20 @@ import org.apache.lucene.util.fst.PairOu
* like synonyms to keep the complexity of the prefix intersection low for good
* lookup performance. At index time, complex analyzers can safely be used.
* </p>
+ *
+ * @lucene.experimental
*/
public final class FuzzySuggester extends AnalyzingSuggester {
private final int maxEdits;
private final boolean transpositions;
private final int nonFuzzyPrefix;
private final int minFuzzyLength;
+ private final boolean unicodeAware;
+
+ /** Measure maxEdits, minFuzzyLength, transpositions and nonFuzzyPrefix
+ * parameters in Unicode code points (actual letters)
+ * instead of bytes. */
+ public static final boolean DEFAULT_UNICODE_AWARE = false;
/**
* The default minimum length of the key passed to {@link
@@ -114,7 +125,7 @@ public final class FuzzySuggester extend
*/
public FuzzySuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer) {
this(indexAnalyzer, queryAnalyzer, EXACT_FIRST | PRESERVE_SEP, 256, -1, DEFAULT_MAX_EDITS, DEFAULT_TRANSPOSITIONS,
- DEFAULT_NON_FUZZY_PREFIX, DEFAULT_MIN_FUZZY_LENGTH);
+ DEFAULT_NON_FUZZY_PREFIX, DEFAULT_MIN_FUZZY_LENGTH, DEFAULT_UNICODE_AWARE);
}
/**
@@ -138,11 +149,12 @@ public final class FuzzySuggester extend
* Levenshtein algorithm.
* @param nonFuzzyPrefix length of common (non-fuzzy) prefix (see default {@link #DEFAULT_NON_FUZZY_PREFIX}
* @param minFuzzyLength minimum length of lookup key before any edits are allowed (see default {@link #DEFAULT_MIN_FUZZY_LENGTH})
+ * @param unicodeAware operate Unicode code points instead of bytes.
*/
public FuzzySuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer,
int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
int maxEdits, boolean transpositions, int nonFuzzyPrefix,
- int minFuzzyLength) {
+ int minFuzzyLength, boolean unicodeAware) {
super(indexAnalyzer, queryAnalyzer, options, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions);
if (maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
throw new IllegalArgumentException("maxEdits must be between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
@@ -158,6 +170,7 @@ public final class FuzzySuggester extend
this.transpositions = transpositions;
this.nonFuzzyPrefix = nonFuzzyPrefix;
this.minFuzzyLength = minFuzzyLength;
+ this.unicodeAware = unicodeAware;
}
@Override
@@ -176,7 +189,7 @@ public final class FuzzySuggester extend
// "compete") ... in which case I think the wFST needs
// to be log weights or something ...
- Automaton levA = toLevenshteinAutomata(lookupAutomaton);
+ Automaton levA = convertAutomaton(toLevenshteinAutomata(lookupAutomaton));
/*
Writer w = new OutputStreamWriter(new FileOutputStream("out.dot"), "UTF-8");
w.write(levA.toDot());
@@ -186,6 +199,24 @@ public final class FuzzySuggester extend
return FSTUtil.intersectPrefixPaths(levA, fst);
}
+ @Override
+ protected Automaton convertAutomaton(Automaton a) {
+ if (unicodeAware) {
+ Automaton utf8automaton = new UTF32ToUTF8().convert(a);
+ BasicOperations.determinize(utf8automaton);
+ return utf8automaton;
+ } else {
+ return a;
+ }
+ }
+
+ @Override
+ TokenStreamToAutomaton getTokenStreamToAutomaton() {
+ final TokenStreamToAutomaton tsta = super.getTokenStreamToAutomaton();
+ tsta.setUnicodeArcs(unicodeAware);
+ return tsta;
+ }
+
Automaton toLevenshteinAutomata(Automaton automaton) {
final Set<IntsRef> ref = SpecialOperations.getFiniteStrings(automaton, -1);
Automaton subs[] = new Automaton[ref.size()];
@@ -203,7 +234,7 @@ public final class FuzzySuggester extend
// to allow the trailing dedup bytes to be
// edited... but then 0 byte is "in general" allowed
// on input (but not in UTF8).
- LevenshteinAutomata lev = new LevenshteinAutomata(ints, 255, transpositions);
+ LevenshteinAutomata lev = new LevenshteinAutomata(ints, unicodeAware ? Character.MAX_CODE_POINT : 255, transpositions);
Automaton levAutomaton = lev.toAutomaton(maxEdits);
Automaton combined = BasicOperations.concatenate(Arrays.asList(prefix, levAutomaton));
combined.setDeterministic(true); // its like the special case in concatenate itself, except we cloneExpanded already
Modified: lucene/dev/branches/lucene3069/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java (original)
+++ lucene/dev/branches/lucene3069/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java Tue Aug 13 04:06:18 2013
@@ -24,13 +24,15 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
-import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
+import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
@@ -48,14 +50,14 @@ import org.apache.lucene.analysis.TokenS
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
+import org.apache.lucene.document.Document;
import org.apache.lucene.search.suggest.Lookup.LookupResult;
import org.apache.lucene.search.suggest.TermFreq;
import org.apache.lucene.search.suggest.TermFreqArrayIterator;
import org.apache.lucene.search.suggest.TermFreqPayload;
import org.apache.lucene.search.suggest.TermFreqPayloadArrayIterator;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -63,13 +65,16 @@ public class AnalyzingSuggesterTest exte
/** this is basically the WFST test ported to KeywordAnalyzer. so it acts the same */
public void testKeyword() throws Exception {
- TermFreq keys[] = new TermFreq[] {
+ Iterable<TermFreq> keys = shuffle(
new TermFreq("foo", 50),
new TermFreq("bar", 10),
+ new TermFreq("barbar", 10),
new TermFreq("barbar", 12),
- new TermFreq("barbara", 6)
- };
-
+ new TermFreq("barbara", 6),
+ new TermFreq("bar", 5),
+ new TermFreq("barbara", 1)
+ );
+
AnalyzingSuggester suggester = new AnalyzingSuggester(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false));
suggester.build(new TermFreqArrayIterator(keys));
@@ -106,53 +111,99 @@ public class AnalyzingSuggesterTest exte
}
public void testKeywordWithPayloads() throws Exception {
- TermFreqPayload keys[] = new TermFreqPayload[] {
+ Iterable<TermFreqPayload> keys = shuffle(
new TermFreqPayload("foo", 50, new BytesRef("hello")),
new TermFreqPayload("bar", 10, new BytesRef("goodbye")),
new TermFreqPayload("barbar", 12, new BytesRef("thank you")),
- new TermFreqPayload("barbara", 6, new BytesRef("for all the fish"))
- };
+ new TermFreqPayload("bar", 9, new BytesRef("should be deduplicated")),
+ new TermFreqPayload("bar", 8, new BytesRef("should also be deduplicated")),
+ new TermFreqPayload("barbara", 6, new BytesRef("for all the fish")));
AnalyzingSuggester suggester = new AnalyzingSuggester(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false));
suggester.build(new TermFreqPayloadArrayIterator(keys));
+ for (int i = 0; i < 2; i++) {
+ // top N of 2, but only foo is available
+ List<LookupResult> results = suggester.lookup(_TestUtil.stringToCharSequence("f", random()), false, 2);
+ assertEquals(1, results.size());
+ assertEquals("foo", results.get(0).key.toString());
+ assertEquals(50, results.get(0).value, 0.01F);
+ assertEquals(new BytesRef("hello"), results.get(0).payload);
+
+ // top N of 1 for 'bar': we return this even though
+ // barbar is higher because exactFirst is enabled:
+ results = suggester.lookup(_TestUtil.stringToCharSequence("bar", random()), false, 1);
+ assertEquals(1, results.size());
+ assertEquals("bar", results.get(0).key.toString());
+ assertEquals(10, results.get(0).value, 0.01F);
+ assertEquals(new BytesRef("goodbye"), results.get(0).payload);
+
+ // top N Of 2 for 'b'
+ results = suggester.lookup(_TestUtil.stringToCharSequence("b", random()), false, 2);
+ assertEquals(2, results.size());
+ assertEquals("barbar", results.get(0).key.toString());
+ assertEquals(12, results.get(0).value, 0.01F);
+ assertEquals(new BytesRef("thank you"), results.get(0).payload);
+ assertEquals("bar", results.get(1).key.toString());
+ assertEquals(10, results.get(1).value, 0.01F);
+ assertEquals(new BytesRef("goodbye"), results.get(1).payload);
+
+ // top N of 3 for 'ba'
+ results = suggester.lookup(_TestUtil.stringToCharSequence("ba", random()), false, 3);
+ assertEquals(3, results.size());
+ assertEquals("barbar", results.get(0).key.toString());
+ assertEquals(12, results.get(0).value, 0.01F);
+ assertEquals(new BytesRef("thank you"), results.get(0).payload);
+ assertEquals("bar", results.get(1).key.toString());
+ assertEquals(10, results.get(1).value, 0.01F);
+ assertEquals(new BytesRef("goodbye"), results.get(1).payload);
+ assertEquals("barbara", results.get(2).key.toString());
+ assertEquals(6, results.get(2).value, 0.01F);
+ assertEquals(new BytesRef("for all the fish"), results.get(2).payload);
+ }
+ }
+
+ public void testRandomRealisticKeys() throws IOException {
+ LineFileDocs lineFile = new LineFileDocs(random());
+ Map<String, Long> mapping = new HashMap<>();
+ List<TermFreq> keys = new ArrayList<>();
- // top N of 2, but only foo is available
- List<LookupResult> results = suggester.lookup(_TestUtil.stringToCharSequence("f", random()), false, 2);
- assertEquals(1, results.size());
- assertEquals("foo", results.get(0).key.toString());
- assertEquals(50, results.get(0).value, 0.01F);
- assertEquals(new BytesRef("hello"), results.get(0).payload);
+ int howMany = atLeast(100); // this might bring up duplicates
+ for (int i = 0; i < howMany; i++) {
+ Document nextDoc = lineFile.nextDoc();
+ String title = nextDoc.getField("title").stringValue();
+ int randomWeight = random().nextInt(100);
+ keys.add(new TermFreq(title, randomWeight));
+ if (!mapping.containsKey(title) || mapping.get(title) < randomWeight) {
+ mapping.put(title, Long.valueOf(randomWeight));
+ }
+ }
- // top N of 1 for 'bar': we return this even though
- // barbar is higher because exactFirst is enabled:
- results = suggester.lookup(_TestUtil.stringToCharSequence("bar", random()), false, 1);
- assertEquals(1, results.size());
- assertEquals("bar", results.get(0).key.toString());
- assertEquals(10, results.get(0).value, 0.01F);
- assertEquals(new BytesRef("goodbye"), results.get(0).payload);
+ AnalyzingSuggester analyzingSuggester = new AnalyzingSuggester(new MockAnalyzer(random()));
+ analyzingSuggester.setPreservePositionIncrements(random().nextBoolean());
+ boolean doPayloads = random().nextBoolean();
+ if (doPayloads) {
+ List<TermFreqPayload> keysAndPayloads = new ArrayList<>();
+ for (TermFreq termFreq : keys) {
+ keysAndPayloads.add(new TermFreqPayload(termFreq.term, termFreq.v, new BytesRef(Long.toString(termFreq.v))));
+ }
+ analyzingSuggester.build(new TermFreqPayloadArrayIterator(keysAndPayloads));
+ } else {
+ analyzingSuggester.build(new TermFreqArrayIterator(keys));
+ }
- // top N Of 2 for 'b'
- results = suggester.lookup(_TestUtil.stringToCharSequence("b", random()), false, 2);
- assertEquals(2, results.size());
- assertEquals("barbar", results.get(0).key.toString());
- assertEquals(12, results.get(0).value, 0.01F);
- assertEquals(new BytesRef("thank you"), results.get(0).payload);
- assertEquals("bar", results.get(1).key.toString());
- assertEquals(10, results.get(1).value, 0.01F);
- assertEquals(new BytesRef("goodbye"), results.get(1).payload);
+ for (TermFreq termFreq : keys) {
+ List<LookupResult> lookup = analyzingSuggester.lookup(termFreq.term.utf8ToString(), false, keys.size());
+ for (LookupResult lookupResult : lookup) {
+ assertEquals(mapping.get(lookupResult.key), Long.valueOf(lookupResult.value));
+ if (doPayloads) {
+ assertEquals(lookupResult.payload.utf8ToString(), Long.toString(lookupResult.value));
+ } else {
+ assertNull(lookupResult.payload);
+ }
+ }
+ }
- // top N of 3 for 'ba'
- results = suggester.lookup(_TestUtil.stringToCharSequence("ba", random()), false, 3);
- assertEquals(3, results.size());
- assertEquals("barbar", results.get(0).key.toString());
- assertEquals(12, results.get(0).value, 0.01F);
- assertEquals(new BytesRef("thank you"), results.get(0).payload);
- assertEquals("bar", results.get(1).key.toString());
- assertEquals(10, results.get(1).value, 0.01F);
- assertEquals(new BytesRef("goodbye"), results.get(1).payload);
- assertEquals("barbara", results.get(2).key.toString());
- assertEquals(6, results.get(2).value, 0.01F);
- assertEquals(new BytesRef("for all the fish"), results.get(2).payload);
+ lineFile.close();
}
// TODO: more tests
@@ -594,7 +645,7 @@ public class AnalyzingSuggesterTest exte
}
}
- private static char SEP = '\uFFFF';
+ private static char SEP = '\u001F';
public void testRandom() throws Exception {
@@ -705,9 +756,9 @@ public class AnalyzingSuggesterTest exte
AnalyzingSuggester suggester = new AnalyzingSuggester(a, a,
preserveSep ? AnalyzingSuggester.PRESERVE_SEP : 0, 256, -1);
if (doPayloads) {
- suggester.build(new TermFreqPayloadArrayIterator(payloadKeys));
+ suggester.build(new TermFreqPayloadArrayIterator(shuffle(payloadKeys)));
} else {
- suggester.build(new TermFreqArrayIterator(keys));
+ suggester.build(new TermFreqArrayIterator(shuffle(keys)));
}
for (String prefix : allPrefixes) {
@@ -822,82 +873,11 @@ public class AnalyzingSuggesterTest exte
}
}
- public void testStolenBytes() throws Exception {
-
- // First time w/ preserveSep, second time without:
- for(int i=0;i<2;i++) {
-
- final Analyzer analyzer = new Analyzer() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.SIMPLE, true);
-
- // TokenStream stream = new SynonymFilter(tokenizer, map, true);
- // return new TokenStreamComponents(tokenizer, new RemoveDuplicatesTokenFilter(stream));
- return new TokenStreamComponents(tokenizer) {
- int tokenStreamCounter = 0;
- final TokenStream[] tokenStreams = new TokenStream[] {
- new CannedBinaryTokenStream(new BinaryToken[] {
- token(new BytesRef(new byte[] {0x61, (byte) 0xff, 0x61})),
- }),
- new CannedTokenStream(new Token[] {
- token("a",1,1),
- token("a",1,1)
- }),
- new CannedTokenStream(new Token[] {
- token("a",1,1),
- token("a",1,1)
- }),
- new CannedBinaryTokenStream(new BinaryToken[] {
- token(new BytesRef(new byte[] {0x61, (byte) 0xff, 0x61})),
- })
- };
-
- @Override
- public TokenStream getTokenStream() {
- TokenStream result = tokenStreams[tokenStreamCounter];
- tokenStreamCounter++;
- return result;
- }
-
- @Override
- protected void setReader(final Reader reader) throws IOException {
- }
- };
- }
- };
-
- TermFreq keys[] = new TermFreq[] {
- new TermFreq("a a", 50),
- new TermFreq("a b", 50),
- };
-
- AnalyzingSuggester suggester = new AnalyzingSuggester(analyzer, analyzer, AnalyzingSuggester.EXACT_FIRST | (i==0 ? AnalyzingSuggester.PRESERVE_SEP : 0), 256, -1);
- suggester.build(new TermFreqArrayIterator(keys));
- List<LookupResult> results = suggester.lookup("a a", false, 5);
- assertEquals(1, results.size());
- assertEquals("a b", results.get(0).key);
- assertEquals(50, results.get(0).value);
-
- results = suggester.lookup("a a", false, 5);
- assertEquals(1, results.size());
- assertEquals("a a", results.get(0).key);
- assertEquals(50, results.get(0).value);
- }
- }
-
public void testMaxSurfaceFormsPerAnalyzedForm() throws Exception {
Analyzer a = new MockAnalyzer(random());
AnalyzingSuggester suggester = new AnalyzingSuggester(a, a, 0, 2, -1);
-
- List<TermFreq> keys = Arrays.asList(new TermFreq[] {
- new TermFreq("a", 40),
- new TermFreq("a ", 50),
- new TermFreq(" a", 60),
- });
-
- Collections.shuffle(keys, random());
- suggester.build(new TermFreqArrayIterator(keys));
+ suggester.build(new TermFreqArrayIterator(shuffle(new TermFreq("a", 40),
+ new TermFreq("a ", 50), new TermFreq(" a", 60))));
List<LookupResult> results = suggester.lookup("a", false, 5);
assertEquals(2, results.size());
@@ -992,10 +972,9 @@ public class AnalyzingSuggesterTest exte
AnalyzingSuggester suggester = new AnalyzingSuggester(a, a, 0, 256, -1);
- suggester.build(new TermFreqArrayIterator(new TermFreq[] {
+ suggester.build(new TermFreqArrayIterator(shuffle(
new TermFreq("hambone", 6),
- new TermFreq("nellie", 5),
- }));
+ new TermFreq("nellie", 5))));
List<LookupResult> results = suggester.lookup("nellie", false, 2);
assertEquals(2, results.size());
@@ -1193,4 +1172,34 @@ public class AnalyzingSuggesterTest exte
suggester.build(new TermFreqArrayIterator(new TermFreq[] {new TermFreq("a", 1)}));
assertEquals("[a/1]", suggester.lookup("a", false, 1).toString());
}
+
+ public void testIllegalLookupArgument() throws Exception {
+ Analyzer a = new MockAnalyzer(random());
+ AnalyzingSuggester suggester = new AnalyzingSuggester(a, a, 0, 256, -1);
+ suggester.build(new TermFreqArrayIterator(new TermFreq[] {
+ new TermFreq("а где Люси?", 7),
+ }));
+ try {
+ suggester.lookup("а\u001E", false, 3);
+ fail("should throw IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ try {
+ suggester.lookup("а\u001F", false, 3);
+ fail("should throw IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ }
+
+ @SafeVarargs
+ public final <T> Iterable<T> shuffle(T...values) {
+ final List<T> asList = new ArrayList<T>(values.length);
+ for (T value : values) {
+ asList.add(value);
+ }
+ Collections.shuffle(asList, random());
+ return asList;
+ }
}
Modified: lucene/dev/branches/lucene3069/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java (original)
+++ lucene/dev/branches/lucene3069/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java Tue Aug 13 04:06:18 2013
@@ -60,7 +60,9 @@ public class FuzzySuggesterTest extends
keys.add(new TermFreq("boo" + _TestUtil.randomSimpleString(random()), 1 + random().nextInt(100)));
}
keys.add(new TermFreq("foo bar boo far", 12));
- FuzzySuggester suggester = new FuzzySuggester(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false));
+ MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false);
+ FuzzySuggester suggester = new FuzzySuggester(analyzer, analyzer, FuzzySuggester.EXACT_FIRST | FuzzySuggester.PRESERVE_SEP, 256, -1, FuzzySuggester.DEFAULT_MAX_EDITS, FuzzySuggester.DEFAULT_TRANSPOSITIONS,
+ 0, FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, FuzzySuggester.DEFAULT_UNICODE_AWARE);
suggester.build(new TermFreqArrayIterator(keys));
int numIters = atLeast(10);
for (int i = 0; i < numIters; i++) {
@@ -72,6 +74,27 @@ public class FuzzySuggesterTest extends
}
}
+ public void testNonLatinRandomEdits() throws IOException {
+ List<TermFreq> keys = new ArrayList<TermFreq>();
+ int numTerms = atLeast(100);
+ for (int i = 0; i < numTerms; i++) {
+ keys.add(new TermFreq("буу" + _TestUtil.randomSimpleString(random()), 1 + random().nextInt(100)));
+ }
+ keys.add(new TermFreq("фуу бар буу фар", 12));
+ MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.KEYWORD, false);
+ FuzzySuggester suggester = new FuzzySuggester(analyzer, analyzer, FuzzySuggester.EXACT_FIRST | FuzzySuggester.PRESERVE_SEP, 256, -1, FuzzySuggester.DEFAULT_MAX_EDITS, FuzzySuggester.DEFAULT_TRANSPOSITIONS,
+ 0, FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH, true);
+ suggester.build(new TermFreqArrayIterator(keys));
+ int numIters = atLeast(10);
+ for (int i = 0; i < numIters; i++) {
+ String addRandomEdit = addRandomEdit("фуу бар буу", 0);
+ List<LookupResult> results = suggester.lookup(_TestUtil.stringToCharSequence(addRandomEdit, random()), false, 2);
+ assertEquals(addRandomEdit, 1, results.size());
+ assertEquals("фуу бар буу фар", results.get(0).key.toString());
+ assertEquals(12, results.get(0).value, 0.01F);
+ }
+ }
+
/** this is basically the WFST test ported to KeywordAnalyzer. so it acts the same */
public void testKeyword() throws Exception {
TermFreq keys[] = new TermFreq[] {
@@ -185,7 +208,7 @@ public class FuzzySuggesterTest extends
int options = 0;
Analyzer a = new MockAnalyzer(random());
- FuzzySuggester suggester = new FuzzySuggester(a, a, options, 256, -1, 1, true, 1, 3);
+ FuzzySuggester suggester = new FuzzySuggester(a, a, options, 256, -1, 1, true, 1, 3, false);
suggester.build(new TermFreqArrayIterator(keys));
// TODO: would be nice if "ab " would allow the test to
// pass, and more generally if the analyzer can know
@@ -394,7 +417,7 @@ public class FuzzySuggesterTest extends
public void testExactFirst() throws Exception {
Analyzer a = getUnusualAnalyzer();
- FuzzySuggester suggester = new FuzzySuggester(a, a, AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, 256, -1, 1, true, 1, 3);
+ FuzzySuggester suggester = new FuzzySuggester(a, a, AnalyzingSuggester.EXACT_FIRST | AnalyzingSuggester.PRESERVE_SEP, 256, -1, 1, true, 1, 3, false);
suggester.build(new TermFreqArrayIterator(new TermFreq[] {
new TermFreq("x y", 1),
new TermFreq("x y z", 3),
@@ -433,7 +456,7 @@ public class FuzzySuggesterTest extends
public void testNonExactFirst() throws Exception {
Analyzer a = getUnusualAnalyzer();
- FuzzySuggester suggester = new FuzzySuggester(a, a, AnalyzingSuggester.PRESERVE_SEP, 256, -1, 1, true, 1, 3);
+ FuzzySuggester suggester = new FuzzySuggester(a, a, AnalyzingSuggester.PRESERVE_SEP, 256, -1, 1, true, 1, 3, false);
suggester.build(new TermFreqArrayIterator(new TermFreq[] {
new TermFreq("x y", 1),
@@ -580,12 +603,13 @@ public class FuzzySuggesterTest extends
TermFreq[] keys = new TermFreq[numQueries];
boolean preserveSep = random().nextBoolean();
+ boolean unicodeAware = random().nextBoolean();
final int numStopChars = random().nextInt(10);
final boolean preserveHoles = random().nextBoolean();
if (VERBOSE) {
- System.out.println("TEST: " + numQueries + " words; preserveSep=" + preserveSep + " numStopChars=" + numStopChars + " preserveHoles=" + preserveHoles);
+ System.out.println("TEST: " + numQueries + " words; preserveSep=" + preserveSep + " ; unicodeAware=" + unicodeAware + " numStopChars=" + numStopChars + " preserveHoles=" + preserveHoles);
}
for (int i = 0; i < numQueries; i++) {
@@ -606,7 +630,7 @@ public class FuzzySuggesterTest extends
if (token > 0) {
key += " ";
}
- if (preserveSep && analyzedKey.length() > 0 && analyzedKey.charAt(analyzedKey.length()-1) != ' ') {
+ if (preserveSep && analyzedKey.length() > 0 && (unicodeAware ? analyzedKey.codePointAt(analyzedKey.codePointCount(0, analyzedKey.length())-1) != ' ' : analyzedKey.charAt(analyzedKey.length()-1) != ' ')) {
analyzedKey += " ";
}
key += s;
@@ -659,7 +683,7 @@ public class FuzzySuggesterTest extends
Analyzer a = new MockTokenEatingAnalyzer(numStopChars, preserveHoles);
FuzzySuggester suggester = new FuzzySuggester(a, a,
- preserveSep ? AnalyzingSuggester.PRESERVE_SEP : 0, 256, -1, 1, false, 1, 3);
+ preserveSep ? AnalyzingSuggester.PRESERVE_SEP : 0, 256, -1, 1, false, 1, 3, unicodeAware);
suggester.build(new TermFreqArrayIterator(keys));
for (String prefix : allPrefixes) {
@@ -728,7 +752,7 @@ public class FuzzySuggesterTest extends
// us the "answer key" (ie maybe we have a bug in
// suggester.toLevA ...) ... but testRandom2() fixes
// this:
- Automaton automaton = suggester.toLevenshteinAutomata(suggester.toLookupAutomaton(analyzedKey));
+ Automaton automaton = suggester.convertAutomaton(suggester.toLevenshteinAutomata(suggester.toLookupAutomaton(analyzedKey)));
assertTrue(automaton.isDeterministic());
// TODO: could be faster... but its slowCompletor for a reason
BytesRef spare = new BytesRef();
@@ -799,7 +823,7 @@ public class FuzzySuggesterTest extends
public void testMaxSurfaceFormsPerAnalyzedForm() throws Exception {
Analyzer a = new MockAnalyzer(random());
- FuzzySuggester suggester = new FuzzySuggester(a, a, 0, 2, -1, 1, true, 1, 3);
+ FuzzySuggester suggester = new FuzzySuggester(a, a, 0, 2, -1, 1, true, 1, 3, false);
List<TermFreq> keys = Arrays.asList(new TermFreq[] {
new TermFreq("a", 40),
@@ -820,7 +844,7 @@ public class FuzzySuggesterTest extends
public void testEditSeps() throws Exception {
Analyzer a = new MockAnalyzer(random());
- FuzzySuggester suggester = new FuzzySuggester(a, a, FuzzySuggester.PRESERVE_SEP, 2, -1, 2, true, 1, 3);
+ FuzzySuggester suggester = new FuzzySuggester(a, a, FuzzySuggester.PRESERVE_SEP, 2, -1, 2, true, 1, 3, false);
List<TermFreq> keys = Arrays.asList(new TermFreq[] {
new TermFreq("foo bar", 40),
@@ -878,7 +902,8 @@ public class FuzzySuggesterTest extends
// NOTE: can only use ascii here so that, in
// UTF8 byte space it's still a single
// insertion:
- int x = random().nextInt(128);
+ // bytes 0x1e and 0x1f are reserved
+ int x = random().nextBoolean() ? random().nextInt(30) : 32 + random().nextInt(128 - 32);
builder.append((char) x);
for (int j = i; j < input.length; j++) {
builder.append(input[j]);
@@ -933,7 +958,7 @@ public class FuzzySuggesterTest extends
boolean transpositions = random().nextBoolean();
// TODO: test graph analyzers
// TODO: test exactFirst / preserveSep permutations
- FuzzySuggester suggest = new FuzzySuggester(a, a, 0, 256, -1, maxEdits, transpositions, prefixLen, prefixLen);
+ FuzzySuggester suggest = new FuzzySuggester(a, a, 0, 256, -1, maxEdits, transpositions, prefixLen, prefixLen, false);
if (VERBOSE) {
System.out.println("TEST: maxEdits=" + maxEdits + " prefixLen=" + prefixLen + " transpositions=" + transpositions + " num=" + NUM);
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardDocValuesFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardDocValuesFormat.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardDocValuesFormat.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardDocValuesFormat.java Tue Aug 13 04:06:18 2013
@@ -24,8 +24,10 @@ import org.apache.lucene.codecs.DocValue
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.diskdv.DiskDocValuesConsumer;
import org.apache.lucene.codecs.diskdv.DiskDocValuesFormat;
+import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.util.BytesRef;
/**
* DocValues format that keeps everything on disk.
@@ -53,7 +55,13 @@ public final class CheapBastardDocValues
return new DiskDocValuesConsumer(state, DiskDocValuesFormat.DATA_CODEC,
DiskDocValuesFormat.DATA_EXTENSION,
DiskDocValuesFormat.META_CODEC,
- DiskDocValuesFormat.META_EXTENSION);
+ DiskDocValuesFormat.META_EXTENSION) {
+ // don't ever write an index, we dont want to use RAM :)
+ @Override
+ protected void addTermsDict(FieldInfo field, Iterable<BytesRef> values) throws IOException {
+ addBinaryField(field, values);
+ }
+ };
}
@Override
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardDocValuesProducer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardDocValuesProducer.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardDocValuesProducer.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardDocValuesProducer.java Tue Aug 13 04:06:18 2013
@@ -27,6 +27,7 @@ import java.util.Map;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.DocValuesProducer;
+import org.apache.lucene.codecs.diskdv.DiskDocValuesConsumer;
import org.apache.lucene.codecs.diskdv.DiskDocValuesFormat;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.CorruptIndexException;
@@ -58,7 +59,7 @@ class CheapBastardDocValuesProducer exte
final int version;
try {
version = CodecUtil.checkHeader(in, metaCodec,
- DiskDocValuesFormat.VERSION_START,
+ DiskDocValuesFormat.VERSION_CURRENT,
DiskDocValuesFormat.VERSION_CURRENT);
numerics = new HashMap<Integer,NumericEntry>();
ords = new HashMap<Integer,NumericEntry>();
@@ -80,7 +81,7 @@ class CheapBastardDocValuesProducer exte
String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
data = state.directory.openInput(dataName, state.context);
final int version2 = CodecUtil.checkHeader(data, dataCodec,
- DiskDocValuesFormat.VERSION_START,
+ DiskDocValuesFormat.VERSION_CURRENT,
DiskDocValuesFormat.VERSION_CURRENT);
if (version != version2) {
throw new CorruptIndexException("Versions mismatch");
@@ -193,6 +194,10 @@ class CheapBastardDocValuesProducer exte
static BinaryEntry readBinaryEntry(IndexInput meta) throws IOException {
BinaryEntry entry = new BinaryEntry();
+ int format = meta.readVInt();
+ if (format != DiskDocValuesConsumer.BINARY_FIXED_UNCOMPRESSED && format != DiskDocValuesConsumer.BINARY_VARIABLE_UNCOMPRESSED) {
+ throw new CorruptIndexException("Unexpected format for binary entry: " + format + ", input=" + meta);
+ }
entry.minLength = meta.readVInt();
entry.maxLength = meta.readVInt();
entry.count = meta.readVLong();
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene41ords/Lucene41WithOrds.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene41ords/Lucene41WithOrds.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene41ords/Lucene41WithOrds.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene41ords/Lucene41WithOrds.java Tue Aug 13 04:06:18 2013
@@ -45,9 +45,15 @@ import org.apache.lucene.util.BytesRef;
* {@link FixedGapTermsIndexWriter}.
*/
public final class Lucene41WithOrds extends PostingsFormat {
-
+ final int termIndexInterval;
+
public Lucene41WithOrds() {
+ this(FixedGapTermsIndexWriter.DEFAULT_TERM_INDEX_INTERVAL);
+ }
+
+ public Lucene41WithOrds(int termIndexInterval) {
super("Lucene41WithOrds");
+ this.termIndexInterval = termIndexInterval;
}
@Override
@@ -61,7 +67,7 @@ public final class Lucene41WithOrds exte
TermsIndexWriterBase indexWriter;
boolean success = false;
try {
- indexWriter = new FixedGapTermsIndexWriter(state);
+ indexWriter = new FixedGapTermsIndexWriter(state, termIndexInterval);
success = true;
} finally {
if (!success) {
@@ -87,8 +93,6 @@ public final class Lucene41WithOrds exte
}
}
- public final static int TERMS_CACHE_SIZE = 1024;
-
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
PostingsReaderBase postings = new Lucene41PostingsReader(state.directory, state.fieldInfos, state.segmentInfo, state.context, state.segmentSuffix);
@@ -99,7 +103,6 @@ public final class Lucene41WithOrds exte
indexReader = new FixedGapTermsIndexReader(state.directory,
state.fieldInfos,
state.segmentInfo.name,
- state.termsIndexDivisor,
BytesRef.getUTF8SortedAsUnicodeComparator(),
state.segmentSuffix, state.context);
success = true;
@@ -117,7 +120,6 @@ public final class Lucene41WithOrds exte
state.segmentInfo,
postings,
state.context,
- TERMS_CACHE_SIZE,
state.segmentSuffix);
success = true;
return ret;
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockFixedIntBlockPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockFixedIntBlockPostingsFormat.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockFixedIntBlockPostingsFormat.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockFixedIntBlockPostingsFormat.java Tue Aug 13 04:06:18 2013
@@ -169,7 +169,6 @@ public final class MockFixedIntBlockPost
indexReader = new FixedGapTermsIndexReader(state.directory,
state.fieldInfos,
state.segmentInfo.name,
- state.termsIndexDivisor,
BytesRef.getUTF8SortedAsUnicodeComparator(), state.segmentSuffix,
IOContext.DEFAULT);
success = true;
@@ -187,7 +186,6 @@ public final class MockFixedIntBlockPost
state.segmentInfo,
postingsReader,
state.context,
- 1024,
state.segmentSuffix);
success = true;
return ret;
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockVariableIntBlockPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockVariableIntBlockPostingsFormat.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockVariableIntBlockPostingsFormat.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockVariableIntBlockPostingsFormat.java Tue Aug 13 04:06:18 2013
@@ -194,7 +194,6 @@ public final class MockVariableIntBlockP
indexReader = new FixedGapTermsIndexReader(state.directory,
state.fieldInfos,
state.segmentInfo.name,
- state.termsIndexDivisor,
BytesRef.getUTF8SortedAsUnicodeComparator(),
state.segmentSuffix, state.context);
success = true;
@@ -212,7 +211,6 @@ public final class MockVariableIntBlockP
state.segmentInfo,
postingsReader,
state.context,
- 1024,
state.segmentSuffix);
success = true;
return ret;
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java Tue Aug 13 04:06:18 2013
@@ -220,11 +220,11 @@ public final class MockRandomPostingsFor
final TermsIndexWriterBase indexWriter;
try {
if (random.nextBoolean()) {
- state.termIndexInterval = _TestUtil.nextInt(random, 1, 100);
+ int termIndexInterval = _TestUtil.nextInt(random, 1, 100);
if (LuceneTestCase.VERBOSE) {
- System.out.println("MockRandomCodec: fixed-gap terms index (tii=" + state.termIndexInterval + ")");
+ System.out.println("MockRandomCodec: fixed-gap terms index (tii=" + termIndexInterval + ")");
}
- indexWriter = new FixedGapTermsIndexWriter(state);
+ indexWriter = new FixedGapTermsIndexWriter(state, termIndexInterval);
} else {
final VariableGapTermsIndexWriter.IndexTermSelector selector;
final int n2 = random.nextInt(3);
@@ -340,8 +340,7 @@ public final class MockRandomPostingsFor
state.segmentInfo,
postingsReader,
state.context,
- state.segmentSuffix,
- state.termsIndexDivisor);
+ state.segmentSuffix);
success = true;
} finally {
if (!success) {
@@ -359,20 +358,14 @@ public final class MockRandomPostingsFor
final boolean doFixedGap = random.nextBoolean();
// randomness diverges from writer, here:
- if (state.termsIndexDivisor != -1) {
- state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10);
- }
if (doFixedGap) {
- // if termsIndexDivisor is set to -1, we should not touch it. It means a
- // test explicitly instructed not to load the terms index.
if (LuceneTestCase.VERBOSE) {
- System.out.println("MockRandomCodec: fixed-gap terms index (divisor=" + state.termsIndexDivisor + ")");
+ System.out.println("MockRandomCodec: fixed-gap terms index");
}
indexReader = new FixedGapTermsIndexReader(state.directory,
state.fieldInfos,
state.segmentInfo.name,
- state.termsIndexDivisor,
BytesRef.getUTF8SortedAsUnicodeComparator(),
state.segmentSuffix, state.context);
} else {
@@ -383,12 +376,11 @@ public final class MockRandomPostingsFor
random.nextLong();
}
if (LuceneTestCase.VERBOSE) {
- System.out.println("MockRandomCodec: variable-gap terms index (divisor=" + state.termsIndexDivisor + ")");
+ System.out.println("MockRandomCodec: variable-gap terms index");
}
indexReader = new VariableGapTermsIndexReader(state.directory,
state.fieldInfos,
state.segmentInfo.name,
- state.termsIndexDivisor,
state.segmentSuffix, state.context);
}
@@ -400,8 +392,6 @@ public final class MockRandomPostingsFor
}
}
- final int termsCacheSize = _TestUtil.nextInt(random, 1, 1024);
-
success = false;
try {
fields = new BlockTermsReader(indexReader,
@@ -410,7 +400,6 @@ public final class MockRandomPostingsFor
state.segmentInfo,
postingsReader,
state.context,
- termsCacheSize,
state.segmentSuffix);
success = true;
} finally {
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mocksep/MockSepPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mocksep/MockSepPostingsFormat.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mocksep/MockSepPostingsFormat.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/mocksep/MockSepPostingsFormat.java Tue Aug 13 04:06:18 2013
@@ -92,7 +92,6 @@ public final class MockSepPostingsFormat
indexReader = new FixedGapTermsIndexReader(state.directory,
state.fieldInfos,
state.segmentInfo.name,
- state.termsIndexDivisor,
BytesRef.getUTF8SortedAsUnicodeComparator(),
state.segmentSuffix, state.context);
success = true;
@@ -110,7 +109,6 @@ public final class MockSepPostingsFormat
state.segmentInfo,
postingsReader,
state.context,
- 1024,
state.segmentSuffix);
success = true;
return ret;
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/nestedpulsing/NestedPulsingPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/nestedpulsing/NestedPulsingPostingsFormat.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/nestedpulsing/NestedPulsingPostingsFormat.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/nestedpulsing/NestedPulsingPostingsFormat.java Tue Aug 13 04:06:18 2013
@@ -84,8 +84,7 @@ public final class NestedPulsingPostings
state.directory, state.fieldInfos, state.segmentInfo,
pulsingReader,
state.context,
- state.segmentSuffix,
- state.termsIndexDivisor);
+ state.segmentSuffix);
success = true;
return ret;
} finally {
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java Tue Aug 13 04:06:18 2013
@@ -336,7 +336,7 @@ public final class RAMOnlyPostingsFormat
}
@Override
- public SeekStatus seekCeil(BytesRef term, boolean useCache) {
+ public SeekStatus seekCeil(BytesRef term) {
current = term.utf8ToString();
it = null;
if (ramField.termToDocs.containsKey(current)) {
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java Tue Aug 13 04:06:18 2013
@@ -185,9 +185,9 @@ public class AssertingAtomicReader exten
}
@Override
- public SeekStatus seekCeil(BytesRef term, boolean useCache) throws IOException {
+ public SeekStatus seekCeil(BytesRef term) throws IOException {
assert term.isValid();
- SeekStatus result = super.seekCeil(term, useCache);
+ SeekStatus result = super.seekCeil(term);
if (result == SeekStatus.END) {
state = State.UNPOSITIONED;
} else {
@@ -197,9 +197,9 @@ public class AssertingAtomicReader exten
}
@Override
- public boolean seekExact(BytesRef text, boolean useCache) throws IOException {
+ public boolean seekExact(BytesRef text) throws IOException {
assert text.isValid();
- if (super.seekExact(text, useCache)) {
+ if (super.seekExact(text)) {
state = State.POSITIONED;
return true;
} else {
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java Tue Aug 13 04:06:18 2013
@@ -746,16 +746,16 @@ public abstract class BaseDocValuesForma
assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("zzz")));
// seekExact()
- assertTrue(termsEnum.seekExact(new BytesRef("beer"), true));
+ assertTrue(termsEnum.seekExact(new BytesRef("beer")));
assertEquals("beer", termsEnum.term().utf8ToString());
assertEquals(0, termsEnum.ord());
- assertTrue(termsEnum.seekExact(new BytesRef("hello"), true));
+ assertTrue(termsEnum.seekExact(new BytesRef("hello")));
assertEquals(Codec.getDefault().toString(), "hello", termsEnum.term().utf8ToString());
assertEquals(1, termsEnum.ord());
- assertTrue(termsEnum.seekExact(new BytesRef("world"), true));
+ assertTrue(termsEnum.seekExact(new BytesRef("world")));
assertEquals("world", termsEnum.term().utf8ToString());
assertEquals(2, termsEnum.ord());
- assertFalse(termsEnum.seekExact(new BytesRef("bogus"), true));
+ assertFalse(termsEnum.seekExact(new BytesRef("bogus")));
// seek(ord)
termsEnum.seekExact(0);
@@ -1022,7 +1022,7 @@ public abstract class BaseDocValuesForma
writer.close(true);
- DirectoryReader reader = DirectoryReader.open(dir, 1);
+ DirectoryReader reader = DirectoryReader.open(dir);
assertEquals(1, reader.leaves().size());
IndexSearcher searcher = new IndexSearcher(reader);
@@ -1350,6 +1350,57 @@ public abstract class BaseDocValuesForma
dir.close();
}
+ private void doTestSortedVsFieldCache(int minLength, int maxLength) throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
+ Document doc = new Document();
+ Field idField = new StringField("id", "", Field.Store.NO);
+ Field indexedField = new StringField("indexed", "", Field.Store.NO);
+ Field dvField = new SortedDocValuesField("dv", new BytesRef());
+ doc.add(idField);
+ doc.add(indexedField);
+ doc.add(dvField);
+
+ // index some docs
+ int numDocs = atLeast(300);
+ for (int i = 0; i < numDocs; i++) {
+ idField.setStringValue(Integer.toString(i));
+ final int length;
+ if (minLength == maxLength) {
+ length = minLength; // fixed length
+ } else {
+ length = _TestUtil.nextInt(random(), minLength, maxLength);
+ }
+ String value = _TestUtil.randomSimpleString(random(), length);
+ indexedField.setStringValue(value);
+ dvField.setBytesValue(new BytesRef(value));
+ writer.addDocument(doc);
+ if (random().nextInt(31) == 0) {
+ writer.commit();
+ }
+ }
+
+ // delete some docs
+ int numDeletions = random().nextInt(numDocs/10);
+ for (int i = 0; i < numDeletions; i++) {
+ int id = random().nextInt(numDocs);
+ writer.deleteDocuments(new Term("id", Integer.toString(id)));
+ }
+ writer.close();
+
+ // compare
+ DirectoryReader ir = DirectoryReader.open(dir);
+ for (AtomicReaderContext context : ir.leaves()) {
+ AtomicReader r = context.reader();
+ SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
+ SortedDocValues actual = r.getSortedDocValues("dv");
+ assertEquals(r.maxDoc(), expected, actual);
+ }
+ ir.close();
+ dir.close();
+ }
+
public void testSortedFixedLengthVsStoredFields() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
@@ -1358,6 +1409,21 @@ public abstract class BaseDocValuesForma
}
}
+ public void testSortedFixedLengthVsFieldCache() throws Exception {
+ int numIterations = atLeast(1);
+ for (int i = 0; i < numIterations; i++) {
+ int fixedLength = _TestUtil.nextInt(random(), 1, 10);
+ doTestSortedVsFieldCache(fixedLength, fixedLength);
+ }
+ }
+
+ public void testSortedVariableLengthVsFieldCache() throws Exception {
+ int numIterations = atLeast(1);
+ for (int i = 0; i < numIterations; i++) {
+ doTestSortedVsFieldCache(1, 10);
+ }
+ }
+
public void testSortedVariableLengthVsStoredFields() throws Exception {
int numIterations = atLeast(1);
for (int i = 0; i < numIterations; i++) {
@@ -1788,16 +1854,16 @@ public abstract class BaseDocValuesForma
assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("zzz")));
// seekExact()
- assertTrue(termsEnum.seekExact(new BytesRef("beer"), true));
+ assertTrue(termsEnum.seekExact(new BytesRef("beer")));
assertEquals("beer", termsEnum.term().utf8ToString());
assertEquals(0, termsEnum.ord());
- assertTrue(termsEnum.seekExact(new BytesRef("hello"), true));
+ assertTrue(termsEnum.seekExact(new BytesRef("hello")));
assertEquals("hello", termsEnum.term().utf8ToString());
assertEquals(1, termsEnum.ord());
- assertTrue(termsEnum.seekExact(new BytesRef("world"), true));
+ assertTrue(termsEnum.seekExact(new BytesRef("world")));
assertEquals("world", termsEnum.term().utf8ToString());
assertEquals(2, termsEnum.ord());
- assertFalse(termsEnum.seekExact(new BytesRef("bogus"), true));
+ assertFalse(termsEnum.seekExact(new BytesRef("bogus")));
// seek(ord)
termsEnum.seekExact(0);
@@ -1905,6 +1971,10 @@ public abstract class BaseDocValuesForma
}
}
+ private void assertEquals(int maxDoc, SortedDocValues expected, SortedDocValues actual) throws Exception {
+ assertEquals(maxDoc, new SingletonSortedSetDocValues(expected), new SingletonSortedSetDocValues(actual));
+ }
+
private void assertEquals(int maxDoc, SortedSetDocValues expected, SortedSetDocValues actual) throws Exception {
// can be null for the segment if no docs actually had any SortedDocValues
// in this case FC.getDocTermsOrds returns EMPTY
@@ -1932,6 +2002,74 @@ public abstract class BaseDocValuesForma
actual.lookupTerm(actualBytes);
assertEquals(expectedBytes, actualBytes);
}
+
+ // compare termsenum
+ assertEquals(expected.getValueCount(), expected.termsEnum(), actual.termsEnum());
+ }
+
+ private void assertEquals(long numOrds, TermsEnum expected, TermsEnum actual) throws Exception {
+ BytesRef ref;
+
+ // sequential next() through all terms
+ while ((ref = expected.next()) != null) {
+ assertEquals(ref, actual.next());
+ assertEquals(expected.ord(), actual.ord());
+ assertEquals(expected.term(), actual.term());
+ }
+ assertNull(actual.next());
+
+ // sequential seekExact(ord) through all terms
+ for (long i = 0; i < numOrds; i++) {
+ expected.seekExact(i);
+ actual.seekExact(i);
+ assertEquals(expected.ord(), actual.ord());
+ assertEquals(expected.term(), actual.term());
+ }
+
+ // sequential seekExact(BytesRef) through all terms
+ for (long i = 0; i < numOrds; i++) {
+ expected.seekExact(i);
+ assertTrue(actual.seekExact(expected.term()));
+ assertEquals(expected.ord(), actual.ord());
+ assertEquals(expected.term(), actual.term());
+ }
+
+ // sequential seekCeil(BytesRef) through all terms
+ for (long i = 0; i < numOrds; i++) {
+ expected.seekExact(i);
+ assertEquals(SeekStatus.FOUND, actual.seekCeil(expected.term()));
+ assertEquals(expected.ord(), actual.ord());
+ assertEquals(expected.term(), actual.term());
+ }
+
+ // random seekExact(ord)
+ for (long i = 0; i < numOrds; i++) {
+ long randomOrd = _TestUtil.nextLong(random(), 0, numOrds-1);
+ expected.seekExact(randomOrd);
+ actual.seekExact(randomOrd);
+ assertEquals(expected.ord(), actual.ord());
+ assertEquals(expected.term(), actual.term());
+ }
+
+ // random seekExact(BytesRef)
+ for (long i = 0; i < numOrds; i++) {
+ long randomOrd = _TestUtil.nextLong(random(), 0, numOrds-1);
+ expected.seekExact(randomOrd);
+ actual.seekExact(expected.term());
+ assertEquals(expected.ord(), actual.ord());
+ assertEquals(expected.term(), actual.term());
+ }
+
+ // random seekCeil(BytesRef)
+ for (long i = 0; i < numOrds; i++) {
+ BytesRef target = new BytesRef(_TestUtil.randomUnicodeString(random()));
+ SeekStatus expectedStatus = expected.seekCeil(target);
+ assertEquals(expectedStatus, actual.seekCeil(target));
+ if (expectedStatus != SeekStatus.END) {
+ assertEquals(expected.ord(), actual.ord());
+ assertEquals(expected.term(), actual.term());
+ }
+ }
}
private void doTestSortedSetVsUninvertedField(int minLength, int maxLength) throws Exception {
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java Tue Aug 13 04:06:18 2013
@@ -38,6 +38,8 @@ import org.apache.lucene.codecs.FieldsPr
import org.apache.lucene.codecs.PostingsConsumer;
import org.apache.lucene.codecs.TermStats;
import org.apache.lucene.codecs.TermsConsumer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
import org.apache.lucene.index.FieldInfo.DocValuesType;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.store.Directory;
@@ -353,9 +355,12 @@ public abstract class BasePostingsFormat
fields.put(field, postings);
Set<String> seenTerms = new HashSet<String>();
- // TODO:
- //final int numTerms = atLeast(10);
- final int numTerms = 4;
+ int numTerms;
+ if (random().nextInt(10) == 7) {
+ numTerms = atLeast(50);
+ } else {
+ numTerms = _TestUtil.nextInt(random(), 2, 20);
+ }
for(int termUpto=0;termUpto<numTerms;termUpto++) {
String term = _TestUtil.randomSimpleString(random());
@@ -483,7 +488,7 @@ public abstract class BasePostingsFormat
SegmentWriteState writeState = new SegmentWriteState(null, dir,
segmentInfo, newFieldInfos,
- 32, null, new IOContext(new FlushInfo(maxDoc, bytes)));
+ null, new IOContext(new FlushInfo(maxDoc, bytes)));
FieldsConsumer fieldsConsumer = codec.postingsFormat().fieldsConsumer(writeState);
for(Map.Entry<String,Map<BytesRef,Long>> fieldEnt : fields.entrySet()) {
@@ -567,7 +572,7 @@ public abstract class BasePostingsFormat
currentFieldInfos = newFieldInfos;
- SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, IOContext.DEFAULT, 1);
+ SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, IOContext.DEFAULT);
return codec.postingsFormat().fieldsProducer(readState);
}
@@ -595,6 +600,10 @@ public abstract class BasePostingsFormat
System.out.println(" verifyEnum: options=" + options + " maxTestOptions=" + maxTestOptions);
}
+ // Make sure TermsEnum really is positioned on the
+ // expected term:
+ assertEquals(term, termsEnum.term());
+
// 50% of the time time pass liveDocs:
boolean useLiveDocs = options.contains(Option.LIVE_DOCS) && random().nextBoolean();
Bits liveDocs;
@@ -983,7 +992,7 @@ public abstract class BasePostingsFormat
termsEnum = terms.iterator(null);
if (!useTermState) {
- assertTrue(termsEnum.seekExact(fieldAndTerm.term, true));
+ assertTrue(termsEnum.seekExact(fieldAndTerm.term));
} else {
termsEnum.seekExact(fieldAndTerm.term, termState);
}
@@ -1130,4 +1139,89 @@ public abstract class BasePostingsFormat
_TestUtil.rmDir(path);
}
}
+
+ public void testEmptyField() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+ iwc.setCodec(getCodec());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ doc.add(newStringField("", "something", Field.Store.NO));
+ iw.addDocument(doc);
+ DirectoryReader ir = iw.getReader();
+ AtomicReader ar = getOnlySegmentReader(ir);
+ Fields fields = ar.fields();
+ int fieldCount = fields.size();
+ // -1 is allowed, if the codec doesn't implement fields.size():
+ assertTrue(fieldCount == 1 || fieldCount == -1);
+ Terms terms = ar.terms("");
+ assertNotNull(terms);
+ TermsEnum termsEnum = terms.iterator(null);
+ assertNotNull(termsEnum.next());
+ assertEquals(termsEnum.term(), new BytesRef("something"));
+ assertNull(termsEnum.next());
+ ir.close();
+ iw.close();
+ dir.close();
+ }
+
+ public void testEmptyFieldAndEmptyTerm() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+ iwc.setCodec(getCodec());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ doc.add(newStringField("", "", Field.Store.NO));
+ iw.addDocument(doc);
+ DirectoryReader ir = iw.getReader();
+ AtomicReader ar = getOnlySegmentReader(ir);
+ Fields fields = ar.fields();
+ int fieldCount = fields.size();
+ // -1 is allowed, if the codec doesn't implement fields.size():
+ assertTrue(fieldCount == 1 || fieldCount == -1);
+ Terms terms = ar.terms("");
+ assertNotNull(terms);
+ TermsEnum termsEnum = terms.iterator(null);
+ assertNotNull(termsEnum.next());
+ assertEquals(termsEnum.term(), new BytesRef(""));
+ assertNull(termsEnum.next());
+ ir.close();
+ iw.close();
+ dir.close();
+ }
+
+ // tests that ghost fields still work
+ // TODO: can this be improved?
+ public void testGhosts() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+ iwc.setCodec(getCodec());
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ iw.addDocument(doc);
+ doc.add(newStringField("ghostField", "something", Field.Store.NO));
+ iw.addDocument(doc);
+ iw.forceMerge(1);
+ iw.deleteDocuments(new Term("ghostField", "something")); // delete the only term for the field
+ iw.forceMerge(1);
+ DirectoryReader ir = iw.getReader();
+ AtomicReader ar = getOnlySegmentReader(ir);
+ Fields fields = ar.fields();
+ // Ghost busting terms dict impls will have
+ // fields.size() == 0; all others must be == 1:
+ assertTrue(fields.size() <= 1);
+ Terms terms = fields.terms("ghostField");
+ if (terms != null) {
+ TermsEnum termsEnum = terms.iterator(null);
+ BytesRef term = termsEnum.next();
+ if (term != null) {
+ DocsEnum docsEnum = termsEnum.docs(null, null);
+ assertTrue(docsEnum.nextDoc() == DocsEnum.NO_MORE_DOCS);
+ }
+ }
+ ir.close();
+ iw.close();
+ dir.close();
+ }
}
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java Tue Aug 13 04:06:18 2013
@@ -509,7 +509,7 @@ public abstract class BaseStoredFieldsFo
Directory dir = newDirectory();
IndexWriterConfig iwConf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
iwConf.setMaxBufferedDocs(RandomInts.randomIntBetween(random(), 2, 30));
- RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf.clone());
final int docCount = atLeast(200);
final byte[][][] data = new byte [docCount][][];
@@ -548,7 +548,7 @@ public abstract class BaseStoredFieldsFo
} else {
iwConf.setCodec(otherCodec);
}
- iw = new RandomIndexWriter(random(), dir, iwConf);
+ iw = new RandomIndexWriter(random(), dir, iwConf.clone());
}
}
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java Tue Aug 13 04:06:18 2013
@@ -500,9 +500,9 @@ public abstract class BaseTermVectorsFor
assertNull(termsEnum.next());
for (int i = 0; i < 5; ++i) {
if (random().nextBoolean()) {
- assertTrue(termsEnum.seekExact(RandomPicks.randomFrom(random(), tk.termBytes), random().nextBoolean()));
+ assertTrue(termsEnum.seekExact(RandomPicks.randomFrom(random(), tk.termBytes)));
} else {
- assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(RandomPicks.randomFrom(random(), tk.termBytes), random().nextBoolean()));
+ assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(RandomPicks.randomFrom(random(), tk.termBytes)));
}
}
}
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java Tue Aug 13 04:06:18 2013
@@ -33,6 +33,8 @@ import org.apache.lucene.codecs.assertin
import org.apache.lucene.codecs.asserting.AssertingPostingsFormat;
import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat;
import org.apache.lucene.codecs.lucene41ords.Lucene41WithOrds;
+import org.apache.lucene.codecs.lucene41vargap.Lucene41VarGapDocFreqInterval;
+import org.apache.lucene.codecs.lucene41vargap.Lucene41VarGapFixedInterval;
import org.apache.lucene.codecs.lucene42.Lucene42Codec;
import org.apache.lucene.codecs.lucene42.Lucene42DocValuesFormat;
import org.apache.lucene.codecs.bloom.TestBloomFilteredLucene41Postings;
@@ -137,7 +139,9 @@ public class RandomCodec extends Lucene4
new MockVariableIntBlockPostingsFormat( _TestUtil.nextInt(random, 1, 127)),
new MockRandomPostingsFormat(random),
new NestedPulsingPostingsFormat(),
- new Lucene41WithOrds(),
+ new Lucene41WithOrds(_TestUtil.nextInt(random, 1, 1000)),
+ new Lucene41VarGapFixedInterval(_TestUtil.nextInt(random, 1, 1000)),
+ new Lucene41VarGapDocFreqInterval(_TestUtil.nextInt(random, 1, 100), _TestUtil.nextInt(random, 1, 1000)),
new SimpleTextPostingsFormat(),
new AssertingPostingsFormat(),
new MemoryPostingsFormat(true, random.nextFloat()),
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java Tue Aug 13 04:06:18 2013
@@ -308,7 +308,7 @@ public class RandomIndexWriter implement
}
w.commit();
if (r.nextBoolean()) {
- return DirectoryReader.open(w.getDirectory(), _TestUtil.nextInt(r, 1, 10));
+ return DirectoryReader.open(w.getDirectory());
} else {
return w.getReader(applyDeletions);
}
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java Tue Aug 13 04:06:18 2013
@@ -185,7 +185,7 @@ public abstract class ShardSearchingTest
}
try {
for(Term term : terms) {
- final TermContext termContext = TermContext.build(s.getIndexReader().getContext(), term, false);
+ final TermContext termContext = TermContext.build(s.getIndexReader().getContext(), term);
stats.put(term, s.termStatistics(term, termContext));
}
} finally {
@@ -370,20 +370,35 @@ public abstract class ShardSearchingTest
@Override
public TopDocs searchAfter(ScoreDoc after, Query query, int numHits) throws IOException {
final TopDocs[] shardHits = new TopDocs[nodeVersions.length];
+ // results are merged in that order: score, shardIndex, doc. therefore we set
+ // after to after.score and depending on the nodeID we set doc to either:
+ // - not collect any more documents with that score (only with worse score)
+ // - collect more documents with that score (and worse) following the last collected document
+ // - collect all documents with that score (and worse)
ScoreDoc shardAfter = new ScoreDoc(after.doc, after.score);
- for(int nodeID=0;nodeID<nodeVersions.length;nodeID++) {
+ for (int nodeID = 0; nodeID < nodeVersions.length; nodeID++) {
if (nodeID < after.shardIndex) {
- // If score is tied then no docs in this shard
- // should be collected:
- shardAfter.doc = Integer.MAX_VALUE;
+ // all documents with after.score were already collected, so collect
+ // only documents with worse scores.
+ final NodeState.ShardIndexSearcher s = nodes[nodeID].acquire(nodeVersions);
+ try {
+ // Setting after.doc to reader.maxDoc-1 is a way to tell
+ // TopScoreDocCollector that no more docs with that score should
+ // be collected. note that in practice the shard which sends the
+ // request to a remote shard won't have reader.maxDoc at hand, so
+ // it will send some arbitrary value which will be fixed on the
+ // other end.
+ shardAfter.doc = s.getIndexReader().maxDoc() - 1;
+ } finally {
+ nodes[nodeID].release(s);
+ }
} else if (nodeID == after.shardIndex) {
- // If score is tied then we break according to
- // docID (like normal):
+ // collect all documents following the last collected doc with
+ // after.score + documents with worse scores.
shardAfter.doc = after.doc;
} else {
- // If score is tied then all docs in this shard
- // should be collected, because they come after
- // the previous bottom:
+ // all documents with after.score (and worse) should be collected
+ // because they didn't make it to top-N in the previous round.
shardAfter.doc = -1;
}
if (nodeID == myNodeID) {
Modified: lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java?rev=1513336&r1=1513335&r2=1513336&view=diff
==============================================================================
--- lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/branches/lucene3069/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java Tue Aug 13 04:06:18 2013
@@ -20,6 +20,7 @@ package org.apache.lucene.util;
import java.io.*;
import java.lang.annotation.*;
import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.*;
import java.util.concurrent.*;
@@ -754,15 +755,6 @@ public abstract class LuceneTestCase ext
}
}
if (r.nextBoolean()) {
- if (rarely(r)) {
- // crazy value
- c.setTermIndexInterval(r.nextBoolean() ? _TestUtil.nextInt(r, 1, 31) : _TestUtil.nextInt(r, 129, 1000));
- } else {
- // reasonable value
- c.setTermIndexInterval(_TestUtil.nextInt(r, 32, 128));
- }
- }
- if (r.nextBoolean()) {
int maxNumThreadStates = rarely(r) ? _TestUtil.nextInt(r, 5, 20) // crazy value
: _TestUtil.nextInt(r, 1, 4); // reasonable value
@@ -802,24 +794,31 @@ public abstract class LuceneTestCase ext
}
}
- if (rarely(r)) {
- c.setMergePolicy(new MockRandomMergePolicy(r));
- } else if (r.nextBoolean()) {
- c.setMergePolicy(newTieredMergePolicy());
- } else if (r.nextInt(5) == 0) {
- c.setMergePolicy(newAlcoholicMergePolicy());
- } else {
- c.setMergePolicy(newLogMergePolicy());
- }
+ c.setMergePolicy(newMergePolicy(r));
+
if (rarely(r)) {
c.setMergedSegmentWarmer(new SimpleMergedSegmentWarmer(c.getInfoStream()));
}
c.setUseCompoundFile(r.nextBoolean());
c.setReaderPooling(r.nextBoolean());
- c.setReaderTermsIndexDivisor(_TestUtil.nextInt(r, 1, 4));
return c;
}
+ public static MergePolicy newMergePolicy(Random r) {
+ if (rarely(r)) {
+ return new MockRandomMergePolicy(r);
+ } else if (r.nextBoolean()) {
+ return newTieredMergePolicy(r);
+ } else if (r.nextInt(5) == 0) {
+ return newAlcoholicMergePolicy(r, classEnvRule.timeZone);
+ }
+ return newLogMergePolicy(r);
+ }
+
+ public static MergePolicy newMergePolicy() {
+ return newMergePolicy(random());
+ }
+
public static LogMergePolicy newLogMergePolicy() {
return newLogMergePolicy(random());
}
@@ -1137,8 +1136,8 @@ public abstract class LuceneTestCase ext
FSDirectory d = null;
try {
d = CommandLineUtil.newFSDirectory(clazz, file);
- } catch (Exception e) {
- d = FSDirectory.open(file);
+ } catch (NoSuchMethodException | InstantiationException | IllegalAccessException | InvocationTargetException e) {
+ Rethrow.rethrow(e);
}
return d;
}
@@ -1751,14 +1750,13 @@ public abstract class LuceneTestCase ext
rightEnum = rightTerms.iterator(rightEnum);
}
- final boolean useCache = random().nextBoolean();
final boolean seekExact = random().nextBoolean();
if (seekExact) {
- assertEquals(info, leftEnum.seekExact(b, useCache), rightEnum.seekExact(b, useCache));
+ assertEquals(info, leftEnum.seekExact(b), rightEnum.seekExact(b));
} else {
- SeekStatus leftStatus = leftEnum.seekCeil(b, useCache);
- SeekStatus rightStatus = rightEnum.seekCeil(b, useCache);
+ SeekStatus leftStatus = leftEnum.seekCeil(b);
+ SeekStatus rightStatus = rightEnum.seekCeil(b);
assertEquals(info, leftStatus, rightStatus);
if (leftStatus != SeekStatus.END) {
assertEquals(info, leftEnum.term(), rightEnum.term());