Posted to java-commits@lucene.apache.org by us...@apache.org on 2010/02/27 20:14:09 UTC
svn commit: r917019 [3/8] - in /lucene/java/trunk: ./
contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/
contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/
contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/ con...
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java Sat Feb 27 19:14:01 2010
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.util.Version;
/**
* Verifies the behavior of PatternAnalyzer.
@@ -37,13 +36,13 @@
*/
public void testNonWordPattern() throws IOException {
// Split on non-letter pattern, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
false, null);
check(a, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"The", "quick", "brown", "Fox", "the", "abcd", "dc" });
// split on non-letter pattern, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"quick", "brown", "fox", "abcd", "dc" });
@@ -55,13 +54,13 @@
*/
public void testWhitespacePattern() throws IOException {
// Split on whitespace patterns, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
false, null);
check(a, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"The", "quick", "brown", "Fox,the", "abcd1234", "(56.78)", "dc." });
// Split on whitespace patterns, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." });
@@ -73,12 +72,12 @@
*/
public void testCustomPattern() throws IOException {
// Split on comma, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), false, null);
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, Pattern.compile(","), false, null);
check(a, "Here,Are,some,Comma,separated,words,", new String[] { "Here",
"Are", "some", "Comma", "separated", "words" });
// split on comma, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), true,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, Pattern.compile(","), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "Here,Are,some,Comma,separated,words,", new String[] { "here",
"some", "comma", "separated", "words" });
@@ -103,7 +102,7 @@
document.append(largeWord2);
// Split on whitespace patterns, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
false, null);
check(a, document.toString(), new String[] { new String(largeWord),
new String(largeWord2) });
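
Note (not part of r917019): the recurring change in this commit replaces the hard-coded Version.LUCENE_CURRENT with the TEST_VERSION_CURRENT constant inherited from the shared test base classes, so the whole suite can be pinned to one version in a single place. A minimal sketch of a converted test, assuming TEST_VERSION_CURRENT is the constant exposed by the LuceneTestCase/BaseTokenStreamTestCase hierarchy as in the diffs above; the class name MyPatternAnalyzerTest is made up for illustration:

    import java.io.IOException;
    import java.util.regex.Pattern;

    import org.apache.lucene.analysis.BaseTokenStreamTestCase;
    import org.apache.lucene.analysis.miscellaneous.PatternAnalyzer;

    // Sketch only: illustrates the Version.LUCENE_CURRENT -> TEST_VERSION_CURRENT
    // migration applied throughout this commit.
    public class MyPatternAnalyzerTest extends BaseTokenStreamTestCase {
      public void testCommaPattern() throws IOException {
        // TEST_VERSION_CURRENT is inherited from the test base class, so the test
        // no longer needs to import org.apache.lucene.util.Version directly.
        PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT,
            Pattern.compile(","), false, null);
        assertAnalyzesTo(a, "one,two,three", new String[] { "one", "two", "three" });
      }
    }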
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java Sat Feb 27 19:14:01 2010
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -31,7 +30,7 @@
PrefixAndSuffixAwareTokenFilter ts = new PrefixAndSuffixAwareTokenFilter(
new SingleTokenTokenStream(createToken("^", 0, 0)),
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hello world")),
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hello world")),
new SingleTokenTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java Sat Feb 27 19:14:01 2010
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -42,7 +41,7 @@
// prefix and suffix using 2x prefix
ts = new PrefixAwareTokenFilter(new SingleTokenTokenStream(createToken("^", 0, 0)),
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hello world")));
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hello world")));
ts = new PrefixAwareTokenFilter(ts, new SingleTokenTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java Sat Feb 27 19:14:01 2010
@@ -10,7 +10,6 @@
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -38,7 +37,7 @@
dictionary.put("booked", "books");
Tokenizer tokenizer = new KeywordTokenizer(new StringReader("booked"));
TokenStream stream = new PorterStemFilter(
- new StemmerOverrideFilter(Version.LUCENE_CURRENT, tokenizer, dictionary));
+ new StemmerOverrideFilter(TEST_VERSION_CURRENT, tokenizer, dictionary));
assertTokenStreamContents(stream, new String[] { "books" });
}
}
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java Sat Feb 27 19:14:01 2010
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -31,9 +30,9 @@
private TokenStream input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
}
public void testInvalidInput() throws Exception {
@@ -92,13 +91,13 @@
}
public void testSmallTokenInStream() throws Exception {
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abc de fgh"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abc de fgh"));
EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
assertTokenStreamContents(tokenizer, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
}
public void testReset() throws Exception {
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
tokenizer.reset(new StringReader("abcde"));
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java Sat Feb 27 19:14:01 2010
@@ -29,7 +29,7 @@
private StringReader input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
input = new StringReader("abcde");
}
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java Sat Feb 27 19:14:01 2010
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -31,9 +30,9 @@
private TokenStream input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
}
public void testInvalidInput() throws Exception {
@@ -81,13 +80,13 @@
}
public void testSmallTokenInStream() throws Exception {
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abc de fgh"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abc de fgh"));
NGramTokenFilter filter = new NGramTokenFilter(input, 3, 3);
assertTokenStreamContents(filter, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
}
public void testReset() throws Exception {
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
NGramTokenFilter filter = new NGramTokenFilter(tokenizer, 1, 1);
assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
tokenizer.reset(new StringReader("abcde"));
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java Sat Feb 27 19:14:01 2010
@@ -29,7 +29,7 @@
private StringReader input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
input = new StringReader("abcde");
}
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java Sat Feb 27 19:14:01 2010
@@ -127,14 +127,14 @@
}
public void testSnowballCorrectness() throws Exception {
- Analyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "opheffen", "opheff");
checkOneTermReuse(a, "opheffende", "opheff");
checkOneTermReuse(a, "opheffing", "opheff");
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichaamsziek", "lichaamsziek");
checkOneTermReuse(a, "lichamelijk", "licham");
checkOneTermReuse(a, "lichamelijke", "licham");
@@ -146,7 +146,7 @@
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichamelijk", "licham");
a.setStemExclusionTable(new String[] { "lichamelijk" });
checkOneTermReuse(a, "lichamelijk", "lichamelijk");
@@ -157,10 +157,10 @@
public void testExclusionTableViaCtor() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_30, 1, true);
set.add("lichamelijk");
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesToReuse(a, "lichamelijk lichamelijke", new String[] { "lichamelijk", "licham" });
- a = new DutchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "lichamelijk lichamelijke", new String[] { "lichamelijk", "licham" });
}
@@ -170,7 +170,7 @@
* when using reusable token streams.
*/
public void testStemDictionaryReuse() throws Exception {
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichamelijk", "licham");
a.setStemDictionary(customDictFile);
checkOneTermReuse(a, "lichamelijk", "somethingentirelydifferent");
@@ -196,7 +196,7 @@
}
private void check(final String input, final String expected) throws Exception {
- checkOneTerm(new DutchAnalyzer(Version.LUCENE_CURRENT), input, expected);
+ checkOneTerm(new DutchAnalyzer(TEST_VERSION_CURRENT), input, expected);
}
}
\ No newline at end of file
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java Sat Feb 27 19:14:01 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestNorwegianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new NorwegianAnalyzer(Version.LUCENE_CURRENT);
+ new NorwegianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new NorwegianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new NorwegianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "havnedistriktene", "havnedistrikt");
checkOneTermReuse(a, "havnedistrikter", "havnedistrikt");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("havnedistriktene");
- Analyzer a = new NorwegianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new NorwegianAnalyzer(TEST_VERSION_CURRENT,
NorwegianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "havnedistriktene", "havnedistriktene");
checkOneTermReuse(a, "havnedistrikter", "havnedistrikt");
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java Sat Feb 27 19:14:01 2010
@@ -22,21 +22,15 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.Payload;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
-
-/**
- *
- *
- **/
public class DelimitedPayloadTokenFilterTest extends LuceneTestCase {
public void testPayloads() throws Exception {
String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";
DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter
- (new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)),
+ (new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)),
DelimitedPayloadTokenFilter.DEFAULT_DELIMITER, new IdentityEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
@@ -57,7 +51,7 @@
String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";
DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter
- (new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)),
+ (new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)),
DelimitedPayloadTokenFilter.DEFAULT_DELIMITER, new IdentityEncoder());
assertTermEquals("The", filter, null);
assertTermEquals("quick", filter, "JJ".getBytes("UTF-8"));
@@ -75,7 +69,7 @@
public void testFloatEncoding() throws Exception {
String test = "The quick|1.0 red|2.0 fox|3.5 jumped|0.5 over the lazy|5 brown|99.3 dogs|83.7";
- DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)), '|', new FloatEncoder());
+ DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)), '|', new FloatEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
assertTermEquals("The", filter, termAtt, payAtt, null);
@@ -93,7 +87,7 @@
public void testIntEncoding() throws Exception {
String test = "The quick|1 red|2 fox|3 jumped over the lazy|5 brown|99 dogs|83";
- DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)), '|', new IntegerEncoder());
+ DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)), '|', new IntegerEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
assertTermEquals("The", filter, termAtt, payAtt, null);
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java Sat Feb 27 19:14:01 2010
@@ -23,7 +23,6 @@
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -38,7 +37,7 @@
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- NumericPayloadTokenFilter nptf = new NumericPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))), 3, "D");
+ NumericPayloadTokenFilter nptf = new NumericPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))), 3, "D");
boolean seenDogs = false;
TermAttribute termAtt = nptf.getAttribute(TermAttribute.class);
TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java Sat Feb 27 19:14:01 2010
@@ -21,7 +21,6 @@
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -36,7 +35,7 @@
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- TokenOffsetPayloadTokenFilter nptf = new TokenOffsetPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TokenOffsetPayloadTokenFilter nptf = new TokenOffsetPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
int count = 0;
PayloadAttribute payloadAtt = nptf.getAttribute(PayloadAttribute.class);
OffsetAttribute offsetAtt = nptf.getAttribute(OffsetAttribute.class);
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java Sat Feb 27 19:14:01 2010
@@ -23,7 +23,6 @@
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -38,7 +37,7 @@
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- TypeAsPayloadTokenFilter nptf = new TypeAsPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))));
+ TypeAsPayloadTokenFilter nptf = new TypeAsPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))));
int count = 0;
TermAttribute termAtt = nptf.getAttribute(TermAttribute.class);
TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
@@ -48,7 +47,6 @@
assertTrue(typeAtt.type() + " is not null and it should be", typeAtt.type().equals(String.valueOf(Character.toUpperCase(termAtt.termBuffer()[0]))));
assertTrue("nextToken.getPayload() is null and it shouldn't be", payloadAtt.getPayload() != null);
String type = new String(payloadAtt.getPayload().getData(), "UTF-8");
- assertTrue("type is null and it shouldn't be", type != null);
assertTrue(type + " is not equal to " + typeAtt.type(), type.equals(typeAtt.type()) == true);
count++;
}
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java Sat Feb 27 19:14:01 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestPortugueseAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new PortugueseAnalyzer(Version.LUCENE_CURRENT);
+ new PortugueseAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new PortugueseAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PortugueseAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "quilométricas", "quilométr");
checkOneTermReuse(a, "quilométricos", "quilométr");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("quilométricas");
- Analyzer a = new PortugueseAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new PortugueseAnalyzer(TEST_VERSION_CURRENT,
PortugueseAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "quilométricas", "quilométricas");
checkOneTermReuse(a, "quilométricos", "quilométr");
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java Sat Feb 27 19:14:01 2010
@@ -37,7 +37,6 @@
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
String variedFieldValues[] = {"the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"};
@@ -51,7 +50,7 @@
protected void setUp() throws Exception {
super.setUp();
dir = new RAMDirectory();
- appAnalyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ appAnalyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, appAnalyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
int numDocs = 200;
for (int i = 0; i < numDocs; i++) {
@@ -64,7 +63,7 @@
}
writer.close();
reader = IndexReader.open(dir, true);
- protectedAnalyzer = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, appAnalyzer);
+ protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer);
}
@Override
@@ -75,7 +74,7 @@
//Helper method to query
private int search(Analyzer a, String queryString) throws IOException, ParseException {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "repetitiveField", a);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "repetitiveField", a);
Query q = qp.parse(queryString);
return new IndexSearcher(reader).search(q, null, 1000).totalHits;
}
@@ -157,14 +156,14 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
if (++invocationCount % 2 == 0)
- return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
else
- return new LetterTokenizer(Version.LUCENE_CURRENT, reader);
+ return new LetterTokenizer(TEST_VERSION_CURRENT, reader);
}
}
public void testWrappingNonReusableAnalyzer() throws Exception {
- QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, new NonreusableAnalyzer());
+ QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, new NonreusableAnalyzer());
a.addStopWords(reader, 10);
int numHits = search(a, "repetitiveField:boring");
assertTrue(numHits == 0);
@@ -173,7 +172,7 @@
}
public void testTokenStream() throws Exception {
- QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
a.addStopWords(reader, 10);
TokenStream ts = a.tokenStream("repetitiveField", new StringReader("this boring"));
TermAttribute termAtt = ts.getAttribute(TermAttribute.class);
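
As context for the setUp changes above (a sketch, not part of the commit): QueryAutoStopWordAnalyzer wraps another analyzer and, once addStopWords has been called with an IndexReader, drops terms that occur in more than the given number of documents. The following uses only calls visible in this test; the helper method name and printed output are illustrative:

    import java.io.StringReader;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.query.QueryAutoStopWordAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.TermAttribute;
    import org.apache.lucene.index.IndexReader;

    // Sketch only; 'reader' is assumed to be an IndexReader already opened over
    // the index whose frequent terms should become stop words.
    public class AutoStopWordSketch {
      void dumpFilteredQueryTerms(IndexReader reader, org.apache.lucene.util.Version version) throws Exception {
        QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(
            version, new WhitespaceAnalyzer(version));
        a.addStopWords(reader, 10);  // terms in more than 10 documents become stop words
        TokenStream ts = a.tokenStream("repetitiveField", new StringReader("this boring"));
        TermAttribute termAtt = ts.getAttribute(TermAttribute.class);
        while (ts.incrementToken()) {
          System.out.println(termAtt.term());  // "boring" is omitted when it is a frequent term
        }
      }
    }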
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java Sat Feb 27 19:14:01 2010
@@ -27,9 +27,9 @@
public class TestReverseStringFilter extends BaseTokenStreamTestCase {
public void testFilter() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("Do have a nice day")); // 1-4 length string
- ReverseStringFilter filter = new ReverseStringFilter(Version.LUCENE_CURRENT, stream);
+ ReverseStringFilter filter = new ReverseStringFilter(TEST_VERSION_CURRENT, stream);
TermAttribute text = filter.getAttribute(TermAttribute.class);
assertTrue(filter.incrementToken());
assertEquals("oD", text.term());
@@ -45,9 +45,9 @@
}
public void testFilterWithMark() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Do have a nice day")); // 1-4 length string
- ReverseStringFilter filter = new ReverseStringFilter(Version.LUCENE_CURRENT, stream, '\u0001');
+ ReverseStringFilter filter = new ReverseStringFilter(TEST_VERSION_CURRENT, stream, '\u0001');
TermAttribute text = filter
.getAttribute(TermAttribute.class);
assertTrue(filter.incrementToken());
@@ -64,14 +64,14 @@
}
public void testReverseString() throws Exception {
- assertEquals( "A", ReverseStringFilter.reverse( "A" ) );
- assertEquals( "BA", ReverseStringFilter.reverse( "AB" ) );
- assertEquals( "CBA", ReverseStringFilter.reverse( "ABC" ) );
+ assertEquals( "A", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "A" ) );
+ assertEquals( "BA", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "AB" ) );
+ assertEquals( "CBA", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "ABC" ) );
}
public void testReverseChar() throws Exception {
char[] buffer = { 'A', 'B', 'C', 'D', 'E', 'F' };
- ReverseStringFilter.reverse( buffer, 2, 3 );
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 2, 3 );
assertEquals( "ABEDCF", new String( buffer ) );
}
@@ -84,37 +84,37 @@
public void testReverseSupplementary() throws Exception {
// supplementary at end
- assertEquals("ð©¬
è±éä¹æ¯ç", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "çæ¯ä¹éè±ð©¬
"));
+ assertEquals("ð©¬
è±éä¹æ¯ç", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "çæ¯ä¹éè±ð©¬
"));
// supplementary at end - 1
- assertEquals("að©¬
è±éä¹æ¯ç", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "çæ¯ä¹éè±ð©¬
a"));
+ assertEquals("að©¬
è±éä¹æ¯ç", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "çæ¯ä¹éè±ð©¬
a"));
// supplementary at start
- assertEquals("fedcbað©¬
", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "ð©¬
abcdef"));
+ assertEquals("fedcbað©¬
", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "ð©¬
abcdef"));
// supplementary at start + 1
- assertEquals("fedcbað©¬
z", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "zð©¬
abcdef"));
+ assertEquals("fedcbað©¬
z", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "zð©¬
abcdef"));
// supplementary medial
- assertEquals("gfeð©¬
dcba", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "abcdð©¬
efg"));
+ assertEquals("gfeð©¬
dcba", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "abcdð©¬
efg"));
}
public void testReverseSupplementaryChar() throws Exception {
// supplementary at end
char[] buffer = "abcçæ¯ä¹éè±ð©¬
".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abcð©¬
è±éä¹æ¯ç", new String(buffer));
// supplementary at end - 1
buffer = "abcçæ¯ä¹éè±ð©¬
d".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 8);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 8);
assertEquals("abcdð©¬
è±éä¹æ¯ç", new String(buffer));
// supplementary at start
buffer = "abcð©¬
çæ¯ä¹éè±".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abcè±éä¹æ¯çð©¬
", new String(buffer));
// supplementary at start + 1
buffer = "abcdð©¬
çæ¯ä¹éè±".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 8);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 8);
assertEquals("abcè±éä¹æ¯çð©¬
d", new String(buffer));
// supplementary medial
buffer = "abcçæ¯ð©¬
def".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abcfedð©¬
æ¯ç", new String(buffer));
}
}
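
Aside (not part of the commit): the supplementary-character tests above are presumably why reverse() takes a Version argument, so that with a current version surrogate pairs such as 𩬅 stay intact instead of having their two UTF-16 code units swapped separately. A small standalone illustration of that general technique, using only java.lang facilities; it sketches the idea, not ReverseStringFilter's actual implementation:

    // Sketch only: contrasts naive char-by-char reversal with a reversal that
    // keeps surrogate pairs (supplementary characters like U+29B05) intact.
    public class SurrogateReverseSketch {
      static String naiveReverse(String s) {
        char[] buf = s.toCharArray();
        for (int i = 0, j = buf.length - 1; i < j; i++, j--) {
          char tmp = buf[i]; buf[i] = buf[j]; buf[j] = tmp;
        }
        return new String(buf);  // splits surrogate pairs, producing invalid text
      }

      static String supplementaryAwareReverse(String s) {
        // StringBuilder.reverse() treats surrogate pairs as single characters.
        return new StringBuilder(s).reverse().toString();
      }

      public static void main(String[] args) {
        String input = "ab" + new String(Character.toChars(0x29B05));  // "ab" + one supplementary char
        System.out.println(supplementaryAwareReverse(input));  // supplementary char stays intact
        System.out.println(naiveReverse(input));               // surrogate pair is broken apart
      }
    }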
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java Sat Feb 27 19:14:01 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestRomanianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new RomanianAnalyzer(Version.LUCENE_CURRENT);
+ new RomanianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new RomanianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new RomanianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "absenţa", "absenţ");
checkOneTermReuse(a, "absenţi", "absenţ");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("absenţa");
- Analyzer a = new RomanianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new RomanianAnalyzer(TEST_VERSION_CURRENT,
RomanianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "absenţa", "absenţa");
checkOneTermReuse(a, "absenţi", "absenţ");
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java Sat Feb 27 19:14:01 2010
@@ -44,8 +44,7 @@
private File dataDir;
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
super.setUp();
dataDir = new File(System.getProperty("dataDir", "./bin"));
}
@@ -71,7 +70,7 @@
TokenStream in = ra.tokenStream("all", inWords);
RussianLetterTokenizer sample =
- new RussianLetterTokenizer(Version.LUCENE_CURRENT,
+ new RussianLetterTokenizer(TEST_VERSION_CURRENT,
sampleUnicode);
TermAttribute text = in.getAttribute(TermAttribute.class);
@@ -98,7 +97,7 @@
public void testDigitsInRussianCharset()
{
Reader reader = new StringReader("text 1000");
- RussianAnalyzer ra = new RussianAnalyzer(Version.LUCENE_CURRENT);
+ RussianAnalyzer ra = new RussianAnalyzer(TEST_VERSION_CURRENT);
TokenStream stream = ra.tokenStream("", reader);
TermAttribute termText = stream.getAttribute(TermAttribute.class);
@@ -126,7 +125,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new RussianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "ÐмеÑÑе Ñ Ñем о Ñиле ÑлекÑÑомагниÑной ÑнеÑгии имели пÑедÑÑавление еÑе",
new String[] { "вмеÑÑ", "Ñил", "ÑлекÑÑомагниÑн", "ÑнеÑг", "имел", "пÑедÑÑавлен" });
assertAnalyzesToReuse(a, "Ðо знание ÑÑо Ñ
ÑанилоÑÑ Ð² Ñайне",
@@ -135,9 +134,9 @@
public void testWithStemExclusionSet() throws Exception {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("пÑедÑÑавление");
- Analyzer a = new RussianAnalyzer(Version.LUCENE_CURRENT, RussianAnalyzer.getDefaultStopSet() , set);
+ Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT, RussianAnalyzer.getDefaultStopSet() , set);
assertAnalyzesToReuse(a, "ÐмеÑÑе Ñ Ñем о Ñиле ÑлекÑÑомагниÑной ÑнеÑгии имели пÑедÑÑавление еÑе",
new String[] { "вмеÑÑ", "Ñил", "ÑлекÑÑомагниÑн", "ÑнеÑг", "имел", "пÑедÑÑавление" });
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java Sat Feb 27 19:14:01 2010
@@ -30,8 +30,8 @@
@Deprecated
public class TestRussianStem extends LuceneTestCase
{
- private ArrayList words = new ArrayList();
- private ArrayList stems = new ArrayList();
+ private ArrayList<String> words = new ArrayList<String>();
+ private ArrayList<String> stems = new ArrayList<String>();
public TestRussianStem(String name)
{
@@ -42,8 +42,7 @@
* @see TestCase#setUp()
*/
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
super.setUp();
//System.out.println(new java.util.Date());
String str;
@@ -75,15 +74,6 @@
inStems.close();
}
- /**
- * @see TestCase#tearDown()
- */
- @Override
- protected void tearDown() throws Exception
- {
- super.tearDown();
- }
-
public void testStem()
{
for (int i = 0; i < words.size(); i++)
@@ -91,7 +81,7 @@
//if ( (i % 100) == 0 ) System.err.println(i);
String realStem =
RussianStemmer.stemWord(
- (String) words.get(i));
+ words.get(i));
assertEquals("unicode", stems.get(i), realStem);
}
}
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Sat Feb 27 19:14:01 2010
@@ -42,7 +42,6 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
/**
* A test class for ShingleAnalyzerWrapper as regards queries and scoring.
@@ -86,7 +85,7 @@
protected ScoreDoc[] queryParsingTest(Analyzer analyzer, String qs) throws Exception {
searcher = setUpSearcher(analyzer);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "content", analyzer);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "content", analyzer);
Query q = qp.parse(qs);
@@ -106,7 +105,7 @@
*/
public void testShingleAnalyzerWrapperQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"test sentence");
int[] ranks = new int[] { 1, 2, 0 };
compareRanks(hits, ranks);
@@ -117,7 +116,7 @@
*/
public void testShingleAnalyzerWrapperPhraseQueryParsingFails() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"\"this sentence\"");
int[] ranks = new int[] { 0 };
compareRanks(hits, ranks);
@@ -128,7 +127,7 @@
*/
public void testShingleAnalyzerWrapperPhraseQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"\"test sentence\"");
int[] ranks = new int[] { 1 };
compareRanks(hits, ranks);
@@ -139,7 +138,7 @@
*/
public void testShingleAnalyzerWrapperRequiredQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"+test +sentence");
int[] ranks = new int[] { 1, 2 };
compareRanks(hits, ranks);
@@ -149,7 +148,7 @@
* This shows how to construct a phrase query containing shingles.
*/
public void testShingleAnalyzerWrapperPhraseQuery() throws Exception {
- Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
searcher = setUpSearcher(analyzer);
PhraseQuery q = new PhraseQuery();
@@ -178,7 +177,7 @@
* in the right order and adjacent to each other.
*/
public void testShingleAnalyzerWrapperBooleanQuery() throws Exception {
- Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
searcher = setUpSearcher(analyzer);
BooleanQuery q = new BooleanQuery();
@@ -200,7 +199,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer a = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
assertAnalyzesToReuse(a, "please divide into shingles",
new String[] { "please", "please divide", "divide", "divide into", "into", "into shingles", "shingles" },
new int[] { 0, 0, 7, 7, 14, 14, 19 },
@@ -222,9 +221,9 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
if (++invocationCount % 2 == 0)
- return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
else
- return new LetterTokenizer(Version.LUCENE_CURRENT, reader);
+ return new LetterTokenizer(TEST_VERSION_CURRENT, reader);
}
}
@@ -249,7 +248,7 @@
public void testNonDefaultMinShingleSize() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(), 3, 4);
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 3, 4);
assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles",
new String[] { "please", "please divide this", "please divide this sentence",
"divide", "divide this sentence", "divide this sentence into",
@@ -273,7 +272,7 @@
public void testNonDefaultMinAndSameMaxShingleSize() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(), 3, 3);
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 3, 3);
assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles",
new String[] { "please", "please divide this",
"divide", "divide this sentence",
@@ -297,7 +296,7 @@
public void testNoTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator("");
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "pleasedivide",
@@ -319,7 +318,7 @@
public void testNullTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator(null);
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "pleasedivide",
@@ -340,7 +339,7 @@
}
public void testAltTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator("<SEP>");
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "please<SEP>divide",
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java Sat Feb 27 19:14:01 2010
@@ -26,7 +26,6 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.util.Version;
public class ShingleFilterTest extends BaseTokenStreamTestCase {
@@ -836,7 +835,7 @@
public void testReset() throws Exception {
- Tokenizer wsTokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("please divide this sentence"));
+ Tokenizer wsTokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("please divide this sentence"));
TokenStream filter = new ShingleFilter(wsTokenizer, 2);
assertTokenStreamContents(filter,
new String[]{"please","please divide","divide","divide this","this","this sentence","sentence"},
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java Sat Feb 27 19:14:01 2010
@@ -31,7 +31,6 @@
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix;
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column;
import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.util.Version;
public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
@@ -41,11 +40,11 @@
public void testIterator() throws IOException {
- WhitespaceTokenizer wst = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("one two three four five"));
+ WhitespaceTokenizer wst = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("one two three four five"));
ShingleMatrixFilter smf = new ShingleMatrixFilter(wst, 2, 2, '_', false, new ShingleMatrixFilter.OneDimensionalNonWeightedTokenSettingsCodec());
int i;
- for(i=0; smf.incrementToken(); i++);
+ for(i=0; smf.incrementToken(); i++) {}
assertEquals(4, i);
// call next once more. this should return false again rather than throwing an exception (LUCENE-1939)
@@ -65,11 +64,11 @@
assertFalse(ts.incrementToken());
TokenListStream tls;
- LinkedList tokens;
+ LinkedList<Token> tokens;
// test a plain old token stream with synonyms translated to rows.
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(createToken("please", 0, 6));
tokens.add(createToken("divide", 7, 13));
tokens.add(createToken("this", 14, 18));
@@ -101,11 +100,11 @@
TokenStream ts;
TokenStream tls;
- LinkedList tokens;
+ LinkedList<Token> tokens;
// test a plain old token stream with synonyms translated to rows.
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 0, 4));
tokens.add(tokenFactory("greetings", 0, 0, 4));
tokens.add(tokenFactory("world", 1, 5, 10));
@@ -145,7 +144,7 @@
ShingleMatrixFilter.defaultSettingsCodec = new ShingleMatrixFilter.SimpleThreeDimensionalTokenSettingsCodec();
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newColumn));
tokens.add(tokenFactory("greetings", 0, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newRow));
tokens.add(tokenFactory("world", 1, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newColumn));
@@ -286,7 +285,7 @@
//
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newColumn));
tokens.add(tokenFactory("greetings", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newRow));
tokens.add(tokenFactory("and", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.sameRow));
@@ -413,11 +412,6 @@
}
- private Token tokenFactory(String text, int startOffset, int endOffset) {
- return tokenFactory(text, 1, 1f, startOffset, endOffset);
- }
-
-
private Token tokenFactory(String text, int posIncr, int startOffset, int endOffset) {
Token token = new Token(startOffset, endOffset);
token.setTermBuffer(text);
@@ -430,10 +424,6 @@
return tokenFactory(text, posIncr, 1f, 0, 0);
}
- private Token tokenFactory(String text, int posIncr, float weight) {
- return tokenFactory(text, posIncr, weight, 0, 0);
- }
-
private Token tokenFactory(String text, int posIncr, float weight, int startOffset, int endOffset) {
Token token = new Token(startOffset, endOffset);
token.setTermBuffer(text);
@@ -460,17 +450,6 @@
assertEquals(text, termAtt.term());
}
- private void assertNext(TokenStream ts, String text, int positionIncrement, float boost) throws IOException {
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
- PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
- PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
-
- assertTrue(ts.incrementToken());
- assertEquals(text, termAtt.term());
- assertEquals(positionIncrement, posIncrAtt.getPositionIncrement());
- assertEquals(boost, payloadAtt.getPayload() == null ? 1f : PayloadHelper.decodeFloat(payloadAtt.getPayload().getData()), 0);
- }
-
private void assertNext(TokenStream ts, String text, int positionIncrement, float boost, int startOffset, int endOffset) throws IOException {
TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
@@ -505,7 +484,7 @@
public static class TokenListStream extends TokenStream {
- private Collection tokens;
+ private Collection<Token> tokens;
TermAttribute termAtt;
PositionIncrementAttribute posIncrAtt;
PayloadAttribute payloadAtt;
@@ -513,7 +492,7 @@
TypeAttribute typeAtt;
FlagsAttribute flagsAtt;
- public TokenListStream(Collection tokens) {
+ public TokenListStream(Collection<Token> tokens) {
this.tokens = tokens;
termAtt = addAttribute(TermAttribute.class);
posIncrAtt = addAttribute(PositionIncrementAttribute.class);
@@ -523,7 +502,7 @@
flagsAtt = addAttribute(FlagsAttribute.class);
}
- private Iterator iterator;
+ private Iterator<Token> iterator;
@Override
public boolean incrementToken() throws IOException {
@@ -533,7 +512,7 @@
if (!iterator.hasNext()) {
return false;
}
- Token prototype = (Token) iterator.next();
+ Token prototype = iterator.next();
clearAttributes();
termAtt.setTermBuffer(prototype.termBuffer(), 0, prototype.termLength());
posIncrAtt.setPositionIncrement(prototype.getPositionIncrement());
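
Side note on the generics changes above: with LinkedList<Token> and Iterator<Token>, iterator.next() already returns a Token, so the old (Token) cast can be dropped. A minimal standalone sketch of that pattern follows (hypothetical class name; it only uses Token calls that appear in this diff, such as the (startOffset, endOffset) constructor, setTermBuffer, termBuffer and termLength):

import java.util.Iterator;
import java.util.LinkedList;

import org.apache.lucene.analysis.Token;

// Hypothetical sketch: a typed LinkedList<Token> yields a typed iterator,
// so iterator.next() returns Token directly and no cast is needed.
class TypedTokenListSketch {
  public static void main(String[] args) {
    LinkedList<Token> tokens = new LinkedList<Token>();

    Token hello = new Token(0, 5);
    hello.setTermBuffer("hello");
    tokens.add(hello);

    Token world = new Token(6, 11);
    world.setTermBuffer("world");
    tokens.add(world);

    Iterator<Token> iterator = tokens.iterator();
    while (iterator.hasNext()) {
      Token prototype = iterator.next();   // no (Token) cast required
      System.out.println(new String(prototype.termBuffer(), 0, prototype.termLength())
          + " [" + prototype.startOffset() + "," + prototype.endOffset() + "]");
    }
  }
}
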
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java Sat Feb 27 19:14:01 2010
@@ -25,7 +25,6 @@
import org.apache.lucene.analysis.TeeSinkTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
-import org.apache.lucene.util.Version;
public class DateRecognizerSinkTokenizerTest extends BaseTokenStreamTestCase {
@@ -37,7 +36,7 @@
public void test() throws IOException {
DateRecognizerSinkFilter sinkFilter = new DateRecognizerSinkFilter(new SimpleDateFormat("MM/dd/yyyy", Locale.US));
String test = "The quick red fox jumped over the lazy brown dogs on 7/11/2006 The dogs finally reacted on 7/12/2006";
- TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
SinkTokenStream sink = tee.newSinkTokenStream(sinkFilter);
int count = 0;
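
As a rough, self-contained sketch of the tee/sink pattern this test exercises (class name and sample text are hypothetical, and Version.LUCENE_30 stands in for the test's TEST_VERSION_CURRENT): the tee forwards every token to its consumer, while the sink replays only the tokens accepted by the sink filter.

import java.io.StringReader;
import java.text.SimpleDateFormat;
import java.util.Locale;

import org.apache.lucene.analysis.TeeSinkTokenFilter;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.sinks.DateRecognizerSinkFilter;
import org.apache.lucene.util.Version;

// Sketch: the sink only sees tokens that parse as MM/dd/yyyy dates.
public class DateSinkSketch {
  public static void main(String[] args) throws Exception {
    String text = "dogs barked on 7/11/2006 and again on 7/12/2006";
    TeeSinkTokenFilter tee = new TeeSinkTokenFilter(
        new WhitespaceTokenizer(Version.LUCENE_30, new StringReader(text)));
    SinkTokenStream dates = tee.newSinkTokenStream(
        new DateRecognizerSinkFilter(new SimpleDateFormat("MM/dd/yyyy", Locale.US)));

    int all = 0;
    while (tee.incrementToken()) {   // consume the main stream first
      all++;
    }

    int dateTokens = 0;
    dates.reset();
    while (dates.incrementToken()) { // the sink replays only the date tokens
      dateTokens++;
    }
    System.out.println(all + " tokens total, " + dateTokens + " dates");
  }
}
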
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java Sat Feb 27 19:14:01 2010
@@ -23,7 +23,6 @@
import org.apache.lucene.analysis.TeeSinkTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
-import org.apache.lucene.util.Version;
public class TokenRangeSinkTokenizerTest extends BaseTokenStreamTestCase {
@@ -35,7 +34,7 @@
public void test() throws IOException {
TokenRangeSinkFilter sinkFilter = new TokenRangeSinkFilter(2, 4);
String test = "The quick red fox jumped over the lazy brown dogs";
- TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
SinkTokenStream rangeToks = tee.newSinkTokenStream(sinkFilter);
int count = 0;
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java Sat Feb 27 19:14:01 2010
@@ -27,11 +27,9 @@
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
public class TokenTypeSinkTokenizerTest extends BaseTokenStreamTestCase {
-
public TokenTypeSinkTokenizerTest(String s) {
super(s);
}
@@ -40,7 +38,7 @@
TokenTypeSinkFilter sinkFilter = new TokenTypeSinkFilter("D");
String test = "The quick red fox jumped over the lazy brown dogs";
- TeeSinkTokenFilter ttf = new TeeSinkTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))));
+ TeeSinkTokenFilter ttf = new TeeSinkTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))));
SinkTokenStream sink = ttf.newSinkTokenStream(sinkFilter);
boolean seenDogs = false;
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java Sat Feb 27 19:14:01 2010
@@ -33,13 +33,13 @@
public class TestSnowball extends BaseTokenStreamTestCase {
public void testEnglish() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesTo(a, "he abhorred accents",
new String[]{"he", "abhor", "accent"});
}
public void testStopwords() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English",
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English",
StandardAnalyzer.STOP_WORDS_SET);
assertAnalyzesTo(a, "the quick brown fox jumped",
new String[]{"quick", "brown", "fox", "jump"});
@@ -50,7 +50,7 @@
* we lowercase I correctly for non-Turkish languages in either case.
*/
public void testEnglishLowerCase() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesTo(a, "cryogenic", new String[] { "cryogen" });
assertAnalyzesTo(a, "CRYOGENIC", new String[] { "cryogen" });
@@ -63,7 +63,7 @@
* Test turkish lowercasing
*/
public void testTurkish() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "Turkish");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "Turkish");
assertAnalyzesTo(a, "ağacı", new String[] { "ağaç" });
assertAnalyzesTo(a, "AĞACI", new String[] { "ağaç" });
@@ -84,7 +84,7 @@
public void testReusableTokenStream() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesToReuse(a, "he abhorred accents",
new String[]{"he", "abhor", "accent"});
assertAnalyzesToReuse(a, "she abhorred him",
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java Sat Feb 27 19:14:01 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestSwedishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new SwedishAnalyzer(Version.LUCENE_CURRENT);
+ new SwedishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new SwedishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "jaktkarlarne", "jaktkarl");
checkOneTermReuse(a, "jaktkarlens", "jaktkarl");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("jaktkarlarne");
- Analyzer a = new SwedishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT,
SwedishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "jaktkarlarne", "jaktkarlarne");
checkOneTermReuse(a, "jaktkarlens", "jaktkarl");
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java Sat Feb 27 19:14:01 2010
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Test case for ThaiAnalyzer, modified from TestFrenchAnalyzer
@@ -32,7 +31,7 @@
* testcase for offsets
*/
public void testOffsets() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_CURRENT), "เดอะนิวยอร์กไทมส์",
+ assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "เดอะนิวยอร์กไทมส์",
new String[] { "เด", "อะนิว", "ยอ", "ร์ก", "ไทมส์"},
new int[] { 0, 2, 7, 9, 12 },
new int[] { 2, 7, 9, 12, 17});
@@ -50,7 +49,7 @@
* Instead, allow the definition of alphanum to include relevant categories like nonspacing marks!
*/
public void testBuggyTokenType() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_CURRENT), "เดอะนิวยอร์กไทมส์ ๑๒๓",
+ assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "เดอะนิวยอร์กไทมส์ ๑๒๓",
new String[] { "เด", "อะนิว", "ยอ", "ร์ก", "ไทมส์", "๑๒๓" },
new String[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>" });
}
@@ -64,7 +63,7 @@
*/
public void testAnalyzer() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "", new String[] {});
@@ -89,7 +88,7 @@
* Test that position increments are adjusted correctly for stopwords.
*/
public void testPositionIncrements() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "ประโยคว่า the ประโยคว่า",
new String[] { "ประโยค", "ว่า", "ประโยค", "ว่า" },
@@ -106,7 +105,7 @@
}
public void testReusableTokenStream() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(analyzer, "", new String[] {});
assertAnalyzesToReuse(
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java Sat Feb 27 19:14:01 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestTurkishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new TurkishAnalyzer(Version.LUCENE_CURRENT);
+ new TurkishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new TurkishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "ağacı", "ağaç");
checkOneTermReuse(a, "ağaç", "ağaç");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("ağacı");
- Analyzer a = new TurkishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT,
TurkishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "ağacı", "ağacı");
checkOneTermReuse(a, "ağaç", "ağaç");
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java Sat Feb 27 19:14:01 2010
@@ -22,7 +22,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Turkish lowercase filter.
@@ -33,7 +32,7 @@
* Test composed forms
*/
public void testTurkishLowerCaseFilter() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0130STANBUL \u0130ZM\u0130R ISPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"istanbul", "izmir",
@@ -44,7 +43,7 @@
* Test decomposed forms
*/
public void testDecomposed() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0049\u0307STANBUL \u0049\u0307ZM\u0049\u0307R ISPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"istanbul", "izmir",
@@ -57,7 +56,7 @@
* to U+0130 + U+0316, and is lowercased the same way.
*/
public void testDecomposed2() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0049\u0316\u0307STANBUL \u0049\u0307ZM\u0049\u0307R I\u0316SPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"i\u0316stanbul", "izmir",
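
As a small illustration of the composed/decomposed point made in the comments above, here is a hypothetical standalone sketch (Version.LUCENE_30 stands in for the test's TEST_VERSION_CURRENT): the precomposed dotted capital I (U+0130) and its decomposed form (U+0049 followed by the combining dot above, U+0307) should both lower-case to a plain "i" under the Turkish rules.

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
import org.apache.lucene.util.Version;

// Sketch: composed and decomposed dotted I both come out as "istanbul".
public class TurkishLowerCaseSketch {
  public static void main(String[] args) throws Exception {
    String[] inputs = { "\u0130STANBUL", "\u0049\u0307STANBUL" };
    for (String input : inputs) {
      TokenStream stream = new TurkishLowerCaseFilter(
          new WhitespaceTokenizer(Version.LUCENE_30, new StringReader(input)));
      TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
      while (stream.incrementToken()) {
        System.out.println(termAtt.term());   // expected: istanbul
      }
      stream.close();
    }
  }
}
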
Modified: lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java (original)
+++ lucene/java/trunk/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java Sat Feb 27 19:14:01 2010
@@ -20,8 +20,6 @@
import java.io.File;
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
@@ -31,13 +29,13 @@
import org.apache.lucene.store.FSDirectory;
import org.apache.tools.ant.Project;
import org.apache.tools.ant.types.FileSet;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Test cases for index task
*
*/
-public class IndexTaskTest extends TestCase {
+public class IndexTaskTest extends LuceneTestCase {
private final static String docHandler =
"org.apache.lucene.ant.FileExtensionDocumentHandler";
@@ -55,7 +53,8 @@
*@exception IOException Description of Exception
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
+ super.setUp();
Project project = new Project();
IndexTask task = new IndexTask();
@@ -71,12 +70,12 @@
dir = FSDirectory.open(indexDir);
searcher = new IndexSearcher(dir, true);
- analyzer = new StopAnalyzer(Version.LUCENE_CURRENT);
+ analyzer = new StopAnalyzer(TEST_VERSION_CURRENT);
}
public void testSearch() throws Exception {
- Query query = new QueryParser(Version.LUCENE_CURRENT, "contents",analyzer).parse("test");
+ Query query = new QueryParser(TEST_VERSION_CURRENT, "contents",analyzer).parse("test");
int numHits = searcher.search(query, null, 1000).totalHits;
@@ -88,9 +87,10 @@
* TODO: remove indexDir?
*/
@Override
- public void tearDown() throws IOException {
+ protected void tearDown() throws Exception {
searcher.close();
dir.close();
+ super.tearDown();
}
}
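
A minimal sketch of the LuceneTestCase lifecycle pattern the hunk above adopts (hypothetical test class; the point is the protected overrides chaining to super.setUp() and super.tearDown()):

import org.apache.lucene.util.LuceneTestCase;

// Sketch: mirror the setUp/tearDown changes made to IndexTaskTest above.
public class ExampleLifecycleTest extends LuceneTestCase {

  private StringBuilder fixture;

  @Override
  protected void setUp() throws Exception {
    super.setUp();                 // let LuceneTestCase initialize first
    fixture = new StringBuilder("ready");
  }

  @Override
  protected void tearDown() throws Exception {
    fixture = null;                // release test fixtures
    super.tearDown();              // then let LuceneTestCase run its checks
  }

  public void testFixtureIsReady() {
    assertEquals("ready", fixture.toString());
  }
}
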
Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java Sat Feb 27 19:14:01 2010
@@ -13,10 +13,12 @@
public class LongToEnglishContentSource extends ContentSource{
private long counter = Long.MIN_VALUE + 10;
+ @Override
public void close() throws IOException {
}
//TODO: reduce/clean up synchronization
+ @Override
public synchronized DocData getNextDocData(DocData docData) throws NoMoreDataException, IOException {
docData.clear();
docData.setBody(English.longToEnglish(counter));
Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java Sat Feb 27 19:14:01 2010
@@ -30,7 +30,7 @@
/**
* Create the next query, of the given size.
* @param size the size of the query - number of terms, etc.
- * @exception if cannot make the query, or if size>0 was specified but this feature is not supported.
+ * @exception Exception if cannot make the query, or if size>0 was specified but this feature is not supported.
*/
public Query makeQuery (int size) throws Exception;
Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java Sat Feb 27 19:14:01 2010
@@ -29,8 +29,6 @@
*/
public class Points {
- private Config config;
-
// stat points ordered by their start time.
// for now we collect points as TaskStats objects.
// later might optimize to collect only native data.
@@ -44,7 +42,6 @@
* Create a Points statistics object.
*/
public Points (Config config) {
- this.config = config;
}
/**
Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java Sat Feb 27 19:14:01 2010
@@ -211,7 +211,7 @@
public Object clone() throws CloneNotSupportedException {
TaskStats c = (TaskStats) super.clone();
if (c.countsByTime != null) {
- c.countsByTime = (int[]) c.countsByTime.clone();
+ c.countsByTime = c.countsByTime.clone();
}
return c;
}
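
A tiny sketch of why the cast can go (hypothetical class): array clone() has had a covariant return type since Java 5, so countsByTime.clone() is already an int[].

// Sketch: clone() on an int[] returns int[], and the copy is independent.
public class ArrayCloneSketch {
  public static void main(String[] args) {
    int[] countsByTime = { 1, 2, 3 };
    int[] copy = countsByTime.clone();    // no (int[]) cast required
    copy[0] = 99;
    System.out.println(countsByTime[0]);  // prints 1; the original is untouched
  }
}
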
Modified: lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java?rev=917019&r1=917018&r2=917019&view=diff
==============================================================================
--- lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java (original)
+++ lucene/java/trunk/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java Sat Feb 27 19:14:01 2010
@@ -70,7 +70,7 @@
// Parent sequence sets stopNow
reopenCount = 0;
while(!stopNow) {
- long waitForMsec = (long) (pauseMSec - (System.currentTimeMillis() - t));
+ long waitForMsec = (pauseMSec - (System.currentTimeMillis() - t));
if (waitForMsec > 0) {
Thread.sleep(waitForMsec);
//System.out.println("NRT wait: " + waitForMsec + " msec");