You are viewing a plain text version of this content. The canonical link for it is here.
Posted to java-commits@lucene.apache.org by us...@apache.org on 2010/02/27 23:51:55 UTC
svn commit: r917067 [2/8] - in /lucene/java/branches/flex_1458: ./ contrib/
contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/
contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/
contrib/analyzers/common/src/java/org/apache/l...
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -311,7 +311,7 @@
return new String(output, 0, len);
} finally {
- if (input != null) input.close();
+ input.close();
}
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java Sat Feb 27 22:51:44 2010
@@ -124,7 +124,7 @@
if (!input.incrementToken()) {
return false;
} else {
- curTermBuffer = (char[]) termAtt.termBuffer().clone();
+ curTermBuffer = termAtt.termBuffer().clone();
curTermLength = termAtt.termLength();
curGramSize = minGram;
tokStart = offsetAtt.startOffset();
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java Sat Feb 27 22:51:44 2010
@@ -79,7 +79,7 @@
if (!input.incrementToken()) {
return false;
} else {
- curTermBuffer = (char[]) termAtt.termBuffer().clone();
+ curTermBuffer = termAtt.termBuffer().clone();
curTermLength = termAtt.termLength();
curGramSize = minGram;
curPos = 0;
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -192,7 +192,7 @@
* if there stopwords, it is a StopFilter around wrapped.
*/
TokenStream withStopFilter;
- };
+ }
@Override
public TokenStream reusableTokenStream(String fieldName, Reader reader)
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -29,7 +29,6 @@
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -110,13 +109,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -26,7 +26,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -160,16 +159,17 @@
this(matchVersion, stopwords.keySet());
}
- /**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with {@link StandardFilter},
- * {@link LowerCaseFilter}, {@link StopFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided,
- * and {@link SnowballFilter}
- */
+ /**
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided, and {@link SnowballFilter}
+ */
@Override
protected TokenStreamComponents createComponents(String fieldName,
Reader reader) {
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java Sat Feb 27 22:51:44 2010
@@ -26,7 +26,7 @@
class RussianStemmer
{
// positions of RV, R1 and R2 respectively
- private int RV, R1, R2;
+ private int RV, /*R1,*/ R2;
// letters (currently unused letters are commented out)
private final static char A = '\u0430';
@@ -263,11 +263,7 @@
if (!findAndRemoveEnding(stemmingZone, adjectiveEndings))
return false;
// if adjective ending was found, try for participle ending.
- // variable r is unused, we are just interested in the side effect of
- // findAndRemoveEnding():
- boolean r =
- findAndRemoveEnding(stemmingZone, participleEndings1, participle1Predessors)
- ||
+ if (!findAndRemoveEnding(stemmingZone, participleEndings1, participle1Predessors))
findAndRemoveEnding(stemmingZone, participleEndings2);
return true;
}
@@ -391,7 +387,7 @@
private void markPositions(String word)
{
RV = 0;
- R1 = 0;
+// R1 = 0;
R2 = 0;
int i = 0;
// find RV
@@ -409,7 +405,7 @@
}
if (word.length() - 1 < ++i)
return; // R1 zone is empty
- R1 = i;
+// R1 = i;
// find R2
while (word.length() > i && !isVowel(word.charAt(i)))
{
@@ -532,13 +528,9 @@
if (!perfectiveGerund(stemmingZone))
{
reflexive(stemmingZone);
- // variable r is unused, we are just interested in the flow that gets
- // created by logical expression: apply adjectival(); if that fails,
- // apply verb() etc
- boolean r =
- adjectival(stemmingZone)
- || verb(stemmingZone)
- || noun(stemmingZone);
+ if (!adjectival(stemmingZone))
+ if (!verb(stemmingZone))
+ noun(stemmingZone);
}
// Step 2
removeI(stemmingZone);
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java Sat Feb 27 22:51:44 2010
@@ -391,8 +391,8 @@
}
/**
- * {@see #advance()}
* @return the current value.
+ * @see #advance()
*/
public int getValue() {
return value;
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java Sat Feb 27 22:51:44 2010
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.EmptyTokenStream;
import org.apache.lucene.analysis.payloads.PayloadHelper;
-import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column;
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column.Row;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java Sat Feb 27 22:51:44 2010
@@ -19,7 +19,6 @@
import java.text.DateFormat;
import java.text.ParseException;
-import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkFilter;
@@ -42,7 +41,7 @@
* Uses {@link java.text.SimpleDateFormat#getDateInstance()} as the {@link java.text.DateFormat} object.
*/
public DateRecognizerSinkFilter() {
- this(SimpleDateFormat.getDateInstance());
+ this(DateFormat.getDateInstance());
}
public DateRecognizerSinkFilter(DateFormat dateFormat) {
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}
+ * , {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -19,7 +19,6 @@
import java.io.Reader;
import org.apache.lucene.analysis.ReusableAnalyzerBase;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
@@ -45,12 +44,14 @@
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with {@link StandardFilter},
- * {@link ThaiWordFilter}, and {@link StopFilter}
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link ThaiWordFilter}, and
+ * {@link StopFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -28,7 +28,6 @@
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -109,11 +108,14 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link TurkishLowerCaseFilter},
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link TurkishLowerCaseFilter},
* {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
* exclusion set is provided and {@link SnowballFilter}.
*/
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Analyzer
@@ -35,14 +34,14 @@
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ new ArabicAnalyzer(TEST_VERSION_CURRENT);
}
/**
* Some simple tests showing some features of the analyzer, how some regular forms will conflate
*/
public void testBasicFeatures() throws Exception {
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "ÙبÙر", new String[] { "ÙبÙر" });
assertAnalyzesTo(a, "ÙبÙرة", new String[] { "ÙبÙر" }); // feminine marker
@@ -63,7 +62,7 @@
* Simple tests to show things are getting reset correctly, etc.
*/
public void testReusableTokenStream() throws Exception {
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "ÙبÙر", new String[] { "ÙبÙر" });
assertAnalyzesToReuse(a, "ÙبÙرة", new String[] { "ÙبÙر" }); // feminine marker
}
@@ -72,7 +71,7 @@
* Non-arabic text gets treated in a similar way as SimpleAnalyzer.
*/
public void testEnglishInput() throws Exception {
- assertAnalyzesTo(new ArabicAnalyzer(Version.LUCENE_CURRENT), "English text.", new String[] {
+ assertAnalyzesTo(new ArabicAnalyzer(TEST_VERSION_CURRENT), "English text.", new String[] {
"english", "text" });
}
@@ -82,7 +81,7 @@
public void testCustomStopwords() throws Exception {
Set<String> set = new HashSet<String>();
Collections.addAll(set, "the", "and", "a");
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT, set);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT, set);
assertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick",
"brown", "fox" });
}
@@ -90,12 +89,12 @@
public void testWithStemExclusionSet() throws IOException {
Set<String> set = new HashSet<String>();
set.add("ساÙدÙات");
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "ÙبÙرة the quick ساÙدÙات", new String[] { "ÙبÙر","the", "quick", "ساÙدÙات" });
assertAnalyzesToReuse(a, "ÙبÙرة the quick ساÙدÙات", new String[] { "ÙبÙر","the", "quick", "ساÙدÙات" });
- a = new ArabicAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, CharArraySet.EMPTY_SET);
+ a = new ArabicAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, CharArraySet.EMPTY_SET);
assertAnalyzesTo(a, "ÙبÙرة the quick ساÙدÙات", new String[] { "ÙبÙر","the", "quick", "ساÙد" });
assertAnalyzesToReuse(a, "ÙبÙرة the quick ساÙدÙات", new String[] { "ÙبÙر","the", "quick", "ساÙد" });
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java Sat Feb 27 22:51:44 2010
@@ -21,11 +21,9 @@
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Normalization Filter
- *
*/
public class TestArabicNormalizationFilter extends BaseTokenStreamTestCase {
@@ -86,7 +84,7 @@
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader(input));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
ArabicNormalizationFilter filter = new ArabicNormalizationFilter(tokenStream);
assertTokenStreamContents(filter, new String[]{expected});
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java Sat Feb 27 22:51:44 2010
@@ -23,7 +23,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Normalization Filter
@@ -116,16 +115,16 @@
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("ساهدهات");
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader("ساهدهات"));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader("ساهدهات"));
ArabicStemFilter filter = new ArabicStemFilter(new KeywordMarkerTokenFilter(tokenStream, set));
assertTokenStreamContents(filter, new String[]{"ساهدهات"});
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader(input));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
ArabicStemFilter filter = new ArabicStemFilter(tokenStream);
assertTokenStreamContents(filter, new String[]{expected});
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -34,23 +34,23 @@
* This test fails with NPE when the stopwords file is missing in classpath
*/
public void testResourcesAvailable() {
- new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ new BulgarianAnalyzer(TEST_VERSION_CURRENT);
}
public void testStopwords() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "Как се казваш?", new String[] {"казваш"});
}
public void testCustomStopwords() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT, Collections
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT, Collections
.emptySet());
assertAnalyzesTo(a, "Как се казваш?",
new String[] {"как", "се", "казваш"});
}
public void testReusableTokenStream() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "документи", new String[] {"документ"});
assertAnalyzesToReuse(a, "документ", new String[] {"документ"});
}
@@ -59,7 +59,7 @@
* Test some examples from the paper
*/
public void testBasicExamples() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "енергийни кризи", new String[] {"енергийн", "криз"});
assertAnalyzesTo(a, "Атомната енергия", new String[] {"атомн", "енерг"});
@@ -72,7 +72,7 @@
public void testWithStemExclusionSet() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_31, 1, true);
set.add("строеве");
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "строевете строеве", new String[] { "строй", "строеве" });
}
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java Sat Feb 27 22:51:44 2010
@@ -35,7 +35,7 @@
* common (and some rare) plural pattern is listed.
*/
public void testMasculineNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// -и pattern
assertAnalyzesTo(a, "град", new String[] {"град"});
@@ -101,7 +101,7 @@
* Test showing how feminine noun forms conflate
*/
public void testFeminineNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "вест", new String[] {"вест"});
assertAnalyzesTo(a, "вестта", new String[] {"вест"});
@@ -114,7 +114,7 @@
* plural pattern is listed
*/
public void testNeuterNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// -а pattern
assertAnalyzesTo(a, "дърво", new String[] {"дърв"});
@@ -142,7 +142,7 @@
* Test showing how adjectival forms conflate
*/
public void testAdjectives() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "красив", new String[] {"красив"});
assertAnalyzesTo(a, "красивия", new String[] {"красив"});
assertAnalyzesTo(a, "красивият", new String[] {"красив"});
@@ -158,7 +158,7 @@
* Test some exceptional rules, implemented as rewrites.
*/
public void testExceptions() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// Ñи -> к
assertAnalyzesTo(a, "собственик", new String[] {"собственик"});
@@ -215,7 +215,7 @@
public void testWithKeywordAttribute() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_31, 1, true);
set.add("строеве");
- WhitespaceTokenizer tokenStream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ WhitespaceTokenizer tokenStream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("строевете строеве"));
BulgarianStemFilter filter = new BulgarianStemFilter(
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java Sat Feb 27 22:51:44 2010
@@ -25,7 +25,6 @@
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Brazilian Stem Filter, which only modifies the term text.
@@ -128,7 +127,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
checkReuse(a, "boa", "boa");
checkReuse(a, "boainain", "boainain");
checkReuse(a, "boas", "boas");
@@ -136,35 +135,35 @@
}
public void testStemExclusionTable() throws Exception {
- BrazilianAnalyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ BrazilianAnalyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
a.setStemExclusionTable(new String[] { "quintessência" });
checkReuse(a, "quintessência", "quintessência"); // excluded words will be completely unchanged.
}
public void testStemExclusionTableBWCompat() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader("Brasília Brasilia")), set);
+ new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader("Brasília Brasilia")), set);
assertTokenStreamContents(filter, new String[] { "brasília", "brasil" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Brasília Brasilia")), set));
assertTokenStreamContents(filter, new String[] { "brasília", "brasil" });
}
public void testWithKeywordAttributeAndExclusionTable() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
- CharArraySet set1 = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set1 = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set1.add("Brasilia");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Brasília Brasilia")), set), set1);
assertTokenStreamContents(filter, new String[] { "brasília", "brasilia" });
}
@@ -174,14 +173,14 @@
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- BrazilianAnalyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ BrazilianAnalyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
checkReuse(a, "quintessência", "quintessente");
a.setStemExclusionTable(new String[] { "quintessência" });
checkReuse(a, "quintessência", "quintessência");
}
private void check(final String input, final String expected) throws Exception {
- checkOneTerm(new BrazilianAnalyzer(Version.LUCENE_CURRENT), input, expected);
+ checkOneTerm(new BrazilianAnalyzer(TEST_VERSION_CURRENT), input, expected);
}
private void checkReuse(Analyzer a, String input, String expected) throws Exception {
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java Sat Feb 27 22:51:44 2010
@@ -21,7 +21,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.util.Version;
public class TestCJKTokenizer extends BaseTokenStreamTestCase {
@@ -42,7 +41,7 @@
}
public void checkCJKToken(final String str, final TestToken[] out_tokens) throws IOException {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String terms[] = new String[out_tokens.length];
int startOffsets[] = new int[out_tokens.length];
int endOffsets[] = new int[out_tokens.length];
@@ -57,7 +56,7 @@
}
public void checkCJKTokenReusable(final Analyzer a, final String str, final TestToken[] out_tokens) throws IOException {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String terms[] = new String[out_tokens.length];
int startOffsets[] = new int[out_tokens.length];
int endOffsets[] = new int[out_tokens.length];
@@ -213,13 +212,13 @@
}
public void testTokenStream() throws Exception {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "\u4e00\u4e01\u4e02",
new String[] { "\u4e00\u4e01", "\u4e01\u4e02"});
}
public void testReusableTokenStream() throws Exception {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String str = "\u3042\u3044\u3046\u3048\u304aabc\u304b\u304d\u304f\u3051\u3053";
TestToken[] out_tokens = {
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java Sat Feb 27 22:51:44 2010
@@ -28,17 +28,11 @@
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
static final File dataDir = new File(System.getProperty("dataDir", "./bin"));
static final File testFile = new File(dataDir, "org/apache/lucene/analysis/compound/da_UTF8.xml");
- @Override
- protected void setUp() throws Exception {
- super.setUp();
- }
-
public void testHyphenationCompoundWordsDA() throws Exception {
String[] dict = { "læse", "hest" };
@@ -47,8 +41,8 @@
HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter
.getHyphenationTree(reader);
- HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"min veninde som er lidt af en læsehest")), hyphenator,
dict, CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
@@ -67,8 +61,8 @@
.getHyphenationTree(reader);
// the word basket will not be added due to the longest match option
- HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"basketballkurv")), hyphenator, dict,
CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE, 40, true);
@@ -84,8 +78,8 @@
"Pelar", "Glas", "Ögon", "Fodral", "Bas", "Fiol", "Makare", "Gesäll",
"Sko", "Vind", "Rute", "Torkare", "Blad" };
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(
"Bildörr Bilmotor Biltak Slagborr Hammarborr Pelarborr Glasögonfodral Basfiolsfodral Basfiolsfodralmakaregesäll Skomakare Vindrutetorkare Vindrutetorkarblad abba")),
dict);
@@ -113,8 +107,8 @@
"Pelar", "Glas", "Ögon", "Fodral", "Bas", "Fiols", "Makare", "Gesäll",
"Sko", "Vind", "Rute", "Torkare", "Blad", "Fiolsfodral" };
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("Basfiolsfodralmakaregesäll")),
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Basfiolsfodralmakaregesäll")),
dict, CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE, true);
@@ -129,9 +123,9 @@
String[] dict = { "Rind", "Fleisch", "Draht", "Schere", "Gesetz",
"Aufgabe", "Überwachung" };
- Tokenizer wsTokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ Tokenizer wsTokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Rindfleischüberwachungsgesetz"));
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
wsTokenizer, dict,
CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -48,7 +48,7 @@
}
public void testStopWord() throws Exception {
- assertAnalyzesTo(new CzechAnalyzer(Version.LUCENE_CURRENT), "Pokud mluvime o volnem",
+ assertAnalyzesTo(new CzechAnalyzer(TEST_VERSION_CURRENT), "Pokud mluvime o volnem",
new String[] { "mluvim", "voln" });
}
@@ -63,7 +63,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer analyzer = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(analyzer, "Pokud mluvime o volnem", new String[] { "mluvim", "voln" });
assertAnalyzesToReuse(analyzer, "Česká Republika", new String[] { "česk", "republik" });
}
@@ -112,9 +112,9 @@
}
public void testWithStemExclusionSet() throws IOException{
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("hole");
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(cz, "hole desek", new String[] {"hole", "desk"});
}
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java Sat Feb 27 22:51:44 2010
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Czech Stemmer.
@@ -38,7 +37,7 @@
* Test showing how masculine noun forms conflate
*/
public void testMasculineNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* animate ending with a hard consonant */
assertAnalyzesTo(cz, "pán", new String[] { "pán" });
@@ -106,7 +105,7 @@
* Test showing how feminine noun forms conflate
*/
public void testFeminineNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with hard consonant */
assertAnalyzesTo(cz, "kost", new String[] { "kost" });
@@ -150,7 +149,7 @@
* Test showing how neuter noun forms conflate
*/
public void testNeuterNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with o */
assertAnalyzesTo(cz, "město", new String[] { "měst" });
@@ -193,7 +192,7 @@
* Test showing how adjectival forms conflate
*/
public void testAdjectives() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with ý/á/é */
assertAnalyzesTo(cz, "mladý", new String[] { "mlad" });
@@ -221,7 +220,7 @@
* Test some possessive suffixes
*/
public void testPossessive() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(cz, "Karlův", new String[] { "karl" });
assertAnalyzesTo(cz, "jazykový", new String[] { "jazyk" });
}
@@ -230,7 +229,7 @@
* Test some exceptional rules, implemented as rewrites.
*/
public void testExceptions() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* rewrite of št -> sk */
assertAnalyzesTo(cz, "český", new String[] { "česk" });
@@ -270,16 +269,16 @@
* Test that very short words are not stemmed.
*/
public void testDontStem() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(cz, "e", new String[] { "e" });
assertAnalyzesTo(cz, "zi", new String[] { "zi" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("hole");
CzechStemFilter filter = new CzechStemFilter(new KeywordMarkerTokenFilter(
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hole desek")), set));
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hole desek")), set));
assertTokenStreamContents(filter, new String[] { "hole", "desk" });
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestDanishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new DanishAnalyzer(Version.LUCENE_CURRENT);
+ new DanishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new DanishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DanishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "undersøg", "undersøg");
checkOneTermReuse(a, "undersøgelse", "undersøg");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("undersøgelse");
- Analyzer a = new DanishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new DanishAnalyzer(TEST_VERSION_CURRENT,
DanishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "undersøgelse", "undersøgelse");
checkOneTermReuse(a, "undersøg", "undersøg");
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -29,38 +29,38 @@
public class TestGermanAnalyzer extends BaseTokenStreamTestCase {
public void testReusableTokenStream() throws Exception {
- Analyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "Tisch", "tisch");
checkOneTermReuse(a, "Tische", "tisch");
checkOneTermReuse(a, "Tischen", "tisch");
}
public void testExclusionTableBWCompat() throws IOException {
- GermanStemFilter filter = new GermanStemFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT,
+ GermanStemFilter filter = new GermanStemFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT,
new StringReader("Fischen Trinken")));
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
filter.setExclusionSet(set);
assertTokenStreamContents(filter, new String[] { "fischen", "trink" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
GermanStemFilter filter = new GermanStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Fischen Trinken")), set));
assertTokenStreamContents(filter, new String[] { "fischen", "trink" });
}
public void testWithKeywordAttributeAndExclusionTable() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
- CharArraySet set1 = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set1 = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set1.add("trinken");
set1.add("fischen");
GermanStemFilter filter = new GermanStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Fischen Trinken")), set));
filter.setExclusionSet(set1);
assertTokenStreamContents(filter, new String[] { "fischen", "trinken" });
@@ -71,7 +71,7 @@
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- GermanAnalyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ GermanAnalyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "tischen", "tisch");
a.setStemExclusionTable(new String[] { "tischen" });
checkOneTermReuse(a, "tischen", "tischen");
@@ -81,7 +81,7 @@
* these only pass with LUCENE_CURRENT, not if you use o.a.l.a.de.GermanStemmer
*/
public void testGermanSpecials() throws Exception {
- GermanAnalyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ GermanAnalyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
// a/o/u + e is equivalent to the umlaut form
checkOneTermReuse(a, "Schaltflächen", "schaltflach");
checkOneTermReuse(a, "Schaltflaechen", "schaltflach");
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java Sat Feb 27 22:51:44 2010
@@ -28,7 +28,6 @@
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the German stemmer. The stemming algorithm is known to work less
@@ -40,7 +39,7 @@
public void testStemming() throws Exception {
Tokenizer tokenizer = new KeywordTokenizer(new StringReader(""));
- TokenFilter filter = new GermanStemFilter(new LowerCaseFilter(Version.LUCENE_CURRENT, tokenizer));
+ TokenFilter filter = new GermanStemFilter(new LowerCaseFilter(TEST_VERSION_CURRENT, tokenizer));
// read test cases from external file:
File dataDir = new File(System.getProperty("dataDir", "./bin"));
File testFile = new File(dataDir, "org/apache/lucene/analysis/de/data.txt");
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java Sat Feb 27 22:51:44 2010
@@ -32,7 +32,7 @@
* @throws Exception in case an error occurs
*/
public void testAnalyzer() throws Exception {
- Analyzer a = new GreekAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GreekAnalyzer(TEST_VERSION_CURRENT);
// Verify the correct analysis of capitals and small accented letters
assertAnalyzesTo(a, "\u039c\u03af\u03b1 \u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03ac \u03ba\u03b1\u03bb\u03ae \u03ba\u03b1\u03b9 \u03c0\u03bb\u03bf\u03cd\u03c3\u03b9\u03b1 \u03c3\u03b5\u03b9\u03c1\u03ac \u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03ae\u03c1\u03c9\u03bd \u03c4\u03b7\u03c2 \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae\u03c2 \u03b3\u03bb\u03ce\u03c3\u03c3\u03b1\u03c2",
new String[] { "\u03bc\u03b9\u03b1", "\u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03b1", "\u03ba\u03b1\u03bb\u03b7", "\u03c0\u03bb\u03bf\u03c5\u03c3\u03b9\u03b1", "\u03c3\u03b5\u03b9\u03c1\u03b1", "\u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03b7\u03c1\u03c9\u03bd",
@@ -48,7 +48,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new GreekAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GreekAnalyzer(TEST_VERSION_CURRENT);
// Verify the correct analysis of capitals and small accented letters
assertAnalyzesToReuse(a, "\u039c\u03af\u03b1 \u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03ac \u03ba\u03b1\u03bb\u03ae \u03ba\u03b1\u03b9 \u03c0\u03bb\u03bf\u03cd\u03c3\u03b9\u03b1 \u03c3\u03b5\u03b9\u03c1\u03ac \u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03ae\u03c1\u03c9\u03bd \u03c4\u03b7\u03c2 \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae\u03c2 \u03b3\u03bb\u03ce\u03c3\u03c3\u03b1\u03c2",
new String[] { "\u03bc\u03b9\u03b1", "\u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03b1", "\u03ba\u03b1\u03bb\u03b7", "\u03c0\u03bb\u03bf\u03c5\u03c3\u03b9\u03b1", "\u03c3\u03b5\u03b9\u03c1\u03b1", "\u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03b7\u03c1\u03c9\u03bd",
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestEnglishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new EnglishAnalyzer(Version.LUCENE_CURRENT);
+ new EnglishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new EnglishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new EnglishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "books", "book");
checkOneTermReuse(a, "book", "book");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("books");
- Analyzer a = new EnglishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new EnglishAnalyzer(TEST_VERSION_CURRENT,
EnglishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "books", "books");
checkOneTermReuse(a, "book", "book");
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestSpanishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new SpanishAnalyzer(Version.LUCENE_CURRENT);
+ new SpanishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new SpanishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new SpanishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "chicana", "chican");
checkOneTermReuse(a, "chicano", "chican");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("chicano");
- Analyzer a = new SpanishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new SpanishAnalyzer(TEST_VERSION_CURRENT,
SpanishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "chicana", "chican");
checkOneTermReuse(a, "chicano", "chicano");
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -19,7 +19,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.util.Version;
/**
* Test the Persian Analyzer
@@ -31,7 +30,7 @@
* This test fails with NPE when the stopwords file is missing in classpath
*/
public void testResourcesAvailable() {
- new PersianAnalyzer(Version.LUCENE_CURRENT);
+ new PersianAnalyzer(TEST_VERSION_CURRENT);
}
/**
@@ -42,7 +41,7 @@
* These verb forms are from http://en.wikipedia.org/wiki/Persian_grammar
*/
public void testBehaviorVerbs() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
// active present indicative
assertAnalyzesTo(a, "Ù
ÛâØ®Ùرد", new String[] { "Ø®Ùرد" });
// active preterite indicative
@@ -118,7 +117,7 @@
* These verb forms are from http://en.wikipedia.org/wiki/Persian_grammar
*/
public void testBehaviorVerbsDefective() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
// active present indicative
assertAnalyzesTo(a, "Ù
Ù Ø®Ùرد", new String[] { "Ø®Ùرد" });
// active preterite indicative
@@ -189,7 +188,7 @@
* nouns, removing the plural -ha.
*/
public void testBehaviorNouns() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "برگ Ùا", new String[] { "برگ" });
assertAnalyzesTo(a, "برگâÙا", new String[] { "برگ" });
}
@@ -199,7 +198,7 @@
* (lowercased, etc)
*/
public void testBehaviorNonPersian() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "English test.", new String[] { "english", "test" });
}
@@ -207,7 +206,7 @@
* Basic test ensuring that reusableTokenStream works correctly.
*/
public void testReusableTokenStream() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "Ø®Ùرد٠Ù
٠شد٠بÙد٠باشد", new String[] { "Ø®ÙردÙ" });
assertAnalyzesToReuse(a, "برگâÙا", new String[] { "برگ" });
}
@@ -216,7 +215,7 @@
* Test that custom stopwords work, and are not case-sensitive.
*/
public void testCustomStopwords() throws Exception {
- PersianAnalyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT, new String[] { "the", "and", "a" });
+ PersianAnalyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT, new String[] { "the", "and", "a" });
assertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick",
"brown", "fox" });
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java Sat Feb 27 22:51:44 2010
@@ -22,7 +22,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.ar.ArabicLetterTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Persian Normalization Filter
@@ -55,7 +54,7 @@
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT,
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
PersianNormalizationFilter filter = new PersianNormalizationFilter(
tokenStream);
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestFinnishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new FinnishAnalyzer(Version.LUCENE_CURRENT);
+ new FinnishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new FinnishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new FinnishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "edeltäjiinsä", "edeltäj");
checkOneTermReuse(a, "edeltäjistään", "edeltäj");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("edeltäjistään");
- Analyzer a = new FinnishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new FinnishAnalyzer(TEST_VERSION_CURRENT,
FinnishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "edeltäjiinsä", "edeltäj");
checkOneTermReuse(a, "edeltäjistään", "edeltäjistään");
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java Sat Feb 27 22:51:44 2010
@@ -29,7 +29,6 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
/**
*
@@ -38,19 +37,19 @@
public void testElision() throws Exception {
String test = "Plop, juste pour voir l'embrouille avec O'brian. M'enfin.";
- Tokenizer tokenizer = new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(test));
- Set articles = new HashSet();
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(test));
+ Set<String> articles = new HashSet<String>();
articles.add("l");
articles.add("M");
- TokenFilter filter = new ElisionFilter(Version.LUCENE_CURRENT, tokenizer, articles);
- List tas = filtre(filter);
+ TokenFilter filter = new ElisionFilter(TEST_VERSION_CURRENT, tokenizer, articles);
+ List<String> tas = filter(filter);
assertEquals("embrouille", tas.get(4));
assertEquals("O'brian", tas.get(6));
assertEquals("enfin", tas.get(7));
}
- private List filtre(TokenFilter filter) throws IOException {
- List tas = new ArrayList();
+ private List<String> filter(TokenFilter filter) throws IOException {
+ List<String> tas = new ArrayList<String>();
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
while (filter.incrementToken()) {
tas.add(termAtt.term());
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -32,7 +32,7 @@
public class TestFrenchAnalyzer extends BaseTokenStreamTestCase {
public void testAnalyzer() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(fa, "", new String[] {
});
@@ -204,7 +204,7 @@
}
public void testReusableTokenStream() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
// stopwords
assertAnalyzesToReuse(
fa,
@@ -229,27 +229,27 @@
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(fa, "habitable", new String[] { "habit" });
fa.setStemExclusionTable(new String[] { "habitable" });
assertAnalyzesToReuse(fa, "habitable", new String[] { "habitable" });
}
public void testExclusionTableViaCtor() throws Exception {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("habitable");
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT,
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT,
CharArraySet.EMPTY_SET, set);
assertAnalyzesToReuse(fa, "habitable chiste", new String[] { "habitable",
"chist" });
- fa = new FrenchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ fa = new FrenchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(fa, "habitable chiste", new String[] { "habitable",
"chist" });
}
public void testElision() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(fa, "voir l'embrouille", new String[] { "voir", "embrouill" });
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java Sat Feb 27 22:51:44 2010
@@ -5,7 +5,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -31,11 +30,11 @@
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new HindiAnalyzer(Version.LUCENE_CURRENT);
+ new HindiAnalyzer(TEST_VERSION_CURRENT);
}
public void testBasics() throws Exception {
- Analyzer a = new HindiAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new HindiAnalyzer(TEST_VERSION_CURRENT);
// two ways to write 'hindi' itself.
checkOneTermReuse(a, "हिनà¥à¤¦à¥", "हिà¤à¤¦");
checkOneTermReuse(a, "हिà¤à¤¦à¥", "हिà¤à¤¦");
@@ -44,7 +43,7 @@
public void testExclusionSet() throws Exception {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("हिà¤à¤¦à¥");
- Analyzer a = new HindiAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new HindiAnalyzer(TEST_VERSION_CURRENT,
HindiAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "हिà¤à¤¦à¥", "हिà¤à¤¦à¥");
}
Modified: lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java?rev=917067&r1=917066&r2=917067&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java (original)
+++ lucene/java/branches/flex_1458/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java Sat Feb 27 22:51:44 2010
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test HindiNormalizer
@@ -60,7 +59,7 @@
check("à¤à¤à¤à¥ ॡà¤à¤à¥à¥à¥à¥£à¥à¥", "à¤
à¤à¤à¤à¤à¤à¤à¤¿à¥à¥à¥¢à¥à¥");
}
private void check(String input, String output) throws IOException {
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
TokenFilter tf = new HindiNormalizationFilter(tokenizer);
assertTokenStreamContents(tf, new String[] { output });