You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rj...@apache.org on 2014/08/09 00:42:54 UTC
svn commit: r1616901 [8/11] - in /lucene/dev/trunk: lucene/
lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/
lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/
lucene/analysis/common/src/java/org/apache/lucene/analysis/br/ luc...
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java Fri Aug 8 22:42:48 2014
@@ -22,7 +22,6 @@ import java.io.IOException;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.util.CharArraySet;
-import org.apache.lucene.util.Version;
/**
* Test case for RussianAnalyzer.
@@ -33,12 +32,12 @@ public class TestRussianAnalyzer extends
/** Check that RussianAnalyzer doesnt discard any numbers */
public void testDigitsInRussianCharset() throws IOException
{
- RussianAnalyzer ra = new RussianAnalyzer(TEST_VERSION_CURRENT);
+ RussianAnalyzer ra = new RussianAnalyzer();
assertAnalyzesTo(ra, "text 1000", new String[] { "text", "1000" });
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT);
+ Analyzer a = new RussianAnalyzer();
assertAnalyzesTo(a, "Вместе с тем о силе электромагнитной энергии имели представление еще",
new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представлен" });
assertAnalyzesTo(a, "Но знание это хранилось в тайне",
@@ -47,9 +46,9 @@ public class TestRussianAnalyzer extends
public void testWithStemExclusionSet() throws Exception {
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet( 1, true);
set.add("представление");
- Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT, RussianAnalyzer.getDefaultStopSet() , set);
+ Analyzer a = new RussianAnalyzer( RussianAnalyzer.getDefaultStopSet() , set);
assertAnalyzesTo(a, "Вместе с тем о силе электромагнитной энергии имели представление еще",
new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представление" });
@@ -57,6 +56,6 @@ public class TestRussianAnalyzer extends
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new RussianAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+ checkRandomData(random(), new RussianAnalyzer(), 1000*RANDOM_MULTIPLIER);
}
}
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java Fri Aug 8 22:42:48 2014
@@ -49,7 +49,7 @@ public class TestRussianLightStemFilter
}
public void testKeyword() throws IOException {
- final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("энергии"), false);
+ final CharArraySet exclusionSet = new CharArraySet( asSet("энергии"), false);
Analyzer a = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Fri Aug 8 22:42:48 2014
@@ -309,9 +309,9 @@ public class ShingleAnalyzerWrapperTest
Analyzer delegate = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
- CharArraySet stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, "into");
+ CharArraySet stopSet = StopFilter.makeStopSet("into");
Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
- TokenFilter filter = new StopFilter(TEST_VERSION_CURRENT, tokenizer, stopSet);
+ TokenFilter filter = new StopFilter(tokenizer, stopSet);
return new TokenStreamComponents(tokenizer, filter);
}
};
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java Fri Aug 8 22:42:48 2014
@@ -981,7 +981,7 @@ public class ShingleFilterTest extends B
}
public void testReset() throws Exception {
- Tokenizer wsTokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT);
+ Tokenizer wsTokenizer = new WhitespaceTokenizer();
wsTokenizer.setReader(new StringReader("please divide this sentence"));
TokenStream filter = new ShingleFilter(wsTokenizer, 2);
assertTokenStreamContents(filter,
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java Fri Aug 8 22:42:48 2014
@@ -165,7 +165,7 @@ public class TestTeeSinkTokenFilter exte
assertTokenStreamContents(dogDetector, new String[]{"Dogs", "Dogs"});
source1.reset();
- TokenStream lowerCasing = new LowerCaseFilter(TEST_VERSION_CURRENT, source1);
+ TokenStream lowerCasing = new LowerCaseFilter(source1);
String[] lowerCaseTokens = new String[tokens1.length];
for (int i = 0; i < tokens1.length; i++)
lowerCaseTokens[i] = tokens1[i].toLowerCase(Locale.ROOT);
@@ -173,7 +173,7 @@ public class TestTeeSinkTokenFilter exte
}
private StandardTokenizer standardTokenizer(StringBuilder builder) throws IOException {
- StandardTokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT);
+ StandardTokenizer tokenizer = new StandardTokenizer();
tokenizer.setReader(new StringReader(builder.toString()));
return tokenizer;
}
@@ -191,10 +191,10 @@ public class TestTeeSinkTokenFilter exte
buffer.append(English.intToEnglish(i).toUpperCase(Locale.ROOT)).append(' ');
}
//make sure we produce the same tokens
- TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(TEST_VERSION_CURRENT, standardTokenizer(buffer)));
+ TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(standardTokenizer(buffer)));
TokenStream sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(100));
teeStream.consumeAllTokens();
- TokenStream stream = new ModuloTokenFilter(new StandardFilter(TEST_VERSION_CURRENT, standardTokenizer(buffer)), 100);
+ TokenStream stream = new ModuloTokenFilter(new StandardFilter(standardTokenizer(buffer)), 100);
CharTermAttribute tfTok = stream.addAttribute(CharTermAttribute.class);
CharTermAttribute sinkTok = sink.addAttribute(CharTermAttribute.class);
for (int i=0; stream.incrementToken(); i++) {
@@ -207,12 +207,12 @@ public class TestTeeSinkTokenFilter exte
int tfPos = 0;
long start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
- stream = new StandardFilter(TEST_VERSION_CURRENT, standardTokenizer(buffer));
+ stream = new StandardFilter(standardTokenizer(buffer));
PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
}
- stream = new ModuloTokenFilter(new StandardFilter(TEST_VERSION_CURRENT, standardTokenizer(buffer)), modCounts[j]);
+ stream = new ModuloTokenFilter(new StandardFilter(standardTokenizer(buffer)), modCounts[j]);
posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
@@ -224,7 +224,7 @@ public class TestTeeSinkTokenFilter exte
//simulate one field with one sink
start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
- teeStream = new TeeSinkTokenFilter(new StandardFilter(TEST_VERSION_CURRENT, standardTokenizer(buffer)));
+ teeStream = new TeeSinkTokenFilter(new StandardFilter( standardTokenizer(buffer)));
sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(modCounts[j]));
PositionIncrementAttribute posIncrAtt = teeStream.getAttribute(PositionIncrementAttribute.class);
while (teeStream.incrementToken()) {
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java Fri Aug 8 22:42:48 2014
@@ -27,12 +27,12 @@ public class TestSwedishAnalyzer extends
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new SwedishAnalyzer(TEST_VERSION_CURRENT);
+ new SwedishAnalyzer();
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT);
+ Analyzer a = new SwedishAnalyzer();
// stemming
checkOneTerm(a, "jaktkarlarne", "jaktkarl");
checkOneTerm(a, "jaktkarlens", "jaktkarl");
@@ -42,8 +42,8 @@ public class TestSwedishAnalyzer extends
/** test use of exclusion set */
public void testExclude() throws IOException {
- CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("jaktkarlarne"), false);
- Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT,
+ CharArraySet exclusionSet = new CharArraySet( asSet("jaktkarlarne"), false);
+ Analyzer a = new SwedishAnalyzer(
SwedishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTerm(a, "jaktkarlarne", "jaktkarlarne");
checkOneTerm(a, "jaktkarlens", "jaktkarl");
@@ -51,6 +51,6 @@ public class TestSwedishAnalyzer extends
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new SwedishAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+ checkRandomData(random(), new SwedishAnalyzer(), 1000*RANDOM_MULTIPLIER);
}
}
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java Fri Aug 8 22:42:48 2014
@@ -49,7 +49,7 @@ public class TestSwedishLightStemFilter
}
public void testKeyword() throws IOException {
- final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("jaktkarlens"), false);
+ final CharArraySet exclusionSet = new CharArraySet( asSet("jaktkarlens"), false);
Analyzer a = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java Fri Aug 8 22:42:48 2014
@@ -100,7 +100,7 @@ public class TestSolrSynonymParser exten
@Test(expected=ParseException.class)
public void testInvalidPositionsInput() throws Exception {
String testFile = "testola => the test";
- SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer(TEST_VERSION_CURRENT));
+ SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer());
parser.parse(new StringReader(testFile));
}
@@ -108,7 +108,7 @@ public class TestSolrSynonymParser exten
@Test(expected=ParseException.class)
public void testInvalidPositionsOutput() throws Exception {
String testFile = "the test => testola";
- SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer(TEST_VERSION_CURRENT));
+ SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer());
parser.parse(new StringReader(testFile));
}
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java Fri Aug 8 22:42:48 2014
@@ -46,14 +46,14 @@ public class TestThaiAnalyzer extends Ba
* testcase for offsets
*/
public void testOffsets() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET), "การที่ได้ต้องแสดงว่างานดี",
+ assertAnalyzesTo(new ThaiAnalyzer(CharArraySet.EMPTY_SET), "การที่ได้ต้องแสดงว่างานดี",
new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
new int[] { 0, 3, 6, 9, 13, 17, 20, 23 },
new int[] { 3, 6, 9, 13, 17, 20, 23, 25 });
}
public void testStopWords() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "การที่ได้ต้องแสดงว่างานดี",
+ assertAnalyzesTo(new ThaiAnalyzer(), "การที่ได้ต้องแสดงว่างานดี",
new String[] { "แสดง", "งาน", "ดี" },
new int[] { 13, 20, 23 },
new int[] { 17, 23, 25 },
@@ -65,7 +65,7 @@ public class TestThaiAnalyzer extends Ba
*/
// note this test uses stopfilter's stopset
public void testPositionIncrements() throws Exception {
- final ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ final ThaiAnalyzer analyzer = new ThaiAnalyzer(StopAnalyzer.ENGLISH_STOP_WORDS_SET);
assertAnalyzesTo(analyzer, "การที่ได้ต้อง the แสดงว่างานดี",
new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
new int[] { 0, 3, 6, 9, 18, 22, 25, 28 },
@@ -81,7 +81,7 @@ public class TestThaiAnalyzer extends Ba
}
public void testReusableTokenStream() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(CharArraySet.EMPTY_SET);
assertAnalyzesTo(analyzer, "", new String[] {});
assertAnalyzesTo(
@@ -97,18 +97,18 @@ public class TestThaiAnalyzer extends Ba
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new ThaiAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+ checkRandomData(random(), new ThaiAnalyzer(), 1000*RANDOM_MULTIPLIER);
}
/** blast some random large strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
Random random = random();
- checkRandomData(random, new ThaiAnalyzer(TEST_VERSION_CURRENT), 100*RANDOM_MULTIPLIER, 8192);
+ checkRandomData(random, new ThaiAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
}
// LUCENE-3044
public void testAttributeReuse() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer();
// just consume
TokenStream ts = analyzer.tokenStream("dummy", "ภาษาไทย");
assertTokenStreamContents(ts, new String[] { "ภาษา", "ไทย" });
@@ -119,7 +119,7 @@ public class TestThaiAnalyzer extends Ba
}
public void testTwoSentences() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET), "This is a test. การที่ได้ต้องแสดงว่างานดี",
+ assertAnalyzesTo(new ThaiAnalyzer(CharArraySet.EMPTY_SET), "This is a test. การที่ได้ต้องแสดงว่างานดี",
new String[] { "this", "is", "a", "test", "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
new int[] { 0, 5, 8, 10, 16, 19, 22, 25, 29, 33, 36, 39 },
new int[] { 4, 7, 9, 14, 19, 22, 25, 29, 33, 36, 39, 41 });
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java Fri Aug 8 22:42:48 2014
@@ -27,12 +27,12 @@ public class TestTurkishAnalyzer extends
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new TurkishAnalyzer(TEST_VERSION_CURRENT);
+ new TurkishAnalyzer();
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT);
+ Analyzer a = new TurkishAnalyzer();
// stemming
checkOneTerm(a, "ağacı", "ağaç");
checkOneTerm(a, "ağaç", "ağaç");
@@ -45,15 +45,14 @@ public class TestTurkishAnalyzer extends
/** test use of exclusion set */
public void testExclude() throws IOException {
- CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("ağacı"), false);
- Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT,
- TurkishAnalyzer.getDefaultStopSet(), exclusionSet);
+ CharArraySet exclusionSet = new CharArraySet(asSet("ağacı"), false);
+ Analyzer a = new TurkishAnalyzer(TurkishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTerm(a, "ağacı", "ağacı");
checkOneTerm(a, "ağaç", "ağaç");
}
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new TurkishAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+ checkRandomData(random(), new TurkishAnalyzer(), 1000*RANDOM_MULTIPLIER);
}
}
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java Fri Aug 8 22:42:48 2014
@@ -25,7 +25,7 @@ import org.apache.lucene.util.LuceneTest
public class TestCharArrayMap extends LuceneTestCase {
public void doRandom(int iter, boolean ignoreCase) {
- CharArrayMap<Integer> map = new CharArrayMap<>(TEST_VERSION_CURRENT, 1, ignoreCase);
+ CharArrayMap<Integer> map = new CharArrayMap<>(1, ignoreCase);
HashMap<String,Integer> hmap = new HashMap<>();
char[] key;
@@ -64,7 +64,7 @@ public class TestCharArrayMap extends Lu
}
public void testMethods() {
- CharArrayMap<Integer> cm = new CharArrayMap<>(TEST_VERSION_CURRENT, 2, false);
+ CharArrayMap<Integer> cm = new CharArrayMap<>(2, false);
HashMap<String,Integer> hm = new HashMap<>();
hm.put("foo",1);
hm.put("bar",2);
@@ -133,7 +133,7 @@ public class TestCharArrayMap extends Lu
}
public void testModifyOnUnmodifiable(){
- CharArrayMap<Integer> map = new CharArrayMap<>(TEST_VERSION_CURRENT, 2, false);
+ CharArrayMap<Integer> map = new CharArrayMap<>(2, false);
map.put("foo",1);
map.put("bar",2);
final int size = map.size();
@@ -230,7 +230,7 @@ public class TestCharArrayMap extends Lu
}
public void testToString() {
- CharArrayMap<Integer> cm = new CharArrayMap<>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
+ CharArrayMap<Integer> cm = new CharArrayMap<>(Collections.singletonMap("test",1), false);
assertEquals("[test]",cm.keySet().toString());
assertEquals("[1]",cm.values().toString());
assertEquals("[test=1]",cm.entrySet().toString());
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java Fri Aug 8 22:42:48 2014
@@ -20,7 +20,6 @@ package org.apache.lucene.analysis.util;
import java.util.*;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestCharArraySet extends LuceneTestCase {
@@ -35,7 +34,7 @@ public class TestCharArraySet extends Lu
public void testRehash() throws Exception {
- CharArraySet cas = new CharArraySet(TEST_VERSION_CURRENT, 0, true);
+ CharArraySet cas = new CharArraySet(0, true);
for(int i=0;i<TEST_STOP_WORDS.length;i++)
cas.add(TEST_STOP_WORDS[i]);
assertEquals(TEST_STOP_WORDS.length, cas.size());
@@ -46,7 +45,7 @@ public class TestCharArraySet extends Lu
public void testNonZeroOffset() {
String[] words={"Hello","World","this","is","a","test"};
char[] findme="xthisy".toCharArray();
- CharArraySet set= new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet set= new CharArraySet(10, true);
set.addAll(Arrays.asList(words));
assertTrue(set.contains(findme, 1, 4));
assertTrue(set.contains(new String(findme,1,4)));
@@ -58,7 +57,7 @@ public class TestCharArraySet extends Lu
}
public void testObjectContains() {
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet set = new CharArraySet(10, true);
Integer val = Integer.valueOf(1);
set.add(val);
assertTrue(set.contains(val));
@@ -74,7 +73,7 @@ public class TestCharArraySet extends Lu
}
public void testClear(){
- CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
+ CharArraySet set=new CharArraySet(10,true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
assertEquals("Not all words added", TEST_STOP_WORDS.length, set.size());
set.clear();
@@ -88,7 +87,7 @@ public class TestCharArraySet extends Lu
}
public void testModifyOnUnmodifiable(){
- CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet set=new CharArraySet(10, true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
final int size = set.size();
set = CharArraySet.unmodifiableSet(set);
@@ -144,7 +143,7 @@ public class TestCharArraySet extends Lu
// current key (now a char[]) on a Set<String> would not hit any element of the CAS and therefor never call
// remove() on the iterator
try{
- set.removeAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
+ set.removeAll(new CharArraySet(Arrays.asList(TEST_STOP_WORDS), true));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected
@@ -152,7 +151,7 @@ public class TestCharArraySet extends Lu
}
try{
- set.retainAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(NOT_IN_SET), true));
+ set.retainAll(new CharArraySet(Arrays.asList(NOT_IN_SET), true));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected
@@ -173,7 +172,7 @@ public class TestCharArraySet extends Lu
}
public void testUnmodifiableSet(){
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10,true);
+ CharArraySet set = new CharArraySet(10,true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
set.add(Integer.valueOf(1));
final int size = set.size();
@@ -203,7 +202,7 @@ public class TestCharArraySet extends Lu
"\ud801\udc1c\ud801\udc1cCDE", "A\ud801\udc1cB"};
String[] lowerArr = new String[] {"abc\ud801\udc44",
"\ud801\udc44\ud801\udc44cde", "a\ud801\udc44b"};
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true);
+ CharArraySet set = new CharArraySet(Arrays.asList(TEST_STOP_WORDS), true);
for (String upper : upperArr) {
set.add(upper);
}
@@ -211,7 +210,7 @@ public class TestCharArraySet extends Lu
assertTrue(String.format(Locale.ROOT, missing, upperArr[i]), set.contains(upperArr[i]));
assertTrue(String.format(Locale.ROOT, missing, lowerArr[i]), set.contains(lowerArr[i]));
}
- set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), false);
+ set = new CharArraySet(Arrays.asList(TEST_STOP_WORDS), false);
for (String upper : upperArr) {
set.add(upper);
}
@@ -229,7 +228,7 @@ public class TestCharArraySet extends Lu
String[] lowerArr = new String[] { "abc\uD800", "abc\uD800efg",
"\uD800efg", "\uD800\ud801\udc44b" };
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays
+ CharArraySet set = new CharArraySet(Arrays
.asList(TEST_STOP_WORDS), true);
for (String upper : upperArr) {
set.add(upper);
@@ -238,7 +237,7 @@ public class TestCharArraySet extends Lu
assertTrue(String.format(Locale.ROOT, missing, upperArr[i]), set.contains(upperArr[i]));
assertTrue(String.format(Locale.ROOT, missing, lowerArr[i]), set.contains(lowerArr[i]));
}
- set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS),
+ set = new CharArraySet(Arrays.asList(TEST_STOP_WORDS),
false);
for (String upper : upperArr) {
set.add(upper);
@@ -252,8 +251,8 @@ public class TestCharArraySet extends Lu
@SuppressWarnings("deprecated")
public void testCopyCharArraySetBWCompat() {
- CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
- CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
+ CharArraySet setIngoreCase = new CharArraySet(10, true);
+ CharArraySet setCaseSensitive = new CharArraySet(10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
List<String> stopwordsUpper = new ArrayList<>();
@@ -265,8 +264,8 @@ public class TestCharArraySet extends Lu
setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
setCaseSensitive.add(Integer.valueOf(1));
- CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
- CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
+ CharArraySet copy = CharArraySet.copy(setIngoreCase);
+ CharArraySet copyCaseSens = CharArraySet.copy(setCaseSensitive);
assertEquals(setIngoreCase.size(), copy.size());
assertEquals(setCaseSensitive.size(), copy.size());
@@ -299,8 +298,8 @@ public class TestCharArraySet extends Lu
* Test the static #copy() function with a CharArraySet as a source
*/
public void testCopyCharArraySet() {
- CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
- CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
+ CharArraySet setIngoreCase = new CharArraySet(10, true);
+ CharArraySet setCaseSensitive = new CharArraySet(10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
List<String> stopwordsUpper = new ArrayList<>();
@@ -312,8 +311,8 @@ public class TestCharArraySet extends Lu
setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
setCaseSensitive.add(Integer.valueOf(1));
- CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
- CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
+ CharArraySet copy = CharArraySet.copy(setIngoreCase);
+ CharArraySet copyCaseSens = CharArraySet.copy(setCaseSensitive);
assertEquals(setIngoreCase.size(), copy.size());
assertEquals(setCaseSensitive.size(), copy.size());
@@ -355,7 +354,7 @@ public class TestCharArraySet extends Lu
}
set.addAll(Arrays.asList(TEST_STOP_WORDS));
- CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, set);
+ CharArraySet copy = CharArraySet.copy(set);
assertEquals(set.size(), copy.size());
assertEquals(set.size(), copy.size());
@@ -380,12 +379,12 @@ public class TestCharArraySet extends Lu
}
/**
- * Tests a special case of {@link CharArraySet#copy(Version, Set)} where the
+ * Tests a special case of {@link CharArraySet#copy(Set)} where the
* set to copy is the {@link CharArraySet#EMPTY_SET}
*/
public void testCopyEmptySet() {
assertSame(CharArraySet.EMPTY_SET,
- CharArraySet.copy(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET));
+ CharArraySet.copy(CharArraySet.EMPTY_SET));
}
/**
@@ -408,7 +407,7 @@ public class TestCharArraySet extends Lu
* Test for NPE
*/
public void testContainsWithNull() {
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(1, true);
try {
set.contains((char[]) null, 0, 10);
fail("null value must raise NPE");
@@ -424,7 +423,7 @@ public class TestCharArraySet extends Lu
}
public void testToString() {
- CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, Collections.singleton("test"));
+ CharArraySet set = CharArraySet.copy(Collections.singleton("test"));
assertEquals("[test]", set.toString());
set.add("test2");
assertTrue(set.toString().contains(", "));
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java Fri Aug 8 22:42:48 2014
@@ -52,7 +52,7 @@ public class TestCharTokenizers extends
}
// internal buffer size is 1024 make sure we have a surrogate pair right at the border
builder.insert(1023, "\ud801\udc1c");
- Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, newAttributeFactory());
+ Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory());
tokenizer.setReader(new StringReader(builder.toString()));
assertTokenStreamContents(tokenizer, builder.toString().toLowerCase(Locale.ROOT).split(" "));
}
@@ -70,7 +70,7 @@ public class TestCharTokenizers extends
builder.append("a");
}
builder.append("\ud801\udc1cabc");
- Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, newAttributeFactory());
+ Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory());
tokenizer.setReader(new StringReader(builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(Locale.ROOT)});
}
@@ -85,7 +85,7 @@ public class TestCharTokenizers extends
for (int i = 0; i < 255; i++) {
builder.append("A");
}
- Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, newAttributeFactory());
+ Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory());
tokenizer.setReader(new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(Locale.ROOT), builder.toString().toLowerCase(Locale.ROOT)});
}
@@ -100,7 +100,7 @@ public class TestCharTokenizers extends
builder.append("A");
}
builder.append("\ud801\udc1c");
- Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, newAttributeFactory());
+ Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory());
tokenizer.setReader(new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(Locale.ROOT), builder.toString().toLowerCase(Locale.ROOT)});
}
@@ -110,7 +110,7 @@ public class TestCharTokenizers extends
Analyzer analyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
- Tokenizer tokenizer = new LetterTokenizer(TEST_VERSION_CURRENT, newAttributeFactory()) {
+ Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory()) {
@Override
protected int normalize(int c) {
if (c > 0xffff) {
@@ -148,7 +148,7 @@ public class TestCharTokenizers extends
Analyzer analyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
- Tokenizer tokenizer = new LetterTokenizer(TEST_VERSION_CURRENT, newAttributeFactory()) {
+ Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory()) {
@Override
protected int normalize(int c) {
if (c <= 0xffff) {
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java Fri Aug 8 22:42:48 2014
@@ -46,7 +46,7 @@ public class TestCharacterUtils extends
} catch (IndexOutOfBoundsException e) {
}
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3));
@@ -68,7 +68,7 @@ public class TestCharacterUtils extends
assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3, 5));
assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3, 4));
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
assertEquals((int) 'A', java5.codePointAt(cpAt3, 0, 2));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3, 5));
@@ -78,7 +78,7 @@ public class TestCharacterUtils extends
@Test
public void testCodePointCount() {
CharacterUtils java4 = CharacterUtils.getJava4Instance();
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
final String s = TestUtil.randomUnicodeString(random());
assertEquals(s.length(), java4.codePointCount(s));
assertEquals(Character.codePointCount(s, 0, s.length()), java5.codePointCount(s));
@@ -87,7 +87,7 @@ public class TestCharacterUtils extends
@Test
public void testOffsetByCodePoint() {
CharacterUtils java4 = CharacterUtils.getJava4Instance();
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
for (int i = 0; i < 10; ++i) {
final char[] s = TestUtil.randomUnicodeString(random()).toCharArray();
final int index = TestUtil.nextInt(random(), 0, s.length);
@@ -119,7 +119,7 @@ public class TestCharacterUtils extends
public void testConversions() {
CharacterUtils java4 = CharacterUtils.getJava4Instance();
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
testConversions(java4);
testConversions(java5);
}
@@ -159,7 +159,7 @@ public class TestCharacterUtils extends
@Test
public void testFillNoHighSurrogate() throws IOException {
CharacterUtils versions[] = new CharacterUtils[] {
- CharacterUtils.getInstance(TEST_VERSION_CURRENT),
+ CharacterUtils.getInstance(),
CharacterUtils.getJava4Instance() };
for (CharacterUtils instance : versions) {
Reader reader = new StringReader("helloworld");
@@ -181,7 +181,7 @@ public class TestCharacterUtils extends
@Test
public void testFillJava15() throws IOException {
String input = "1234\ud801\udc1c789123\ud801\ud801\udc1c\ud801";
- CharacterUtils instance = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils instance = CharacterUtils.getInstance();
Reader reader = new StringReader(input);
CharacterBuffer buffer = CharacterUtils.newCharacterBuffer(5);
assertTrue(instance.fill(buffer, reader));
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java Fri Aug 8 22:42:48 2014
@@ -40,9 +40,9 @@ public class TestElision extends BaseTok
public void testElision() throws Exception {
String test = "Plop, juste pour voir l'embrouille avec O'brian. M'enfin.";
- Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, newAttributeFactory());
+ Tokenizer tokenizer = new StandardTokenizer(newAttributeFactory());
tokenizer.setReader(new StringReader(test));
- CharArraySet articles = new CharArraySet(TEST_VERSION_CURRENT, asSet("l", "M"), false);
+ CharArraySet articles = new CharArraySet(asSet("l", "M"), false);
TokenFilter filter = new ElisionFilter(tokenizer, articles);
List<String> tas = filter(filter);
assertEquals("embrouille", tas.get(4));
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestFilesystemResourceLoader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestFilesystemResourceLoader.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestFilesystemResourceLoader.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestFilesystemResourceLoader.java Fri Aug 8 22:42:48 2014
@@ -50,8 +50,7 @@ public class TestFilesystemResourceLoade
private void assertClasspathDelegation(ResourceLoader rl) throws Exception {
// try a stopwords file from classpath
CharArraySet set = WordlistLoader.getSnowballWordSet(
- new InputStreamReader(rl.openResource("org/apache/lucene/analysis/snowball/english_stop.txt"), StandardCharsets.UTF_8),
- TEST_VERSION_CURRENT
+ new InputStreamReader(rl.openResource("org/apache/lucene/analysis/snowball/english_stop.txt"), StandardCharsets.UTF_8)
);
assertTrue(set.contains("you"));
// try to load a class; we use string comparison because classloader may be different...
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java Fri Aug 8 22:42:48 2014
@@ -29,15 +29,15 @@ public class TestWordlistLoader extends
public void testWordlistLoading() throws IOException {
String s = "ONE\n two \nthree";
- CharArraySet wordSet1 = WordlistLoader.getWordSet(new StringReader(s), TEST_VERSION_CURRENT);
+ CharArraySet wordSet1 = WordlistLoader.getWordSet(new StringReader(s));
checkSet(wordSet1);
- CharArraySet wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)), TEST_VERSION_CURRENT);
+ CharArraySet wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)));
checkSet(wordSet2);
}
public void testComments() throws Exception {
String s = "ONE\n two \nthree\n#comment";
- CharArraySet wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#", TEST_VERSION_CURRENT);
+ CharArraySet wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#");
checkSet(wordSet1);
assertFalse(wordSet1.contains("#comment"));
assertFalse(wordSet1.contains("comment"));
@@ -66,7 +66,7 @@ public class TestWordlistLoader extends
" two \n" + // stopword with leading/trailing space
" three four five \n" + // multiple stopwords
"six seven | comment\n"; //multiple stopwords + comment
- CharArraySet wordset = WordlistLoader.getSnowballWordSet(new StringReader(s), TEST_VERSION_CURRENT);
+ CharArraySet wordset = WordlistLoader.getSnowballWordSet(new StringReader(s));
assertEquals(7, wordset.size());
assertTrue(wordset.contains("ONE"));
assertTrue(wordset.contains("two"));
Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java Fri Aug 8 22:42:48 2014
@@ -35,7 +35,7 @@ public class TestCollationKeyAnalyzer ex
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
private Collator collator = Collator.getInstance(new Locale("ar"));
- private Analyzer analyzer = new CollationKeyAnalyzer(TEST_VERSION_CURRENT, collator);
+ private Analyzer analyzer = new CollationKeyAnalyzer(collator);
private BytesRef firstRangeBeginning = new BytesRef(collator.getCollationKey(firstRangeBeginningOriginal).toByteArray());
private BytesRef firstRangeEnd = new BytesRef(collator.getCollationKey(firstRangeEndOriginal).toByteArray());
@@ -65,7 +65,7 @@ public class TestCollationKeyAnalyzer ex
for (int i = 0; i < iters; i++) {
Collator collator = Collator.getInstance(Locale.GERMAN);
collator.setStrength(Collator.PRIMARY);
- assertThreadSafe(new CollationKeyAnalyzer(TEST_VERSION_CURRENT, collator));
+ assertThreadSafe(new CollationKeyAnalyzer(collator));
}
}
}
Modified: lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java Fri Aug 8 22:42:48 2014
@@ -77,7 +77,7 @@ public class TestICUNormalizer2CharFilte
CharFilter reader = new ICUNormalizer2CharFilter(new StringReader(input),
Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE));
- Tokenizer tokenStream = new NGramTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), 1, 1);
+ Tokenizer tokenStream = new NGramTokenizer(newAttributeFactory(), 1, 1);
tokenStream.setReader(reader);
assertTokenStreamContents(tokenStream,
Modified: lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java Fri Aug 8 22:42:48 2014
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.icu.s
*/
import java.io.IOException;
-import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -43,7 +42,7 @@ public class TestWithCJKBigramFilter ext
protected TokenStreamComponents createComponents(String fieldName) {
Tokenizer source = new ICUTokenizer(newAttributeFactory(), new DefaultICUTokenizerConfig(false));
TokenStream result = new CJKBigramFilter(source);
- return new TokenStreamComponents(source, new StopFilter(TEST_VERSION_CURRENT, result, CharArraySet.EMPTY_SET));
+ return new TokenStreamComponents(source, new StopFilter(result, CharArraySet.EMPTY_SET));
}
};
@@ -61,7 +60,7 @@ public class TestWithCJKBigramFilter ext
// some halfwidth katakana forms, which will affect the bigramming.
TokenStream result = new ICUNormalizer2Filter(source);
result = new CJKBigramFilter(source);
- return new TokenStreamComponents(source, new StopFilter(TEST_VERSION_CURRENT, result, CharArraySet.EMPTY_SET));
+ return new TokenStreamComponents(source, new StopFilter(result, CharArraySet.EMPTY_SET));
}
};
Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java Fri Aug 8 22:42:48 2014
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.ja;
*/
import java.io.IOException;
-import java.io.Reader;
import java.util.HashSet;
import java.util.Set;
@@ -31,7 +30,6 @@ import org.apache.lucene.analysis.ja.Jap
import org.apache.lucene.analysis.ja.dict.UserDictionary;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
-import org.apache.lucene.util.Version;
/**
* Analyzer for Japanese that uses morphological analysis.
@@ -42,12 +40,12 @@ public class JapaneseAnalyzer extends St
private final Set<String> stoptags;
private final UserDictionary userDict;
- public JapaneseAnalyzer(Version matchVersion) {
- this(matchVersion, null, JapaneseTokenizer.DEFAULT_MODE, DefaultSetHolder.DEFAULT_STOP_SET, DefaultSetHolder.DEFAULT_STOP_TAGS);
+ public JapaneseAnalyzer() {
+ this(null, JapaneseTokenizer.DEFAULT_MODE, DefaultSetHolder.DEFAULT_STOP_SET, DefaultSetHolder.DEFAULT_STOP_TAGS);
}
- public JapaneseAnalyzer(Version matchVersion, UserDictionary userDict, Mode mode, CharArraySet stopwords, Set<String> stoptags) {
- super(matchVersion, stopwords);
+ public JapaneseAnalyzer(UserDictionary userDict, Mode mode, CharArraySet stopwords, Set<String> stoptags) {
+ super(stopwords);
this.userDict = userDict;
this.mode = mode;
this.stoptags = stoptags;
@@ -89,11 +87,11 @@ public class JapaneseAnalyzer extends St
protected TokenStreamComponents createComponents(String fieldName) {
Tokenizer tokenizer = new JapaneseTokenizer(userDict, true, mode);
TokenStream stream = new JapaneseBaseFormFilter(tokenizer);
- stream = new JapanesePartOfSpeechStopFilter(matchVersion, stream, stoptags);
+ stream = new JapanesePartOfSpeechStopFilter(stream, stoptags);
stream = new CJKWidthFilter(stream);
- stream = new StopFilter(matchVersion, stream, stopwords);
+ stream = new StopFilter(stream, stopwords);
stream = new JapaneseKatakanaStemFilter(stream);
- stream = new LowerCaseFilter(matchVersion, stream);
+ stream = new LowerCaseFilter(stream);
return new TokenStreamComponents(tokenizer, stream);
}
}
Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilter.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilter.java Fri Aug 8 22:42:48 2014
@@ -22,7 +22,6 @@ import java.util.Set;
import org.apache.lucene.analysis.ja.tokenattributes.PartOfSpeechAttribute;
import org.apache.lucene.analysis.util.FilteringTokenFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.util.Version;
/**
* Removes tokens that match a set of part-of-speech tags.
@@ -33,12 +32,11 @@ public final class JapanesePartOfSpeechS
/**
* Create a new {@link JapanesePartOfSpeechStopFilter}.
- * @param version the Lucene match version
* @param input the {@link TokenStream} to consume
* @param stopTags the part-of-speech tags that should be removed
*/
- public JapanesePartOfSpeechStopFilter(Version version, TokenStream input, Set<String> stopTags) {
- super(version, input);
+ public JapanesePartOfSpeechStopFilter(TokenStream input, Set<String> stopTags) {
+ super(input);
this.stopTags = stopTags;
}
Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java?rev=1616901&r1=1616900&r2=1616901&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java Fri Aug 8 22:42:48 2014
@@ -70,7 +70,7 @@ public class JapanesePartOfSpeechStopFil
public TokenStream create(TokenStream stream) {
// if stoptags is null, it means the file is empty
if (stopTags != null) {
- final TokenStream filter = new JapanesePartOfSpeechStopFilter(luceneMatchVersion, stream, stopTags);
+ final TokenStream filter = new JapanesePartOfSpeechStopFilter(stream, stopTags);
return filter;
} else {
return stream;