You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rj...@apache.org on 2014/08/21 05:12:58 UTC
svn commit: r1619283 [8/11] - in /lucene/dev/branches/branch_4x: ./ lucene/
lucene/analysis/
lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/
lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/
lucene/analysis/common/src/java/o...
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -27,12 +27,12 @@ public class TestTurkishAnalyzer extends
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new TurkishAnalyzer(TEST_VERSION_CURRENT);
+ new TurkishAnalyzer();
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT);
+ Analyzer a = new TurkishAnalyzer();
// stemming
checkOneTerm(a, "ağacı", "ağaç");
checkOneTerm(a, "ağaç", "ağaç");
@@ -45,15 +45,14 @@ public class TestTurkishAnalyzer extends
/** test use of exclusion set */
public void testExclude() throws IOException {
- CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("ağacı"), false);
- Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT,
- TurkishAnalyzer.getDefaultStopSet(), exclusionSet);
+ CharArraySet exclusionSet = new CharArraySet(asSet("ağacı"), false);
+ Analyzer a = new TurkishAnalyzer(TurkishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTerm(a, "ağacı", "ağacı");
checkOneTerm(a, "ağaç", "ağaç");
}
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- checkRandomData(random(), new TurkishAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+ checkRandomData(random(), new TurkishAnalyzer(), 1000*RANDOM_MULTIPLIER);
}
}
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java Thu Aug 21 03:12:52 2014
@@ -25,7 +25,7 @@ import org.apache.lucene.util.LuceneTest
public class TestCharArrayMap extends LuceneTestCase {
public void doRandom(int iter, boolean ignoreCase) {
- CharArrayMap<Integer> map = new CharArrayMap<>(TEST_VERSION_CURRENT, 1, ignoreCase);
+ CharArrayMap<Integer> map = new CharArrayMap<>(1, ignoreCase);
HashMap<String,Integer> hmap = new HashMap<>();
char[] key;
@@ -64,7 +64,7 @@ public class TestCharArrayMap extends Lu
}
public void testMethods() {
- CharArrayMap<Integer> cm = new CharArrayMap<>(TEST_VERSION_CURRENT, 2, false);
+ CharArrayMap<Integer> cm = new CharArrayMap<>(2, false);
HashMap<String,Integer> hm = new HashMap<>();
hm.put("foo",1);
hm.put("bar",2);
@@ -133,7 +133,7 @@ public class TestCharArrayMap extends Lu
}
public void testModifyOnUnmodifiable(){
- CharArrayMap<Integer> map = new CharArrayMap<>(TEST_VERSION_CURRENT, 2, false);
+ CharArrayMap<Integer> map = new CharArrayMap<>(2, false);
map.put("foo",1);
map.put("bar",2);
final int size = map.size();
@@ -230,7 +230,7 @@ public class TestCharArrayMap extends Lu
}
public void testToString() {
- CharArrayMap<Integer> cm = new CharArrayMap<>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
+ CharArrayMap<Integer> cm = new CharArrayMap<>(Collections.singletonMap("test",1), false);
assertEquals("[test]",cm.keySet().toString());
assertEquals("[1]",cm.values().toString());
assertEquals("[test=1]",cm.entrySet().toString());
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java Thu Aug 21 03:12:52 2014
@@ -35,7 +35,7 @@ public class TestCharArraySet extends Lu
public void testRehash() throws Exception {
- CharArraySet cas = new CharArraySet(TEST_VERSION_CURRENT, 0, true);
+ CharArraySet cas = new CharArraySet(0, true);
for(int i=0;i<TEST_STOP_WORDS.length;i++)
cas.add(TEST_STOP_WORDS[i]);
assertEquals(TEST_STOP_WORDS.length, cas.size());
@@ -46,7 +46,7 @@ public class TestCharArraySet extends Lu
public void testNonZeroOffset() {
String[] words={"Hello","World","this","is","a","test"};
char[] findme="xthisy".toCharArray();
- CharArraySet set= new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet set= new CharArraySet(10, true);
set.addAll(Arrays.asList(words));
assertTrue(set.contains(findme, 1, 4));
assertTrue(set.contains(new String(findme,1,4)));
@@ -58,7 +58,7 @@ public class TestCharArraySet extends Lu
}
public void testObjectContains() {
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet set = new CharArraySet(10, true);
Integer val = Integer.valueOf(1);
set.add(val);
assertTrue(set.contains(val));
@@ -74,7 +74,7 @@ public class TestCharArraySet extends Lu
}
public void testClear(){
- CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
+ CharArraySet set=new CharArraySet(10,true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
assertEquals("Not all words added", TEST_STOP_WORDS.length, set.size());
set.clear();
@@ -88,7 +88,7 @@ public class TestCharArraySet extends Lu
}
public void testModifyOnUnmodifiable(){
- CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet set=new CharArraySet(10, true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
final int size = set.size();
set = CharArraySet.unmodifiableSet(set);
@@ -144,7 +144,7 @@ public class TestCharArraySet extends Lu
// current key (now a char[]) on a Set<String> would not hit any element of the CAS and therefor never call
// remove() on the iterator
try{
- set.removeAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
+ set.removeAll(new CharArraySet(Arrays.asList(TEST_STOP_WORDS), true));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected
@@ -152,7 +152,7 @@ public class TestCharArraySet extends Lu
}
try{
- set.retainAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(NOT_IN_SET), true));
+ set.retainAll(new CharArraySet(Arrays.asList(NOT_IN_SET), true));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected
@@ -173,7 +173,7 @@ public class TestCharArraySet extends Lu
}
public void testUnmodifiableSet(){
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10,true);
+ CharArraySet set = new CharArraySet(10,true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
set.add(Integer.valueOf(1));
final int size = set.size();
@@ -203,7 +203,7 @@ public class TestCharArraySet extends Lu
"\ud801\udc1c\ud801\udc1cCDE", "A\ud801\udc1cB"};
String[] lowerArr = new String[] {"abc\ud801\udc44",
"\ud801\udc44\ud801\udc44cde", "a\ud801\udc44b"};
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true);
+ CharArraySet set = new CharArraySet(Arrays.asList(TEST_STOP_WORDS), true);
for (String upper : upperArr) {
set.add(upper);
}
@@ -211,7 +211,7 @@ public class TestCharArraySet extends Lu
assertTrue(String.format(Locale.ROOT, missing, upperArr[i]), set.contains(upperArr[i]));
assertTrue(String.format(Locale.ROOT, missing, lowerArr[i]), set.contains(lowerArr[i]));
}
- set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), false);
+ set = new CharArraySet(Arrays.asList(TEST_STOP_WORDS), false);
for (String upper : upperArr) {
set.add(upper);
}
@@ -229,7 +229,7 @@ public class TestCharArraySet extends Lu
String[] lowerArr = new String[] { "abc\uD800", "abc\uD800efg",
"\uD800efg", "\uD800\ud801\udc44b" };
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays
+ CharArraySet set = new CharArraySet(Arrays
.asList(TEST_STOP_WORDS), true);
for (String upper : upperArr) {
set.add(upper);
@@ -238,7 +238,7 @@ public class TestCharArraySet extends Lu
assertTrue(String.format(Locale.ROOT, missing, upperArr[i]), set.contains(upperArr[i]));
assertTrue(String.format(Locale.ROOT, missing, lowerArr[i]), set.contains(lowerArr[i]));
}
- set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS),
+ set = new CharArraySet(Arrays.asList(TEST_STOP_WORDS),
false);
for (String upper : upperArr) {
set.add(upper);
@@ -323,8 +323,8 @@ public class TestCharArraySet extends Lu
@SuppressWarnings("deprecated")
public void testCopyCharArraySetBWCompat() {
- CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
- CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
+ CharArraySet setIngoreCase = new CharArraySet(10, true);
+ CharArraySet setCaseSensitive = new CharArraySet(10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
List<String> stopwordsUpper = new ArrayList<>();
@@ -336,8 +336,8 @@ public class TestCharArraySet extends Lu
setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
setCaseSensitive.add(Integer.valueOf(1));
- CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
- CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
+ CharArraySet copy = CharArraySet.copy(setIngoreCase);
+ CharArraySet copyCaseSens = CharArraySet.copy(setCaseSensitive);
assertEquals(setIngoreCase.size(), copy.size());
assertEquals(setCaseSensitive.size(), copy.size());
@@ -370,8 +370,8 @@ public class TestCharArraySet extends Lu
* Test the static #copy() function with a CharArraySet as a source
*/
public void testCopyCharArraySet() {
- CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
- CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
+ CharArraySet setIngoreCase = new CharArraySet(10, true);
+ CharArraySet setCaseSensitive = new CharArraySet(10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
List<String> stopwordsUpper = new ArrayList<>();
@@ -383,8 +383,8 @@ public class TestCharArraySet extends Lu
setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
setCaseSensitive.add(Integer.valueOf(1));
- CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
- CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
+ CharArraySet copy = CharArraySet.copy(setIngoreCase);
+ CharArraySet copyCaseSens = CharArraySet.copy(setCaseSensitive);
assertEquals(setIngoreCase.size(), copy.size());
assertEquals(setCaseSensitive.size(), copy.size());
@@ -426,7 +426,7 @@ public class TestCharArraySet extends Lu
}
set.addAll(Arrays.asList(TEST_STOP_WORDS));
- CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, set);
+ CharArraySet copy = CharArraySet.copy(set);
assertEquals(set.size(), copy.size());
assertEquals(set.size(), copy.size());
@@ -451,12 +451,12 @@ public class TestCharArraySet extends Lu
}
/**
- * Tests a special case of {@link CharArraySet#copy(Version, Set)} where the
+ * Tests a special case of {@link CharArraySet#copy(Set)} where the
* set to copy is the {@link CharArraySet#EMPTY_SET}
*/
public void testCopyEmptySet() {
assertSame(CharArraySet.EMPTY_SET,
- CharArraySet.copy(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET));
+ CharArraySet.copy(CharArraySet.EMPTY_SET));
}
/**
@@ -479,7 +479,7 @@ public class TestCharArraySet extends Lu
* Test for NPE
*/
public void testContainsWithNull() {
- CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(1, true);
try {
set.contains((char[]) null, 0, 10);
fail("null value must raise NPE");
@@ -495,7 +495,7 @@ public class TestCharArraySet extends Lu
}
public void testToString() {
- CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, Collections.singleton("test"));
+ CharArraySet set = CharArraySet.copy(Collections.singleton("test"));
assertEquals("[test]", set.toString());
set.add("test2");
assertTrue(set.toString().contains(", "));
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java Thu Aug 21 03:12:52 2014
@@ -54,7 +54,7 @@ public class TestCharTokenizers extends
}
// internal buffer size is 1024 make sure we have a surrogate pair right at the border
builder.insert(1023, "\ud801\udc1c");
- Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), new StringReader(builder.toString()));
+ Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory(), new StringReader(builder.toString()));
assertTokenStreamContents(tokenizer, builder.toString().toLowerCase(Locale.ROOT).split(" "));
}
@@ -71,7 +71,7 @@ public class TestCharTokenizers extends
builder.append("a");
}
builder.append("\ud801\udc1cabc");
- Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), new StringReader(builder.toString()));
+ Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory(), new StringReader(builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(Locale.ROOT)});
}
}
@@ -85,7 +85,7 @@ public class TestCharTokenizers extends
for (int i = 0; i < 255; i++) {
builder.append("A");
}
- Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), new StringReader(builder.toString() + builder.toString()));
+ Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory(), new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(Locale.ROOT), builder.toString().toLowerCase(Locale.ROOT)});
}
@@ -99,7 +99,7 @@ public class TestCharTokenizers extends
builder.append("A");
}
builder.append("\ud801\udc1c");
- Tokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), new StringReader(builder.toString() + builder.toString()));
+ Tokenizer tokenizer = new LowerCaseTokenizer(newAttributeFactory(), new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(Locale.ROOT), builder.toString().toLowerCase(Locale.ROOT)});
}
@@ -108,7 +108,7 @@ public class TestCharTokenizers extends
Analyzer analyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- Tokenizer tokenizer = new LetterTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), reader) {
+ Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory(), reader) {
@Override
protected int normalize(int c) {
if (c > 0xffff) {
@@ -149,7 +149,7 @@ public class TestCharTokenizers extends
Analyzer analyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- Tokenizer tokenizer = new LetterTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), reader) {
+ Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory(), reader) {
@Override
protected int normalize(int c) {
if (c <= 0xffff) {
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java Thu Aug 21 03:12:52 2014
@@ -47,7 +47,7 @@ public class TestCharacterUtils extends
} catch (IndexOutOfBoundsException e) {
}
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3));
@@ -69,7 +69,7 @@ public class TestCharacterUtils extends
assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3, 5));
assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3, 4));
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
assertEquals((int) 'A', java5.codePointAt(cpAt3, 0, 2));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3, 5));
@@ -79,7 +79,7 @@ public class TestCharacterUtils extends
@Test
public void testCodePointCount() {
CharacterUtils java4 = CharacterUtils.getJava4Instance();
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
final String s = TestUtil.randomUnicodeString(random());
assertEquals(s.length(), java4.codePointCount(s));
assertEquals(Character.codePointCount(s, 0, s.length()), java5.codePointCount(s));
@@ -88,7 +88,7 @@ public class TestCharacterUtils extends
@Test
public void testOffsetByCodePoint() {
CharacterUtils java4 = CharacterUtils.getJava4Instance();
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
for (int i = 0; i < 10; ++i) {
final char[] s = TestUtil.randomUnicodeString(random()).toCharArray();
final int index = TestUtil.nextInt(random(), 0, s.length);
@@ -120,7 +120,7 @@ public class TestCharacterUtils extends
public void testConversions() {
CharacterUtils java4 = CharacterUtils.getJava4Instance();
- CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils java5 = CharacterUtils.getInstance();
testConversions(java4);
testConversions(java5);
}
@@ -181,7 +181,7 @@ public class TestCharacterUtils extends
@Test
public void testFillJava15() throws IOException {
String input = "1234\ud801\udc1c789123\ud801\ud801\udc1c\ud801";
- CharacterUtils instance = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+ CharacterUtils instance = CharacterUtils.getInstance();
Reader reader = new StringReader(input);
CharacterBuffer buffer = CharacterUtils.newCharacterBuffer(5);
assertTrue(instance.fill(buffer, reader));
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java Thu Aug 21 03:12:52 2014
@@ -40,8 +40,8 @@ public class TestElision extends BaseTok
public void testElision() throws Exception {
String test = "Plop, juste pour voir l'embrouille avec O'brian. M'enfin.";
- Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), new StringReader(test));
- CharArraySet articles = new CharArraySet(TEST_VERSION_CURRENT, asSet("l", "M"), false);
+ Tokenizer tokenizer = new StandardTokenizer(newAttributeFactory(), new StringReader(test));
+ CharArraySet articles = new CharArraySet(asSet("l", "M"), false);
TokenFilter filter = new ElisionFilter(tokenizer, articles);
List<String> tas = filter(filter);
assertEquals("embrouille", tas.get(4));
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestFilesystemResourceLoader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestFilesystemResourceLoader.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestFilesystemResourceLoader.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestFilesystemResourceLoader.java Thu Aug 21 03:12:52 2014
@@ -50,8 +50,7 @@ public class TestFilesystemResourceLoade
private void assertClasspathDelegation(ResourceLoader rl) throws Exception {
// try a stopwords file from classpath
CharArraySet set = WordlistLoader.getSnowballWordSet(
- new InputStreamReader(rl.openResource("org/apache/lucene/analysis/snowball/english_stop.txt"), StandardCharsets.UTF_8),
- TEST_VERSION_CURRENT
+ new InputStreamReader(rl.openResource("org/apache/lucene/analysis/snowball/english_stop.txt"), StandardCharsets.UTF_8)
);
assertTrue(set.contains("you"));
// try to load a class; we use string comparison because classloader may be different...
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestWordlistLoader.java Thu Aug 21 03:12:52 2014
@@ -29,15 +29,15 @@ public class TestWordlistLoader extends
public void testWordlistLoading() throws IOException {
String s = "ONE\n two \nthree";
- CharArraySet wordSet1 = WordlistLoader.getWordSet(new StringReader(s), TEST_VERSION_CURRENT);
+ CharArraySet wordSet1 = WordlistLoader.getWordSet(new StringReader(s));
checkSet(wordSet1);
- CharArraySet wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)), TEST_VERSION_CURRENT);
+ CharArraySet wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)));
checkSet(wordSet2);
}
public void testComments() throws Exception {
String s = "ONE\n two \nthree\n#comment";
- CharArraySet wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#", TEST_VERSION_CURRENT);
+ CharArraySet wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#");
checkSet(wordSet1);
assertFalse(wordSet1.contains("#comment"));
assertFalse(wordSet1.contains("comment"));
@@ -66,7 +66,7 @@ public class TestWordlistLoader extends
" two \n" + // stopword with leading/trailing space
" three four five \n" + // multiple stopwords
"six seven | comment\n"; //multiple stopwords + comment
- CharArraySet wordset = WordlistLoader.getSnowballWordSet(new StringReader(s), TEST_VERSION_CURRENT);
+ CharArraySet wordset = WordlistLoader.getSnowballWordSet(new StringReader(s));
assertEquals(7, wordset.size());
assertTrue(wordset.contains("ONE"));
assertTrue(wordset.contains("two"));
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -22,6 +22,7 @@ import org.apache.lucene.analysis.Analyz
import org.apache.lucene.analysis.CollationTestBase;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.Version;
import java.text.Collator;
import java.util.Locale;
@@ -37,7 +38,7 @@ public class TestCollationKeyAnalyzer ex
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
// characters properly.
private Collator collator = Collator.getInstance(new Locale("ar"));
- private Analyzer analyzer = new CollationKeyAnalyzer(TEST_VERSION_CURRENT, collator);
+ private Analyzer analyzer = new CollationKeyAnalyzer(Version.LATEST, collator);
private BytesRef firstRangeBeginning = new BytesRef(collator.getCollationKey(firstRangeBeginningOriginal).toByteArray());
private BytesRef firstRangeEnd = new BytesRef(collator.getCollationKey(firstRangeEndOriginal).toByteArray());
@@ -84,7 +85,7 @@ public class TestCollationKeyAnalyzer ex
for (int i = 0; i < iters; i++) {
Collator collator = Collator.getInstance(Locale.GERMAN);
collator.setStrength(Collator.PRIMARY);
- assertThreadSafe(new CollationKeyAnalyzer(TEST_VERSION_CURRENT, collator));
+ assertThreadSafe(new CollationKeyAnalyzer(Version.LATEST, collator));
}
}
}
Modified: lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java Thu Aug 21 03:12:52 2014
@@ -76,7 +76,7 @@ public class TestICUNormalizer2CharFilte
CharFilter reader = new ICUNormalizer2CharFilter(new StringReader(input),
Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE));
- Tokenizer tokenStream = new NGramTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), reader, 1, 1);
+ Tokenizer tokenStream = new NGramTokenizer(newAttributeFactory(), reader, 1, 1);
assertTokenStreamContents(tokenStream,
new String[] {"ã", "ã´", "5", "°", "c", "n", "o", "(", "æ ª", ")", "ã°", "ã©", "ã ", "ã¶", "ã¾"},
Modified: lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestWithCJKBigramFilter.java Thu Aug 21 03:12:52 2014
@@ -43,7 +43,7 @@ public class TestWithCJKBigramFilter ext
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
Tokenizer source = new ICUTokenizer(newAttributeFactory(), reader, new DefaultICUTokenizerConfig(false));
TokenStream result = new CJKBigramFilter(source);
- return new TokenStreamComponents(source, new StopFilter(TEST_VERSION_CURRENT, result, CharArraySet.EMPTY_SET));
+ return new TokenStreamComponents(source, new StopFilter(result, CharArraySet.EMPTY_SET));
}
};
@@ -61,7 +61,7 @@ public class TestWithCJKBigramFilter ext
// some halfwidth katakana forms, which will affect the bigramming.
TokenStream result = new ICUNormalizer2Filter(source);
result = new CJKBigramFilter(source);
- return new TokenStreamComponents(source, new StopFilter(TEST_VERSION_CURRENT, result, CharArraySet.EMPTY_SET));
+ return new TokenStreamComponents(source, new StopFilter(result, CharArraySet.EMPTY_SET));
}
};
Modified: lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -31,7 +31,6 @@ import org.apache.lucene.analysis.ja.Jap
import org.apache.lucene.analysis.ja.dict.UserDictionary;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
-import org.apache.lucene.util.Version;
/**
* Analyzer for Japanese that uses morphological analysis.
@@ -42,12 +41,12 @@ public class JapaneseAnalyzer extends St
private final Set<String> stoptags;
private final UserDictionary userDict;
- public JapaneseAnalyzer(Version matchVersion) {
- this(matchVersion, null, JapaneseTokenizer.DEFAULT_MODE, DefaultSetHolder.DEFAULT_STOP_SET, DefaultSetHolder.DEFAULT_STOP_TAGS);
+ public JapaneseAnalyzer() {
+ this(null, JapaneseTokenizer.DEFAULT_MODE, DefaultSetHolder.DEFAULT_STOP_SET, DefaultSetHolder.DEFAULT_STOP_TAGS);
}
- public JapaneseAnalyzer(Version matchVersion, UserDictionary userDict, Mode mode, CharArraySet stopwords, Set<String> stoptags) {
- super(matchVersion, stopwords);
+ public JapaneseAnalyzer(UserDictionary userDict, Mode mode, CharArraySet stopwords, Set<String> stoptags) {
+ super(stopwords);
this.userDict = userDict;
this.mode = mode;
this.stoptags = stoptags;
@@ -89,11 +88,11 @@ public class JapaneseAnalyzer extends St
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
Tokenizer tokenizer = new JapaneseTokenizer(reader, userDict, true, mode);
TokenStream stream = new JapaneseBaseFormFilter(tokenizer);
- stream = new JapanesePartOfSpeechStopFilter(matchVersion, stream, stoptags);
+ stream = new JapanesePartOfSpeechStopFilter(stream, stoptags);
stream = new CJKWidthFilter(stream);
- stream = new StopFilter(matchVersion, stream, stopwords);
+ stream = new StopFilter(stream, stopwords);
stream = new JapaneseKatakanaStemFilter(stream);
- stream = new LowerCaseFilter(matchVersion, stream);
+ stream = new LowerCaseFilter(stream);
return new TokenStreamComponents(tokenizer, stream);
}
}
Modified: lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilter.java Thu Aug 21 03:12:52 2014
@@ -40,10 +40,18 @@ public final class JapanesePartOfSpeechS
/**
* Create a new {@link JapanesePartOfSpeechStopFilter}.
- * @param version the Lucene match version
* @param input the {@link TokenStream} to consume
* @param stopTags the part-of-speech tags that should be removed
*/
+ public JapanesePartOfSpeechStopFilter(TokenStream input, Set<String> stopTags) {
+ super(input);
+ this.stopTags = stopTags;
+ }
+
+ /**
+ * @deprecated Use {@link #JapanesePartOfSpeechStopFilter(TokenStream,Set)}
+ */
+ @Deprecated
public JapanesePartOfSpeechStopFilter(Version version, TokenStream input, Set<String> stopTags) {
super(version, input);
this.stopTags = stopTags;
Modified: lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java Thu Aug 21 03:12:52 2014
@@ -27,6 +27,7 @@ import org.apache.lucene.analysis.util.C
import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.lucene.analysis.util.ResourceLoaderAware;
import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.util.Version;
/**
* Factory for {@link org.apache.lucene.analysis.ja.JapanesePartOfSpeechStopFilter}.
@@ -51,6 +52,10 @@ public class JapanesePartOfSpeechStopFil
super(args);
stopTagFiles = get(args, "tags");
enablePositionIncrements = getBoolean(args, "enablePositionIncrements", true);
+ if (enablePositionIncrements == false &&
+ (luceneMatchVersion == null || luceneMatchVersion.onOrAfter(Version.LUCENE_4_4_0))) {
+ throw new IllegalArgumentException("enablePositionIncrements=false is not supported anymore as of Lucene 4.4");
+ }
if (!args.isEmpty()) {
throw new IllegalArgumentException("Unknown parameters: " + args);
}
@@ -73,6 +78,9 @@ public class JapanesePartOfSpeechStopFil
public TokenStream create(TokenStream stream) {
// if stoptags is null, it means the file is empty
if (stopTags != null) {
+ if (luceneMatchVersion == null) {
+ return new JapanesePartOfSpeechStopFilter(stream, stopTags);
+ }
@SuppressWarnings("deprecation")
final TokenStream filter = new JapanesePartOfSpeechStopFilter(luceneMatchVersion, enablePositionIncrements, stream, stopTags);
return filter;