Posted to commits@lucene.apache.org by rj...@apache.org on 2014/08/21 05:12:58 UTC

svn commit: r1619283 [7/11] - in /lucene/dev/branches/branch_4x: ./ lucene/ lucene/analysis/ lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/ lucene/analysis/common/src/java/o...

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java Thu Aug 21 03:12:52 2014
@@ -47,7 +47,7 @@ public class TestTrimFilter extends Base
                     new Token(new String(ccc, 0, ccc.length), 11, 15),
                     new Token(new String(whitespace, 0, whitespace.length), 16, 20),
                     new Token(new String(empty, 0, empty.length), 21, 21));
-    ts = new TrimFilter(TEST_VERSION_CURRENT, ts, false);
+    ts = new TrimFilter(ts);
 
     assertTokenStreamContents(ts, new String[] { "a", "b", "cCc", "", ""});
 
@@ -131,7 +131,7 @@ public class TestTrimFilter extends Base
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.KEYWORD, false);
-        return new TokenStreamComponents(tokenizer, new TrimFilter(TEST_VERSION_CURRENT, tokenizer, false));
+        return new TokenStreamComponents(tokenizer, new TrimFilter(tokenizer));
       } 
     };
     checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER);
@@ -143,7 +143,7 @@ public class TestTrimFilter extends Base
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new KeywordTokenizer(reader);
         final boolean updateOffsets = random().nextBoolean();
-        final Version version = updateOffsets ? Version.LUCENE_4_3 : TEST_VERSION_CURRENT;
+        final Version version = updateOffsets ? Version.LUCENE_4_3 : Version.LATEST;
         return new TokenStreamComponents(tokenizer, new TrimFilter(version, tokenizer, updateOffsets));
       }
     };
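
The hunks above drop the Version and updateOffsets arguments from TrimFilter: the one-argument constructor never changes offsets, and the old three-argument form survives only for the Version.LUCENE_4_3 back-compat path exercised in the last hunk. A minimal sketch of the new call pattern, with package names inferred from the file paths and a hypothetical holder class:

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.KeywordTokenizer;
    import org.apache.lucene.analysis.miscellaneous.TrimFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class TrimFilterSketch {
      public static void main(String[] args) throws Exception {
        // KeywordTokenizer keeps the whole input, padding included,
        // so TrimFilter actually has something to strip.
        TokenStream ts = new TrimFilter(new KeywordTokenizer(new StringReader("  padded  ")));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println("[" + term.toString() + "]");  // prints [padded]
        }
        ts.end();
        ts.close();
      }
    }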

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java Thu Aug 21 03:12:52 2014
@@ -66,14 +66,14 @@ public class TestWordDelimiterFilter ext
     int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
     // test that subwords and catenated subwords have
     // the correct offsets.
-    WordDelimiterFilter wdf = new WordDelimiterFilter(TEST_VERSION_CURRENT, new SingleTokenTokenStream(new Token("foo-bar", 5, 12)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("foo-bar", 5, 12)), DEFAULT_WORD_DELIM_TABLE, flags, null);
 
     assertTokenStreamContents(wdf, 
         new String[] { "foo", "foobar", "bar" },
         new int[] { 5, 5, 9 }, 
         new int[] { 8, 12, 12 });
 
-    wdf = new WordDelimiterFilter(TEST_VERSION_CURRENT, new SingleTokenTokenStream(new Token("foo-bar", 5, 6)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("foo-bar", 5, 6)), DEFAULT_WORD_DELIM_TABLE, flags, null);
     
     assertTokenStreamContents(wdf,
         new String[] { "foo", "bar", "foobar" },
@@ -84,7 +84,7 @@ public class TestWordDelimiterFilter ext
   @Test
   public void testOffsetChange() throws Exception {
     int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
-    WordDelimiterFilter wdf = new WordDelimiterFilter(TEST_VERSION_CURRENT, new SingleTokenTokenStream(new Token("übelkeit)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("übelkeit)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
     
     assertTokenStreamContents(wdf,
         new String[] { "übelkeit" },
@@ -95,7 +95,7 @@ public class TestWordDelimiterFilter ext
   @Test
   public void testOffsetChange2() throws Exception {
     int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
-    WordDelimiterFilter wdf = new WordDelimiterFilter(TEST_VERSION_CURRENT, new SingleTokenTokenStream(new Token("(übelkeit", 7, 17)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(übelkeit", 7, 17)), DEFAULT_WORD_DELIM_TABLE, flags, null);
     
     assertTokenStreamContents(wdf,
         new String[] { "übelkeit" },
@@ -106,7 +106,7 @@ public class TestWordDelimiterFilter ext
   @Test
   public void testOffsetChange3() throws Exception {
     int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
-    WordDelimiterFilter wdf = new WordDelimiterFilter(TEST_VERSION_CURRENT, new SingleTokenTokenStream(new Token("(übelkeit", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(übelkeit", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
     
     assertTokenStreamContents(wdf,
         new String[] { "übelkeit" },
@@ -117,7 +117,7 @@ public class TestWordDelimiterFilter ext
   @Test
   public void testOffsetChange4() throws Exception {
     int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
-    WordDelimiterFilter wdf = new WordDelimiterFilter(TEST_VERSION_CURRENT, new SingleTokenTokenStream(new Token("(foo,bar)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
+    WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(foo,bar)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
     
     assertTokenStreamContents(wdf,
         new String[] { "foo", "foobar", "bar"},
@@ -127,8 +127,8 @@ public class TestWordDelimiterFilter ext
 
   public void doSplit(final String input, String... output) throws Exception {
     int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
-    WordDelimiterFilter wdf = new WordDelimiterFilter(TEST_VERSION_CURRENT, new MockTokenizer(
-                new StringReader(input), MockTokenizer.KEYWORD, false), WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, flags, null);
+    WordDelimiterFilter wdf = new WordDelimiterFilter(keywordMockTokenizer(input),
+        WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, flags, null);
     
     assertTokenStreamContents(wdf, output);
   }
@@ -171,8 +171,7 @@ public class TestWordDelimiterFilter ext
   public void doSplitPossessive(int stemPossessive, final String input, final String... output) throws Exception {
     int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS;
     flags |= (stemPossessive == 1) ? STEM_ENGLISH_POSSESSIVE : 0;
-    WordDelimiterFilter wdf = new WordDelimiterFilter(TEST_VERSION_CURRENT, new MockTokenizer(
-        new StringReader(input), MockTokenizer.KEYWORD, false), flags, null);
+    WordDelimiterFilter wdf = new WordDelimiterFilter(keywordMockTokenizer(input), flags, null);
 
     assertTokenStreamContents(wdf, output);
   }
@@ -212,14 +211,14 @@ public class TestWordDelimiterFilter ext
   @Test
   public void testPositionIncrements() throws Exception {
     final int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
-    final CharArraySet protWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("NUTCH")), false);
+    final CharArraySet protWords = new CharArraySet(new HashSet<>(Arrays.asList("NUTCH")), false);
     
     /* analyzer that uses whitespace + wdf */
     Analyzer a = new Analyzer() {
       @Override
       public TokenStreamComponents createComponents(String field, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, 
+        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(
             tokenizer,
             flags, protWords));
       }
@@ -247,7 +246,7 @@ public class TestWordDelimiterFilter ext
       @Override
       public TokenStreamComponents createComponents(String field, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, 
+        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(
             new LargePosIncTokenFilter(tokenizer),
             flags, protWords));
       }
@@ -280,9 +279,8 @@ public class TestWordDelimiterFilter ext
       @Override
       public TokenStreamComponents createComponents(String field, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        StopFilter filter = new StopFilter(TEST_VERSION_CURRENT,
-            tokenizer, StandardAnalyzer.STOP_WORDS_SET);
-        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, filter, flags, protWords));
+        StopFilter filter = new StopFilter(tokenizer, StandardAnalyzer.STOP_WORDS_SET);
+        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(filter, flags, protWords));
       }
     };
 
@@ -309,7 +307,7 @@ public class TestWordDelimiterFilter ext
       @Override
       public TokenStreamComponents createComponents(String field, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, null));
+        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, null));
       }
     };
     
@@ -329,7 +327,7 @@ public class TestWordDelimiterFilter ext
       @Override
       public TokenStreamComponents createComponents(String field, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, null));
+        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, null));
       }
     };
     
@@ -347,7 +345,7 @@ public class TestWordDelimiterFilter ext
       final int flags = random().nextInt(512);
       final CharArraySet protectedWords;
       if (random().nextBoolean()) {
-        protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("a", "b", "cd")), false);
+        protectedWords = new CharArraySet(new HashSet<>(Arrays.asList("a", "b", "cd")), false);
       } else {
         protectedWords = null;
       }
@@ -357,7 +355,7 @@ public class TestWordDelimiterFilter ext
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, protectedWords));
+          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, protectedWords));
         }
       };
       // TODO: properly support positionLengthAttribute
@@ -372,7 +370,7 @@ public class TestWordDelimiterFilter ext
       final int flags = random().nextInt(512);
       final CharArraySet protectedWords;
       if (random().nextBoolean()) {
-        protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("a", "b", "cd")), false);
+        protectedWords = new CharArraySet(new HashSet<>(Arrays.asList("a", "b", "cd")), false);
       } else {
         protectedWords = null;
       }
@@ -382,7 +380,7 @@ public class TestWordDelimiterFilter ext
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, protectedWords));
+          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, protectedWords));
         }
       };
       // TODO: properly support positionLengthAttribute
@@ -396,7 +394,7 @@ public class TestWordDelimiterFilter ext
       final int flags = i;
       final CharArraySet protectedWords;
       if (random.nextBoolean()) {
-        protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("a", "b", "cd")), false);
+        protectedWords = new CharArraySet(new HashSet<>(Arrays.asList("a", "b", "cd")), false);
       } else {
         protectedWords = null;
       }
@@ -405,7 +403,7 @@ public class TestWordDelimiterFilter ext
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           Tokenizer tokenizer = new KeywordTokenizer(reader);
-          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, protectedWords));
+          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, protectedWords));
         }
       };
       // depending upon options, this thing may or may not preserve the empty term
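
The WordDelimiterFilter hunks remove the leading Version argument from both constructors, leaving (TokenStream, int flags, CharArraySet protWords) and the variant that also takes a character-type table. A minimal sketch of the short form, assuming the flag constants are the public ones on WordDelimiterFilter that these tests import statically; the holder class is hypothetical:

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;

    public class WordDelimiterSketch {
      public static TokenStream split(String text) {
        int flags = WordDelimiterFilter.GENERATE_WORD_PARTS
                  | WordDelimiterFilter.GENERATE_NUMBER_PARTS
                  | WordDelimiterFilter.CATENATE_ALL;
        TokenStream ts = new WhitespaceTokenizer(new StringReader(text));
        // No Version argument anymore; null means no protected-words set.
        return new WordDelimiterFilter(ts, flags, null);
      }
    }

For "foo-bar" this yields "foo", "foobar", "bar", matching the offsets asserted in the first hunk above.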

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java Thu Aug 21 03:12:52 2014
@@ -53,8 +53,8 @@ public class EdgeNGramTokenFilterTest ex
 
   public void testInvalidInput() throws Exception {
     boolean gotException = false;
-    try {        
-      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 0, 0);
+    try {
+      new EdgeNGramTokenFilter(input, 0, 0);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -63,8 +63,8 @@ public class EdgeNGramTokenFilterTest ex
 
   public void testInvalidInput2() throws Exception {
     boolean gotException = false;
-    try {        
-      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 2, 1);
+    try {
+      new EdgeNGramTokenFilter(input, 2, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -73,8 +73,8 @@ public class EdgeNGramTokenFilterTest ex
 
   public void testInvalidInput3() throws Exception {
     boolean gotException = false;
-    try {        
-      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, -1, 2);
+    try {
+      new EdgeNGramTokenFilter(input, -1, 2);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -82,7 +82,7 @@ public class EdgeNGramTokenFilterTest ex
   }
 
   public void testFrontUnigram() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 1, 1);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, 1, 1);
     assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{5});
   }
 
@@ -92,12 +92,12 @@ public class EdgeNGramTokenFilterTest ex
   }
 
   public void testOversizedNgrams() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 6, 6);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, 6, 6);
     assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0]);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, 1, 3);
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
   }
 
@@ -115,8 +115,8 @@ public class EdgeNGramTokenFilterTest ex
   }
 
   public void testFilterPositions() throws Exception {
-    TokenStream ts = new MockTokenizer(new StringReader("abcde vwxyz"), MockTokenizer.WHITESPACE, false);
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
+    TokenStream ts = whitespaceMockTokenizer("abcde vwxyz");
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(ts, 1, 3);
     assertTokenStreamContents(tokenizer,
                               new String[]{"a","ab","abc","v","vw","vwx"},
                               new int[]{0,0,0,6,6,6},
@@ -161,7 +161,7 @@ public class EdgeNGramTokenFilterTest ex
   public void testFirstTokenPositionIncrement() throws Exception {
     TokenStream ts = new MockTokenizer(new StringReader("a abc"), MockTokenizer.WHITESPACE, false);
     ts = new PositionFilter(ts); // All but first token will get 0 position increment
-    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, EdgeNGramTokenFilter.Side.FRONT, 2, 3);
+    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(ts, 2, 3);
     // The first token "a" will not be output, since it's smaller than the mingram size of 2.
     // The second token on input to EdgeNGramTokenFilter will have position increment of 0,
     // which should be increased to 1, since this is the first output token in the stream.
@@ -174,14 +174,14 @@ public class EdgeNGramTokenFilterTest ex
   }
   
   public void testSmallTokenInStream() throws Exception {
-    input = new MockTokenizer(new StringReader("abc de fgh"), MockTokenizer.WHITESPACE, false);
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
+    input = whitespaceMockTokenizer("abc de fgh");
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, 3, 3);
     assertTokenStreamContents(tokenizer, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
   }
   
   public void testReset() throws Exception {
-    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
-    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader("abcde"));
+    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(tokenizer, 1, 3);
     assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
     tokenizer.setReader(new StringReader("abcde"));
     assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
@@ -218,7 +218,7 @@ public class EdgeNGramTokenFilterTest ex
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
           return new TokenStreamComponents(tokenizer, 
-            new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, min, max));
+            new EdgeNGramTokenFilter(tokenizer, min, max));
         }    
       };
       checkRandomData(random(), a, 100*RANDOM_MULTIPLIER);
@@ -241,8 +241,8 @@ public class EdgeNGramTokenFilterTest ex
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new KeywordTokenizer(reader);
-        return new TokenStreamComponents(tokenizer, 
-            new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 15));
+        return new TokenStreamComponents(tokenizer,
+            new EdgeNGramTokenFilter(tokenizer, 2, 15));
       }    
     };
     checkAnalysisConsistency(random, a, random.nextBoolean(), "");
@@ -259,9 +259,9 @@ public class EdgeNGramTokenFilterTest ex
   }
 
   public void testGraphs() throws IOException {
-    TokenStream tk = new LetterTokenizer(TEST_VERSION_CURRENT, new StringReader("abc d efgh ij klmno p q"));
+    TokenStream tk = new LetterTokenizer(new StringReader("abc d efgh ij klmno p q"));
     tk = new ShingleFilter(tk);
-    tk = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tk, 7, 10);
+    tk = new EdgeNGramTokenFilter(tk, 7, 10);
     assertTokenStreamContents(tk,
         new String[] { "efgh ij", "ij klmn", "ij klmno", "klmno p" },
         new int[]    { 6,11,11,14 },
@@ -278,7 +278,7 @@ public class EdgeNGramTokenFilterTest ex
     final int minGram = TestUtil.nextInt(random(), 1, 3);
     final int maxGram = TestUtil.nextInt(random(), minGram, 10);
     TokenStream tk = new KeywordTokenizer(new StringReader(s));
-    tk = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram);
+    tk = new EdgeNGramTokenFilter(tk, minGram, maxGram);
     final CharTermAttribute termAtt = tk.addAttribute(CharTermAttribute.class);
     final OffsetAttribute offsetAtt = tk.addAttribute(OffsetAttribute.class);
     tk.reset();
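
EdgeNGramTokenFilter sheds both the Version argument and the Side.FRONT argument; per the rewritten tests it now always produces front grams through (TokenStream input, int minGram, int maxGram). A sketch with a hypothetical helper name:

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.KeywordTokenizer;
    import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;

    public class EdgeGramSketch {
      public static TokenStream frontGrams(String text) {
        // For "abcde" this emits "a", "ab", "abc", each spanning the whole
        // token's offsets, as testFrontRangeOfNgrams asserts above.
        return new EdgeNGramTokenFilter(new KeywordTokenizer(new StringReader(text)), 1, 3);
      }
    }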

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java Thu Aug 21 03:12:52 2014
@@ -45,8 +45,8 @@ public class EdgeNGramTokenizerTest exte
 
   public void testInvalidInput() throws Exception {
     boolean gotException = false;
-    try {        
-      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 0, 0);
+    try {
+      new EdgeNGramTokenizer(input, 0, 0);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -55,8 +55,8 @@ public class EdgeNGramTokenizerTest exte
 
   public void testInvalidInput2() throws Exception {
     boolean gotException = false;
-    try {        
-      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 2, 1);
+    try {
+      new EdgeNGramTokenizer(input, 2, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -65,8 +65,8 @@ public class EdgeNGramTokenizerTest exte
 
   public void testInvalidInput3() throws Exception {
     boolean gotException = false;
-    try {        
-      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, -1, 2);
+    try {
+      new EdgeNGramTokenizer(input, -1, 2);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -74,7 +74,7 @@ public class EdgeNGramTokenizerTest exte
   }
 
   public void testFrontUnigram() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 1, 1);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, 1, 1);
     assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1}, 5 /* abcde */);
   }
 
@@ -84,12 +84,12 @@ public class EdgeNGramTokenizerTest exte
   }
 
   public void testOversizedNgrams() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 6, 6);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, 6, 6);
     assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0], 5 /* abcde */);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 1, 3);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, 1, 3);
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
   }
 
@@ -99,7 +99,7 @@ public class EdgeNGramTokenizerTest exte
   }
   
   public void testReset() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 1, 3);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, 1, 3);
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
     tokenizer.setReader(new StringReader("abcde"));
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
@@ -114,7 +114,7 @@ public class EdgeNGramTokenizerTest exte
       Analyzer a = new Analyzer() {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-          Tokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, reader, min, max);
+          Tokenizer tokenizer = new EdgeNGramTokenizer(reader, min, max);
           return new TokenStreamComponents(tokenizer, tokenizer);
         }    
       };
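
EdgeNGramTokenizer follows the same pattern, taking (Reader, minGram, maxGram) directly. Wrapped in an Analyzer the way the random-data test does (a sketch; the holder class is hypothetical):

    import java.io.Reader;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;

    public class EdgeGramAnalyzerSketch {
      static final Analyzer ANALYZER = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
          Tokenizer t = new EdgeNGramTokenizer(reader, 1, 3);  // no Version argument
          return new TokenStreamComponents(t, t);
        }
      };
    }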

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java Thu Aug 21 03:12:52 2014
@@ -51,7 +51,7 @@ public class NGramTokenFilterTest extend
   public void testInvalidInput() throws Exception {
     boolean gotException = false;
     try {        
-      new NGramTokenFilter(TEST_VERSION_CURRENT, input, 2, 1);
+      new NGramTokenFilter(input, 2, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -61,7 +61,7 @@ public class NGramTokenFilterTest extend
   public void testInvalidInput2() throws Exception {
     boolean gotException = false;
     try {        
-      new NGramTokenFilter(TEST_VERSION_CURRENT, input, 0, 1);
+      new NGramTokenFilter(input, 0, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -69,17 +69,17 @@ public class NGramTokenFilterTest extend
   }
 
   public void testUnigrams() throws Exception {
-    NGramTokenFilter filter = new NGramTokenFilter(TEST_VERSION_CURRENT, input, 1, 1);
+    NGramTokenFilter filter = new NGramTokenFilter(input, 1, 1);
     assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,0,0,0,0}, new int[]{5,5,5,5,5}, new int[]{1,0,0,0,0});
   }
   
   public void testBigrams() throws Exception {
-    NGramTokenFilter filter = new NGramTokenFilter(TEST_VERSION_CURRENT, input, 2, 2);
+    NGramTokenFilter filter = new NGramTokenFilter(input, 2, 2);
     assertTokenStreamContents(filter, new String[]{"ab","bc","cd","de"}, new int[]{0,0,0,0}, new int[]{5,5,5,5}, new int[]{1,0,0,0});
   }
   
   public void testNgrams() throws Exception {
-    NGramTokenFilter filter = new NGramTokenFilter(TEST_VERSION_CURRENT, input, 1, 3);
+    NGramTokenFilter filter = new NGramTokenFilter(input, 1, 3);
     assertTokenStreamContents(filter,
         new String[]{"a","ab","abc","b","bc","bcd","c","cd","cde","d","de","e"},
         new int[]{0,0,0,0,0,0,0,0,0,0,0,0},
@@ -91,7 +91,7 @@ public class NGramTokenFilterTest extend
   }
 
   public void testNgramsNoIncrement() throws Exception {
-    NGramTokenFilter filter = new NGramTokenFilter(TEST_VERSION_CURRENT, input, 1, 3);
+    NGramTokenFilter filter = new NGramTokenFilter(input, 1, 3);
     assertTokenStreamContents(filter,
         new String[]{"a","ab","abc","b","bc","bcd","c","cd","cde","d","de","e"},
         new int[]{0,0,0,0,0,0,0,0,0,0,0,0},
@@ -103,19 +103,19 @@ public class NGramTokenFilterTest extend
   }
 
   public void testOversizedNgrams() throws Exception {
-    NGramTokenFilter filter = new NGramTokenFilter(TEST_VERSION_CURRENT, input, 6, 7);
+    NGramTokenFilter filter = new NGramTokenFilter(input, 6, 7);
     assertTokenStreamContents(filter, new String[0], new int[0], new int[0]);
   }
   
   public void testSmallTokenInStream() throws Exception {
     input = new MockTokenizer(new StringReader("abc de fgh"), MockTokenizer.WHITESPACE, false);
-    NGramTokenFilter filter = new NGramTokenFilter(TEST_VERSION_CURRENT, input, 3, 3);
+    NGramTokenFilter filter = new NGramTokenFilter(input, 3, 3);
     assertTokenStreamContents(filter, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10}, new int[] {1, 2});
   }
   
   public void testReset() throws Exception {
-    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
-    NGramTokenFilter filter = new NGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, 1, 1);
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader("abcde"));
+    NGramTokenFilter filter = new NGramTokenFilter(tokenizer, 1, 1);
     assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,0,0,0,0}, new int[]{5,5,5,5,5}, new int[]{1,0,0,0,0});
     tokenizer.setReader(new StringReader("abcde"));
     assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,0,0,0,0}, new int[]{5,5,5,5,5}, new int[]{1,0,0,0,0});
@@ -131,7 +131,7 @@ public class NGramTokenFilterTest extend
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
         TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
-        filters = new NGramTokenFilter(TEST_VERSION_CURRENT, filters, 2, 2);
+        filters = new NGramTokenFilter(filters, 2, 2);
         return new TokenStreamComponents(tokenizer, filters);
       }
     };
@@ -152,7 +152,7 @@ public class NGramTokenFilterTest extend
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
           Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
           return new TokenStreamComponents(tokenizer, 
-              new NGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, min, max));
+              new NGramTokenFilter(tokenizer, min, max));
         }    
       };
       checkRandomData(random(), a, 200*RANDOM_MULTIPLIER, 20);
@@ -166,14 +166,14 @@ public class NGramTokenFilterTest extend
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new KeywordTokenizer(reader);
         return new TokenStreamComponents(tokenizer, 
-            new NGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, 2, 15));
+            new NGramTokenFilter(tokenizer, 2, 15));
       }    
     };
     checkAnalysisConsistency(random, a, random.nextBoolean(), "");
   }
 
   public void testLucene43() throws IOException {
-    NGramTokenFilter filter = new NGramTokenFilter(Version.LUCENE_4_3, input, 2, 3);
+    TokenFilter filter = new Lucene43NGramTokenFilter(input, 2, 3);
     assertTokenStreamContents(filter,
         new String[]{"ab","bc","cd","de","abc","bcd","cde"},
         new int[]{0,1,2,3,0,1,2},
@@ -190,7 +190,7 @@ public class NGramTokenFilterTest extend
     final int minGram = TestUtil.nextInt(random(), 1, 3);
     final int maxGram = TestUtil.nextInt(random(), minGram, 10);
     TokenStream tk = new KeywordTokenizer(new StringReader(s));
-    tk = new NGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram);
+    tk = new NGramTokenFilter(tk, minGram, maxGram);
     final CharTermAttribute termAtt = tk.addAttribute(CharTermAttribute.class);
     final OffsetAttribute offsetAtt = tk.addAttribute(OffsetAttribute.class);
     tk.reset();
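
The testLucene43 hunk is the one substantive change in this file: rather than passing Version.LUCENE_4_3 to NGramTokenFilter, the 4.3 behavior (all bigrams first, then all trigrams, each gram with its own offsets) now lives in a dedicated Lucene43NGramTokenFilter class. A sketch of selecting between the two, with a hypothetical helper name:

    import org.apache.lucene.analysis.TokenFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.ngram.Lucene43NGramTokenFilter;
    import org.apache.lucene.analysis.ngram.NGramTokenFilter;

    public class GramCompatSketch {
      public static TokenFilter grams(TokenStream input, boolean legacy43) {
        return legacy43
            ? new Lucene43NGramTokenFilter(input, 2, 3) // size-first order, per-gram offsets
            : new NGramTokenFilter(input, 2, 3);        // position-first order, whole-token offsets
      }
    }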

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java Thu Aug 21 03:12:52 2014
@@ -35,6 +35,7 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.util.TestUtil;
 
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.lucene.util.Version;
 
 /**
  * Tests {@link NGramTokenizer} for correctness.
@@ -50,8 +51,8 @@ public class NGramTokenizerTest extends 
   
   public void testInvalidInput() throws Exception {
     boolean gotException = false;
-    try {        
-      new NGramTokenizer(TEST_VERSION_CURRENT, input, 2, 1);
+    try {
+      NGramTokenizer tok = new NGramTokenizer(input, 2, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -60,8 +61,8 @@ public class NGramTokenizerTest extends 
   
   public void testInvalidInput2() throws Exception {
     boolean gotException = false;
-    try {        
-      new NGramTokenizer(TEST_VERSION_CURRENT, input, 0, 1);
+    try {
+      NGramTokenizer tok = new NGramTokenizer(input, 0, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -69,17 +70,17 @@ public class NGramTokenizerTest extends 
   }
   
   public void testUnigrams() throws Exception {
-    NGramTokenizer tokenizer = new NGramTokenizer(TEST_VERSION_CURRENT, input, 1, 1);
+    NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 1);
     assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5}, 5 /* abcde */);
   }
   
   public void testBigrams() throws Exception {
-    NGramTokenizer tokenizer = new NGramTokenizer(TEST_VERSION_CURRENT, input, 2, 2);
+    NGramTokenizer tokenizer = new NGramTokenizer(input, 2, 2);
     assertTokenStreamContents(tokenizer, new String[]{"ab","bc","cd","de"}, new int[]{0,1,2,3}, new int[]{2,3,4,5}, 5 /* abcde */);
   }
   
   public void testNgrams() throws Exception {
-    NGramTokenizer tokenizer = new NGramTokenizer(TEST_VERSION_CURRENT, input, 1, 3);
+    NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 3);
     assertTokenStreamContents(tokenizer,
         new String[]{"a","ab", "abc", "b", "bc", "bcd", "c", "cd", "cde", "d", "de", "e"},
         new int[]{0,0,0,1,1,1,2,2,2,3,3,4},
@@ -93,12 +94,12 @@ public class NGramTokenizerTest extends 
   }
   
   public void testOversizedNgrams() throws Exception {
-    NGramTokenizer tokenizer = new NGramTokenizer(TEST_VERSION_CURRENT, input, 6, 7);
+    NGramTokenizer tokenizer = new NGramTokenizer(input, 6, 7);
     assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0], 5 /* abcde */);
   }
   
   public void testReset() throws Exception {
-    NGramTokenizer tokenizer = new NGramTokenizer(TEST_VERSION_CURRENT, input, 1, 1);
+    NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 1);
     assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5}, 5 /* abcde */);
     tokenizer.setReader(new StringReader("abcde"));
     assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5}, 5 /* abcde */);
@@ -112,7 +113,7 @@ public class NGramTokenizerTest extends 
       Analyzer a = new Analyzer() {
         @Override
         protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-          Tokenizer tokenizer = new NGramTokenizer(TEST_VERSION_CURRENT, reader, min, max);
+          Tokenizer tokenizer = new NGramTokenizer(reader, min, max);
           return new TokenStreamComponents(tokenizer, tokenizer);
         }    
       };
@@ -157,7 +158,7 @@ public class NGramTokenizerTest extends 
     for (int i = 0; i < codePoints.length; ++i) {
       offsets[i+1] = offsets[i] + Character.charCount(codePoints[i]);
     }
-    final TokenStream grams = new NGramTokenizer(TEST_VERSION_CURRENT, new StringReader(s), minGram, maxGram, edgesOnly) {
+    final TokenStream grams = new NGramTokenizer(Version.LATEST, new StringReader(s), minGram, maxGram, edgesOnly) {
       @Override
       protected boolean isTokenChar(int chr) {
         return nonTokenChars.indexOf(chr) < 0;
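
Note the org.apache.lucene.util.Version import added at the top of this file: the public NGramTokenizer constructors lose their Version argument, but the edgesOnly constructor this test helper calls still takes one, now Version.LATEST instead of the test framework's TEST_VERSION_CURRENT. Custom token-character policies still go through the protected isTokenChar hook, e.g. (sketch, hypothetical names):

    import java.io.StringReader;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.ngram.NGramTokenizer;

    public class GramCharsSketch {
      public static Tokenizer grams(String text) {
        return new NGramTokenizer(new StringReader(text), 1, 2) {
          @Override
          protected boolean isTokenChar(int chr) {
            return !Character.isWhitespace(chr);  // never emit grams spanning whitespace
          }
        };
      }
    }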

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java Thu Aug 21 03:12:52 2014
@@ -125,14 +125,14 @@ public class TestDutchStemmer extends Ba
   }
   
   public void testSnowballCorrectness() throws Exception {
-    Analyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
+    Analyzer a = new DutchAnalyzer();
     checkOneTerm(a, "opheffen", "opheff");
     checkOneTerm(a, "opheffende", "opheff");
     checkOneTerm(a, "opheffing", "opheff");
   }
   
   public void testReusableTokenStream() throws Exception {
-    Analyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT); 
+    Analyzer a = new DutchAnalyzer(); 
     checkOneTerm(a, "lichaamsziek", "lichaamsziek");
     checkOneTerm(a, "lichamelijk", "licham");
     checkOneTerm(a, "lichamelijke", "licham");
@@ -142,10 +142,10 @@ public class TestDutchStemmer extends Ba
   public void testExclusionTableViaCtor() throws IOException {
     CharArraySet set = new CharArraySet(Version.LUCENE_3_0, 1, true);
     set.add("lichamelijk");
-    DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
+    DutchAnalyzer a = new DutchAnalyzer( CharArraySet.EMPTY_SET, set);
     assertAnalyzesTo(a, "lichamelijk lichamelijke", new String[] { "lichamelijk", "licham" });
     
-    a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
+    a = new DutchAnalyzer( CharArraySet.EMPTY_SET, set);
     assertAnalyzesTo(a, "lichamelijk lichamelijke", new String[] { "lichamelijk", "licham" });
 
   }
@@ -155,7 +155,7 @@ public class TestDutchStemmer extends Ba
    * even if you use a non-default ctor.
    */
   public void testStemOverrides() throws IOException {
-    DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET);
+    DutchAnalyzer a = new DutchAnalyzer( CharArraySet.EMPTY_SET);
     checkOneTerm(a, "fiets", "fiets");
   }
   /**
@@ -171,7 +171,7 @@ public class TestDutchStemmer extends Ba
   }
 
   public void testEmptyStemDictionary() throws IOException {
-    DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, 
+    DutchAnalyzer a = new DutchAnalyzer( CharArraySet.EMPTY_SET, 
         CharArraySet.EMPTY_SET, CharArrayMap.<String>emptyMap());
     checkOneTerm(a, "fiets", "fiet");
   }
@@ -207,12 +207,12 @@ public class TestDutchStemmer extends Ba
   }
   
   private void check(final String input, final String expected) throws Exception {
-    checkOneTerm(new DutchAnalyzer(TEST_VERSION_CURRENT), input, expected); 
+    checkOneTerm(new DutchAnalyzer(), input, expected); 
   }
   
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new DutchAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new DutchAnalyzer(), 1000*RANDOM_MULTIPLIER);
   }
   
 }
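
DutchAnalyzer now has a no-argument constructor alongside the stopword, stem-exclusion, and stem-override overloads shown above, and CharArraySet likewise drops its Version parameter. A sketch of the exclusion-set form (hypothetical holder class):

    import java.util.Arrays;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.nl.DutchAnalyzer;
    import org.apache.lucene.analysis.util.CharArraySet;

    public class DutchSketch {
      public static Analyzer withExclusions() {
        CharArraySet keep = new CharArraySet(Arrays.asList("lichamelijk"), false);
        // (stopwords, stemExclusionSet): "lichamelijk" passes through unstemmed,
        // while "lichamelijke" still stems to "licham", per the test above.
        return new DutchAnalyzer(CharArraySet.EMPTY_SET, keep);
      }
    }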

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -27,12 +27,12 @@ public class TestNorwegianAnalyzer exten
   /** This test fails with NPE when the 
    * stopwords file is missing in classpath */
   public void testResourcesAvailable() {
-    new NorwegianAnalyzer(TEST_VERSION_CURRENT);
+    new NorwegianAnalyzer();
   }
   
   /** test stopwords and stemming */
   public void testBasics() throws IOException {
-    Analyzer a = new NorwegianAnalyzer(TEST_VERSION_CURRENT);
+    Analyzer a = new NorwegianAnalyzer();
     // stemming
     checkOneTerm(a, "havnedistriktene", "havnedistrikt");
     checkOneTerm(a, "havnedistrikter", "havnedistrikt");
@@ -42,8 +42,8 @@ public class TestNorwegianAnalyzer exten
   
   /** test use of exclusion set */
   public void testExclude() throws IOException {
-    CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("havnedistriktene"), false);
-    Analyzer a = new NorwegianAnalyzer(TEST_VERSION_CURRENT, 
+    CharArraySet exclusionSet = new CharArraySet( asSet("havnedistriktene"), false);
+    Analyzer a = new NorwegianAnalyzer( 
         NorwegianAnalyzer.getDefaultStopSet(), exclusionSet);
     checkOneTerm(a, "havnedistriktene", "havnedistriktene");
     checkOneTerm(a, "havnedistrikter", "havnedistrikt");
@@ -51,6 +51,6 @@ public class TestNorwegianAnalyzer exten
   
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new NorwegianAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new NorwegianAnalyzer(), 1000*RANDOM_MULTIPLIER);
   }
 }
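
NorwegianAnalyzer gets the same treatment: a no-argument default plus a (stopwords, stemExclusionSet) overload. A sketch with a hypothetical holder class:

    import java.util.Arrays;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.no.NorwegianAnalyzer;
    import org.apache.lucene.analysis.util.CharArraySet;

    public class NorwegianSketch {
      public static Analyzer build() {
        CharArraySet keep = new CharArraySet(Arrays.asList("havnedistriktene"), false);
        // excluded words keep their surface form; everything else stems normally
        return new NorwegianAnalyzer(NorwegianAnalyzer.getDefaultStopSet(), keep);
      }
    }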

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java Thu Aug 21 03:12:52 2014
@@ -67,7 +67,7 @@ public class TestNorwegianLightStemFilte
   }
   
   public void testKeyword() throws IOException {
-    final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("sekretæren"), false);
+    final CharArraySet exclusionSet = new CharArraySet( asSet("sekretæren"), false);
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
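
This file, TestNorwegianMinimalStemFilter, and the four Portuguese stemmer tests below all carry the same one-line change: CharArraySet construction without a Version, i.e. (Collection, boolean ignoreCase). A minimal sketch (hypothetical holder class):

    import java.util.Arrays;
    import org.apache.lucene.analysis.util.CharArraySet;

    public class CharArraySetSketch {
      public static void main(String[] args) {
        CharArraySet set = new CharArraySet(Arrays.asList("sekretæren"), false);
        // ignoreCase=false, so lookups are case-sensitive
        System.out.println(set.contains("sekretæren"));  // true
        System.out.println(set.contains("Sekretæren"));  // false
      }
    }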

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java Thu Aug 21 03:12:52 2014
@@ -66,7 +66,7 @@ public class TestNorwegianMinimalStemFil
   }
   
   public void testKeyword() throws IOException {
-    final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("sekretæren"), false);
+    final CharArraySet exclusionSet = new CharArraySet( asSet("sekretæren"), false);
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -27,12 +27,12 @@ public class TestPortugueseAnalyzer exte
   /** This test fails with NPE when the 
    * stopwords file is missing in classpath */
   public void testResourcesAvailable() {
-    new PortugueseAnalyzer(TEST_VERSION_CURRENT);
+    new PortugueseAnalyzer();
   }
   
   /** test stopwords and stemming */
   public void testBasics() throws IOException {
-    Analyzer a = new PortugueseAnalyzer(TEST_VERSION_CURRENT);
+    Analyzer a = new PortugueseAnalyzer();
     // stemming
     checkOneTerm(a, "quilométricas", "quilometric");
     checkOneTerm(a, "quilométricos", "quilometric");
@@ -42,8 +42,8 @@ public class TestPortugueseAnalyzer exte
   
   /** test use of exclusion set */
   public void testExclude() throws IOException {
-    CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("quilométricas"), false);
-    Analyzer a = new PortugueseAnalyzer(TEST_VERSION_CURRENT, 
+    CharArraySet exclusionSet = new CharArraySet( asSet("quilométricas"), false);
+    Analyzer a = new PortugueseAnalyzer( 
         PortugueseAnalyzer.getDefaultStopSet(), exclusionSet);
     checkOneTerm(a, "quilométricas", "quilométricas");
     checkOneTerm(a, "quilométricos", "quilometric");
@@ -51,6 +51,6 @@ public class TestPortugueseAnalyzer exte
   
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new PortugueseAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new PortugueseAnalyzer(), 1000*RANDOM_MULTIPLIER);
   }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java Thu Aug 21 03:12:52 2014
@@ -93,7 +93,7 @@ public class TestPortugueseLightStemFilt
   }
   
   public void testKeyword() throws IOException {
-    final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("quilométricas"), false);
+    final CharArraySet exclusionSet = new CharArraySet( asSet("quilométricas"), false);
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java Thu Aug 21 03:12:52 2014
@@ -67,7 +67,7 @@ public class TestPortugueseMinimalStemFi
   }
   
   public void testKeyword() throws IOException {
-    final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("quilométricas"), false);
+    final CharArraySet exclusionSet = new CharArraySet( asSet("quilométricas"), false);
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java Thu Aug 21 03:12:52 2014
@@ -67,7 +67,7 @@ public class TestPortugueseStemFilter ex
   }
   
   public void testKeyword() throws IOException {
-    final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("quilométricas"), false);
+    final CharArraySet exclusionSet = new CharArraySet( asSet("quilométricas"), false);
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java Thu Aug 21 03:12:52 2014
@@ -64,7 +64,7 @@ public class QueryAutoStopWordAnalyzerTe
 
   public void testNoStopwords() throws Exception {
     // Note: an empty list of fields passed in
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Collections.<String>emptyList(), 1);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer( appAnalyzer, reader, Collections.<String>emptyList(), 1);
     TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("variedField", "quick");
     assertTokenStreamContents(protectedTokenStream, new String[]{"quick"});
 
@@ -73,13 +73,13 @@ public class QueryAutoStopWordAnalyzerTe
   }
 
   public void testDefaultStopwordsAllFields() throws Exception {
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer( appAnalyzer, reader);
     TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     assertTokenStreamContents(protectedTokenStream, new String[0]); // Default stop word filtering will remove boring
   }
 
   public void testStopwordsAllFieldsMaxPercentDocs() throws Exception {
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, 1f / 2f);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer( appAnalyzer, reader, 1f / 2f);
 
     TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     // A filter on terms in > one half of docs remove boring
@@ -89,36 +89,36 @@ public class QueryAutoStopWordAnalyzerTe
      // A filter on terms in > half of docs should not remove vaguelyBoring
     assertTokenStreamContents(protectedTokenStream, new String[]{"vaguelyboring"});
 
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, 1f / 4f);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer( appAnalyzer, reader, 1f / 4f);
     protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "vaguelyboring");
      // A filter on terms in > quarter of docs should remove vaguelyBoring
     assertTokenStreamContents(protectedTokenStream, new String[0]);
   }
 
   public void testStopwordsPerFieldMaxPercentDocs() throws Exception {
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.asList("variedField"), 1f / 2f);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer( appAnalyzer, reader, Arrays.asList("variedField"), 1f / 2f);
     TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     // A filter on one Field should not affect queries on another
     assertTokenStreamContents(protectedTokenStream, new String[]{"boring"});
 
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.asList("variedField", "repetitiveField"), 1f / 2f);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer( appAnalyzer, reader, Arrays.asList("variedField", "repetitiveField"), 1f / 2f);
     protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     // A filter on the right Field should affect queries on it
     assertTokenStreamContents(protectedTokenStream, new String[0]);
   }
 
   public void testStopwordsPerFieldMaxDocFreq() throws Exception {
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.asList("repetitiveField"), 10);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer(appAnalyzer, reader, Arrays.asList("repetitiveField"), 10);
     int numStopWords = protectedAnalyzer.getStopWords("repetitiveField").length;
     assertTrue("Should have identified stop words", numStopWords > 0);
 
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.asList("repetitiveField", "variedField"), 10);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer(appAnalyzer, reader, Arrays.asList("repetitiveField", "variedField"), 10);
     int numNewStopWords = protectedAnalyzer.getStopWords("repetitiveField").length + protectedAnalyzer.getStopWords("variedField").length;
     assertTrue("Should have identified more stop words", numNewStopWords > numStopWords);
   }
 
   public void testNoFieldNamePollution() throws Exception {
-    protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.asList("repetitiveField"), 10);
+    protectedAnalyzer = new QueryAutoStopWordAnalyzer(appAnalyzer, reader, Arrays.asList("repetitiveField"), 10);
 
     TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     // Check filter set up OK
@@ -131,7 +131,6 @@ public class QueryAutoStopWordAnalyzerTe
   
   public void testTokenStream() throws Exception {
     QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(
-        TEST_VERSION_CURRENT,
         new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), reader, 10);
     TokenStream ts = a.tokenStream("repetitiveField", "this boring");
     assertTokenStreamContents(ts, new String[] { "this" });

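For reference, the constructor shape these hunks migrate to can be sketched as follows (illustrative only: the class, method, and field names are invented, and an already-open IndexReader plus the 4.x analyzers-common packages are assumed):

    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.en.EnglishAnalyzer;
    import org.apache.lucene.analysis.query.QueryAutoStopWordAnalyzer;
    import org.apache.lucene.index.IndexReader;

    public class AutoStopWordSketch {
      /** Wraps a base analyzer; terms in more than 25% of "body" docs become stop words. */
      static Analyzer wrap(IndexReader reader) throws IOException {
        Analyzer base = new EnglishAnalyzer();   // no Version argument anymore
        return new QueryAutoStopWordAnalyzer(base, reader, Arrays.asList("body"), 1f / 4f);
      }
    }
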
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java Thu Aug 21 03:12:52 2014
@@ -33,27 +33,27 @@ public class TestReverseStringFilter ext
   public void testFilter() throws Exception {
     TokenStream stream = new MockTokenizer(new StringReader("Do have a nice day"),
         MockTokenizer.WHITESPACE, false);     // 1-4 length string
-    ReverseStringFilter filter = new ReverseStringFilter(TEST_VERSION_CURRENT, stream);
+    ReverseStringFilter filter = new ReverseStringFilter(stream);
     assertTokenStreamContents(filter, new String[] { "oD", "evah", "a", "ecin", "yad" });
   }
   
   public void testFilterWithMark() throws Exception {
     TokenStream stream = new MockTokenizer(new StringReader("Do have a nice day"),
         MockTokenizer.WHITESPACE, false); // 1-4 length string
-    ReverseStringFilter filter = new ReverseStringFilter(TEST_VERSION_CURRENT, stream, '\u0001');
+    ReverseStringFilter filter = new ReverseStringFilter(stream, '\u0001');
     assertTokenStreamContents(filter, 
         new String[] { "\u0001oD", "\u0001evah", "\u0001a", "\u0001ecin", "\u0001yad" });
   }
 
   public void testReverseString() throws Exception {
-    assertEquals( "A", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "A" ) );
-    assertEquals( "BA", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "AB" ) );
-    assertEquals( "CBA", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "ABC" ) );
+    assertEquals( "A", ReverseStringFilter.reverse( "A" ) );
+    assertEquals( "BA", ReverseStringFilter.reverse( "AB" ) );
+    assertEquals( "CBA", ReverseStringFilter.reverse( "ABC" ) );
   }
   
   public void testReverseChar() throws Exception {
     char[] buffer = { 'A', 'B', 'C', 'D', 'E', 'F' };
-    ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 2, 3 );
+    ReverseStringFilter.reverse( buffer, 2, 3 );
     assertEquals( "ABEDCF", new String( buffer ) );
   }
   
@@ -68,37 +68,37 @@ public class TestReverseStringFilter ext
   
   public void testReverseSupplementary() throws Exception {
     // supplementary at end
-    assertEquals("𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "瀛愯䇹鍟艱𩬅"));
+    assertEquals("𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse("瀛愯䇹鍟艱𩬅"));
     // supplementary at end - 1
-    assertEquals("a𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "瀛愯䇹鍟艱𩬅a"));
+    assertEquals("a𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse("瀛愯䇹鍟艱𩬅a"));
     // supplementary at start
-    assertEquals("fedcba𩬅", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "𩬅abcdef"));
+    assertEquals("fedcba𩬅", ReverseStringFilter.reverse("𩬅abcdef"));
     // supplementary at start + 1
-    assertEquals("fedcba𩬅z", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "z𩬅abcdef"));
+    assertEquals("fedcba𩬅z", ReverseStringFilter.reverse("z𩬅abcdef"));
     // supplementary medial
-    assertEquals("gfe𩬅dcba", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "abcd𩬅efg"));
+    assertEquals("gfe𩬅dcba", ReverseStringFilter.reverse("abcd𩬅efg"));
   }
 
   public void testReverseSupplementaryChar() throws Exception {
     // supplementary at end
     char[] buffer = "abc瀛愯䇹鍟艱𩬅".toCharArray();
-    ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
+    ReverseStringFilter.reverse(buffer, 3, 7);
     assertEquals("abc𩬅艱鍟䇹愯瀛", new String(buffer));
     // supplementary at end - 1
     buffer = "abc瀛愯䇹鍟艱𩬅d".toCharArray();
-    ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 8);
+    ReverseStringFilter.reverse(buffer, 3, 8);
     assertEquals("abcd𩬅艱鍟䇹愯瀛", new String(buffer));
     // supplementary at start
     buffer = "abc𩬅瀛愯䇹鍟艱".toCharArray();
-    ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
+    ReverseStringFilter.reverse(buffer, 3, 7);
     assertEquals("abc艱鍟䇹愯瀛𩬅", new String(buffer));
     // supplementary at start + 1
     buffer = "abcd𩬅瀛愯䇹鍟艱".toCharArray();
-    ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 8);
+    ReverseStringFilter.reverse(buffer, 3, 8);
     assertEquals("abc艱鍟䇹愯瀛𩬅d", new String(buffer));
     // supplementary medial
     buffer = "abc瀛愯𩬅def".toCharArray();
-    ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
+    ReverseStringFilter.reverse(buffer, 3, 7);
     assertEquals("abcfed𩬅愯瀛", new String(buffer));
   }
   
@@ -108,7 +108,7 @@ public class TestReverseStringFilter ext
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        return new TokenStreamComponents(tokenizer, new ReverseStringFilter(TEST_VERSION_CURRENT, tokenizer));
+        return new TokenStreamComponents(tokenizer, new ReverseStringFilter(tokenizer));
       }
     };
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
@@ -119,7 +119,7 @@ public class TestReverseStringFilter ext
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new KeywordTokenizer(reader);
-        return new TokenStreamComponents(tokenizer, new ReverseStringFilter(TEST_VERSION_CURRENT, tokenizer));
+        return new TokenStreamComponents(tokenizer, new ReverseStringFilter(tokenizer));
       }
     };
     checkOneTerm(a, "", "");

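The static reverse() helpers exercised above lose their Version parameter the same way; a minimal standalone sketch of the new calls (class name invented):

    import org.apache.lucene.analysis.reverse.ReverseStringFilter;

    public class ReverseSketch {
      public static void main(String[] args) {
        // Whole-string reverse; supplementary (surrogate-pair) characters stay intact.
        System.out.println(ReverseStringFilter.reverse("ABC"));   // prints "CBA"

        // In-place reverse of three chars starting at offset 2: ABCDEF -> ABEDCF.
        char[] buffer = "ABCDEF".toCharArray();
        ReverseStringFilter.reverse(buffer, 2, 3);
        System.out.println(new String(buffer));
      }
    }
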
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -27,12 +27,12 @@ public class TestRomanianAnalyzer extend
  /** This test fails with an NPE when the
   * stopwords file is missing from the classpath */
   public void testResourcesAvailable() {
-    new RomanianAnalyzer(TEST_VERSION_CURRENT);
+    new RomanianAnalyzer();
   }
   
   /** test stopwords and stemming */
   public void testBasics() throws IOException {
-    Analyzer a = new RomanianAnalyzer(TEST_VERSION_CURRENT);
+    Analyzer a = new RomanianAnalyzer();
     // stemming
     checkOneTerm(a, "absenţa", "absenţ");
     checkOneTerm(a, "absenţi", "absenţ");
@@ -42,8 +42,8 @@ public class TestRomanianAnalyzer extend
   
   /** test use of exclusion set */
   public void testExclude() throws IOException {
-    CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("absenţa"), false);
-    Analyzer a = new RomanianAnalyzer(TEST_VERSION_CURRENT, 
+    CharArraySet exclusionSet = new CharArraySet(asSet("absenţa"), false);
+    Analyzer a = new RomanianAnalyzer(
         RomanianAnalyzer.getDefaultStopSet(), exclusionSet);
     checkOneTerm(a, "absenţa", "absenţa");
     checkOneTerm(a, "absenţi", "absenţ");
@@ -51,6 +51,6 @@ public class TestRomanianAnalyzer extend
   
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new RomanianAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new RomanianAnalyzer(), 1000*RANDOM_MULTIPLIER);
   }
 }

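The recurring two-step pattern — build a CharArraySet without a Version, then pass it as the stem exclusion set — now reads like this sketch (class name invented; behavior as asserted by testExclude above):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.ro.RomanianAnalyzer;
    import org.apache.lucene.analysis.util.CharArraySet;

    public class ExclusionSketch {
      public static void main(String[] args) {
        CharArraySet exclusions = new CharArraySet(1, false);  // initial size, case-sensitive
        exclusions.add("absenţa");                             // protect this term from stemming
        Analyzer a = new RomanianAnalyzer(RomanianAnalyzer.getDefaultStopSet(), exclusions);
        // "absenţa" now passes through unchanged while other forms still stem.
      }
    }
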
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -33,7 +33,7 @@ public class TestRussianAnalyzer extends
     /** Check that RussianAnalyzer doesn't discard any numbers */
     public void testDigitsInRussianCharset() throws IOException
     {
-      RussianAnalyzer ra = new RussianAnalyzer(TEST_VERSION_CURRENT);
+      RussianAnalyzer ra = new RussianAnalyzer();
       assertAnalyzesTo(ra, "text 1000", new String[] { "text", "1000" });
     }
     
@@ -48,7 +48,7 @@ public class TestRussianAnalyzer extends
     }
     
     public void testReusableTokenStream() throws Exception {
-      Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT);
+      Analyzer a = new RussianAnalyzer();
       assertAnalyzesTo(a, "Вместе с тем о силе электромагнитной энергии имели представление еще",
           new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представлен" });
       assertAnalyzesTo(a, "Но знание это хранилось в тайне",
@@ -57,9 +57,9 @@ public class TestRussianAnalyzer extends
     
     
     public void testWithStemExclusionSet() throws Exception {
-      CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
+      CharArraySet set = new CharArraySet(1, true);
       set.add("представление");
-      Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT, RussianAnalyzer.getDefaultStopSet() , set);
+      Analyzer a = new RussianAnalyzer(RussianAnalyzer.getDefaultStopSet(), set);
       assertAnalyzesTo(a, "Вместе с тем о силе электромагнитной энергии имели представление еще",
           new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представление" });
      
@@ -67,6 +67,6 @@ public class TestRussianAnalyzer extends
     
     /** blast some random strings through the analyzer */
     public void testRandomStrings() throws Exception {
-      checkRandomData(random(), new RussianAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+      checkRandomData(random(), new RussianAnalyzer(), 1000*RANDOM_MULTIPLIER);
     }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java Thu Aug 21 03:12:52 2014
@@ -50,7 +50,7 @@ public class TestRussianLightStemFilter 
   }
   
   public void testKeyword() throws IOException {
-    final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("энергии"), false);
+    final CharArraySet exclusionSet = new CharArraySet(asSet("энергии"), false);
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Thu Aug 21 03:12:52 2014
@@ -318,9 +318,9 @@ public class ShingleAnalyzerWrapperTest 
     Analyzer delegate = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        CharArraySet stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, "into");
+        CharArraySet stopSet = StopFilter.makeStopSet("into");
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        TokenFilter filter = new StopFilter(TEST_VERSION_CURRENT, tokenizer, stopSet);
+        TokenFilter filter = new StopFilter(tokenizer, stopSet);
         return new TokenStreamComponents(tokenizer, filter);
       }
     };

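A Version-free stop-filtering chain of the shape used by the delegate analyzer above can be sketched like this (class name invented; WhitespaceTokenizer stands in for the test-framework MockTokenizer):

    import java.io.Reader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenFilter;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.StopFilter;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.util.CharArraySet;

    public class StopChainSketch {
      static Analyzer build() {
        return new Analyzer() {
          @Override
          protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
            CharArraySet stopSet = StopFilter.makeStopSet("into");   // varargs, no Version
            Tokenizer tokenizer = new WhitespaceTokenizer(reader);
            TokenFilter filter = new StopFilter(tokenizer, stopSet);
            return new TokenStreamComponents(tokenizer, filter);
          }
        };
      }
    }
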
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java Thu Aug 21 03:12:52 2014
@@ -981,7 +981,7 @@ public class ShingleFilterTest extends B
   }
   
   public void testReset() throws Exception {
-    Tokenizer wsTokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("please divide this sentence"));
+    Tokenizer wsTokenizer = new WhitespaceTokenizer(new StringReader("please divide this sentence"));
     TokenStream filter = new ShingleFilter(wsTokenizer, 2);
     assertTokenStreamContents(filter,
       new String[]{"please","please divide","divide","divide this","this","this sentence","sentence"},

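Consuming the Version-free tokenizer/ShingleFilter chain outside the test framework follows the usual reset/incrementToken/end/close protocol; a sketch (class name invented; the expected shingles come from the assertion above):

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.shingle.ShingleFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class ShingleSketch {
      public static void main(String[] args) throws IOException {
        TokenStream ts = new ShingleFilter(
            new WhitespaceTokenizer(new StringReader("please divide this sentence")), 2);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term);   // "please", "please divide", "divide", ...
        }
        ts.end();
        ts.close();
      }
    }
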
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java Thu Aug 21 03:12:52 2014
@@ -163,13 +163,17 @@ public class TestTeeSinkTokenFilter exte
     assertTokenStreamContents(dogDetector, new String[]{"Dogs", "Dogs"});
     
     source1.reset();
-    TokenStream lowerCasing = new LowerCaseFilter(TEST_VERSION_CURRENT, source1);
+    TokenStream lowerCasing = new LowerCaseFilter(source1);
     String[] lowerCaseTokens = new String[tokens1.length];
     for (int i = 0; i < tokens1.length; i++)
       lowerCaseTokens[i] = tokens1[i].toLowerCase(Locale.ROOT);
     assertTokenStreamContents(lowerCasing, lowerCaseTokens);
   }
 
+  private StandardTokenizer standardTokenizer(StringBuilder builder) throws IOException {
+    return new StandardTokenizer(new StringReader(builder.toString()));
+  }
+
   /**
    * Not an explicit test, just useful to print out some info on performance
    */
@@ -183,10 +187,10 @@ public class TestTeeSinkTokenFilter exte
         buffer.append(English.intToEnglish(i).toUpperCase(Locale.ROOT)).append(' ');
       }
       //make sure we produce the same tokens
-      TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(TEST_VERSION_CURRENT, new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
+      TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))));
       TokenStream sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(100));
       teeStream.consumeAllTokens();
-      TokenStream stream = new ModuloTokenFilter(new StandardFilter(TEST_VERSION_CURRENT, new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), 100);
+      TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), 100);
       CharTermAttribute tfTok = stream.addAttribute(CharTermAttribute.class);
       CharTermAttribute sinkTok = sink.addAttribute(CharTermAttribute.class);
       for (int i=0; stream.incrementToken(); i++) {
@@ -199,12 +203,12 @@ public class TestTeeSinkTokenFilter exte
         int tfPos = 0;
         long start = System.currentTimeMillis();
         for (int i = 0; i < 20; i++) {
-          stream = new StandardFilter(TEST_VERSION_CURRENT, new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString())));
+          stream = new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString())));
           PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
           while (stream.incrementToken()) {
             tfPos += posIncrAtt.getPositionIncrement();
           }
-          stream = new ModuloTokenFilter(new StandardFilter(TEST_VERSION_CURRENT, new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
+          stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), modCounts[j]);
           posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
           while (stream.incrementToken()) {
             tfPos += posIncrAtt.getPositionIncrement();
@@ -216,7 +220,7 @@ public class TestTeeSinkTokenFilter exte
         //simulate one field with one sink
         start = System.currentTimeMillis();
         for (int i = 0; i < 20; i++) {
-          teeStream = new TeeSinkTokenFilter(new StandardFilter(TEST_VERSION_CURRENT, new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
+          teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))));
           sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(modCounts[j]));
           PositionIncrementAttribute posIncrAtt = teeStream.getAttribute(PositionIncrementAttribute.class);
           while (teeStream.incrementToken()) {

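The tee/sink plumbing itself is untouched; only the StandardTokenizer/StandardFilter (and LowerCaseFilter) constructors shed their Version argument. A condensed sketch (names invented; the no-argument newSinkTokenStream() overload is assumed in place of the ModuloSinkFilter variant used above):

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.sinks.TeeSinkTokenFilter;
    import org.apache.lucene.analysis.standard.StandardFilter;
    import org.apache.lucene.analysis.standard.StandardTokenizer;

    public class TeeSinkSketch {
      public static void main(String[] args) throws IOException {
        TokenStream source =
            new StandardFilter(new StandardTokenizer(new StringReader("one two three")));
        TeeSinkTokenFilter tee = new TeeSinkTokenFilter(source);
        TokenStream sink = tee.newSinkTokenStream();   // replays whatever the tee sees
        tee.reset();
        tee.consumeAllTokens();   // pushes every token into the sink
        // ... consume `sink` here like any other TokenStream ...
        tee.close();
      }
    }
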
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -27,12 +27,12 @@ public class TestSwedishAnalyzer extends
  /** This test fails with an NPE when the
   * stopwords file is missing from the classpath */
   public void testResourcesAvailable() {
-    new SwedishAnalyzer(TEST_VERSION_CURRENT);
+    new SwedishAnalyzer();
   }
   
   /** test stopwords and stemming */
   public void testBasics() throws IOException {
-    Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT);
+    Analyzer a = new SwedishAnalyzer();
     // stemming
     checkOneTerm(a, "jaktkarlarne", "jaktkarl");
     checkOneTerm(a, "jaktkarlens", "jaktkarl");
@@ -42,8 +42,8 @@ public class TestSwedishAnalyzer extends
   
   /** test use of exclusion set */
   public void testExclude() throws IOException {
-    CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("jaktkarlarne"), false);
-    Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT, 
+    CharArraySet exclusionSet = new CharArraySet(asSet("jaktkarlarne"), false);
+    Analyzer a = new SwedishAnalyzer(
         SwedishAnalyzer.getDefaultStopSet(), exclusionSet);
     checkOneTerm(a, "jaktkarlarne", "jaktkarlarne");
     checkOneTerm(a, "jaktkarlens", "jaktkarl");
@@ -51,6 +51,6 @@ public class TestSwedishAnalyzer extends
   
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new SwedishAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new SwedishAnalyzer(), 1000*RANDOM_MULTIPLIER);
   }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java Thu Aug 21 03:12:52 2014
@@ -50,7 +50,7 @@ public class TestSwedishLightStemFilter 
   }
   
   public void testKeyword() throws IOException {
-    final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("jaktkarlens"), false);
+    final CharArraySet exclusionSet = new CharArraySet(asSet("jaktkarlens"), false);
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java Thu Aug 21 03:12:52 2014
@@ -100,7 +100,7 @@ public class TestSolrSynonymParser exten
   @Test(expected=ParseException.class)
   public void testInvalidPositionsInput() throws Exception {
     String testFile = "testola => the test";
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer(TEST_VERSION_CURRENT));
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer());
     parser.parse(new StringReader(testFile));
   }
   
@@ -108,7 +108,7 @@ public class TestSolrSynonymParser exten
   @Test(expected=ParseException.class)
   public void testInvalidPositionsOutput() throws Exception {
     String testFile = "the test => testola";
-    SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer(TEST_VERSION_CURRENT));
+    SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer());
     parser.parse(new StringReader(testFile));
   }
   

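For contrast with the two expected-failure cases above, parsing a well-formed rule set with the Version-free EnglishAnalyzer looks roughly like this (class name and rule strings invented):

    import java.io.StringReader;

    import org.apache.lucene.analysis.en.EnglishAnalyzer;
    import org.apache.lucene.analysis.synonym.SolrSynonymParser;
    import org.apache.lucene.analysis.synonym.SynonymMap;

    public class SynonymSketch {
      public static void main(String[] args) throws Exception {
        // dedup=true, expand=true; the analyzer tokenizes both sides of each rule.
        SolrSynonymParser parser = new SolrSynonymParser(true, true, new EnglishAnalyzer());
        parser.parse(new StringReader("couch,sofa\nsmall => tiny"));
        SynonymMap map = parser.build();   // feed this to a SynonymFilter
        System.out.println("synonym terms: " + map.words.size());
      }
    }
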
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java?rev=1619283&r1=1619282&r2=1619283&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java Thu Aug 21 03:12:52 2014
@@ -47,14 +47,14 @@ public class TestThaiAnalyzer extends Ba
    * testcase for offsets
    */
   public void testOffsets() throws Exception {
-    assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET), "การที่ได้ต้องแสดงว่างานดี",
+    assertAnalyzesTo(new ThaiAnalyzer(CharArraySet.EMPTY_SET), "การที่ได้ต้องแสดงว่างานดี",
         new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
         new int[] { 0, 3, 6, 9, 13, 17, 20, 23 },
         new int[] { 3, 6, 9, 13, 17, 20, 23, 25 });
   }
 
   public void testStopWords() throws Exception {
-    assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "การที่ได้ต้องแสดงว่างานดี",
+    assertAnalyzesTo(new ThaiAnalyzer(), "การที่ได้ต้องแสดงว่างานดี",
         new String[] { "แสดง", "งาน", "ดี" },
         new int[] { 13, 20, 23 },
         new int[] { 17, 23, 25 },
@@ -110,7 +110,7 @@ public class TestThaiAnalyzer extends Ba
    */
  // note: this test uses StopAnalyzer.ENGLISH_STOP_WORDS_SET as the stop set
   public void testPositionIncrements() throws Exception {
-    final ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+    final ThaiAnalyzer analyzer = new ThaiAnalyzer(StopAnalyzer.ENGLISH_STOP_WORDS_SET);
     assertAnalyzesTo(analyzer, "การที่ได้ต้อง the แสดงว่างานดี", 
         new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
         new int[] { 0, 3, 6, 9, 18, 22, 25, 28 },
@@ -126,7 +126,7 @@ public class TestThaiAnalyzer extends Ba
   }
 
   public void testReusableTokenStream() throws Exception {
-    ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET);
+    ThaiAnalyzer analyzer = new ThaiAnalyzer(CharArraySet.EMPTY_SET);
     assertAnalyzesTo(analyzer, "", new String[] {});
 
       assertAnalyzesTo(
@@ -159,13 +159,13 @@ public class TestThaiAnalyzer extends Ba
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random(), new ThaiAnalyzer(TEST_VERSION_CURRENT), 1000*RANDOM_MULTIPLIER);
+    checkRandomData(random(), new ThaiAnalyzer(), 1000*RANDOM_MULTIPLIER);
   }
   
   /** blast some random large strings through the analyzer */
   public void testRandomHugeStrings() throws Exception {
     Random random = random();
-    checkRandomData(random, new ThaiAnalyzer(TEST_VERSION_CURRENT), 100*RANDOM_MULTIPLIER, 8192);
+    checkRandomData(random, new ThaiAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
   }
   
   // LUCENE-3044
@@ -181,7 +181,7 @@ public class TestThaiAnalyzer extends Ba
   }
   
   public void testTwoSentences() throws Exception {
-    assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET), "This is a test. การที่ได้ต้องแสดงว่างานดี", 
+    assertAnalyzesTo(new ThaiAnalyzer(CharArraySet.EMPTY_SET), "This is a test. การที่ได้ต้องแสดงว่างานดี",
           new String[] { "this", "is", "a", "test", "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
           new int[] { 0, 5, 8, 10, 16, 19, 22, 25, 29, 33, 36, 39 },
           new int[] { 4, 7, 9, 14, 19, 22, 25, 29, 33, 36, 39, 41 });
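
Both ThaiAnalyzer constructors exercised in this file keep their argument lists minus the Version; a standalone sketch (class name invented):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.th.ThaiAnalyzer;
    import org.apache.lucene.analysis.util.CharArraySet;

    public class ThaiSketch {
      public static void main(String[] args) {
        Analyzer withStops = new ThaiAnalyzer();                       // default Thai stop words
        Analyzer noStops = new ThaiAnalyzer(CharArraySet.EMPTY_SET);   // keep every token
        // Both are drop-in replacements for the old Version-taking constructors.
      }
    }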