Posted to commits@lucene.apache.org by rm...@apache.org on 2013/04/06 13:08:21 UTC

svn commit: r1465224 [6/9] - in /lucene/dev/branches/branch_4x: ./ lucene/ lucene/analysis/ lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/ lucene/analysis/common/src/java/or...

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -17,45 +17,38 @@ package org.apache.lucene.analysis.misce
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.analysis.util.ClasspathResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoader;
 
-import java.util.Map;
-import java.util.HashMap;
-
-/**
- *
- *
- **/
-public class TestKeepFilterFactory extends BaseTokenStreamTestCase {
+public class TestKeepFilterFactory extends BaseTokenStreamFactoryTestCase {
 
   public void testInform() throws Exception {
     ResourceLoader loader = new ClasspathResourceLoader(getClass());
     assertTrue("loader is null and it shouldn't be", loader != null);
-    KeepWordFilterFactory factory = new KeepWordFilterFactory();
-    Map<String, String> args = new HashMap<String, String>();
-    args.put("words", "keep-1.txt");
-    args.put("ignoreCase", "true");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(loader);
+    KeepWordFilterFactory factory = (KeepWordFilterFactory) tokenFilterFactory("KeepWord",
+        "words", "keep-1.txt",
+        "ignoreCase", "true");
     CharArraySet words = factory.getWords();
     assertTrue("words is null and it shouldn't be", words != null);
     assertTrue("words Size: " + words.size() + " is not: " + 2, words.size() == 2);
 
-
-    factory = new KeepWordFilterFactory();
-    args.put("words", "keep-1.txt, keep-2.txt");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(loader);
+    factory = (KeepWordFilterFactory) tokenFilterFactory("KeepWord",
+        "words", "keep-1.txt, keep-2.txt",
+        "ignoreCase", "true");
     words = factory.getWords();
     assertTrue("words is null and it shouldn't be", words != null);
     assertTrue("words Size: " + words.size() + " is not: " + 4, words.size() == 4);
-
-
-
+  }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("KeepWord", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
   }
 }
\ No newline at end of file
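
Every file in this hunk follows the same migration: the old four-step setup (construct the factory, setLuceneMatchVersion(...), init(args), inform(loader)) collapses into a single tokenFilterFactory(name, key, value, ...) call on the new BaseTokenStreamFactoryTestCase. That base class is not part of this hunk, so the following is only a plausible sketch of what the helper does, reconstructed from the removed lines above; the TokenFilterFactory.forName(name, args) lookup and the "luceneMatchVersion" argument key are assumptions, not code from this commit.

    // Hypothetical sketch, not the actual BaseTokenStreamFactoryTestCase source.
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.util.ResourceLoader;
    import org.apache.lucene.analysis.util.ResourceLoaderAware;
    import org.apache.lucene.analysis.util.TokenFilterFactory;
    import org.apache.lucene.util.Version;

    protected TokenFilterFactory tokenFilterFactory(String name,
        Version matchVersion, ResourceLoader loader,
        String... keysAndValues) throws Exception {
      Map<String,String> args = new HashMap<String,String>();
      for (int i = 0; i < keysAndValues.length; i += 2) {
        args.put(keysAndValues[i], keysAndValues[i + 1]); // e.g. "words" -> "keep-1.txt"
      }
      args.put("luceneMatchVersion", matchVersion.toString()); // assumed key
      // SPI lookup by short name ("KeepWord" -> KeepWordFilterFactory). The
      // factory constructor now consumes its arguments and throws
      // IllegalArgumentException("Unknown parameters: ...") on leftovers,
      // which is exactly what the new testBogusArguments tests assert.
      TokenFilterFactory factory = TokenFilterFactory.forName(name, args);
      if (factory instanceof ResourceLoaderAware) {
        ((ResourceLoaderAware) factory).inform(loader);
      }
      return factory;
    }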

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -17,114 +17,89 @@ package org.apache.lucene.analysis.misce
  * limitations under the License.
  */
 
-import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.en.PorterStemFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.util.ResourceLoader;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 import org.apache.lucene.analysis.util.StringMockResourceLoader;
 
 /**
  * Simple tests to ensure the keyword marker filter factory is working.
  */
-public class TestKeywordMarkerFilterFactory extends BaseTokenStreamTestCase {
+public class TestKeywordMarkerFilterFactory extends BaseTokenStreamFactoryTestCase {
   
-  public void testKeywords() throws IOException {
+  public void testKeywords() throws Exception {
     Reader reader = new StringReader("dogs cats");
-    Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    ResourceLoader loader = new StringMockResourceLoader("cats");
-    args.put("protected", "protwords.txt");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(loader);
-    
-    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
-    assertTokenStreamContents(ts, new String[] { "dog", "cats" });
-    
-    
-    reader = new StringReader("dogs cats");
-    tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    factory = new KeywordMarkerFilterFactory();
-    args = new HashMap<String,String>();
-    
-    args.put("pattern", "cats|Dogs");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(null);
-    
-    ts = new PorterStemFilter(factory.create(tokenizer));
-    assertTokenStreamContents(ts, new String[] { "dog", "cats" });
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("KeywordMarker", TEST_VERSION_CURRENT,
+        new StringMockResourceLoader("cats"),
+        "protected", "protwords.txt").create(stream);
+    stream = tokenFilterFactory("PorterStem").create(stream);
+    assertTokenStreamContents(stream, new String[] { "dog", "cats" });
   }
   
-  public void testKeywordsMixed() throws IOException {
+  public void testKeywords2() throws Exception {
+    Reader reader = new StringReader("dogs cats");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("KeywordMarker",
+        "pattern", "cats|Dogs").create(stream);
+    stream = tokenFilterFactory("PorterStem").create(stream);
+    assertTokenStreamContents(stream, new String[] { "dog", "cats" });
+  }
+  
+  public void testKeywordsMixed() throws Exception {
     Reader reader = new StringReader("dogs cats birds");
-    Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    ResourceLoader loader = new StringMockResourceLoader("cats");
-    args.put("protected", "protwords.txt");
-    args.put("pattern", "birds|Dogs");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(loader);
-    
-    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
-    assertTokenStreamContents(ts, new String[] { "dog", "cats", "birds" });
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("KeywordMarker", TEST_VERSION_CURRENT,
+        new StringMockResourceLoader("cats"),
+        "protected", "protwords.txt",
+        "pattern", "birds|Dogs").create(stream);
+    stream = tokenFilterFactory("PorterStem").create(stream);
+    assertTokenStreamContents(stream, new String[] { "dog", "cats", "birds" });
+  }
+  
+  public void testKeywordsCaseInsensitive() throws Exception {
+    Reader reader = new StringReader("dogs cats Cats");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("KeywordMarker", TEST_VERSION_CURRENT,
+        new StringMockResourceLoader("cats"),
+        "protected", "protwords.txt",
+        "ignoreCase", "true").create(stream);
+    stream = tokenFilterFactory("PorterStem").create(stream);
+    assertTokenStreamContents(stream, new String[] { "dog", "cats", "Cats" });
   }
   
-  public void testKeywordsCaseInsensitive() throws IOException {
+  public void testKeywordsCaseInsensitive2() throws Exception {
     Reader reader = new StringReader("dogs cats Cats");
-    Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    ResourceLoader loader = new StringMockResourceLoader("cats");
-    args.put("protected", "protwords.txt");
-    args.put("ignoreCase", "true");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(loader);
-    
-    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
-    assertTokenStreamContents(ts, new String[] { "dog", "cats", "Cats" });
-    
-    reader = new StringReader("dogs cats Cats");
-    tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    factory = new KeywordMarkerFilterFactory();
-    args = new HashMap<String,String>();
-    
-    args.put("pattern", "Cats");
-    args.put("ignoreCase", "true");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(null);
-    
-    ts = new PorterStemFilter(factory.create(tokenizer));
-    assertTokenStreamContents(ts, new String[] { "dog", "cats", "Cats" });
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("KeywordMarker",
+        "pattern", "Cats",
+        "ignoreCase", "true").create(stream);
+    stream = tokenFilterFactory("PorterStem").create(stream);;
+    assertTokenStreamContents(stream, new String[] { "dog", "cats", "Cats" });
   }
   
-  public void testKeywordsCaseInsensitiveMixed() throws IOException {
+  public void testKeywordsCaseInsensitiveMixed() throws Exception {
     Reader reader = new StringReader("dogs cats Cats Birds birds");
-    Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    ResourceLoader loader = new StringMockResourceLoader("cats");
-    args.put("protected", "protwords.txt");
-    args.put("pattern", "birds");
-    args.put("ignoreCase", "true");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(loader);
-    
-    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
-    assertTokenStreamContents(ts, new String[] { "dog", "cats", "Cats", "Birds", "birds" });
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("KeywordMarker", TEST_VERSION_CURRENT,
+        new StringMockResourceLoader("cats"),
+        "protected", "protwords.txt",
+        "pattern", "birds",
+        "ignoreCase", "true").create(stream);
+    stream = tokenFilterFactory("PorterStem").create(stream);
+    assertTokenStreamContents(stream, new String[] { "dog", "cats", "Cats", "Birds", "birds" });
+  }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("KeywordMarker", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
   }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -16,35 +16,44 @@ package org.apache.lucene.analysis.misce
  * limitations under the License.
  */
 
-import java.io.IOException;
+import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
-public class TestLengthFilterFactory extends BaseTokenStreamTestCase {
+public class TestLengthFilterFactory extends BaseTokenStreamFactoryTestCase {
 
-  public void test() throws IOException {
-    LengthFilterFactory factory = new LengthFilterFactory();
-    Map<String, String> args = new HashMap<String, String>();
-    args.put(LengthFilterFactory.MIN_KEY, String.valueOf(4));
-    args.put(LengthFilterFactory.MAX_KEY, String.valueOf(10));
-    // default: args.put("enablePositionIncrements", "false");
-    factory.init(args);
-    String test = "foo foobar super-duper-trooper";
-    TokenStream stream = factory.create(new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false));
+  public void test() throws Exception {
+    Reader reader = new StringReader("foo foobar super-duper-trooper");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("Length",
+        "min", "4",
+        "max", "10").create(stream);
     assertTokenStreamContents(stream, new String[] { "foobar" }, new int[] { 1 });
+  }
 
-    factory = new LengthFilterFactory();
-    args = new HashMap<String, String>();
-    args.put(LengthFilterFactory.MIN_KEY, String.valueOf(4));
-    args.put(LengthFilterFactory.MAX_KEY, String.valueOf(10));
-    args.put("enablePositionIncrements", "true");
-    factory.init(args);
-    stream = factory.create(new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false));
+  public void testPositionIncrements() throws Exception {
+    Reader reader = new StringReader("foo foobar super-duper-trooper");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("Length",
+        "min", "4",
+        "max", "10",
+        "enablePositionIncrements", "true").create(stream);
     assertTokenStreamContents(stream, new String[] { "foobar" }, new int[] { 2 });
   }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("Length", 
+          "min", "4", 
+          "max", "5", 
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }
\ No newline at end of file
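
The split into test() and testPositionIncrements() also makes the expected int[] values readable: they are position increments, { 1 } without and { 2 } with enablePositionIncrements. A direct equivalent of the second test, assuming the Lucene 4.x LengthFilter(boolean enablePositionIncrements, TokenStream in, int min, int max) constructor:

    import java.io.StringReader;
    import org.apache.lucene.analysis.MockTokenizer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.miscellaneous.LengthFilter;

    TokenStream stream = new MockTokenizer(
        new StringReader("foo foobar super-duper-trooper"),
        MockTokenizer.WHITESPACE, false);
    // Keep only 4..10-char tokens: "foo" (3) and "super-duper-trooper" (19) are dropped.
    stream = new LengthFilter(true, stream, 4, 10);
    // With position increments enabled, the dropped "foo" leaves a hole, so
    // "foobar" is emitted with a position increment of 2 instead of 1.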

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -16,40 +16,46 @@ package org.apache.lucene.analysis.misce
  * limitations under the License.
  */
 
-import java.io.IOException;
+import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
-public class TestLimitTokenCountFilterFactory extends BaseTokenStreamTestCase {
+public class TestLimitTokenCountFilterFactory extends BaseTokenStreamFactoryTestCase {
 
-  public void test() throws IOException {
-    LimitTokenCountFilterFactory factory = new LimitTokenCountFilterFactory();
-    Map<String, String> args = new HashMap<String, String>();
-    args.put(LimitTokenCountFilterFactory.MAX_TOKEN_COUNT_KEY, "3");
-    factory.init(args);
-    String test = "A1 B2 C3 D4 E5 F6";
-    MockTokenizer tok = new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false);
+  public void test() throws Exception {
+    Reader reader = new StringReader("A1 B2 C3 D4 E5 F6");
+    MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
     // LimitTokenCountFilter doesn't consume the entire stream that it wraps
-    tok.setEnableChecks(false); 
-    TokenStream stream = factory.create(tok);
+    tokenizer.setEnableChecks(false);
+    TokenStream stream = tokenizer;
+    stream = tokenFilterFactory("LimitTokenCount",
+        "maxTokenCount", "3").create(stream);
     assertTokenStreamContents(stream, new String[] { "A1", "B2", "C3" });
+  }
 
+  public void testRequired() throws Exception {
     // param is required
-    factory = new LimitTokenCountFilterFactory();
-    args = new HashMap<String, String>();
-    IllegalArgumentException iae = null;
     try {
-      factory.init(args);
+      tokenFilterFactory("LimitTokenCount");
+      fail();
     } catch (IllegalArgumentException e) {
       assertTrue("exception doesn't mention param: " + e.getMessage(),
                  0 < e.getMessage().indexOf(LimitTokenCountFilterFactory.MAX_TOKEN_COUNT_KEY));
-      iae = e;
     }
-    assertNotNull("no exception thrown", iae);
+  }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("LimitTokenCount", 
+          "maxTokenCount", "3", 
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
   }
 }
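
The setEnableChecks(false) call is load-bearing: MockTokenizer verifies the full TokenStream contract, including that consumers drain the stream, while LimitTokenCountFilter stops after maxTokenCount tokens by design. The same wiring without the factory, assuming the Lucene 4.x LimitTokenCountFilter(TokenStream, int) constructor:

    import java.io.StringReader;
    import org.apache.lucene.analysis.MockTokenizer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter;

    MockTokenizer tokenizer = new MockTokenizer(
        new StringReader("A1 B2 C3 D4 E5 F6"), MockTokenizer.WHITESPACE, false);
    tokenizer.setEnableChecks(false); // the wrapped stream is abandoned early by design
    TokenStream stream = new LimitTokenCountFilter(tokenizer, 3); // emits "A1", "B2", "C3"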

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -16,69 +16,69 @@ package org.apache.lucene.analysis.misce
  * limitations under the License.
  */
 
-import java.io.IOException;
+import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.shingle.ShingleFilter;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
-public class TestLimitTokenPositionFilterFactory extends BaseTokenStreamTestCase {
+public class TestLimitTokenPositionFilterFactory extends BaseTokenStreamFactoryTestCase {
 
-  public void testMaxPosition1() throws IOException {
-    LimitTokenPositionFilterFactory factory = new LimitTokenPositionFilterFactory();
-    Map<String, String> args = new HashMap<String, String>();
-    args.put(LimitTokenPositionFilterFactory.MAX_TOKEN_POSITION_KEY, "1");
-    factory.init(args);
-    String test = "A1 B2 C3 D4 E5 F6";
-    MockTokenizer tok = new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false);
+  public void testMaxPosition1() throws Exception {
+    Reader reader = new StringReader("A1 B2 C3 D4 E5 F6");
+    MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
     // LimitTokenPositionFilter doesn't consume the entire stream that it wraps
-    tok.setEnableChecks(false);
-    TokenStream stream = factory.create(tok);
+    tokenizer.setEnableChecks(false);
+    TokenStream stream = tokenizer;
+    stream = tokenFilterFactory("LimitTokenPosition",
+        "maxTokenPosition", "1").create(stream);
     assertTokenStreamContents(stream, new String[] { "A1" });
   }
   
-  public void testMissingParam() {
-    LimitTokenPositionFilterFactory factory = new LimitTokenPositionFilterFactory();
-    Map<String, String> args = new HashMap<String, String>();
-    IllegalArgumentException iae = null;
+  public void testMissingParam() throws Exception {
     try {
-      factory.init(args);
+      tokenFilterFactory("LimitTokenPosition");
+      fail();
     } catch (IllegalArgumentException e) {
       assertTrue("exception doesn't mention param: " + e.getMessage(),
           0 < e.getMessage().indexOf(LimitTokenPositionFilterFactory.MAX_TOKEN_POSITION_KEY));
-      iae = e;
     }
-    assertNotNull("no exception thrown", iae);
   }
 
-  public void testMaxPosition1WithShingles() throws IOException {
-    LimitTokenPositionFilterFactory factory = new LimitTokenPositionFilterFactory();
-    Map<String, String> args = new HashMap<String, String>();
-    args.put(LimitTokenPositionFilterFactory.MAX_TOKEN_POSITION_KEY, "1");
-    factory.init(args);
-    String input = "one two three four five";
-    MockTokenizer tok = new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false);
+  public void testMaxPosition1WithShingles() throws Exception {
+    Reader reader = new StringReader("one two three four five");
+    MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
     // LimitTokenPositionFilter doesn't consume the entire stream that it wraps
-    tok.setEnableChecks(false);
-    ShingleFilter shingleFilter = new ShingleFilter(tok, 2, 3);
-    shingleFilter.setOutputUnigrams(true);
-    TokenStream stream = factory.create(shingleFilter);
+    tokenizer.setEnableChecks(false);
+    TokenStream stream = tokenizer;
+    stream = tokenFilterFactory("Shingle",
+        "minShingleSize", "2",
+        "maxShingleSize", "3",
+        "outputUnigrams", "true").create(stream);
+    stream = tokenFilterFactory("LimitTokenPosition",
+        "maxTokenPosition", "1").create(stream);
     assertTokenStreamContents(stream, new String[] { "one", "one two", "one two three" });
   }
   
-  public void testConsumeAllTokens() throws IOException {
-    LimitTokenPositionFilterFactory factory = new LimitTokenPositionFilterFactory();
-    Map<String, String> args = new HashMap<String, String>();
-    args.put(LimitTokenPositionFilterFactory.MAX_TOKEN_POSITION_KEY, "3");
-    args.put(LimitTokenPositionFilterFactory.CONSUME_ALL_TOKENS_KEY, "true");
-    factory.init(args);
-    String test = "A1 B2 C3 D4 E5 F6";
-    MockTokenizer tok = new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false);
-    TokenStream stream = factory.create(tok);
+  public void testConsumeAllTokens() throws Exception {
+    Reader reader = new StringReader("A1 B2 C3 D4 E5 F6");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("LimitTokenPosition",
+        "maxTokenPosition", "3",
+        "consumeAllTokens", "true").create(stream);
     assertTokenStreamContents(stream, new String[] { "A1", "B2", "C3" });
   }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("LimitTokenPosition", 
+          "maxTokenPosition", "3", 
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }
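
Why three tokens survive maxTokenPosition=1 in testMaxPosition1WithShingles: with output unigrams enabled, ShingleFilter emits "one two" and "one two three" with a position increment of 0, so they occupy the same position as the unigram "one". The equivalent direct wiring (the ShingleFilter calls appear in the removed lines above; the LimitTokenPositionFilter(TokenStream, int) constructor is an assumption):

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.miscellaneous.LimitTokenPositionFilter;
    import org.apache.lucene.analysis.shingle.ShingleFilter;

    // "tokenizer" is the MockTokenizer from the test above.
    ShingleFilter shingles = new ShingleFilter(tokenizer, 2, 3);
    shingles.setOutputUnigrams(true); // shingles follow their unigram at increment 0
    TokenStream stream = new LimitTokenPositionFilter(shingles, 1);
    // -> "one", "one two", "one two three", all at position 1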

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -17,54 +17,24 @@ package org.apache.lucene.analysis.misce
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-
-import java.util.Iterator;
-import java.util.Arrays;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /** Simple tests to ensure this factory is working */
-public class TestRemoveDuplicatesTokenFilterFactory extends BaseTokenStreamTestCase {
+public class TestRemoveDuplicatesTokenFilterFactory extends BaseTokenStreamFactoryTestCase {
 
   public static Token tok(int pos, String t, int start, int end) {
     Token tok = new Token(t,start,end);
     tok.setPositionIncrement(pos);
     return tok;
   }
-  public static Token tok(int pos, String t) {
-    return tok(pos, t, 0,0);
-  }
 
-  public void testDups(final String expected, final Token... tokens)
-    throws Exception {
-
-    final Iterator<Token> toks = Arrays.asList(tokens).iterator();
-    RemoveDuplicatesTokenFilterFactory factory = new RemoveDuplicatesTokenFilterFactory();
-    final TokenStream ts = factory.create
-      (new TokenStream() {
-          CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-          OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
-          PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
-          @Override
-          public boolean incrementToken() {
-            if (toks.hasNext()) {
-              clearAttributes();
-              Token tok = toks.next();
-              termAtt.setEmpty().append(tok);
-              offsetAtt.setOffset(tok.startOffset(), tok.endOffset());
-              posIncAtt.setPositionIncrement(tok.getPositionIncrement());
-              return true;
-            } else {
-              return false;
-            }
-          }
-        });
-    
-    assertTokenStreamContents(ts, expected.split("\\s"));   
+  public void testDups(final String expected, final Token... tokens) throws Exception {
+    TokenStream stream = new CannedTokenStream(tokens);
+    stream = tokenFilterFactory("RemoveDuplicates").create(stream);
+    assertTokenStreamContents(stream, expected.split("\\s"));   
   }
  
   public void testSimpleDups() throws Exception {
@@ -77,4 +47,14 @@ public class TestRemoveDuplicatesTokenFi
              ,tok(1,"E",21, 25)
              ); 
   }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("RemoveDuplicates", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -17,53 +17,49 @@ package org.apache.lucene.analysis.misce
  * limitations under the License.
  */
 
-import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.en.PorterStemFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.util.ResourceLoader;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 import org.apache.lucene.analysis.util.StringMockResourceLoader;
 
 /**
  * Simple tests to ensure the stemmer override filter factory is working.
  */
-public class TestStemmerOverrideFilterFactory extends BaseTokenStreamTestCase {
-  public void testKeywords() throws IOException {
+public class TestStemmerOverrideFilterFactory extends BaseTokenStreamFactoryTestCase {
+  public void testKeywords() throws Exception {
     // our stemdict stems dogs to 'cat'
     Reader reader = new StringReader("testing dogs");
-    Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    StemmerOverrideFilterFactory factory = new StemmerOverrideFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    ResourceLoader loader = new StringMockResourceLoader("dogs\tcat");
-    args.put("dictionary", "stemdict.txt");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(loader);
-    
-    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
-    assertTokenStreamContents(ts, new String[] { "test", "cat" });
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("StemmerOverride", TEST_VERSION_CURRENT,
+        new StringMockResourceLoader("dogs\tcat"),
+        "dictionary", "stemdict.txt").create(stream);
+    stream = tokenFilterFactory("PorterStem").create(stream);
+
+    assertTokenStreamContents(stream, new String[] { "test", "cat" });
   }
   
-  public void testKeywordsCaseInsensitive() throws IOException {
+  public void testKeywordsCaseInsensitive() throws Exception {
     Reader reader = new StringReader("testing DoGs");
-    Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    StemmerOverrideFilterFactory factory = new StemmerOverrideFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    ResourceLoader loader = new StringMockResourceLoader("dogs\tcat");
-    args.put("dictionary", "stemdict.txt");
-    args.put("ignoreCase", "true");
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    factory.init(args);
-    factory.inform(loader);
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("StemmerOverride", TEST_VERSION_CURRENT,
+        new StringMockResourceLoader("dogs\tcat"),
+        "dictionary", "stemdict.txt",
+        "ignoreCase", "true").create(stream);
+    stream = tokenFilterFactory("PorterStem").create(stream);
     
-    TokenStream ts = new PorterStemFilter(factory.create(tokenizer));
-    assertTokenStreamContents(ts, new String[] { "test", "cat" });
+    assertTokenStreamContents(stream, new String[] { "test", "cat" });
+  }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("StemmerOverride", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
   }
 }
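
The StringMockResourceLoader stands in for the stemdict.txt resource named by the "dictionary" argument: its constructor string is the file body. The format is one mapping per line, input term and fixed stem separated by a literal tab, so an on-disk equivalent of the mock above would contain (where <TAB> is a single tab character):

    dogs<TAB>cat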

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -17,24 +17,31 @@ package org.apache.lucene.analysis.misce
  * limitations under the License.
  */
 
+import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure this factory is working
  */
-public class TestTrimFilterFactory extends BaseTokenStreamTestCase {
+public class TestTrimFilterFactory extends BaseTokenStreamFactoryTestCase {
   public void testTrimming() throws Exception {
-    TrimFilterFactory factory = new TrimFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("updateOffsets", "false");
-    factory.init(args);
-    TokenStream ts = factory.create(new MockTokenizer(new StringReader("trim me    "), MockTokenizer.KEYWORD, false));
-    assertTokenStreamContents(ts, new String[] { "trim me" });
+    Reader reader = new StringReader("trim me    ");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.KEYWORD, false);
+    stream = tokenFilterFactory("Trim").create(stream);
+    assertTokenStreamContents(stream, new String[] { "trim me" });
+  }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("Trim", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
   }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java Sat Apr  6 11:08:17 2013
@@ -19,146 +19,158 @@ package org.apache.lucene.analysis.ngram
 
 import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the NGram filter factories are working.
  */
-public class TestNGramFilters extends BaseTokenStreamTestCase {
+public class TestNGramFilters extends BaseTokenStreamFactoryTestCase {
   /**
    * Test NGramTokenizerFactory
    */
   public void testNGramTokenizer() throws Exception {
     Reader reader = new StringReader("test");
-    Map<String,String> args = new HashMap<String,String>();
-    NGramTokenizerFactory factory = new NGramTokenizerFactory();
-    factory.init(args);
-    Tokenizer stream = factory.create(reader);
+    TokenStream stream = tokenizerFactory("NGram").create(reader);
     assertTokenStreamContents(stream, 
         new String[] { "t", "e", "s", "t", "te", "es", "st" });
   }
+
   /**
    * Test NGramTokenizerFactory with min and max gram options
    */
   public void testNGramTokenizer2() throws Exception {
     Reader reader = new StringReader("test");
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("minGramSize", "2");
-    args.put("maxGramSize", "3");
-    NGramTokenizerFactory factory = new NGramTokenizerFactory();
-    factory.init(args);
-    Tokenizer stream = factory.create(reader);
+    TokenStream stream = tokenizerFactory("NGram",
+        "minGramSize", "2",
+        "maxGramSize", "3").create(reader);
     assertTokenStreamContents(stream, 
         new String[] { "te", "es", "st", "tes", "est" });
   }
+
   /**
    * Test the NGramFilterFactory
    */
   public void testNGramFilter() throws Exception {
     Reader reader = new StringReader("test");
-    Map<String,String> args = new HashMap<String,String>();
-    NGramFilterFactory factory = new NGramFilterFactory();
-    factory.init(args);
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("NGram").create(stream);
     assertTokenStreamContents(stream, 
         new String[] { "t", "e", "s", "t", "te", "es", "st" });
   }
+
   /**
    * Test the NGramFilterFactory with min and max gram options
    */
   public void testNGramFilter2() throws Exception {
     Reader reader = new StringReader("test");
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("minGramSize", "2");
-    args.put("maxGramSize", "3");
-    NGramFilterFactory factory = new NGramFilterFactory();
-    factory.init(args);
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("NGram",
+        "minGramSize", "2",
+        "maxGramSize", "3").create(stream);
     assertTokenStreamContents(stream, 
         new String[] { "te", "es", "st", "tes", "est" });
   }
+
   /**
    * Test EdgeNGramTokenizerFactory
    */
   public void testEdgeNGramTokenizer() throws Exception {
     Reader reader = new StringReader("test");
-    Map<String,String> args = new HashMap<String,String>();
-    EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory();
-    factory.init(args);
-    Tokenizer stream = factory.create(reader);
+    TokenStream stream = tokenizerFactory("EdgeNGram").create(reader);
     assertTokenStreamContents(stream, 
         new String[] { "t" });
   }
+
   /**
    * Test EdgeNGramTokenizerFactory with min and max gram size
    */
   public void testEdgeNGramTokenizer2() throws Exception {
     Reader reader = new StringReader("test");
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("minGramSize", "1");
-    args.put("maxGramSize", "2");
-    EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory();
-    factory.init(args);
-    Tokenizer stream = factory.create(reader);
+    TokenStream stream = tokenizerFactory("EdgeNGram",
+        "minGramSize", "1",
+        "maxGramSize", "2").create(reader);
     assertTokenStreamContents(stream, 
         new String[] { "t", "te" });
   }
+
   /**
    * Test EdgeNGramTokenizerFactory with side option
    */
   public void testEdgeNGramTokenizer3() throws Exception {
     Reader reader = new StringReader("ready");
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("side", "back");
-    EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory();
-    factory.init(args);
-    Tokenizer stream = factory.create(reader);
+    TokenStream stream = tokenizerFactory("EdgeNGram",
+        "side", "back").create(reader);
     assertTokenStreamContents(stream, 
         new String[] { "y" });
   }
+
   /**
    * Test EdgeNGramFilterFactory
    */
   public void testEdgeNGramFilter() throws Exception {
     Reader reader = new StringReader("test");
-    Map<String,String> args = new HashMap<String,String>();
-    EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory();
-    factory.init(args);
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("EdgeNGram").create(stream);
     assertTokenStreamContents(stream, 
         new String[] { "t" });
   }
+
   /**
    * Test EdgeNGramFilterFactory with min and max gram size
    */
   public void testEdgeNGramFilter2() throws Exception {
     Reader reader = new StringReader("test");
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("minGramSize", "1");
-    args.put("maxGramSize", "2");
-    EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory();
-    factory.init(args);
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("EdgeNGram",
+        "minGramSize", "1",
+        "maxGramSize", "2").create(stream);
     assertTokenStreamContents(stream, 
         new String[] { "t", "te" });
   }
+
   /**
    * Test EdgeNGramFilterFactory with side option
    */
   public void testEdgeNGramFilter3() throws Exception {
     Reader reader = new StringReader("ready");
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("side", "back");
-    EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory();
-    factory.init(args);
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("EdgeNGram",
+        "side", "back").create(stream);
     assertTokenStreamContents(stream, 
         new String[] { "y" });
   }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenizerFactory("NGram", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+    
+    try {
+      tokenizerFactory("EdgeNGram", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+    
+    try {
+      tokenFilterFactory("NGram", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+    
+    try {
+      tokenFilterFactory("EdgeNGram", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }
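
This same try/fail/catch block now appears four times in this file alone, and once per factory across the rest of this commit. A possible follow-up, sketched here purely as an illustration (it is not part of this patch), would hoist it into the shared base class:

    /** Hypothetical helper: asserts that the named token filter factory
     *  rejects an unknown parameter with an "Unknown parameters" message. */
    protected void assertBogusArgsRejected(String factoryName, String... args) throws Exception {
      String[] withBogus = new String[args.length + 2];
      System.arraycopy(args, 0, withBogus, 0, args.length);
      withBogus[args.length] = "bogusArg";
      withBogus[args.length + 1] = "bogusValue";
      try {
        tokenFilterFactory(factoryName, withBogus);
        fail("expected IllegalArgumentException");
      } catch (IllegalArgumentException expected) {
        assertTrue(expected.getMessage().contains("Unknown parameters"));
      }
    }

Tokenizer and char filter factories would need twin helpers built on tokenizerFactory and charFilterFactory.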

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -20,18 +20,28 @@ package org.apache.lucene.analysis.no;
 import java.io.Reader;
 import java.io.StringReader;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the Norwegian Light stem factory is working.
  */
-public class TestNorwegianLightStemFilterFactory extends BaseTokenStreamTestCase {
+public class TestNorwegianLightStemFilterFactory extends BaseTokenStreamFactoryTestCase {
   public void testStemming() throws Exception {
     Reader reader = new StringReader("epler eple");
-    NorwegianLightStemFilterFactory factory = new NorwegianLightStemFilterFactory();
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("NorwegianLightStem").create(stream);
     assertTokenStreamContents(stream, new String[] { "epl", "epl" });
   }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("NorwegianLightStem", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -20,18 +20,28 @@ package org.apache.lucene.analysis.no;
 import java.io.Reader;
 import java.io.StringReader;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the Norwegian Minimal stem factory is working.
  */
-public class TestNorwegianMinimalStemFilterFactory extends BaseTokenStreamTestCase {
+public class TestNorwegianMinimalStemFilterFactory extends BaseTokenStreamFactoryTestCase {
   public void testStemming() throws Exception {
     Reader reader = new StringReader("eple eplet epler eplene eplets eplenes");
-    NorwegianMinimalStemFilterFactory factory = new NorwegianMinimalStemFilterFactory();
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("NorwegianMinimalStem").create(stream);
     assertTokenStreamContents(stream, new String[] { "epl", "epl", "epl", "epl", "epl", "epl" });
   }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("NorwegianMinimalStem", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -17,31 +17,27 @@ package org.apache.lucene.analysis.patte
  * limitations under the License.
  */
 
-import java.io.IOException;
+import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure this factory is working
  */
-public class TestPatternReplaceCharFilterFactory extends BaseTokenStreamTestCase {
+public class TestPatternReplaceCharFilterFactory extends BaseTokenStreamFactoryTestCase {
   
   //           1111
   // 01234567890123
   // this is test.
-  public void testNothingChange() throws IOException {
-    final String BLOCK = "this is test.";
-    PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("pattern", "(aa)\\s+(bb)\\s+(cc)");
-    args.put("replacement", "$1$2$3");
-    factory.init(args);
-    CharFilter cs = factory.create(
-          new StringReader( BLOCK ) );
-    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+  public void testNothingChange() throws Exception {
+    Reader reader = new StringReader("this is test.");
+    reader = charFilterFactory("PatternReplace",
+        "pattern", "(aa)\\s+(bb)\\s+(cc)",
+        "replacement", "$1$2$3").create(reader);
+    TokenStream ts = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
     assertTokenStreamContents(ts,
         new String[] { "this", "is", "test." },
         new int[] { 0, 5, 8 },
@@ -50,37 +46,38 @@ public class TestPatternReplaceCharFilte
   
   // 012345678
   // aa bb cc
-  public void testReplaceByEmpty() throws IOException {
-    final String BLOCK = "aa bb cc";
-    PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("pattern", "(aa)\\s+(bb)\\s+(cc)");
-    factory.init(args);
-    CharFilter cs = factory.create(
-          new StringReader( BLOCK ) );
-    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
-    ts.reset();
-    assertFalse(ts.incrementToken());
-    ts.end();
-    ts.close();
+  public void testReplaceByEmpty() throws Exception {
+    Reader reader = new StringReader("aa bb cc");
+    reader = charFilterFactory("PatternReplace",
+        "pattern", "(aa)\\s+(bb)\\s+(cc)").create(reader);
+    TokenStream ts = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    assertTokenStreamContents(ts, new String[] {});
   }
   
   // 012345678
   // aa bb cc
   // aa#bb#cc
-  public void test1block1matchSameLength() throws IOException {
-    final String BLOCK = "aa bb cc";
-    PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("pattern", "(aa)\\s+(bb)\\s+(cc)");
-    args.put("replacement", "$1#$2#$3");
-    factory.init(args);
-    CharFilter cs = factory.create(
-          new StringReader( BLOCK ) );
-    TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false);
+  public void test1block1matchSameLength() throws Exception {
+    Reader reader = new StringReader("aa bb cc");
+    reader = charFilterFactory("PatternReplace",
+        "pattern", "(aa)\\s+(bb)\\s+(cc)",
+        "replacement", "$1#$2#$3").create(reader);
+    TokenStream ts = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
     assertTokenStreamContents(ts,
         new String[] { "aa#bb#cc" },
         new int[] { 0 },
         new int[] { 8 });
   }
+  
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      charFilterFactory("PatternReplace",
+          "pattern", "something",
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }
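
For comparison, the same char filter wired directly, assuming the Lucene 4.x PatternReplaceCharFilter(Pattern, String, Reader) constructor:

    import java.io.Reader;
    import java.io.StringReader;
    import java.util.regex.Pattern;
    import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter;

    Reader reader = new PatternReplaceCharFilter(
        Pattern.compile("(aa)\\s+(bb)\\s+(cc)"), "$1#$2#$3",
        new StringReader("aa bb cc"));
    // Downstream tokenizers see "aa#bb#cc"; the char filter corrects offsets
    // so they still map back into the original text.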

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -17,30 +17,38 @@ package org.apache.lucene.analysis.patte
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
+import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
 /**
  * Simple tests to ensure this factory is working
  */
-public class TestPatternReplaceFilterFactory extends BaseTokenStreamTestCase {
+public class TestPatternReplaceFilterFactory extends BaseTokenStreamFactoryTestCase {
 
   public void testReplaceAll() throws Exception {
-    String input = "aabfooaabfooabfoob ab caaaaaaaaab";
-    PatternReplaceFilterFactory factory = new PatternReplaceFilterFactory();
-    Map<String,String> args = new HashMap<String,String>();
-    args.put("pattern", "a*b");
-    args.put("replacement", "-");
-    factory.init(args);
-    TokenStream ts = factory.create
-            (new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false));
+    Reader reader = new StringReader("aabfooaabfooabfoob ab caaaaaaaaab");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("PatternReplace",
+        "pattern", "a*b",
+        "replacement", "-").create(stream);
                    
-    assertTokenStreamContents(ts, 
+    assertTokenStreamContents(stream, 
         new String[] { "-foo-foo-foo-", "-", "c-" });
   }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("PatternReplace",
+          "pattern", "something",
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }
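
For reference, the chain built by tokenFilterFactory("PatternReplace", ...)
above can be assembled without the factory. A minimal sketch (class name
illustrative), assuming the 4.x constructor
PatternReplaceFilter(TokenStream, Pattern, String, boolean) and a
WhitespaceTokenizer with Version.LUCENE_42 in place of the test's
MockTokenizer:

    import java.io.StringReader;
    import java.util.regex.Pattern;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.pattern.PatternReplaceFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class PatternReplaceFilterSketch {
      public static void main(String[] args) throws Exception {
        TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_42,
            new StringReader("aabfooaabfooabfoob ab caaaaaaaaab"));
        // final argument replaceAll=true rewrites every match in each token
        ts = new PatternReplaceFilter(ts, Pattern.compile("a*b"), "-", true);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term); // -foo-foo-foo-  then  -  then  c-
        }
        ts.end();
        ts.close();
      }
    }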

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizerFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizerFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizerFactory.java Sat Apr  6 11:08:17 2013
@@ -17,25 +17,31 @@ package org.apache.lucene.analysis.patte
  * limitations under the License.
  */
 
+import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /** Simple Tests to ensure this factory is working */
-public class TestPatternTokenizerFactory extends BaseTokenStreamTestCase {
+public class TestPatternTokenizerFactory extends BaseTokenStreamFactoryTestCase {
   public void testFactory() throws Exception {
-    final String INPUT = "Günther Günther is here";
-
+    final Reader reader = new StringReader("Günther Günther is here");
     // create PatternTokenizer
-    Map<String,String> args = new HashMap<String, String>();
-    args.put( PatternTokenizerFactory.PATTERN, "[,;/\\s]+" );
-    PatternTokenizerFactory tokFactory = new PatternTokenizerFactory();
-    tokFactory.init( args );
-    TokenStream stream = tokFactory.create( new StringReader(INPUT) );
+    TokenStream stream = tokenizerFactory("Pattern", "pattern", "[,;/\\s]+").create(reader);
     assertTokenStreamContents(stream,
         new String[] { "Günther", "Günther", "is", "here" });
   }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenizerFactory("Pattern",
+          "pattern", "something",
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }
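
For reference, a minimal sketch of direct construction (class name
illustrative), assuming the 4.x constructor PatternTokenizer(Reader,
Pattern, int), where group -1 means "split on matches" in the manner of
String.split():

    import java.io.StringReader;
    import java.util.regex.Pattern;

    import org.apache.lucene.analysis.pattern.PatternTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class PatternTokenizerSketch {
      public static void main(String[] args) throws Exception {
        // group -1: the pattern is treated as a delimiter between tokens
        PatternTokenizer ts = new PatternTokenizer(
            new StringReader("Günther Günther is here"),
            Pattern.compile("[,;/\\s]+"), -1);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term); // Günther, Günther, is, here
        }
        ts.end();
        ts.close();
      }
    }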

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -17,63 +17,63 @@ package org.apache.lucene.analysis.paylo
  * limitations under the License.
  */
 
+import java.io.Reader;
 import java.io.StringReader;
-import java.util.HashMap;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
-import org.apache.lucene.analysis.payloads.FloatEncoder;
 import org.apache.lucene.analysis.payloads.PayloadHelper;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.util.ResourceLoader;
-import org.apache.lucene.analysis.util.StringMockResourceLoader;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
-public class TestDelimitedPayloadTokenFilterFactory extends BaseTokenStreamTestCase {
+public class TestDelimitedPayloadTokenFilterFactory extends BaseTokenStreamFactoryTestCase {
 
   public void testEncoder() throws Exception {
-    Map<String,String> args = new HashMap<String, String>();
-    args.put(DelimitedPayloadTokenFilterFactory.ENCODER_ATTR, "float");
-    DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory();
-    factory.init(args);
-    ResourceLoader loader = new StringMockResourceLoader("solr/collection1");
-    factory.inform(loader);
-
-    TokenStream input = new MockTokenizer(new StringReader("the|0.1 quick|0.1 red|0.1"), MockTokenizer.WHITESPACE, false);
-    DelimitedPayloadTokenFilter tf = factory.create(input);
-    tf.reset();
-    while (tf.incrementToken()){
-      PayloadAttribute payAttr = tf.getAttribute(PayloadAttribute.class);
-      assertTrue("payAttr is null and it shouldn't be", payAttr != null);
+    Reader reader = new StringReader("the|0.1 quick|0.1 red|0.1");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("DelimitedPayload", "encoder", "float").create(stream);
+
+    stream.reset();
+    while (stream.incrementToken()) {
+      PayloadAttribute payAttr = stream.getAttribute(PayloadAttribute.class);
+      assertNotNull(payAttr);
       byte[] payData = payAttr.getPayload().bytes;
-      assertTrue("payData is null and it shouldn't be", payData != null);
-      assertTrue("payData is null and it shouldn't be", payData != null);
+      assertNotNull(payData);
       float payFloat = PayloadHelper.decodeFloat(payData);
-      assertTrue(payFloat + " does not equal: " + 0.1f, payFloat == 0.1f);
+      assertEquals(0.1f, payFloat, 0.0f);
     }
+    stream.end();
+    stream.close();
   }
 
   public void testDelim() throws Exception {
-    Map<String,String> args = new HashMap<String, String>();
-    args.put(DelimitedPayloadTokenFilterFactory.ENCODER_ATTR, FloatEncoder.class.getName());
-    args.put(DelimitedPayloadTokenFilterFactory.DELIMITER_ATTR, "*");
-    DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory();
-    factory.init(args);
-    ResourceLoader loader = new StringMockResourceLoader("solr/collection1");
-    factory.inform(loader);
-
-    TokenStream input = new MockTokenizer(new StringReader("the*0.1 quick*0.1 red*0.1"), MockTokenizer.WHITESPACE, false);
-    DelimitedPayloadTokenFilter tf = factory.create(input);
-    tf.reset();
-    while (tf.incrementToken()){
-      PayloadAttribute payAttr = tf.getAttribute(PayloadAttribute.class);
-      assertTrue("payAttr is null and it shouldn't be", payAttr != null);
+    Reader reader = new StringReader("the*0.1 quick*0.1 red*0.1");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("DelimitedPayload",
+        "encoder", "float",
+        "delimiter", "*").create(stream);
+    stream.reset();
+    while (stream.incrementToken()) {
+      PayloadAttribute payAttr = stream.getAttribute(PayloadAttribute.class);
+      assertNotNull(payAttr);
       byte[] payData = payAttr.getPayload().bytes;
-      assertTrue("payData is null and it shouldn't be", payData != null);
+      assertNotNull(payData);
       float payFloat = PayloadHelper.decodeFloat(payData);
-      assertTrue(payFloat + " does not equal: " + 0.1f, payFloat == 0.1f);
+      assertEquals(0.1f, payFloat, 0.0f);
+    }
+    stream.end();
+    stream.close();
+  }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("DelimitedPayload", 
+          "encoder", "float",
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
     }
   }
 }
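
For reference, the payload chain under test can be assembled directly from
the classes whose imports are touched above (FloatEncoder, PayloadHelper).
A minimal sketch (class name illustrative), assuming the 4.x constructor
DelimitedPayloadTokenFilter(TokenStream, char, PayloadEncoder) and
Version.LUCENE_42:

    import java.io.StringReader;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
    import org.apache.lucene.analysis.payloads.FloatEncoder;
    import org.apache.lucene.analysis.payloads.PayloadHelper;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
    import org.apache.lucene.util.Version;

    public class DelimitedPayloadSketch {
      public static void main(String[] args) throws Exception {
        TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_42,
            new StringReader("the|0.1 quick|0.1 red|0.1"));
        // text after the delimiter is stripped from the term and stored
        // as a payload, encoded here as a 4-byte float
        ts = new DelimitedPayloadTokenFilter(ts, '|', new FloatEncoder());
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        PayloadAttribute payload = ts.addAttribute(PayloadAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          float w = PayloadHelper.decodeFloat(payload.getPayload().bytes);
          System.out.println(term + " -> " + w); // the -> 0.1, etc.
        }
        ts.end();
        ts.close();
      }
    }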

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -20,18 +20,28 @@ package org.apache.lucene.analysis.pt;
 import java.io.Reader;
 import java.io.StringReader;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the Portuguese Light stem factory is working.
  */
-public class TestPortugueseLightStemFilterFactory extends BaseTokenStreamTestCase {
+public class TestPortugueseLightStemFilterFactory extends BaseTokenStreamFactoryTestCase {
   public void testStemming() throws Exception {
     Reader reader = new StringReader("evidentemente");
-    PortugueseLightStemFilterFactory factory = new PortugueseLightStemFilterFactory();
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("PortugueseLightStem").create(stream);
     assertTokenStreamContents(stream, new String[] { "evident" });
   }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("PortugueseLightStem", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -20,18 +20,28 @@ package org.apache.lucene.analysis.pt;
 import java.io.Reader;
 import java.io.StringReader;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the Portuguese Minimal stem factory is working.
  */
-public class TestPortugueseMinimalStemFilterFactory extends BaseTokenStreamTestCase {
+public class TestPortugueseMinimalStemFilterFactory extends BaseTokenStreamFactoryTestCase {
   public void testStemming() throws Exception {
     Reader reader = new StringReader("questões");
-    PortugueseMinimalStemFilterFactory factory = new PortugueseMinimalStemFilterFactory();
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("PortugueseMinimalStem").create(stream);
     assertTokenStreamContents(stream, new String[] { "questão" });
   }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("PortugueseMinimalStem", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -20,18 +20,28 @@ package org.apache.lucene.analysis.pt;
 import java.io.Reader;
 import java.io.StringReader;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the Portuguese stem factory is working.
  */
-public class TestPortugueseStemFilterFactory extends BaseTokenStreamTestCase {
+public class TestPortugueseStemFilterFactory extends BaseTokenStreamFactoryTestCase {
   public void testStemming() throws Exception {
     Reader reader = new StringReader("maluquice");
-    PortugueseStemFilterFactory factory = new PortugueseStemFilterFactory();
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("PortugueseStem").create(stream);
     assertTokenStreamContents(stream, new String[] { "maluc" });
   }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("PortugueseStem", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -19,29 +19,32 @@ package org.apache.lucene.analysis.rever
 
 import java.io.Reader;
 import java.io.StringReader;
-import java.util.Collections;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the Reverse string filter factory is working.
  */
-public class TestReverseStringFilterFactory extends BaseTokenStreamTestCase {
+public class TestReverseStringFilterFactory extends BaseTokenStreamFactoryTestCase {
   /**
    * Ensure the filter actually reverses text.
    */
   public void testReversing() throws Exception {
     Reader reader = new StringReader("simple test");
-    Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    ReverseStringFilterFactory factory = new ReverseStringFilterFactory();
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    Map<String, String> args = Collections.emptyMap();
-    factory.init(args);
-    TokenStream stream = factory.create(tokenizer);
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("ReverseString").create(stream);
     assertTokenStreamContents(stream, new String[] { "elpmis", "tset" });
   }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("ReverseString", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }
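
For reference, a minimal sketch of the filter behind the "ReverseString"
name (class name illustrative), assuming the 4.x constructor
ReverseStringFilter(Version, TokenStream):

    import java.io.StringReader;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.reverse.ReverseStringFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class ReverseStringFilterSketch {
      public static void main(String[] args) throws Exception {
        TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_42,
            new StringReader("simple test"));
        // reverses each token's characters in place
        ts = new ReverseStringFilter(Version.LUCENE_42, ts);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term); // elpmis, tset
        }
        ts.end();
        ts.close();
      }
    }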

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianFilters.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianFilters.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianFilters.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianFilters.java Sat Apr  6 11:08:17 2013
@@ -19,27 +19,31 @@ package org.apache.lucene.analysis.ru;
 
 import java.io.Reader;
 import java.io.StringReader;
-import java.util.Collections;
-import java.util.Map;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the Russian filter factories are working.
  */
-public class TestRussianFilters extends BaseTokenStreamTestCase {
+public class TestRussianFilters extends BaseTokenStreamFactoryTestCase {
   /**
    * Test RussianLetterTokenizerFactory
    */
   public void testTokenizer() throws Exception {
     Reader reader = new StringReader("Вместе с тем о силе электромагнитной 100");
-    RussianLetterTokenizerFactory factory = new RussianLetterTokenizerFactory();
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    Map<String, String> args = Collections.emptyMap();
-    factory.init(args);
-    Tokenizer stream = factory.create(reader);
+    TokenStream stream = tokenizerFactory("RussianLetter").create(reader);
     assertTokenStreamContents(stream, new String[] {"Вместе", "с", "тем", "о",
         "силе", "электромагнитной", "100"});
   }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenizerFactory("RussianLetter", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }

Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilterFactory.java?rev=1465224&r1=1465223&r2=1465224&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilterFactory.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilterFactory.java Sat Apr  6 11:08:17 2013
@@ -20,18 +20,28 @@ package org.apache.lucene.analysis.ru;
 import java.io.Reader;
 import java.io.StringReader;
 
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
 
 /**
  * Simple tests to ensure the Russian light stem factory is working.
  */
-public class TestRussianLightStemFilterFactory extends BaseTokenStreamTestCase {
+public class TestRussianLightStemFilterFactory extends BaseTokenStreamFactoryTestCase {
   public void testStemming() throws Exception {
     Reader reader = new StringReader("журналы");
-    RussianLightStemFilterFactory factory = new RussianLightStemFilterFactory();
-    TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("RussianLightStem").create(stream);
     assertTokenStreamContents(stream, new String[] { "журнал" });
   }
+  
+  /** Test that bogus arguments result in an exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("RussianLightStem", "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
 }