Posted to commits@lucene.apache.org by jp...@apache.org on 2017/06/07 08:45:42 UTC

lucene-solr:branch_6x: LUCENE-7855: Add the Wikipedia tokenizer's advanced parameters to its factory

Repository: lucene-solr
Updated Branches:
  refs/heads/branch_6x 3bb7cfcef -> 28d80c7b5


LUCENE-7855: Add the Wikipedia tokenizer's advanced parameters to its factory

Closes #209
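
For reference, here is a minimal sketch of the two new options, tokenOutput
and untokenizedTypes, driven directly from Java. The demo class name and the
sample input are made up for illustration, and the factory is constructed
directly rather than loaded through the analysis SPI as Solr would do; the
option keys and accepted values are exactly those introduced in the diff
below.

    import java.io.StringReader;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;
    import org.apache.lucene.analysis.wikipedia.WikipediaTokenizerFactory;
    import org.apache.lucene.util.AttributeFactory;

    public class WikipediaFactoryDemo {
      public static void main(String[] args) throws Exception {
        Map<String,String> factoryArgs = new HashMap<>();
        // Emit both the untokenized spans and the individual tokens.
        factoryArgs.put(WikipediaTokenizerFactory.TOKEN_OUTPUT,
            Integer.toString(WikipediaTokenizer.BOTH));
        // Types to keep as single untokenized spans, comma-separated.
        factoryArgs.put(WikipediaTokenizerFactory.UNTOKENIZED_TYPES,
            WikipediaTokenizer.CATEGORY + ", " + WikipediaTokenizer.ITALICS);

        Tokenizer tokenizer = new WikipediaTokenizerFactory(factoryArgs)
            .create(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
        tokenizer.setReader(new StringReader("[[Category:foo bar]] baz"));

        // Standard TokenStream consumption loop.
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
          System.out.println(term);
        }
        tokenizer.end();
        tokenizer.close();
      }
    }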


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/28d80c7b
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/28d80c7b
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/28d80c7b

Branch: refs/heads/branch_6x
Commit: 28d80c7b599bf9363adfefc1b32a463ff6250d1a
Parents: 3bb7cfc
Author: jpgilaberte <jp...@stratio.com>
Authored: Mon May 29 13:06:05 2017 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Wed Jun 7 10:39:38 2017 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |  5 ++
 .../wikipedia/WikipediaTokenizerFactory.java    | 21 +++--
 .../TestWikipediaTokenizerFactory.java          | 79 +++++++++++++++++----
 3 files changed, 85 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/28d80c7b/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index b2d2d45..0b98762 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -5,6 +5,11 @@ http://s.apache.org/luceneversions
 
 ======================= Lucene 6.7.0 =======================
 
+New Features
+
+* LUCENE-7855: Added advanced options of the Wikipedia tokenizer to its factory.
+  (Juan Pedro via Adrien Grand)
+
 Other
 
 * LUCENE-7800: Remove code that potentially rethrows checked exceptions 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/28d80c7b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java
index 3a57096..83e08aa 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java
@@ -16,9 +16,9 @@
  */
 package org.apache.lucene.analysis.wikipedia;
 
-
 import java.util.Collections;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.util.AttributeFactory;
@@ -33,19 +33,28 @@ import org.apache.lucene.util.AttributeFactory;
  * &lt;/fieldType&gt;</pre>
  */
 public class WikipediaTokenizerFactory extends TokenizerFactory {
-  
+  public static final String TOKEN_OUTPUT = "tokenOutput";
+  public static final String UNTOKENIZED_TYPES = "untokenizedTypes";
+
+  protected final int tokenOutput;
+  protected Set<String> untokenizedTypes;
+
   /** Creates a new WikipediaTokenizerFactory */
   public WikipediaTokenizerFactory(Map<String,String> args) {
     super(args);
+    tokenOutput = getInt(args, TOKEN_OUTPUT, WikipediaTokenizer.TOKENS_ONLY);
+    untokenizedTypes = getSet(args, UNTOKENIZED_TYPES);
+
+    if (untokenizedTypes == null) {
+      untokenizedTypes = Collections.emptySet();
+    }
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
   }
-  
-  // TODO: add support for WikipediaTokenizer's advanced options.
+
   @Override
   public WikipediaTokenizer create(AttributeFactory factory) {
-    return new WikipediaTokenizer(factory, WikipediaTokenizer.TOKENS_ONLY,
-        Collections.<String>emptySet());
+    return new WikipediaTokenizer(factory, tokenOutput, untokenizedTypes);
   }
 }
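
One behavioral note: the factory itself does not range-check tokenOutput. As
the new testIllegalArguments case below relies on, an out-of-range value only
fails when create() hands it to the WikipediaTokenizer constructor. A short
hypothetical snippet, reusing the imports from the sketch above (the exception
message is the one the test asserts on):

    Map<String,String> badArgs = new HashMap<>();
    badArgs.put(WikipediaTokenizerFactory.TOKEN_OUTPUT, "-1");
    // Construction succeeds: the factory only parses the int...
    WikipediaTokenizerFactory factory = new WikipediaTokenizerFactory(badArgs);
    // ...while create() throws IllegalArgumentException:
    // "tokenOutput must be TOKENS_ONLY, UNTOKENIZED_ONLY or BOTH"
    factory.create(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);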

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/28d80c7b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java
index ec345f9..7439620 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java
@@ -17,34 +17,85 @@
 package org.apache.lucene.analysis.wikipedia;
 
 
-import java.io.Reader;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
-import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;
 
 /**
  * Simple tests to ensure the wikipedia tokenizer is working.
  */
 public class TestWikipediaTokenizerFactory extends BaseTokenStreamFactoryTestCase {
+
+  private static final String WIKIPEDIA = "Wikipedia";
+  private static final String TOKEN_OUTPUT = "tokenOutput";
+  private static final String UNTOKENIZED_TYPES = "untokenizedTypes";
+
   public void testTokenizer() throws Exception {
-    Reader reader = new StringReader("This is a [[Category:foo]]");
-    Tokenizer tokenizer = tokenizerFactory("Wikipedia").create(newAttributeFactory());
-    tokenizer.setReader(reader);
-    assertTokenStreamContents(tokenizer,
-        new String[] { "This", "is", "a", "foo" },
-        new int[] { 0, 5, 8, 21 },
-        new int[] { 4, 7, 9, 24 },
-        new String[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", WikipediaTokenizer.CATEGORY },
-        new int[] { 1, 1, 1, 1, });
+    String text = "This is a [[Category:foo]]";
+    Tokenizer tf = tokenizerFactory(WIKIPEDIA).create(newAttributeFactory());
+    tf.setReader(new StringReader(text));
+    assertTokenStreamContents(tf,
+                              new String[] { "This", "is", "a", "foo" },
+                              new int[] { 0, 5, 8, 21 },
+                              new int[] { 4, 7, 9, 24 },
+                              new String[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", WikipediaTokenizer.CATEGORY },
+                              new int[] { 1, 1, 1, 1, },
+                              text.length());
+  }
+
+  public void testTokenizerTokensOnly() throws Exception {
+    String text = "This is a [[Category:foo]]";
+    Tokenizer tf = tokenizerFactory(WIKIPEDIA, TOKEN_OUTPUT, Integer.toString(WikipediaTokenizer.TOKENS_ONLY)).create(newAttributeFactory());
+    tf.setReader(new StringReader(text));
+    assertTokenStreamContents(tf,
+                              new String[] { "This", "is", "a", "foo" },
+                              new int[] { 0, 5, 8, 21 },
+                              new int[] { 4, 7, 9, 24 },
+                              new String[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", WikipediaTokenizer.CATEGORY },
+                              new int[] { 1, 1, 1, 1, },
+                              text.length());
+  }
+
+  public void testTokenizerUntokenizedOnly() throws Exception {
+    String test = "[[Category:a b c d]] [[Category:e f g]] [[link here]] [[link there]] ''italics here'' something ''more italics'' [[Category:h   i   j]]";
+    Tokenizer tf = tokenizerFactory(WIKIPEDIA, TOKEN_OUTPUT, Integer.toString(WikipediaTokenizer.UNTOKENIZED_ONLY), UNTOKENIZED_TYPES, WikipediaTokenizer.CATEGORY + ", " + WikipediaTokenizer.ITALICS).create(newAttributeFactory());
+    tf.setReader(new StringReader(test));
+    assertTokenStreamContents(tf,
+                              new String[] { "a b c d", "e f g", "link", "here", "link",
+                                             "there", "italics here", "something", "more italics", "h   i   j" },
+                              new int[] { 11, 32, 42, 47, 56, 61, 71, 86, 98, 124 },
+                              new int[] { 18, 37, 46, 51, 60, 66, 83, 95, 110, 133 },
+                              new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }
+    );
+  }
+
+  public void testTokenizerBoth() throws Exception {
+    String test = "[[Category:a b c d]] [[Category:e f g]] [[link here]] [[link there]] ''italics here'' something ''more italics'' [[Category:h   i   j]]";
+    Tokenizer tf = tokenizerFactory(WIKIPEDIA, TOKEN_OUTPUT, Integer.toString(WikipediaTokenizer.BOTH), UNTOKENIZED_TYPES, WikipediaTokenizer.CATEGORY + ", " + WikipediaTokenizer.ITALICS).create(newAttributeFactory());
+    tf.setReader(new StringReader(test));
+    assertTokenStreamContents(tf,
+                              new String[] { "a b c d", "a", "b", "c", "d", "e f g", "e", "f", "g",
+                                             "link", "here", "link", "there", "italics here", "italics", "here",
+                                             "something", "more italics", "more", "italics", "h   i   j", "h", "i", "j" },
+                              new int[] { 11, 11, 13, 15, 17, 32, 32, 34, 36, 42, 47, 56, 61, 71, 71, 79, 86, 98,  98,  103, 124, 124, 128, 132 },
+                              new int[] { 18, 12, 14, 16, 18, 37, 33, 35, 37, 46, 51, 60, 66, 83, 78, 83, 95, 110, 102, 110, 133, 125, 129, 133 },
+                              new int[] { 1,  0,  1,  1,  1,  1,  0,  1,  1,  1,  1,  1,  1,  1,  0,  1,  1,  1,   0,   1,   1,   0,   1,   1 }
+    );
   }
-  
+
   /** Test that bogus arguments result in exception */
   public void testBogusArguments() throws Exception {
     IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
-      tokenizerFactory("Wikipedia", "bogusArg", "bogusValue");
+      tokenizerFactory(WIKIPEDIA, "bogusArg", "bogusValue").create(newAttributeFactory());
     });
     assertTrue(expected.getMessage().contains("Unknown parameters"));
   }
-}
+
+  public void testIllegalArguments() throws Exception {
+    IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
+      tokenizerFactory(WIKIPEDIA, TOKEN_OUTPUT, "-1").create(newAttributeFactory());
+    });
+    assertTrue(expected.getMessage().contains("tokenOutput must be TOKENS_ONLY, UNTOKENIZED_ONLY or BOTH"));
+  }
+}
\ No newline at end of file