Posted to commits@lucene.apache.org by rm...@apache.org on 2011/08/08 13:58:00 UTC

svn commit: r1154936 [6/6] - in /lucene/dev/trunk: lucene/ modules/analysis/common/ modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/ modules/analysis/commo...

Added: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex?rev=1154936&view=auto
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex (added)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex Mon Aug  8 11:57:59 2011
@@ -0,0 +1,269 @@
+package org.apache.lucene.analysis.standard.std31;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
+import org.apache.lucene.analysis.standard.StandardTokenizerInterface;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+
+/**
+ * This class implements UAX29URLEmailTokenizer, except with a bug 
+ * (https://issues.apache.org/jira/browse/LUCENE-3358) where Han and Hiragana
+ * characters would be split from combining characters.
+ * @deprecated This class is only for exact backwards compatibility
+ */
+ @Deprecated
+%%
+
+%unicode 6.0
+%integer
+%final
+%public
+%class UAX29URLEmailTokenizerImpl31
+%implements StandardTokenizerInterface
+%function getNextToken
+%char
+
+%include src/java/org/apache/lucene/analysis/standard/std31/SUPPLEMENTARY.jflex-macro
+ALetter = ([\p{WB:ALetter}] | {ALetterSupp})
+Format =  ([\p{WB:Format}] | {FormatSupp})
+Numeric = ([\p{WB:Numeric}] | {NumericSupp})
+Extend =  ([\p{WB:Extend}] | {ExtendSupp})
+Katakana = ([\p{WB:Katakana}] | {KatakanaSupp})
+MidLetter = ([\p{WB:MidLetter}] | {MidLetterSupp})
+MidNum = ([\p{WB:MidNum}] | {MidNumSupp})
+MidNumLet = ([\p{WB:MidNumLet}] | {MidNumLetSupp})
+ExtendNumLet = ([\p{WB:ExtendNumLet}] | {ExtendNumLetSupp})
+ComplexContext = ([\p{LB:Complex_Context}] | {ComplexContextSupp})
+Han = ([\p{Script:Han}] | {HanSupp})
+Hiragana = ([\p{Script:Hiragana}] | {HiraganaSupp})
+
+// Script=Hangul & ALetter
+HangulEx       = (!(!\p{Script:Hangul}|!\p{WB:ALetter})) ({Format} | {Extend})*
+// UAX#29 WB4. X (Extend | Format)* --> X
+//
+ALetterEx      = {ALetter}                     ({Format} | {Extend})*
+// TODO: Convert hard-coded full-width numeric range to property intersection (something like [\p{Full-Width}&&\p{Numeric}]) once JFlex supports it
+NumericEx      = ({Numeric} | [\uFF10-\uFF19]) ({Format} | {Extend})*
+KatakanaEx     = {Katakana}                    ({Format} | {Extend})* 
+MidLetterEx    = ({MidLetter} | {MidNumLet})   ({Format} | {Extend})* 
+MidNumericEx   = ({MidNum} | {MidNumLet})      ({Format} | {Extend})*
+ExtendNumLetEx = {ExtendNumLet}                ({Format} | {Extend})*
+
+
+// URL and E-mail syntax specifications:
+//
+//     RFC-952:  DOD INTERNET HOST TABLE SPECIFICATION
+//     RFC-1035: DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION
+//     RFC-1123: Requirements for Internet Hosts - Application and Support
+//     RFC-1738: Uniform Resource Locators (URL)
+//     RFC-3986: Uniform Resource Identifier (URI): Generic Syntax
+//     RFC-5234: Augmented BNF for Syntax Specifications: ABNF
+//     RFC-5321: Simple Mail Transfer Protocol
+//     RFC-5322: Internet Message Format
+
+%include src/java/org/apache/lucene/analysis/standard/std31/ASCIITLD.jflex-macro
+
+DomainLabel = [A-Za-z0-9] ([-A-Za-z0-9]* [A-Za-z0-9])?
+DomainNameStrict = {DomainLabel} ("." {DomainLabel})* {ASCIITLD}
+DomainNameLoose  = {DomainLabel} ("." {DomainLabel})*
+
+IPv4DecimalOctet = "0"{0,2} [0-9] | "0"? [1-9][0-9] | "1" [0-9][0-9] | "2" ([0-4][0-9] | "5" [0-5])
+IPv4Address  = {IPv4DecimalOctet} ("." {IPv4DecimalOctet}){3} 
+IPv6Hex16Bit = [0-9A-Fa-f]{1,4}
+IPv6LeastSignificant32Bits = {IPv4Address} | ({IPv6Hex16Bit} ":" {IPv6Hex16Bit})
+IPv6Address =                                                  ({IPv6Hex16Bit} ":"){6} {IPv6LeastSignificant32Bits}
+            |                                             "::" ({IPv6Hex16Bit} ":"){5} {IPv6LeastSignificant32Bits}
+            |                            {IPv6Hex16Bit}?  "::" ({IPv6Hex16Bit} ":"){4} {IPv6LeastSignificant32Bits}
+            | (({IPv6Hex16Bit} ":"){0,1} {IPv6Hex16Bit})? "::" ({IPv6Hex16Bit} ":"){3} {IPv6LeastSignificant32Bits}
+            | (({IPv6Hex16Bit} ":"){0,2} {IPv6Hex16Bit})? "::" ({IPv6Hex16Bit} ":"){2} {IPv6LeastSignificant32Bits}
+            | (({IPv6Hex16Bit} ":"){0,3} {IPv6Hex16Bit})? "::"  {IPv6Hex16Bit} ":"     {IPv6LeastSignificant32Bits}
+            | (({IPv6Hex16Bit} ":"){0,4} {IPv6Hex16Bit})? "::"                         {IPv6LeastSignificant32Bits}
+            | (({IPv6Hex16Bit} ":"){0,5} {IPv6Hex16Bit})? "::"                         {IPv6Hex16Bit}
+            | (({IPv6Hex16Bit} ":"){0,6} {IPv6Hex16Bit})? "::"
+
+URIunreserved = [-._~A-Za-z0-9]
+URIpercentEncoded = "%" [0-9A-Fa-f]{2}
+URIsubDelims = [!$&'()*+,;=]
+URIloginSegment = ({URIunreserved} | {URIpercentEncoded} | {URIsubDelims})*
+URIlogin = {URIloginSegment} (":" {URIloginSegment})? "@"
+URIquery    = "?" ({URIunreserved} | {URIpercentEncoded} | {URIsubDelims} | [:@/?])*
+URIfragment = "#" ({URIunreserved} | {URIpercentEncoded} | {URIsubDelims} | [:@/?])*
+URIport = ":" [0-9]{1,5}
+URIhostStrict = ("[" {IPv6Address} "]") | {IPv4Address} | {DomainNameStrict}  
+URIhostLoose  = ("[" {IPv6Address} "]") | {IPv4Address} | {DomainNameLoose} 
+
+URIauthorityStrict =             {URIhostStrict} {URIport}?
+URIauthorityLoose  = {URIlogin}? {URIhostLoose}  {URIport}?
+
+HTTPsegment = ({URIunreserved} | {URIpercentEncoded} | [;:@&=])*
+HTTPpath = ("/" {HTTPsegment})*
+HTTPscheme = [hH][tT][tT][pP][sS]? "://"
+HTTPurlFull = {HTTPscheme} {URIauthorityLoose}  {HTTPpath}? {URIquery}? {URIfragment}?
+// {HTTPurlNoScheme} excludes {URIlogin}, because it could otherwise accept e-mail addresses
+HTTPurlNoScheme =          {URIauthorityStrict} {HTTPpath}? {URIquery}? {URIfragment}?
+HTTPurl = {HTTPurlFull} | {HTTPurlNoScheme}
+
+FTPorFILEsegment = ({URIunreserved} | {URIpercentEncoded} | [?:@&=])*
+FTPorFILEpath = "/" {FTPorFILEsegment} ("/" {FTPorFILEsegment})*
+FTPtype = ";" [tT][yY][pP][eE] "=" [aAiIdD]
+FTPscheme = [fF][tT][pP] "://"
+FTPurl = {FTPscheme} {URIauthorityLoose} {FTPorFILEpath} {FTPtype}? {URIfragment}?
+
+FILEscheme = [fF][iI][lL][eE] "://"
+FILEurl = {FILEscheme} {URIhostLoose}? {FTPorFILEpath} {URIfragment}?
+
+URL = {HTTPurl} | {FTPurl} | {FILEurl}
+
+EMAILquotedString = [\"] ([\u0001-\u0008\u000B\u000C\u000E-\u0021\u0023-\u005B\u005D-\u007E] | [\\] [\u0000-\u007F])* [\"]
+EMAILatomText = [A-Za-z0-9!#$%&'*+-/=?\^_`{|}~]
+EMAILlabel = {EMAILatomText}+ | {EMAILquotedString}
+EMAILlocalPart = {EMAILlabel} ("." {EMAILlabel})*
+EMAILdomainLiteralText = [\u0001-\u0008\u000B\u000C\u000E-\u005A\u005E-\u007F] | [\\] [\u0000-\u007F]
+// DFA minimization allows {IPv6Address} and {IPv4Address} to be included 
+// in the {EMAILbracketedHost} definition without incurring any size penalties, 
+// since {EMAILdomainLiteralText} recognizes all valid IP addresses.
+// The IP address regexes are included in {EMAILbracketedHost} simply as a 
+// reminder that they are acceptable bracketed host forms.
+EMAILbracketedHost = "[" ({EMAILdomainLiteralText}* | {IPv4Address} | [iI][pP][vV] "6:" {IPv6Address}) "]"
+EMAIL = {EMAILlocalPart} "@" ({DomainNameStrict} | {EMAILbracketedHost})
+
+
+%{
+  /** Alphanumeric sequences */
+  public static final int WORD_TYPE = UAX29URLEmailTokenizer.ALPHANUM;
+  
+  /** Numbers */
+  public static final int NUMERIC_TYPE = UAX29URLEmailTokenizer.NUM;
+  
+  /**
+   * Chars in class \p{Line_Break = Complex_Context} are from South East Asian
+   * scripts (Thai, Lao, Myanmar, Khmer, etc.).  Sequences of these are kept 
+   * together as a single token rather than broken up, because the logic
+   * required to break them at word boundaries is too complex for UAX#29.
+   * <p>
+   * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
+   */
+  public static final int SOUTH_EAST_ASIAN_TYPE = UAX29URLEmailTokenizer.SOUTHEAST_ASIAN;
+  
+  public static final int IDEOGRAPHIC_TYPE = UAX29URLEmailTokenizer.IDEOGRAPHIC;
+  
+  public static final int HIRAGANA_TYPE = UAX29URLEmailTokenizer.HIRAGANA;
+  
+  public static final int KATAKANA_TYPE = UAX29URLEmailTokenizer.KATAKANA;
+  
+  public static final int HANGUL_TYPE = UAX29URLEmailTokenizer.HANGUL;
+  
+  public static final int EMAIL_TYPE = UAX29URLEmailTokenizer.EMAIL;
+  
+  public static final int URL_TYPE = UAX29URLEmailTokenizer.URL;
+
+  public final int yychar()
+  {
+    return yychar;
+  }
+
+  /**
+   * Fills CharTermAttribute with the current token text.
+   */
+  public final void getText(CharTermAttribute t) {
+    t.copyBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
+  }
+%}
+
+%%
+
+// UAX#29 WB1.   sot   ÷
+//        WB2.         ÷   eot
+//
+<<EOF>> { return StandardTokenizerInterface.YYEOF; }
+
+{URL}   { return URL_TYPE; }
+{EMAIL} { return EMAIL_TYPE; }
+
+// UAX#29 WB8.   Numeric × Numeric
+//        WB11.  Numeric (MidNum | MidNumLet) × Numeric
+//        WB12.  Numeric × (MidNum | MidNumLet) Numeric
+//        WB13a. (ALetter | Numeric | Katakana | ExtendNumLet) × ExtendNumLet
+//        WB13b. ExtendNumLet × (ALetter | Numeric | Katakana)
+//
+{ExtendNumLetEx}* {NumericEx} ({ExtendNumLetEx}+ {NumericEx} 
+                              | {MidNumericEx} {NumericEx} 
+                              | {NumericEx})*
+{ExtendNumLetEx}* 
+  { return NUMERIC_TYPE; }
+
+// subset of the below for typing purposes only!
+{HangulEx}+
+  { return HANGUL_TYPE; }
+
+{KatakanaEx}+
+  { return KATAKANA_TYPE; }
+
+// UAX#29 WB5.   ALetter × ALetter
+//        WB6.   ALetter × (MidLetter | MidNumLet) ALetter
+//        WB7.   ALetter (MidLetter | MidNumLet) × ALetter
+//        WB9.   ALetter × Numeric
+//        WB10.  Numeric × ALetter
+//        WB13.  Katakana × Katakana
+//        WB13a. (ALetter | Numeric | Katakana | ExtendNumLet) × ExtendNumLet
+//        WB13b. ExtendNumLet × (ALetter | Numeric | Katakana)
+//
+{ExtendNumLetEx}*  ( {KatakanaEx} ({ExtendNumLetEx}* {KatakanaEx})* 
+                   | ( {NumericEx}  ({ExtendNumLetEx}+ {NumericEx} | {MidNumericEx} {NumericEx} | {NumericEx})*
+                     | {ALetterEx}  ({ExtendNumLetEx}+ {ALetterEx} | {MidLetterEx}  {ALetterEx} | {ALetterEx})* )+ ) 
+({ExtendNumLetEx}+ ( {KatakanaEx} ({ExtendNumLetEx}* {KatakanaEx})* 
+                   | ( {NumericEx}  ({ExtendNumLetEx}+ {NumericEx} | {MidNumericEx} {NumericEx} | {NumericEx})*
+                     | {ALetterEx}  ({ExtendNumLetEx}+ {ALetterEx} | {MidLetterEx}  {ALetterEx} | {ALetterEx})* )+ ) )*
+{ExtendNumLetEx}*  
+  { return WORD_TYPE; }
+
+
+// From UAX #29:
+//
+//    [C]haracters with the Line_Break property values of Contingent_Break (CB), 
+//    Complex_Context (SA/South East Asian), and XX (Unknown) are assigned word 
+//    boundary property values based on criteria outside of the scope of this
+//    annex.  That means that satisfactory treatment of languages like Chinese
+//    or Thai requires special handling.
+// 
+// In Unicode 6.0, only one character has the \p{Line_Break = Contingent_Break}
+// property: U+FFFC OBJECT REPLACEMENT CHARACTER.
+//
+// In the ICU implementation of UAX#29, \p{Line_Break = Complex_Context}
+// character sequences (from South East Asian scripts like Thai, Myanmar, Khmer,
+// Lao, etc.) are kept together.  This grammar does the same below.
+//
+// See also the Unicode Line Breaking Algorithm:
+//
+//    http://www.unicode.org/reports/tr14/#SA
+//
+{ComplexContext}+ { return SOUTH_EAST_ASIAN_TYPE; }
+
+// UAX#29 WB14.  Any ÷ Any
+//
+{Han} { return IDEOGRAPHIC_TYPE; }
+{Hiragana} { return HIRAGANA_TYPE; }
+
+
+// UAX#29 WB3.   CR × LF
+//        WB3a.  (Newline | CR | LF) ÷
+//        WB3b.  ÷ (Newline | CR | LF)
+//        WB14.  Any ÷ Any
+//
+[^] { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }
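
For readers unfamiliar with the JFlex output, the %{ ... %} block above is the glue that lets the generated scanner plug into the tokenizer through StandardTokenizerInterface: getNextToken() returns one of the *_TYPE constants (or YYEOF at end of input), and getText() copies the matched characters into a CharTermAttribute. Below is a minimal sketch of driving such a scanner by hand; it is illustrative only and not part of this commit (the ScannerLoopSketch class, the sample input, and the use of CharTermAttributeImpl as a scratch attribute are assumptions; in practice the scanner is consumed through UAX29URLEmailTokenizer):

    import java.io.IOException;
    import java.io.Reader;
    import java.io.StringReader;

    import org.apache.lucene.analysis.standard.StandardTokenizerInterface;
    import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
    import org.apache.lucene.analysis.standard.std31.UAX29URLEmailTokenizerImpl31;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl;

    public class ScannerLoopSketch {
      public static void main(String[] args) throws IOException {
        Reader reader = new StringReader("jane.doe@example.com posted http://example.com/ in 2011");
        // JFlex generates a constructor that takes the Reader to scan.
        StandardTokenizerInterface scanner = new UAX29URLEmailTokenizerImpl31(reader);
        CharTermAttribute term = new CharTermAttributeImpl();
        int tokenType;
        // getNextToken() returns WORD_TYPE, NUMERIC_TYPE, URL_TYPE, etc.,
        // and StandardTokenizerInterface.YYEOF once the input is exhausted.
        while ((tokenType = scanner.getNextToken()) != StandardTokenizerInterface.YYEOF) {
          scanner.getText(term);  // copy the matched text into the attribute
          String text = new String(term.buffer(), 0, term.length());
          // TOKEN_TYPES maps the numeric type back to a label such as <URL> or <EMAIL>
          System.out.println(UAX29URLEmailTokenizer.TOKEN_TYPES[tokenType] + ": " + text);
        }
      }
    }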

Modified: lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java?rev=1154936&r1=1154935&r2=1154936&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java Mon Aug  8 11:57:59 2011
@@ -5,9 +5,11 @@ import org.apache.lucene.analysis.BaseTo
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.apache.lucene.analysis.util.ReusableAnalyzerBase;
+import org.apache.lucene.util.Version;
 
 import java.io.BufferedReader;
 import java.io.IOException;
@@ -44,7 +46,7 @@ public class TestUAX29URLEmailTokenizer 
     sb.append(whitespace);
     sb.append("testing 1234");
     String input = sb.toString();
-    UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(new StringReader(input));
+    UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
     BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
   }
 
@@ -53,7 +55,7 @@ public class TestUAX29URLEmailTokenizer 
     protected TokenStreamComponents createComponents
       (String fieldName, Reader reader) {
 
-      Tokenizer tokenizer = new UAX29URLEmailTokenizer(reader);
+      Tokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, reader);
       return new TokenStreamComponents(tokenizer);
     }
   };
@@ -69,7 +71,7 @@ public class TestUAX29URLEmailTokenizer 
     public final boolean incrementToken() throws java.io.IOException {
       boolean isTokenAvailable = false;
       while (input.incrementToken()) {
-        if (typeAtt.type() == UAX29URLEmailTokenizer.URL_TYPE) {
+        if (typeAtt.type() == UAX29URLEmailTokenizer.TOKEN_TYPES[UAX29URLEmailTokenizer.URL]) {
           isTokenAvailable = true;
           break;
         }
@@ -88,7 +90,7 @@ public class TestUAX29URLEmailTokenizer 
     public final boolean incrementToken() throws java.io.IOException {
       boolean isTokenAvailable = false;
       while (input.incrementToken()) {
-        if (typeAtt.type() == UAX29URLEmailTokenizer.EMAIL_TYPE) {
+        if (typeAtt.type() == UAX29URLEmailTokenizer.TOKEN_TYPES[UAX29URLEmailTokenizer.EMAIL]) {
           isTokenAvailable = true;
           break;
         }
@@ -100,7 +102,7 @@ public class TestUAX29URLEmailTokenizer 
   private Analyzer urlAnalyzer = new ReusableAnalyzerBase() {
     @Override
     protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(reader);
+      UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, reader);
       tokenizer.setMaxTokenLength(Integer.MAX_VALUE);  // Tokenize arbitrary length URLs
       TokenFilter filter = new URLFilter(tokenizer);
       return new TokenStreamComponents(tokenizer, filter);
@@ -110,7 +112,7 @@ public class TestUAX29URLEmailTokenizer 
   private Analyzer emailAnalyzer = new ReusableAnalyzerBase() {
     @Override
     protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(reader);
+      UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, reader);
       TokenFilter filter = new EmailFilter(tokenizer);
       return new TokenStreamComponents(tokenizer, filter);
     }
@@ -418,7 +420,32 @@ public class TestUAX29URLEmailTokenizer 
         new String[] { "仮", "名", "遣", "い", "カタカナ" },
         new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<HIRAGANA>", "<KATAKANA>" });
   }
-  
+
+  public void testCombiningMarks() throws Exception {
+    checkOneTerm(a, "ざ", "ざ"); // hiragana
+    checkOneTerm(a, "ザ", "ザ"); // katakana
+    checkOneTerm(a, "壹゙", "壹゙"); // ideographic
+    checkOneTerm(a, "아゙",  "아゙"); // hangul
+  }
+
+  /** @deprecated remove this and sophisticated backwards layer in 5.0 */
+  @Deprecated
+  public void testCombiningMarksBackwards() throws Exception {
+    Analyzer a = new ReusableAnalyzerBase() {
+      @Override
+      protected TokenStreamComponents createComponents
+        (String fieldName, Reader reader) {
+
+        Tokenizer tokenizer = new UAX29URLEmailTokenizer(reader);
+        return new TokenStreamComponents(tokenizer);
+      }
+    };
+    checkOneTerm(a, "ざ", "さ"); // hiragana Bug
+    checkOneTerm(a, "ザ", "ザ"); // katakana Works
+    checkOneTerm(a, "壹゙", "壹"); // ideographic Bug
+    checkOneTerm(a, "아゙",  "아゙"); // hangul Works
+  }
+
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
     checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
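
The new tests above also show the pattern applications are expected to follow: the tokenizer now takes a matchVersion, a current version gets the fixed grammar, and a 3.1 version (or the deprecated Reader-only constructor) keeps the old combining-mark behavior via the std31 grammar. A small illustrative sketch, assuming Version.LUCENE_31 for the legacy case and Version.LUCENE_CURRENT as a stand-in for a concrete current constant (the MatchVersionSketch class and the sample text are likewise assumptions, not part of this change):

    import java.io.StringReader;

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
    import org.apache.lucene.util.Version;

    public class MatchVersionSketch {
      public static void main(String[] args) {
        // U+3055 HIRAGANA LETTER SA followed by U+3099 COMBINING VOICED SOUND MARK
        String input = "\u3055\u3099";

        // Current behavior (LUCENE-3358 fixed): the combining mark stays attached,
        // so the single emitted token is "\u3055\u3099".
        Tokenizer fixed = new UAX29URLEmailTokenizer(Version.LUCENE_CURRENT, new StringReader(input));

        // Backwards-compatible behavior: a 3.1 matchVersion (or the deprecated
        // Reader-only constructor) selects the std31 grammar, which splits off
        // the combining mark and yields just "\u3055".
        Tokenizer legacy = new UAX29URLEmailTokenizer(Version.LUCENE_31, new StringReader(input));
      }
    }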

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/analysis/UAX29URLEmailTokenizerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/analysis/UAX29URLEmailTokenizerFactory.java?rev=1154936&r1=1154935&r2=1154936&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/analysis/UAX29URLEmailTokenizerFactory.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/analysis/UAX29URLEmailTokenizerFactory.java Mon Aug  8 11:57:59 2011
@@ -51,7 +51,7 @@ public class UAX29URLEmailTokenizerFacto
   }
 
   public UAX29URLEmailTokenizer create(Reader input) {
-    UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(input); 
+    UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(luceneMatchVersion, input); 
     tokenizer.setMaxTokenLength(maxTokenLength);
     return tokenizer;
   }

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestUAX29URLEmailTokenizerFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestUAX29URLEmailTokenizerFactory.java?rev=1154936&r1=1154935&r2=1154936&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestUAX29URLEmailTokenizerFactory.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestUAX29URLEmailTokenizerFactory.java Mon Aug  8 11:57:59 2011
@@ -19,6 +19,7 @@ package org.apache.solr.analysis;
 
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -173,4 +174,22 @@ public class TestUAX29URLEmailTokenizerF
     assertTokenStreamContents(stream, 
         new String[] {"one", "two", "three", longWord, "four", "five", "six" });
   }
+  
+  /** @deprecated nuke this test in lucene 5.0 */
+  @Deprecated
+  public void testMatchVersion() throws Exception {
+    Reader reader = new StringReader("ざ");
+    UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory();
+    factory.init(DEFAULT_VERSION_PARAM);
+    Tokenizer stream = factory.create(reader);
+    assertTokenStreamContents(stream, 
+        new String[] {"ざ"});
+    
+    reader = new StringReader("ざ");
+    factory = new UAX29URLEmailTokenizerFactory();
+    factory.init(Collections.singletonMap("luceneMatchVersion", "3.1"));
+    stream = factory.create(reader);
+    assertTokenStreamContents(stream, 
+        new String[] {"さ"}); // old broken behavior
+  }
 }