You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by sa...@apache.org on 2010/12/07 20:47:09 UTC
svn commit: r1043180 [3/3] - in /lucene/dev/branches/branch_3x: ./ lucene/
lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/
lucene/src/java/org/apache/lucene/analysis/standard/
lucene/src/test/org/apache/lucene/analysis/ solr/ so...
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex?rev=1043180&r1=1043179&r2=1043180&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex Tue Dec 7 19:47:08 2010
@@ -23,14 +23,11 @@ import org.apache.lucene.analysis.tokena
* This class implements Word Break rules from the Unicode Text Segmentation
* algorithm, as specified in
* <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a>
- * URLs and email addresses are also tokenized according to the relevant RFCs.
* <p/>
* Tokens produced are of the following types:
* <ul>
* <li><ALPHANUM>: A sequence of alphabetic and numeric characters</li>
* <li><NUM>: A number</li>
- * <li><URL>: A URL</li>
- * <li><EMAIL>: An email address</li>
* <li><SOUTHEAST_ASIAN>: A sequence of characters from South and Southeast
* Asian languages, including Thai, Lao, Myanmar, and Khmer</li>
* <li><IDEOGRAPHIC>: A single CJKV ideographic character</li>
@@ -67,83 +64,6 @@ MidNumericEx = [\p{WB:MidNum}\p{WB:Mid
ExtendNumLetEx = \p{WB:ExtendNumLet} [\p{WB:Format}\p{WB:Extend}]*
-// URL and E-mail syntax specifications:
-//
-// RFC-952: DOD INTERNET HOST TABLE SPECIFICATION
-// RFC-1035: DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION
-// RFC-1123: Requirements for Internet Hosts - Application and Support
-// RFC-1738: Uniform Resource Locators (URL)
-// RFC-3986: Uniform Resource Identifier (URI): Generic Syntax
-// RFC-5234: Augmented BNF for Syntax Specifications: ABNF
-// RFC-5321: Simple Mail Transfer Protocol
-// RFC-5322: Internet Message Format
-
-%include src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro
-
-DomainLabel = [A-Za-z0-9] ([-A-Za-z0-9]* [A-Za-z0-9])?
-DomainNameStrict = {DomainLabel} ("." {DomainLabel})* {ASCIITLD}
-DomainNameLoose = {DomainLabel} ("." {DomainLabel})*
-
-IPv4DecimalOctet = "0"{0,2} [0-9] | "0"? [1-9][0-9] | "1" [0-9][0-9] | "2" ([0-4][0-9] | "5" [0-5])
-IPv4Address = {IPv4DecimalOctet} ("." {IPv4DecimalOctet}){3}
-IPv6Hex16Bit = [0-9A-Fa-f]{1,4}
-IPv6LeastSignificant32Bits = {IPv4Address} | ({IPv6Hex16Bit} ":" {IPv6Hex16Bit})
-IPv6Address = ({IPv6Hex16Bit} ":"){6} {IPv6LeastSignificant32Bits}
- | "::" ({IPv6Hex16Bit} ":"){5} {IPv6LeastSignificant32Bits}
- | {IPv6Hex16Bit}? "::" ({IPv6Hex16Bit} ":"){4} {IPv6LeastSignificant32Bits}
- | (({IPv6Hex16Bit} ":"){0,1} {IPv6Hex16Bit})? "::" ({IPv6Hex16Bit} ":"){3} {IPv6LeastSignificant32Bits}
- | (({IPv6Hex16Bit} ":"){0,2} {IPv6Hex16Bit})? "::" ({IPv6Hex16Bit} ":"){2} {IPv6LeastSignificant32Bits}
- | (({IPv6Hex16Bit} ":"){0,3} {IPv6Hex16Bit})? "::" {IPv6Hex16Bit} ":" {IPv6LeastSignificant32Bits}
- | (({IPv6Hex16Bit} ":"){0,4} {IPv6Hex16Bit})? "::" {IPv6LeastSignificant32Bits}
- | (({IPv6Hex16Bit} ":"){0,5} {IPv6Hex16Bit})? "::" {IPv6Hex16Bit}
- | (({IPv6Hex16Bit} ":"){0,6} {IPv6Hex16Bit})? "::"
-
-URIunreserved = [-._~A-Za-z0-9]
-URIpercentEncoded = "%" [0-9A-Fa-f]{2}
-URIsubDelims = [!$&'()*+,;=]
-URIloginSegment = ({URIunreserved} | {URIpercentEncoded} | {URIsubDelims})*
-URIlogin = {URIloginSegment} (":" {URIloginSegment})? "@"
-URIquery = "?" ({URIunreserved} | {URIpercentEncoded} | {URIsubDelims} | [:@/?])*
-URIfragment = "#" ({URIunreserved} | {URIpercentEncoded} | {URIsubDelims} | [:@/?])*
-URIport = ":" [0-9]{1,5}
-URIhostStrict = ("[" {IPv6Address} "]") | {IPv4Address} | {DomainNameStrict}
-URIhostLoose = ("[" {IPv6Address} "]") | {IPv4Address} | {DomainNameLoose}
-
-URIauthorityStrict = {URIhostStrict} {URIport}?
-URIauthorityLoose = {URIlogin}? {URIhostLoose} {URIport}?
-
-HTTPsegment = ({URIunreserved} | {URIpercentEncoded} | [;:@&=])*
-HTTPpath = ("/" {HTTPsegment})*
-HTTPscheme = [hH][tT][tT][pP][sS]? "://"
-HTTPurlFull = {HTTPscheme} {URIauthorityLoose} {HTTPpath}? {URIquery}? {URIfragment}?
-// {HTTPurlNoScheme} excludes {URIlogin}, because it could otherwise accept e-mail addresses
-HTTPurlNoScheme = {URIauthorityStrict} {HTTPpath}? {URIquery}? {URIfragment}?
-HTTPurl = {HTTPurlFull} | {HTTPurlNoScheme}
-
-FTPorFILEsegment = ({URIunreserved} | {URIpercentEncoded} | [?:@&=])*
-FTPorFILEpath = "/" {FTPorFILEsegment} ("/" {FTPorFILEsegment})*
-FTPtype = ";" [tT][yY][pP][eE] "=" [aAiIdD]
-FTPscheme = [fF][tT][pP] "://"
-FTPurl = {FTPscheme} {URIauthorityLoose} {FTPorFILEpath} {FTPtype}? {URIfragment}?
-
-FILEscheme = [fF][iI][lL][eE] "://"
-FILEurl = {FILEscheme} {URIhostLoose}? {FTPorFILEpath} {URIfragment}?
-
-URL = {HTTPurl} | {FTPurl} | {FILEurl}
-
-EMAILquotedString = [\"] ([\u0001-\u0008\u000B\u000C\u000E-\u0021\u0023-\u005B\u005D-\u007E] | [\\] [\u0000-\u007F])* [\"]
-EMAILatomText = [A-Za-z0-9!#$%&'*+-/=?\^_`{|}~]
-EMAILlabel = {EMAILatomText}+ | {EMAILquotedString}
-EMAILlocalPart = {EMAILlabel} ("." {EMAILlabel})*
-EMAILdomainLiteralText = [\u0001-\u0008\u000B\u000C\u000E-\u005A\u005E-\u007F] | [\\] [\u0000-\u007F]
-// DFA minimization allows {IPv6Address} and {IPv4Address} to be included
-// in the {EMAILbracketedHost} definition without incurring any size penalties,
-// since {EMAILdomainLiteralText} recognizes all valid IP addresses.
-// The IP address regexes are included in {EMAILbracketedHost} simply as a
-// reminder that they are acceptable bracketed host forms.
-EMAILbracketedHost = "[" ({EMAILdomainLiteralText}* | {IPv4Address} | [iI][pP][vV] "6:" {IPv6Address}) "]"
-EMAIL = {EMAILlocalPart} "@" ({DomainNameStrict} | {EMAILbracketedHost})
-
%{
/** Alphanumeric sequences */
public static final int WORD_TYPE = StandardTokenizer.ALPHANUM;
@@ -151,12 +71,6 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNam
/** Numbers */
public static final int NUMERIC_TYPE = StandardTokenizer.NUM;
- /** URLs with scheme: HTTP(S), FTP, or FILE; no-scheme URLs match HTTP syntax */
- public static final int URL_TYPE = StandardTokenizer.URL;
-
- /** E-mail addresses */
- public static final int EMAIL_TYPE = StandardTokenizer.EMAIL;
-
/**
* Chars in class \p{Line_Break = Complex_Context} are from South East Asian
* scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
@@ -191,9 +105,6 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNam
//
<<EOF>> { return StandardTokenizerInterface.YYEOF; }
-{URL} { return URL_TYPE; }
-{EMAIL} { return EMAIL_TYPE; }
-
// UAX#29 WB8. Numeric × Numeric
// WB11. Numeric (MidNum | MidNumLet) × Numeric
// WB12. Numeric × (MidNum | MidNumLet) Numeric
Copied: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java (from r1043071, lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java?p2=lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java&p1=lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java&r1=1043071&r2=1043180&rev=1043180&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java Tue Dec 7 19:47:08 2010
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 12/4/10 7:24 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 12/7/10 11:53 AM */
package org.apache.lucene.analysis.standard;
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/package.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/package.html?rev=1043180&r1=1043179&r2=1043180&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/package.html (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/standard/package.html Tue Dec 7 19:47:08 2010
@@ -27,7 +27,10 @@
as of Lucene 3.1, implements the Word Break rules from the Unicode Text
Segmentation algorithm, as specified in
<a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a>.
- URLs and email addresses are also tokenized according to the relevant RFCs.
+ Unlike <code>UAX29URLEmailTokenizer</code>, URLs and email addresses are
+ <b>not</b> tokenized as single tokens, but are instead split up into
+ tokens according to the UAX#29 word break rules.
+ <br/>
<code><a href="StandardAnalyzer">StandardAnalyzer</a></code> includes
<code>StandardTokenizer</code>,
<code><a href="StandardFilter">StandardFilter</a></code>,
@@ -46,13 +49,11 @@
<code><a href="../../../../../../all/org/apache/lucene/analysis/LowerCaseFilter.html">LowerCaseFilter</a></code>
and <code><a href="../../../../../../all/org/apache/lucene/analysis/StopFilter.html">StopFilter</a></code>.
</li>
- <li><code><a href="UAX29Tokenizer.html">UAX29Tokenizer</a></code>:
- implements the Word Break rules from the Unicode Text Segmentation
- algorithm, as specified in
+ <li><code><a href="UAX29URLEmailTokenizer.html">UAX29URLEmailTokenizer</a></code>:
+ implements the Word Break rules from the Unicode Text Segmentation
+ algorithm, as specified in
<a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a>.
- Unlike <code>StandardTokenizer</code>, URLs and email addresses are
- <b>not</b> tokenized as single tokens, but are instead split up into
- tokens according to the UAX#29 word break rules.
+ URLs and email addresses are also tokenized according to the relevant RFCs.
</li>
</ul>
</body>
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java?rev=1043180&r1=1043179&r2=1043180&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java Tue Dec 7 19:47:08 2010
@@ -1,16 +1,11 @@
package org.apache.lucene.analysis;
import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import java.io.BufferedReader;
import java.io.IOException;
-import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
-import java.util.ArrayList;
import java.util.Arrays;
-import java.util.List;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -52,63 +47,6 @@ public class TestStandardAnalyzer extend
}
};
- /** Passes through tokens with type "<URL>" and blocks all other types. */
- private class URLFilter extends TokenFilter {
- private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
- public URLFilter(TokenStream in) {
- super(in);
- }
- @Override
- public final boolean incrementToken() throws java.io.IOException {
- boolean isTokenAvailable = false;
- while (input.incrementToken()) {
- if (typeAtt.type() == StandardTokenizer.TOKEN_TYPES[StandardTokenizer.URL]) {
- isTokenAvailable = true;
- break;
- }
- }
- return isTokenAvailable;
- }
- }
-
- /** Passes through tokens with type "<EMAIL>" and blocks all other types. */
- private class EmailFilter extends TokenFilter {
- private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
- public EmailFilter(TokenStream in) {
- super(in);
- }
- @Override
- public final boolean incrementToken() throws java.io.IOException {
- boolean isTokenAvailable = false;
- while (input.incrementToken()) {
- if (typeAtt.type() == StandardTokenizer.TOKEN_TYPES[StandardTokenizer.EMAIL]) {
- isTokenAvailable = true;
- break;
- }
- }
- return isTokenAvailable;
- }
- }
-
- private Analyzer urlAnalyzer = new ReusableAnalyzerBase() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- StandardTokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
- tokenizer.setMaxTokenLength(Integer.MAX_VALUE); // Tokenize arbitrary length URLs
- TokenFilter filter = new URLFilter(tokenizer);
- return new TokenStreamComponents(tokenizer, filter);
- }
- };
-
- private Analyzer emailAnalyzer = new ReusableAnalyzerBase() {
- @Override
- protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
- TokenFilter filter = new EmailFilter(tokenizer);
- return new TokenStreamComponents(tokenizer, filter);
- }
- };
-
public void testArmenian() throws Exception {
BaseTokenStreamTestCase.assertAnalyzesTo(a, "Վիքիպեդիայի 13 միլիոն հոդվածները (4,600` հայերեն վիքիպեդիայում) գրվել են կամավորների կողմից ու համարյա բոլոր հոդվածները կարող է խմբագրել ցանկաց մարդ ով կարող է բացել Վիքիպեդիայի կայքը։",
new String[] { "Վիքիպեդիայի", "13", "միլիոն", "հոդվածները", "4,600", "հայերեն", "վիքիպեդիայում", "գրվել", "են", "կամավորների", "կողմից",
@@ -255,138 +193,6 @@ public class TestStandardAnalyzer extend
new String[] { "<ALPHANUM>", "<ALPHANUM>", "<NUM>", "<ALPHANUM>" });
}
- public void testWikiURLs() throws Exception {
- Reader reader = null;
- String luceneResourcesWikiPage;
- try {
- reader = new InputStreamReader
- (getClass().getResourceAsStream("LuceneResourcesWikiPage.html"), "UTF-8");
- StringBuilder builder = new StringBuilder();
- char[] buffer = new char[1024];
- int numCharsRead;
- while (-1 != (numCharsRead = reader.read(buffer))) {
- builder.append(buffer, 0, numCharsRead);
- }
- luceneResourcesWikiPage = builder.toString();
- } finally {
- if (null != reader) {
- reader.close();
- }
- }
- assertTrue(null != luceneResourcesWikiPage
- && luceneResourcesWikiPage.length() > 0);
- BufferedReader bufferedReader = null;
- String[] urls;
- try {
- List<String> urlList = new ArrayList<String>();
- bufferedReader = new BufferedReader(new InputStreamReader
- (getClass().getResourceAsStream("LuceneResourcesWikiPageURLs.txt"), "UTF-8"));
- String line;
- while (null != (line = bufferedReader.readLine())) {
- line = line.trim();
- if (line.length() > 0) {
- urlList.add(line);
- }
- }
- urls = urlList.toArray(new String[urlList.size()]);
- } finally {
- if (null != bufferedReader) {
- bufferedReader.close();
- }
- }
- assertTrue(null != urls && urls.length > 0);
- BaseTokenStreamTestCase.assertAnalyzesTo
- (urlAnalyzer, luceneResourcesWikiPage, urls);
- }
-
- public void testEmails() throws Exception {
- Reader reader = null;
- String randomTextWithEmails;
- try {
- reader = new InputStreamReader
- (getClass().getResourceAsStream("random.text.with.email.addresses.txt"), "UTF-8");
- StringBuilder builder = new StringBuilder();
- char[] buffer = new char[1024];
- int numCharsRead;
- while (-1 != (numCharsRead = reader.read(buffer))) {
- builder.append(buffer, 0, numCharsRead);
- }
- randomTextWithEmails = builder.toString();
- } finally {
- if (null != reader) {
- reader.close();
- }
- }
- assertTrue(null != randomTextWithEmails
- && randomTextWithEmails.length() > 0);
- BufferedReader bufferedReader = null;
- String[] emails;
- try {
- List<String> emailList = new ArrayList<String>();
- bufferedReader = new BufferedReader(new InputStreamReader
- (getClass().getResourceAsStream("email.addresses.from.random.text.with.email.addresses.txt"), "UTF-8"));
- String line;
- while (null != (line = bufferedReader.readLine())) {
- line = line.trim();
- if (line.length() > 0) {
- emailList.add(line);
- }
- }
- emails = emailList.toArray(new String[emailList.size()]);
- } finally {
- if (null != bufferedReader) {
- bufferedReader.close();
- }
- }
- assertTrue(null != emails && emails.length > 0);
- BaseTokenStreamTestCase.assertAnalyzesTo
- (emailAnalyzer, randomTextWithEmails, emails);
- }
-
- public void testURLs() throws Exception {
- Reader reader = null;
- String randomTextWithURLs;
- try {
- reader = new InputStreamReader
- (getClass().getResourceAsStream("random.text.with.urls.txt"), "UTF-8");
- StringBuilder builder = new StringBuilder();
- char[] buffer = new char[1024];
- int numCharsRead;
- while (-1 != (numCharsRead = reader.read(buffer))) {
- builder.append(buffer, 0, numCharsRead);
- }
- randomTextWithURLs = builder.toString();
- } finally {
- if (null != reader) {
- reader.close();
- }
- }
- assertTrue(null != randomTextWithURLs
- && randomTextWithURLs.length() > 0);
- BufferedReader bufferedReader = null;
- String[] urls;
- try {
- List<String> urlList = new ArrayList<String>();
- bufferedReader = new BufferedReader(new InputStreamReader
- (getClass().getResourceAsStream("urls.from.random.text.with.urls.txt"), "UTF-8"));
- String line;
- while (null != (line = bufferedReader.readLine())) {
- line = line.trim();
- if (line.length() > 0) {
- urlList.add(line);
- }
- }
- urls = urlList.toArray(new String[urlList.size()]);
- } finally {
- if (null != bufferedReader) {
- bufferedReader.close();
- }
- }
- assertTrue(null != urls && urls.length > 0);
- BaseTokenStreamTestCase.assertAnalyzesTo
- (urlAnalyzer, randomTextWithURLs, urls);
- }
-
public void testUnicodeWordBreaks() throws Exception {
WordBreakTestUnicode_6_0_0 wordBreakTest = new WordBreakTestUnicode_6_0_0();
wordBreakTest.test(a);
Copied: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/analysis/TestUAX29URLEmailTokenizer.java (from r1043071, lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/analysis/TestUAX29URLEmailTokenizer.java?p2=lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/analysis/TestUAX29URLEmailTokenizer.java&p1=lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java&r1=1043071&r2=1043180&rev=1043180&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/analysis/TestUAX29URLEmailTokenizer.java Tue Dec 7 19:47:08 2010
@@ -1,13 +1,7 @@
-package org.apache.lucene.analysis.core;
+package org.apache.lucene.analysis;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.TokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.analysis.util.ReusableAnalyzerBase;
import java.io.BufferedReader;
import java.io.IOException;
Modified: lucene/dev/branches/branch_3x/solr/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/solr/CHANGES.txt?rev=1043180&r1=1043179&r2=1043180&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/solr/CHANGES.txt (original)
+++ lucene/dev/branches/branch_3x/solr/CHANGES.txt Tue Dec 7 19:47:08 2010
@@ -246,8 +246,10 @@ New Features
* SOLR-1336: Add SmartChinese (word segmentation for Simplified Chinese)
tokenizer and filters to contrib/analysis-extras (rmuir)
-* SOLR-2211: Added UAX29TokenizerFactory, which implements UAX#29, a unicode algorithm
- with good results for most languages. (Tom Burton-West via rmuir)
+* SOLR-2211,LUCENE-2763: Added UAX29URLEmailTokenizerFactory, which implements
+ UAX#29, a unicode algorithm with good results for most languages, as well as
+ URL and E-mail tokenization according to the relevant RFCs.
+ (Tom Burton-West via rmuir)
* SOLR-2237: Added StempelPolishStemFilterFactory to contrib/analysis-extras (rmuir)