Posted to commits@lucene.apache.org by mi...@apache.org on 2016/06/14 20:38:27 UTC

[08/12] lucene-solr:master: LUCENE-7318: graduate StandardAnalyzer and make it the default for IndexWriterConfig
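
With this change StandardAnalyzer moves into lucene-core, so an IndexWriterConfig can be created without naming an analyzer at all. A minimal sketch of the new default behavior (the index path, field, and class name below are illustrative, not part of this commit):

import java.nio.file.Paths;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DefaultAnalyzerDemo {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"));
         // No analyzer argument: IndexWriterConfig() now defaults to
         // StandardAnalyzer, which ships in lucene-core after this change.
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
      Document doc = new Document();
      doc.add(new TextField("body", "testing 1234", Field.Store.NO));
      writer.addDocument(doc);
    }
  }
}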

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/87016b5f/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java
deleted file mode 100644
index 6c6ddc8..0000000
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestStandardAnalyzer.java
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.analysis.standard;
-
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.util.Arrays;
-import java.util.Random;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.MockGraphTokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util.TestUtil;
-
-public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
-
-  // LUCENE-5897: slow tokenization of strings of the form (\p{WB:ExtendNumLet}[\p{WB:Format}\p{WB:Extend}]*)+
-  @Slow
-  public void testLargePartiallyMatchingToken() throws Exception {
-    // TODO: get these lists of chars matching a property from ICU4J
-    // http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt
-    char[] WordBreak_ExtendNumLet_chars = "_\u203f\u2040\u2054\ufe33\ufe34\ufe4d\ufe4e\ufe4f\uff3f".toCharArray();
-
-    // http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt
-    int[] WordBreak_Format_chars // only the first char of each range
-        = { 0xAD, 0x600, 0x61C, 0x6DD, 0x70F, 0x180E, 0x200E, 0x202A, 0x2060, 0x2066, 0xFEFF,
-            0xFFF9, 0x110BD, 0x1D173, 0xE0001, 0xE0020 };
-
-    // http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt
-    int[] WordBreak_Extend_chars // only the first char of each range
-        = { 0x300, 0x483, 0x591, 0x5bf, 0x5c1, 0x5c4, 0x5c7, 0x610, 0x64b, 0x670, 0x6d6, 0x6df,
-            0x6e7, 0x6ea, 0x711, 0x730, 0x7a6, 0x7eb, 0x816, 0x81b, 0x825, 0x829, 0x859, 0x8e4,
-            0x900, 0x93a, 0x93e, 0x951, 0x962, 0x981, 0x9bc, 0x9be, 0x9c7, 0x9cb, 0x9d7, 0x9e2,
-            0xa01, 0xa3c, 0xa3e, 0xa47, 0xa4b, 0xa51, 0xa70, 0xa75, 0xa81, 0xabc, 0xabe, 0xac7,
-            0xacb, 0xae2, 0xb01, 0xb3c, 0xb3e, 0xb47, 0xb4b, 0xb56, 0xb62, 0xb82, 0xbbe, 0xbc6,
-            0xbca, 0xbd7, 0xc01, 0xc3e, 0xc46, 0xc4a, 0xc55, 0xc62, 0xc82, 0xcbc, 0xcbe, 0xcc6,
-            0xcca, 0xcd5, 0xce2, 0xd02, 0xd3e, 0xd46, 0xd4a, 0xd57, 0xd62, 0xd82, 0xdca, 0xdcf,
-            0xdd6, 0xdd8, 0xdf2, 0xe31, 0xe34, 0xe47, 0xeb1, 0xeb4, 0xebb, 0xec8, 0xf18, 0xf35,
-            0xf37, 0xf39, 0xf3e, 0xf71, 0xf86, 0xf8d, 0xf99, 0xfc6, 0x102b, 0x1056, 0x105e, 0x1062,
-            0x1067, 0x1071, 0x1082, 0x108f, 0x109a, 0x135d, 0x1712, 0x1732, 0x1752, 0x1772, 0x17b4, 
-            0x17dd, 0x180b, 0x18a9, 0x1920, 0x1930, 0x19b0, 0x19c8, 0x1a17, 0x1a55, 0x1a60, 0x1a7f,
-            0x1b00, 0x1b34, 0x1b6b, 0x1b80, 0x1ba1, 0x1be6, 0x1c24, 0x1cd0, 0x1cd4, 0x1ced, 0x1cf2, 
-            0x1dc0, 0x1dfc, 0x200c, 0x20d0, 0x2cef, 0x2d7f, 0x2de0, 0x302a, 0x3099, 0xa66f, 0xa674,
-            0xa69f, 0xa6f0, 0xa802, 0xa806, 0xa80b, 0xa823, 0xa880, 0xa8b4, 0xa8e0, 0xa926, 0xa947, 
-            0xa980, 0xa9b3, 0xaa29, 0xaa43, 0xaa4c, 0xaa7b, 0xaab0, 0xaab2, 0xaab7, 0xaabe, 0xaac1,
-            0xaaeb, 0xaaf5, 0xabe3, 0xabec, 0xfb1e, 0xfe00, 0xfe20, 0xff9e, 0x101fd, 0x10a01,
-            0x10a05, 0x10a0C, 0x10a38, 0x10a3F, 0x11000, 0x11001, 0x11038, 0x11080, 0x11082,
-            0x110b0, 0x110b3, 0x110b7, 0x110b9, 0x11100, 0x11127, 0x1112c, 0x11180, 0x11182,
-            0x111b3, 0x111b6, 0x111bF, 0x116ab, 0x116ac, 0x116b0, 0x116b6, 0x16f51, 0x16f8f,
-            0x1d165, 0x1d167, 0x1d16d, 0x1d17b, 0x1d185, 0x1d1aa, 0x1d242, 0xe0100 }; 
-        
-    StringBuilder builder = new StringBuilder();
-    int numChars = TestUtil.nextInt(random(), 100 * 1024, 1024 * 1024);
-    for (int i = 0 ; i < numChars ; ) {
-      builder.append(WordBreak_ExtendNumLet_chars[random().nextInt(WordBreak_ExtendNumLet_chars.length)]);
-      ++i;
-      if (random().nextBoolean()) {
-        int numFormatExtendChars = TestUtil.nextInt(random(), 1, 8);
-        for (int j = 0; j < numFormatExtendChars; ++j) {
-          int codepoint;
-          if (random().nextBoolean()) {
-            codepoint = WordBreak_Format_chars[random().nextInt(WordBreak_Format_chars.length)];
-          } else {
-            codepoint = WordBreak_Extend_chars[random().nextInt(WordBreak_Extend_chars.length)];
-          }
-          char[] chars = Character.toChars(codepoint);
-          builder.append(chars);
-          i += chars.length;
-        }
-      }
-    }
-    StandardTokenizer ts = new StandardTokenizer();
-    ts.setReader(new StringReader(builder.toString()));
-    ts.reset();
-    while (ts.incrementToken()) { }
-    ts.end();
-    ts.close();
-
-    int newBufferSize = TestUtil.nextInt(random(), 200, 8192);
-    ts.setMaxTokenLength(newBufferSize); // try a different buffer size
-    ts.setReader(new StringReader(builder.toString()));
-    ts.reset();
-    while (ts.incrementToken()) { }
-    ts.end();
-    ts.close();
-  }
-  
-  public void testHugeDoc() throws IOException {
-    StringBuilder sb = new StringBuilder();
-    char whitespace[] = new char[4094];
-    Arrays.fill(whitespace, ' ');
-    sb.append(whitespace);
-    sb.append("testing 1234");
-    String input = sb.toString();
-    StandardTokenizer tokenizer = new StandardTokenizer();
-    tokenizer.setReader(new StringReader(input));
-    BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
-  }
-
-  private Analyzer a;
-  
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    a = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        Tokenizer tokenizer = new StandardTokenizer(newAttributeFactory());
-        return new TokenStreamComponents(tokenizer);
-      }
-    };
-  }
-  
-  @Override
-  public void tearDown() throws Exception {
-    a.close();
-    super.tearDown();
-  }
-
-  public void testArmenian() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u054e\u056b\u0584\u056b\u057a\u0565\u0564\u056b\u0561\u0575\u056b 13 \u0574\u056b\u056c\u056b\u0578\u0576 \u0570\u0578\u0564\u057e\u0561\u056e\u0576\u0565\u0580\u0568 (4,600` \u0570\u0561\u0575\u0565\u0580\u0565\u0576 \u057e\u056b\u0584\u056b\u057a\u0565\u0564\u056b\u0561\u0575\u0578\u0582\u0574) \u0563\u0580\u057e\u0565\u056c \u0565\u0576 \u056f\u0561\u0574\u0561\u057e\u0578\u0580\u0576\u0565\u0580\u056b \u056f\u0578\u0572\u0574\u056b\u0581 \u0578\u0582 \u0570\u0561\u0574\u0561\u0580\u0575\u0561 \u0562\u0578\u056c\u0578\u0580 \u0570\u0578\u0564\u057e\u0561\u056e\u0576\u0565\u0580\u0568 \u056f\u0561\u0580\u0578\u0572 \u0567 \u056d\u0574\u0562\u0561\u0563\u0580\u0565\u056c \u0581\u0561\u0576\u056f\u0561\u0581 \u0574\u0561\u0580\u0564 \u0578\u057e \u056f\u0561\u0580\u0578\u0572 \u0567 \u0562\u0561\u0581\u0565\u056c \u054e\u056b\u0584\u056b\u057a\u0565\u0564\u056b\u0561\u0575\u056b \u056f\u0561\u0575\u0584\u0568\u0589",
-        new String[] { "\u054e\u056b\u0584\u056b\u057a\u0565\u0564\u056b\u0561\u0575\u056b", "13", "\u0574\u056b\u056c\u056b\u0578\u0576", "\u0570\u0578\u0564\u057e\u0561\u056e\u0576\u0565\u0580\u0568", "4,600", "\u0570\u0561\u0575\u0565\u0580\u0565\u0576", "\u057e\u056b\u0584\u056b\u057a\u0565\u0564\u056b\u0561\u0575\u0578\u0582\u0574", "\u0563\u0580\u057e\u0565\u056c", "\u0565\u0576", "\u056f\u0561\u0574\u0561\u057e\u0578\u0580\u0576\u0565\u0580\u056b", "\u056f\u0578\u0572\u0574\u056b\u0581", 
-        "\u0578\u0582", "\u0570\u0561\u0574\u0561\u0580\u0575\u0561", "\u0562\u0578\u056c\u0578\u0580", "\u0570\u0578\u0564\u057e\u0561\u056e\u0576\u0565\u0580\u0568", "\u056f\u0561\u0580\u0578\u0572", "\u0567", "\u056d\u0574\u0562\u0561\u0563\u0580\u0565\u056c", "\u0581\u0561\u0576\u056f\u0561\u0581", "\u0574\u0561\u0580\u0564", "\u0578\u057e", "\u056f\u0561\u0580\u0578\u0572", "\u0567", "\u0562\u0561\u0581\u0565\u056c", "\u054e\u056b\u0584\u056b\u057a\u0565\u0564\u056b\u0561\u0575\u056b", "\u056f\u0561\u0575\u0584\u0568" } );
-  }
-  
-  public void testAmharic() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u12ca\u12aa\u1354\u12f5\u12eb \u12e8\u1263\u1208 \u1265\u12d9 \u124b\u1295\u124b \u12e8\u1270\u121f\u120b \u1275\u12ad\u12ad\u1208\u129b\u1293 \u1290\u133b \u1218\u12dd\u1308\u1260 \u12d5\u12cd\u1240\u1275 (\u12a2\u1295\u1233\u12ed\u12ad\u120e\u1352\u12f2\u12eb) \u1290\u12cd\u1362 \u121b\u1295\u129b\u12cd\u121d",
-        new String[] { "\u12ca\u12aa\u1354\u12f5\u12eb", "\u12e8\u1263\u1208", "\u1265\u12d9", "\u124b\u1295\u124b", "\u12e8\u1270\u121f\u120b", "\u1275\u12ad\u12ad\u1208\u129b\u1293", "\u1290\u133b", "\u1218\u12dd\u1308\u1260", "\u12d5\u12cd\u1240\u1275", "\u12a2\u1295\u1233\u12ed\u12ad\u120e\u1352\u12f2\u12eb", "\u1290\u12cd", "\u121b\u1295\u129b\u12cd\u121d" } );
-  }
-  
-  public void testArabic() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u0627\u0644\u0641\u064a\u0644\u0645 \u0627\u0644\u0648\u062b\u0627\u0626\u0642\u064a \u0627\u0644\u0623\u0648\u0644 \u0639\u0646 \u0648\u064a\u0643\u064a\u0628\u064a\u062f\u064a\u0627 \u064a\u0633\u0645\u0649 \"\u0627\u0644\u062d\u0642\u064a\u0642\u0629 \u0628\u0627\u0644\u0623\u0631\u0642\u0627\u0645: \u0642\u0635\u0629 \u0648\u064a\u0643\u064a\u0628\u064a\u062f\u064a\u0627\" (\u0628\u0627\u0644\u0625\u0646\u062c\u0644\u064a\u0632\u064a\u0629: Truth in Numbers: The Wikipedia Story)\u060c \u0633\u064a\u062a\u0645 \u0625\u0637\u0644\u0627\u0642\u0647 \u0641\u064a 2008.",
-        new String[] { "\u0627\u0644\u0641\u064a\u0644\u0645", "\u0627\u0644\u0648\u062b\u0627\u0626\u0642\u064a", "\u0627\u0644\u0623\u0648\u0644", "\u0639\u0646", "\u0648\u064a\u0643\u064a\u0628\u064a\u062f\u064a\u0627", "\u064a\u0633\u0645\u0649", "\u0627\u0644\u062d\u0642\u064a\u0642\u0629", "\u0628\u0627\u0644\u0623\u0631\u0642\u0627\u0645", "\u0642\u0635\u0629", "\u0648\u064a\u0643\u064a\u0628\u064a\u062f\u064a\u0627",
-        "\u0628\u0627\u0644\u0625\u0646\u062c\u0644\u064a\u0632\u064a\u0629", "Truth", "in", "Numbers", "The", "Wikipedia", "Story", "\u0633\u064a\u062a\u0645", "\u0625\u0637\u0644\u0627\u0642\u0647", "\u0641\u064a", "2008" } ); 
-  }
-  
-  public void testAramaic() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u0718\u071d\u0729\u071d\u0726\u0715\u071d\u0710 (\u0710\u0722\u0713\u0720\u071d\u0710: Wikipedia) \u0717\u0718 \u0710\u071d\u0722\u0723\u0729\u0720\u0718\u0726\u0715\u071d\u0710 \u071a\u0710\u072a\u072c\u0710 \u0715\u0710\u0722\u071b\u072a\u0722\u071b \u0712\u0720\u072b\u0722\u0308\u0710 \u0723\u0713\u071d\u0710\u0308\u0710\u0702 \u072b\u0721\u0717 \u0710\u072c\u0710 \u0721\u0722 \u0721\u0308\u0720\u072c\u0710 \u0715\"\u0718\u071d\u0729\u071d\" \u0718\"\u0710\u071d\u0722\u0723\u0729\u0720\u0718\u0726\u0715\u071d\u0710\"\u0700",
-        new String[] { "\u0718\u071d\u0729\u071d\u0726\u0715\u071d\u0710", "\u0710\u0722\u0713\u0720\u071d\u0710", "Wikipedia", "\u0717\u0718", "\u0710\u071d\u0722\u0723\u0729\u0720\u0718\u0726\u0715\u071d\u0710", "\u071a\u0710\u072a\u072c\u0710", "\u0715\u0710\u0722\u071b\u072a\u0722\u071b", "\u0712\u0720\u072b\u0722\u0308\u0710", "\u0723\u0713\u071d\u0710\u0308\u0710", "\u072b\u0721\u0717",
-        "\u0710\u072c\u0710", "\u0721\u0722", "\u0721\u0308\u0720\u072c\u0710", "\u0715", "\u0718\u071d\u0729\u071d", "\u0718", "\u0710\u071d\u0722\u0723\u0729\u0720\u0718\u0726\u0715\u071d\u0710"});
-  }
-  
-  public void testBengali() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u098f\u0987 \u09ac\u09bf\u09b6\u09cd\u09ac\u0995\u09cb\u09b7 \u09aa\u09b0\u09bf\u099a\u09be\u09b2\u09a8\u09be \u0995\u09b0\u09c7 \u0989\u0987\u0995\u09bf\u09ae\u09bf\u09a1\u09bf\u09af\u09bc\u09be \u09ab\u09be\u0989\u09a8\u09cd\u09a1\u09c7\u09b6\u09a8 (\u098f\u0995\u099f\u09bf \u0985\u09b2\u09be\u09ad\u099c\u09a8\u0995 \u09b8\u0982\u09b8\u09cd\u09a5\u09be)\u0964 \u0989\u0987\u0995\u09bf\u09aa\u09bf\u09a1\u09bf\u09af\u09bc\u09be\u09b0 \u09b6\u09c1\u09b0\u09c1 \u09e7\u09eb \u099c\u09be\u09a8\u09c1\u09af\u09bc\u09be\u09b0\u09bf, \u09e8\u09e6\u09e6\u09e7 \u09b8\u09be\u09b2\u09c7\u0964 \u098f\u0996\u09a8 \u09aa\u09b0\u09cd\u09af\u09a8\u09cd\u09a4 \u09e8\u09e6\u09e6\u099f\u09bf\u09b0\u0993 \u09ac\u09c7\u09b6\u09c0 \u09ad\u09be\u09b7\u09be\u09af\u09bc \u0989\u0987\u0995\u09bf\u09aa\u09bf\u09a1\u09bf\u09af\u09bc\u09be \u09b0\u09af\u09bc\u09c7\u099b\u09c7\u0964",
-        new String[] { "\u098f\u0987", "\u09ac\u09bf\u09b6\u09cd\u09ac\u0995\u09cb\u09b7", "\u09aa\u09b0\u09bf\u099a\u09be\u09b2\u09a8\u09be", "\u0995\u09b0\u09c7", "\u0989\u0987\u0995\u09bf\u09ae\u09bf\u09a1\u09bf\u09af\u09bc\u09be", "\u09ab\u09be\u0989\u09a8\u09cd\u09a1\u09c7\u09b6\u09a8", "\u098f\u0995\u099f\u09bf", "\u0985\u09b2\u09be\u09ad\u099c\u09a8\u0995", "\u09b8\u0982\u09b8\u09cd\u09a5\u09be", "\u0989\u0987\u0995\u09bf\u09aa\u09bf\u09a1\u09bf\u09af\u09bc\u09be\u09b0",
-        "\u09b6\u09c1\u09b0\u09c1", "\u09e7\u09eb", "\u099c\u09be\u09a8\u09c1\u09af\u09bc\u09be\u09b0\u09bf", "\u09e8\u09e6\u09e6\u09e7", "\u09b8\u09be\u09b2\u09c7", "\u098f\u0996\u09a8", "\u09aa\u09b0\u09cd\u09af\u09a8\u09cd\u09a4", "\u09e8\u09e6\u09e6\u099f\u09bf\u09b0\u0993", "\u09ac\u09c7\u09b6\u09c0", "\u09ad\u09be\u09b7\u09be\u09af\u09bc", "\u0989\u0987\u0995\u09bf\u09aa\u09bf\u09a1\u09bf\u09af\u09bc\u09be", "\u09b0\u09af\u09bc\u09c7\u099b\u09c7" });
-  }
-  
-  public void testFarsi() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u0648\u06cc\u06a9\u06cc \u067e\u062f\u06cc\u0627\u06cc \u0627\u0646\u06af\u0644\u06cc\u0633\u06cc \u062f\u0631 \u062a\u0627\u0631\u06cc\u062e \u06f2\u06f5 \u062f\u06cc \u06f1\u06f3\u06f7\u06f9 \u0628\u0647 \u0635\u0648\u0631\u062a \u0645\u06a9\u0645\u0644\u06cc \u0628\u0631\u0627\u06cc \u062f\u0627\u0646\u0634\u0646\u0627\u0645\u0647\u0654 \u062a\u062e\u0635\u0635\u06cc \u0646\u0648\u067e\u062f\u06cc\u0627 \u0646\u0648\u0634\u062a\u0647 \u0634\u062f.",
-        new String[] { "\u0648\u06cc\u06a9\u06cc", "\u067e\u062f\u06cc\u0627\u06cc", "\u0627\u0646\u06af\u0644\u06cc\u0633\u06cc", "\u062f\u0631", "\u062a\u0627\u0631\u06cc\u062e", "\u06f2\u06f5", "\u062f\u06cc", "\u06f1\u06f3\u06f7\u06f9", "\u0628\u0647", "\u0635\u0648\u0631\u062a", "\u0645\u06a9\u0645\u0644\u06cc",
-        "\u0628\u0631\u0627\u06cc", "\u062f\u0627\u0646\u0634\u0646\u0627\u0645\u0647\u0654", "\u062a\u062e\u0635\u0635\u06cc", "\u0646\u0648\u067e\u062f\u06cc\u0627", "\u0646\u0648\u0634\u062a\u0647", "\u0634\u062f" });
-  }
-  
-  public void testGreek() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u0393\u03c1\u03ac\u03c6\u03b5\u03c4\u03b1\u03b9 \u03c3\u03b5 \u03c3\u03c5\u03bd\u03b5\u03c1\u03b3\u03b1\u03c3\u03af\u03b1 \u03b1\u03c0\u03cc \u03b5\u03b8\u03b5\u03bb\u03bf\u03bd\u03c4\u03ad\u03c2 \u03bc\u03b5 \u03c4\u03bf \u03bb\u03bf\u03b3\u03b9\u03c3\u03bc\u03b9\u03ba\u03cc wiki, \u03ba\u03ac\u03c4\u03b9 \u03c0\u03bf\u03c5 \u03c3\u03b7\u03bc\u03b1\u03af\u03bd\u03b5\u03b9 \u03cc\u03c4\u03b9 \u03ac\u03c1\u03b8\u03c1\u03b1 \u03bc\u03c0\u03bf\u03c1\u03b5\u03af \u03bd\u03b1 \u03c0\u03c1\u03bf\u03c3\u03c4\u03b5\u03b8\u03bf\u03cd\u03bd \u03ae \u03bd\u03b1 \u03b1\u03bb\u03bb\u03ac\u03be\u03bf\u03c5\u03bd \u03b1\u03c0\u03cc \u03c4\u03bf\u03bd \u03ba\u03b1\u03b8\u03ad\u03bd\u03b1.",
-        new String[] { "\u0393\u03c1\u03ac\u03c6\u03b5\u03c4\u03b1\u03b9", "\u03c3\u03b5", "\u03c3\u03c5\u03bd\u03b5\u03c1\u03b3\u03b1\u03c3\u03af\u03b1", "\u03b1\u03c0\u03cc", "\u03b5\u03b8\u03b5\u03bb\u03bf\u03bd\u03c4\u03ad\u03c2", "\u03bc\u03b5", "\u03c4\u03bf", "\u03bb\u03bf\u03b3\u03b9\u03c3\u03bc\u03b9\u03ba\u03cc", "wiki", "\u03ba\u03ac\u03c4\u03b9", "\u03c0\u03bf\u03c5",
-        "\u03c3\u03b7\u03bc\u03b1\u03af\u03bd\u03b5\u03b9", "\u03cc\u03c4\u03b9", "\u03ac\u03c1\u03b8\u03c1\u03b1", "\u03bc\u03c0\u03bf\u03c1\u03b5\u03af", "\u03bd\u03b1", "\u03c0\u03c1\u03bf\u03c3\u03c4\u03b5\u03b8\u03bf\u03cd\u03bd", "\u03ae", "\u03bd\u03b1", "\u03b1\u03bb\u03bb\u03ac\u03be\u03bf\u03c5\u03bd", "\u03b1\u03c0\u03cc", "\u03c4\u03bf\u03bd", "\u03ba\u03b1\u03b8\u03ad\u03bd\u03b1" });
-  }
-
-  public void testThai() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u0e01\u0e32\u0e23\u0e17\u0e35\u0e48\u0e44\u0e14\u0e49\u0e15\u0e49\u0e2d\u0e07\u0e41\u0e2a\u0e14\u0e07\u0e27\u0e48\u0e32\u0e07\u0e32\u0e19\u0e14\u0e35. \u0e41\u0e25\u0e49\u0e27\u0e40\u0e18\u0e2d\u0e08\u0e30\u0e44\u0e1b\u0e44\u0e2b\u0e19? \u0e51\u0e52\u0e53\u0e54",
-        new String[] { "\u0e01\u0e32\u0e23\u0e17\u0e35\u0e48\u0e44\u0e14\u0e49\u0e15\u0e49\u0e2d\u0e07\u0e41\u0e2a\u0e14\u0e07\u0e27\u0e48\u0e32\u0e07\u0e32\u0e19\u0e14\u0e35", "\u0e41\u0e25\u0e49\u0e27\u0e40\u0e18\u0e2d\u0e08\u0e30\u0e44\u0e1b\u0e44\u0e2b\u0e19", "\u0e51\u0e52\u0e53\u0e54" });
-  }
-  
-  public void testLao() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u0eaa\u0eb2\u0e97\u0eb2\u0ea5\u0eb0\u0e99\u0eb0\u0ea5\u0eb1\u0e94 \u0e9b\u0eb0\u0e8a\u0eb2\u0e97\u0eb4\u0e9b\u0eb0\u0ec4\u0e95 \u0e9b\u0eb0\u0e8a\u0eb2\u0e8a\u0ebb\u0e99\u0ea5\u0eb2\u0ea7", 
-        new String[] { "\u0eaa\u0eb2\u0e97\u0eb2\u0ea5\u0eb0\u0e99\u0eb0\u0ea5\u0eb1\u0e94", "\u0e9b\u0eb0\u0e8a\u0eb2\u0e97\u0eb4\u0e9b\u0eb0\u0ec4\u0e95", "\u0e9b\u0eb0\u0e8a\u0eb2\u0e8a\u0ebb\u0e99\u0ea5\u0eb2\u0ea7" });
-  }
-  
-  public void testTibetan() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u0f66\u0fa3\u0f7c\u0f53\u0f0b\u0f58\u0f5b\u0f7c\u0f51\u0f0b\u0f51\u0f44\u0f0b\u0f63\u0f66\u0f0b\u0f60\u0f51\u0f72\u0f66\u0f0b\u0f56\u0f7c\u0f51\u0f0b\u0f61\u0f72\u0f42\u0f0b\u0f58\u0f72\u0f0b\u0f49\u0f58\u0f66\u0f0b\u0f42\u0f7c\u0f44\u0f0b\u0f60\u0f55\u0f7a\u0f63\u0f0b\u0f51\u0f74\u0f0b\u0f42\u0f4f\u0f7c\u0f44\u0f0b\u0f56\u0f62\u0f0b\u0f67\u0f0b\u0f45\u0f44\u0f0b\u0f51\u0f42\u0f7a\u0f0b\u0f58\u0f5a\u0f53\u0f0b\u0f58\u0f46\u0f72\u0f66\u0f0b\u0f66\u0f7c\u0f0d \u0f0d",
-                     new String[] { "\u0f66\u0fa3\u0f7c\u0f53", "\u0f58\u0f5b\u0f7c\u0f51", "\u0f51\u0f44", "\u0f63\u0f66", "\u0f60\u0f51\u0f72\u0f66", "\u0f56\u0f7c\u0f51", "\u0f61\u0f72\u0f42", 
-                                    "\u0f58\u0f72", "\u0f49\u0f58\u0f66", "\u0f42\u0f7c\u0f44", "\u0f60\u0f55\u0f7a\u0f63", "\u0f51\u0f74", "\u0f42\u0f4f\u0f7c\u0f44", "\u0f56\u0f62", 
-                                    "\u0f67", "\u0f45\u0f44", "\u0f51\u0f42\u0f7a", "\u0f58\u0f5a\u0f53", "\u0f58\u0f46\u0f72\u0f66", "\u0f66\u0f7c" });
-  }
-  
-  /*
-   * For Chinese, tokenize per character (these can later form bigrams or whatever)
-   */
-  public void testChinese() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u6211\u662f\u4e2d\u56fd\u4eba\u3002 \uff11\uff12\uff13\uff14 \uff34\uff45\uff53\uff54\uff53 ",
-        new String[] { "\u6211", "\u662f", "\u4e2d", "\u56fd", "\u4eba", "\uff11\uff12\uff13\uff14", "\uff34\uff45\uff53\uff54\uff53"});
-  }
-  
-  public void testEmpty() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "", new String[] {});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, ".", new String[] {});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, " ", new String[] {});
-  }
-  
-  /* test various jira issues this analyzer is related to */
-  
-  public void testLUCENE1545() throws Exception {
-    /*
-     * Standard analyzer does not correctly tokenize the combining character U+0364 COMBINING LATIN SMALL LETTER E.
-     * The word "mo\u0364chte" is incorrectly tokenized into "mo" and "chte"; the combining character is lost.
-     * The expected result is a single token, "mo\u0364chte".
-     */
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "mo\u0364chte", new String[] { "mo\u0364chte" }); 
-  }
-  
-  /* Tests from StandardAnalyzer, just to show behavior is similar */
-  public void testAlphanumericSA() throws Exception {
-    // alphanumeric tokens
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "B2B", new String[]{"B2B"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "2B", new String[]{"2B"});
-  }
-
-  public void testDelimitersSA() throws Exception {
-    // other delimiters: "-", "/", ","
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "some-dashed-phrase", new String[]{"some", "dashed", "phrase"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "dogs,chase,cats", new String[]{"dogs", "chase", "cats"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "ac/dc", new String[]{"ac", "dc"});
-  }
-
-  public void testApostrophesSA() throws Exception {
-    // internal apostrophes: O'Reilly, you're, O'Reilly's
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly", new String[]{"O'Reilly"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "you're", new String[]{"you're"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "she's", new String[]{"she's"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "Jim's", new String[]{"Jim's"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "don't", new String[]{"don't"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly's", new String[]{"O'Reilly's"});
-  }
-
-  public void testNumericSA() throws Exception {
-    // floating point, serial, model numbers, ip addresses, etc.
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "21.35", new String[]{"21.35"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "R2D2 C3PO", new String[]{"R2D2", "C3PO"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"});
-  }
-
-  public void testTextWithNumbersSA() throws Exception {
-    // numbers
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", new String[]{"David", "has", "5000", "bones"});
-  }
-
-  public void testVariousTextSA() throws Exception {
-    // various
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "C embedded developers wanted", new String[]{"C", "embedded", "developers", "wanted"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo bar FOO BAR", new String[]{"foo", "bar", "FOO", "BAR"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo      bar .  FOO <> BAR", new String[]{"foo", "bar", "FOO", "BAR"});
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\"QUOTED\" word", new String[]{"QUOTED", "word"});
-  }
-
-  public void testKoreanSA() throws Exception {
-    // Korean words
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\uc548\ub155\ud558\uc138\uc694 \ud55c\uae00\uc785\ub2c8\ub2e4", new String[]{"\uc548\ub155\ud558\uc138\uc694", "\ud55c\uae00\uc785\ub2c8\ub2e4"});
-  }
-  
-  public void testOffsets() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", 
-        new String[] {"David", "has", "5000", "bones"},
-        new int[] {0, 6, 10, 15},
-        new int[] {5, 9, 14, 20});
-  }
-  
-  public void testTypes() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", 
-        new String[] {"David", "has", "5000", "bones"},
-        new String[] { "<ALPHANUM>", "<ALPHANUM>", "<NUM>", "<ALPHANUM>" });
-  }
-  
-  public void testUnicodeWordBreaks() throws Exception {
-    WordBreakTestUnicode_6_3_0 wordBreakTest = new WordBreakTestUnicode_6_3_0();
-    wordBreakTest.test(a);
-  }
-  
-  public void testSupplementary() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\U00029b05\u8271\u935f\u41f9\u612f\u701b", 
-        new String[] {"\U00029b05", "\u8271", "\u935f", "\u41f9", "\u612f", "\u701b"},
-        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>" });
-  }
-  
-  public void testKorean() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\ud6c8\ubbfc\uc815\uc74c",
-        new String[] { "\ud6c8\ubbfc\uc815\uc74c" },
-        new String[] { "<HANGUL>" });
-  }
-  
-  public void testJapanese() throws Exception {
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "\u4eee\u540d\u9063\u3044 \u30ab\u30bf\u30ab\u30ca",
-        new String[] { "\u4eee", "\u540d", "\u9063", "\u3044", "\u30ab\u30bf\u30ab\u30ca" },
-        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<HIRAGANA>", "<KATAKANA>" });
-  }
-  
-  public void testCombiningMarks() throws Exception {
-    checkOneTerm(a, "\u3055\u3099", "\u3055\u3099"); // hiragana
-    checkOneTerm(a, "\u30b5\u3099", "\u30b5\u3099"); // katakana
-    checkOneTerm(a, "\u58f9\u3099", "\u58f9\u3099"); // ideographic
-    checkOneTerm(a, "\uc544\u3099",  "\uc544\u3099"); // hangul
-  }
-
-  /**
-   * Multiple consecutive chars in \p{WB:MidLetter}, \p{WB:MidNumLet},
-   * and/or \p{WB:MidNum} should trigger a token split.
-   */
-  public void testMid() throws Exception {
-    // ':' is in \p{WB:MidLetter}, which should trigger a split unless there is a Letter char on both sides
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A:B", new String[] { "A:B" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A::B", new String[] { "A", "B" });
-
-    // '.' is in \p{WB:MidNumLet}, which should trigger a split unless there is a Letter or Numeric char on both sides
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1.2", new String[] { "1.2" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A.B", new String[] { "A.B" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1..2", new String[] { "1", "2" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A..B", new String[] { "A", "B" });
-
-    // ',' is in \p{WB:MidNum}, which should trigger a split unless there is a Numeric char on both sides
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1,2", new String[] { "1,2" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1,,2", new String[] { "1", "2" });
-
-    // Mixed consecutive \p{WB:MidLetter} and \p{WB:MidNumLet} should trigger a split
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A.:B", new String[] { "A", "B" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A:.B", new String[] { "A", "B" });
-
-    // Mixed consecutive \p{WB:MidNum} and \p{WB:MidNumLet} should trigger a split
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1,.2", new String[] { "1", "2" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1.,2", new String[] { "1", "2" });
-
-    // '_' is in \p{WB:ExtendNumLet}
-
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A:B_A:B", new String[] { "A:B_A:B" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A:B_A::B", new String[] { "A:B_A", "B" });
-
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1.2_1.2", new String[] { "1.2_1.2" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A.B_A.B", new String[] { "A.B_A.B" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1.2_1..2", new String[] { "1.2_1", "2" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "A.B_A..B", new String[] { "A.B_A", "B" });
-
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1,2_1,2", new String[] { "1,2_1,2" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "1,2_1,,2", new String[] { "1,2_1", "2" });
-
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "C_A.:B", new String[] { "C_A", "B" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "C_A:.B", new String[] { "C_A", "B" });
-
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "3_1,.2", new String[] { "3_1", "2" });
-    BaseTokenStreamTestCase.assertAnalyzesTo(a, "3_1.,2", new String[] { "3_1", "2" });
-  }
-
-
-
-  /** blast some random strings through the analyzer */
-  public void testRandomStrings() throws Exception {
-    Analyzer analyzer = new StandardAnalyzer();
-    checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
-    analyzer.close();
-  }
-  
-  /** blast some random large strings through the analyzer */
-  public void testRandomHugeStrings() throws Exception {
-    Analyzer analyzer = new StandardAnalyzer();
-    checkRandomData(random(), analyzer, 100*RANDOM_MULTIPLIER, 8192);
-    analyzer.close();
-  }
-
-  // Adds a random graph filter after the tokenizer:
-  public void testRandomHugeStringsGraphAfter() throws Exception {
-    Random random = random();
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        Tokenizer tokenizer = new StandardTokenizer(newAttributeFactory());
-        TokenStream tokenStream = new MockGraphTokenFilter(random(), tokenizer);
-        return new TokenStreamComponents(tokenizer, tokenStream);
-      }
-    };
-    checkRandomData(random, analyzer, 100*RANDOM_MULTIPLIER, 8192);
-    analyzer.close();
-  }
-}