Posted to java-commits@lucene.apache.org by rm...@apache.org on 2010/01/17 20:25:57 UTC

svn commit: r900196 - in /lucene/java/trunk: ./ contrib/ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk...

Author: rmuir
Date: Sun Jan 17 19:25:57 2010
New Revision: 900196

URL: http://svn.apache.org/viewvc?rev=900196&view=rev
Log:
LUCENE-2207: CJKTokenizer generates tokens with incorrect offsets
LUCENE-2219: Chinese, SmartChinese, Wikipedia tokenizers generate incorrect offsets; test end() in BaseTokenStreamTestCase

Modified:
    lucene/java/trunk/CHANGES.txt
    lucene/java/trunk/contrib/CHANGES.txt
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java
    lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
    lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
    lucene/java/trunk/contrib/analyzers/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SentenceTokenizer.java
    lucene/java/trunk/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java
    lucene/java/trunk/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
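
Every tokenizer fix below follows the same pattern: end() must report the final
offset through correctOffset(), just as incrementToken() does for token offsets,
so that any CharFilter in front of the tokenizer is accounted for. A minimal
sketch of the pattern (mirroring the diffs below; `offset` stands for the
tokenizer's running count of characters consumed from the reader):

    @Override
    public final void end() {
      // Map the raw end-of-stream position back through any CharFilters.
      final int finalOffset = correctOffset(offset);
      offsetAtt.setOffset(finalOffset, finalOffset);
    }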

Modified: lucene/java/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/java/trunk/CHANGES.txt?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/CHANGES.txt (original)
+++ lucene/java/trunk/CHANGES.txt Sun Jan 17 19:25:57 2010
@@ -203,6 +203,9 @@
   that checks if clearAttributes() was called correctly.
   (Uwe Schindler, Robert Muir)
   
+* LUCENE-2207, LUCENE-2219: Improve BaseTokenStreamTestCase to check if 
+  end() is implemented correctly.  (Koji Sekiguchi, Robert Muir)
+  
 ======================= Release 3.0.0 2009-11-25 =======================
 
 Changes in backwards compatibility policy

Modified: lucene/java/trunk/contrib/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/CHANGES.txt?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/CHANGES.txt (original)
+++ lucene/java/trunk/contrib/CHANGES.txt Sun Jan 17 19:25:57 2010
@@ -45,6 +45,10 @@
    ShingleMatrix, PrefixAware, compounds, NGramTokenFilter,
    EdgeNGramTokenFilter, Highlighter, and MemoryIndex.
    (Uwe Schindler, Robert Muir)
+
+ * LUCENE-2207, LUCENE-2219: Fix incorrect offset calculations in end() for 
+   CJKTokenizer, ChineseTokenizer, SmartChinese SentenceTokenizer, 
+   and WikipediaTokenizer.  (Koji Sekiguchi, Robert Muir)
    
 API Changes
 

Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java Sun Jan 17 19:25:57 2010
@@ -175,9 +175,13 @@
                         length = 0;
                         preIsTokened = false;
                     }
+                    else{
+                      offset--;
+                    }
 
                     break;
                 } else {
+                    offset--;
                     return false;
                 }
             } else {
@@ -288,6 +292,7 @@
           typeAtt.setType(TOKEN_TYPE_NAMES[tokenType]);
           return true;
         } else if (dataLen == -1) {
+          offset--;
           return false;
         }
 
@@ -299,7 +304,7 @@
     @Override
     public final void end() {
       // set final offset
-      final int finalOffset = offset;
+      final int finalOffset = correctOffset(offset);
       this.offsetAtt.setOffset(finalOffset, finalOffset);
     }
     

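The offset-- lines are the heart of the fix: CJKTokenizer's main loop increments
offset before it checks for end of input, so the counter ends up one past the
last character and end() used to report one position beyond the text. A
test-style sketch of the before/after behavior (the expected value matches the
new testFinalOffset added below):

    Tokenizer tokenizer = new CJKTokenizer(new StringReader("test"));
    OffsetAttribute offsetAtt = tokenizer.getAttribute(OffsetAttribute.class);
    while (tokenizer.incrementToken()) { /* drain the stream */ }
    tokenizer.end();
    // "test" is 4 chars: end() must report 4; before this fix it reported 5,
    // since offset had been incremented once more on the read that hit EOF.
    assertEquals(4, offsetAtt.endOffset());
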
Modified: lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java Sun Jan 17 19:25:57 2010
@@ -129,8 +129,10 @@
                 bufferIndex = 0;
             }
 
-            if (dataLen == -1) return flush();
-            else
+            if (dataLen == -1) {
+              offset--;
+              return flush();
+            } else
                 c = ioBuffer[bufferIndex++];
 
 
@@ -162,7 +164,7 @@
     @Override
     public final void end() {
       // set final offset
-      final int finalOffset = offset;
+      final int finalOffset = correctOffset(offset);
       this.offsetAtt.setOffset(finalOffset, finalOffset);
     }
 

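ChineseTokenizer gets the same end-of-input decrement, applied before flush().
Assuming flush() builds the pending token's offsets from the tokenizer's own
start and length bookkeeping (as the unmodified code suggests), the decrement
only corrects the running counter that end() reports; the flushed token's
offsets are unaffected.
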
Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java Sun Jan 17 19:25:57 2010
@@ -18,14 +18,9 @@
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.apache.lucene.util.Version;
 
 public class TestCJKTokenizer extends BaseTokenStreamTestCase {
@@ -47,33 +42,33 @@
   }
 
   public void checkCJKToken(final String str, final TestToken[] out_tokens) throws IOException {
-    CJKTokenizer tokenizer = new CJKTokenizer(new StringReader(str));
-    TermAttribute termAtt = tokenizer.getAttribute(TermAttribute.class);
-    OffsetAttribute offsetAtt = tokenizer.getAttribute(OffsetAttribute.class);
-    TypeAttribute typeAtt = tokenizer.getAttribute(TypeAttribute.class);
+    Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+    String terms[] = new String[out_tokens.length];
+    int startOffsets[] = new int[out_tokens.length];
+    int endOffsets[] = new int[out_tokens.length];
+    String types[] = new String[out_tokens.length];
     for (int i = 0; i < out_tokens.length; i++) {
-      assertTrue(tokenizer.incrementToken());
-      assertEquals(termAtt.term(), out_tokens[i].termText);
-      assertEquals(offsetAtt.startOffset(), out_tokens[i].start);
-      assertEquals(offsetAtt.endOffset(), out_tokens[i].end);
-      assertEquals(typeAtt.type(), out_tokens[i].type);
+      terms[i] = out_tokens[i].termText;
+      startOffsets[i] = out_tokens[i].start;
+      endOffsets[i] = out_tokens[i].end;
+      types[i] = out_tokens[i].type;
     }
-    assertFalse(tokenizer.incrementToken());
+    assertAnalyzesTo(analyzer, str, terms, startOffsets, endOffsets, types, null);
   }
   
   public void checkCJKTokenReusable(final Analyzer a, final String str, final TestToken[] out_tokens) throws IOException {
-    TokenStream ts = a.reusableTokenStream("dummy", new StringReader(str));
-    TermAttribute termAtt = ts.getAttribute(TermAttribute.class);
-    OffsetAttribute offsetAtt = ts.getAttribute(OffsetAttribute.class);
-    TypeAttribute typeAtt = ts.getAttribute(TypeAttribute.class);
+    Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+    String terms[] = new String[out_tokens.length];
+    int startOffsets[] = new int[out_tokens.length];
+    int endOffsets[] = new int[out_tokens.length];
+    String types[] = new String[out_tokens.length];
     for (int i = 0; i < out_tokens.length; i++) {
-      assertTrue(ts.incrementToken());
-      assertEquals(termAtt.term(), out_tokens[i].termText);
-      assertEquals(offsetAtt.startOffset(), out_tokens[i].start);
-      assertEquals(offsetAtt.endOffset(), out_tokens[i].end);
-      assertEquals(typeAtt.type(), out_tokens[i].type);
+      terms[i] = out_tokens[i].termText;
+      startOffsets[i] = out_tokens[i].start;
+      endOffsets[i] = out_tokens[i].end;
+      types[i] = out_tokens[i].type;
     }
-    assertFalse(ts.incrementToken());
+    assertAnalyzesToReuse(analyzer, str, terms, startOffsets, endOffsets, types, null);
   }
   
   public void testJa1() throws IOException {
@@ -219,13 +214,8 @@
   
   public void testTokenStream() throws Exception {
     Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
-    TokenStream ts = analyzer.tokenStream("dummy", new StringReader("\u4e00\u4e01\u4e02"));
-    TermAttribute termAtt = ts.getAttribute(TermAttribute.class);
-    assertTrue(ts.incrementToken());
-    assertEquals("\u4e00\u4e01", termAtt.term());
-    assertTrue(ts.incrementToken());
-    assertEquals("\u4e01\u4e02", termAtt.term());
-    assertFalse(ts.incrementToken());
+    assertAnalyzesTo(analyzer, "\u4e00\u4e01\u4e02", 
+        new String[] { "\u4e00\u4e01", "\u4e01\u4e02"});
   }
   
   public void testReusableTokenStream() throws Exception {
@@ -261,4 +251,24 @@
     };
     checkCJKTokenReusable(analyzer, str, out_tokens2);
   }
+  
+  /**
+   * LUCENE-2207: wrong offset calculated by end() 
+   */
+  public void testFinalOffset() throws IOException {
+    checkCJKToken("あい", new TestToken[] { 
+        newToken("あい", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE) });
+    checkCJKToken("あい   ", new TestToken[] { 
+        newToken("あい", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE) });
+    checkCJKToken("test", new TestToken[] { 
+        newToken("test", 0, 4, CJKTokenizer.SINGLE_TOKEN_TYPE) });
+    checkCJKToken("test   ", new TestToken[] { 
+        newToken("test", 0, 4, CJKTokenizer.SINGLE_TOKEN_TYPE) });
+    checkCJKToken("あいtest", new TestToken[] {
+        newToken("あい", 0, 2, CJKTokenizer.DOUBLE_TOKEN_TYPE),
+        newToken("test", 2, 6, CJKTokenizer.SINGLE_TOKEN_TYPE) });
+    checkCJKToken("testあい    ", new TestToken[] { 
+        newToken("test", 0, 4, CJKTokenizer.SINGLE_TOKEN_TYPE),
+        newToken("あい", 4, 6, CJKTokenizer.DOUBLE_TOKEN_TYPE) });
+  }
 }
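
The trailing-whitespace inputs are the telling cases: for "あい   " (five
characters) the only token ends at offset 2, yet assertAnalyzesTo (see the
BaseTokenStreamTestCase change below) verifies that end() reports the full
input length, 5. Inputs whose final offset differs from the last token's end
offset are exactly what catches the off-by-one fixed above.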

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java Sun Jan 17 19:25:57 2010
@@ -66,33 +66,33 @@
 
   public void testFrontUnigram() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 1);
-    assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1});
+    assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1}, 5 /* abcde */);
   }
 
   public void testBackUnigram() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.BACK, 1, 1);
-    assertTokenStreamContents(tokenizer, new String[]{"e"}, new int[]{4}, new int[]{5});
+    assertTokenStreamContents(tokenizer, new String[]{"e"}, new int[]{4}, new int[]{5}, 5 /* abcde */);
   }
 
   public void testOversizedNgrams() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 6, 6);
-    assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0]);
+    assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0], 5 /* abcde */);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
-    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
   }
 
   public void testBackRangeOfNgrams() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.BACK, 1, 3);
-    assertTokenStreamContents(tokenizer, new String[]{"e","de","cde"}, new int[]{4,3,2}, new int[]{5,5,5});
+    assertTokenStreamContents(tokenizer, new String[]{"e","de","cde"}, new int[]{4,3,2}, new int[]{5,5,5}, 5 /* abcde */);
   }
   
   public void testReset() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
-    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
     tokenizer.reset(new StringReader("abcde"));
-    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
   }
 }
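
In these n-gram tests the new trailing argument is the expected final offset,
with 5 /* abcde */ spelling out that it equals the input length.
testOversizedNgrams is the sharpest case: the stream emits no tokens at all,
but end() must still report offset 5.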

Modified: lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java Sun Jan 17 19:25:57 2010
@@ -58,12 +58,12 @@
 
     public void testUnigrams() throws Exception {
         NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 1);
-        assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
+        assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5}, 5 /* abcde */);
     }
 
     public void testBigrams() throws Exception {
         NGramTokenizer tokenizer = new NGramTokenizer(input, 2, 2);
-        assertTokenStreamContents(tokenizer, new String[]{"ab","bc","cd","de"}, new int[]{0,1,2,3}, new int[]{2,3,4,5});
+        assertTokenStreamContents(tokenizer, new String[]{"ab","bc","cd","de"}, new int[]{0,1,2,3}, new int[]{2,3,4,5}, 5 /* abcde */);
     }
 
     public void testNgrams() throws Exception {
@@ -71,19 +71,20 @@
         assertTokenStreamContents(tokenizer,
           new String[]{"a","b","c","d","e", "ab","bc","cd","de", "abc","bcd","cde"}, 
           new int[]{0,1,2,3,4, 0,1,2,3, 0,1,2},
-          new int[]{1,2,3,4,5, 2,3,4,5, 3,4,5}
+          new int[]{1,2,3,4,5, 2,3,4,5, 3,4,5},
+          5 /* abcde */
         );
     }
 
     public void testOversizedNgrams() throws Exception {
         NGramTokenizer tokenizer = new NGramTokenizer(input, 6, 7);
-        assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0]);
+        assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0], 5 /* abcde */);
     }
     
     public void testReset() throws Exception {
       NGramTokenizer tokenizer = new NGramTokenizer(input, 1, 1);
-      assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
+      assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5}, 5 /* abcde */);
       tokenizer.reset(new StringReader("abcde"));
-      assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
+      assertTokenStreamContents(tokenizer, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5}, 5 /* abcde */);
     }
 }

Modified: lucene/java/trunk/contrib/analyzers/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SentenceTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/analyzers/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SentenceTokenizer.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/analyzers/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SentenceTokenizer.java (original)
+++ lucene/java/trunk/contrib/analyzers/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SentenceTokenizer.java Sun Jan 17 19:25:57 2010
@@ -134,4 +134,11 @@
     super.reset(input);
     reset();
   }
+
+  @Override
+  public void end() throws IOException {
+    // set final offset
+    final int finalOffset = correctOffset(tokenEnd);
+    offsetAtt.setOffset(finalOffset, finalOffset);
+  }
 }
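
SentenceTokenizer already tracks where the last returned sentence ended in its
tokenEnd field, so its new end() just runs that value through correctOffset()
instead of maintaining a separate counter.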

Modified: lucene/java/trunk/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java (original)
+++ lucene/java/trunk/contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java Sun Jan 17 19:25:57 2010
@@ -320,4 +320,10 @@
     reset();
   }
 
+  @Override
+  public void end() throws IOException {
+    // set final offset
+    final int finalOffset = correctOffset(scanner.yychar() + scanner.yylength());
+    this.offsetAtt.setOffset(finalOffset, finalOffset);
+  }
 }
\ No newline at end of file
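
WikipediaTokenizer is JFlex-based, so the final offset falls out of scanner
state: yychar() is the character offset at which the last match began and
yylength() its length, so their sum is the end of the last region the scanner
consumed.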

Modified: lucene/java/trunk/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java (original)
+++ lucene/java/trunk/contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java Sun Jan 17 19:25:57 2010
@@ -45,9 +45,15 @@
   }
 
   public void testSimple() throws Exception {
-    WikipediaTokenizer tf = new WikipediaTokenizer(new StringReader("This is a [[Category:foo]]"));
+    String text = "This is a [[Category:foo]]";
+    WikipediaTokenizer tf = new WikipediaTokenizer(new StringReader(text));
     assertTokenStreamContents(tf,
-        new String[] { "This", "is", "a", "foo" });
+        new String[] { "This", "is", "a", "foo" },
+        new int[] { 0, 5, 8, 21 },
+        new int[] { 4, 7, 9, 24 },
+        new String[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", WikipediaTokenizer.CATEGORY },
+        new int[] { 1, 1, 1, 1, },
+        text.length());
   }
   
   public void testHandwritten() throws Exception {

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java?rev=900196&r1=900195&r2=900196&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java Sun Jan 17 19:25:57 2010
@@ -79,7 +79,7 @@
     }
   }
 
-  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], Integer finalOffset) throws IOException {
     assertNotNull(output);
     CheckClearAttributesAttribute checkClearAtt = ts.addAttribute(CheckClearAttributesAttribute.class);
     
@@ -87,7 +87,7 @@
     TermAttribute termAtt = ts.getAttribute(TermAttribute.class);
     
     OffsetAttribute offsetAtt = null;
-    if (startOffsets != null || endOffsets != null) {
+    if (startOffsets != null || endOffsets != null || finalOffset != null) {
       assertTrue("has no OffsetAttribute", ts.hasAttribute(OffsetAttribute.class));
       offsetAtt = ts.getAttribute(OffsetAttribute.class);
     }
@@ -129,32 +129,45 @@
     }
     assertFalse("end of stream", ts.incrementToken());
     ts.end();
+    if (finalOffset != null)
+      assertEquals("finalOffset ", finalOffset.intValue(), offsetAtt.endOffset());
     ts.close();
   }
   
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, types, posIncrements, null);
+  }
+
   public static void assertTokenStreamContents(TokenStream ts, String[] output) throws IOException {
-    assertTokenStreamContents(ts, output, null, null, null, null);
+    assertTokenStreamContents(ts, output, null, null, null, null, null);
   }
   
   public static void assertTokenStreamContents(TokenStream ts, String[] output, String[] types) throws IOException {
-    assertTokenStreamContents(ts, output, null, null, types, null);
+    assertTokenStreamContents(ts, output, null, null, types, null, null);
   }
   
   public static void assertTokenStreamContents(TokenStream ts, String[] output, int[] posIncrements) throws IOException {
-    assertTokenStreamContents(ts, output, null, null, null, posIncrements);
+    assertTokenStreamContents(ts, output, null, null, null, posIncrements, null);
   }
   
   public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[]) throws IOException {
-    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null);
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, null);
+  }
+  
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], Integer finalOffset) throws IOException {
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, null, finalOffset);
   }
   
   public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements) throws IOException {
-    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements);
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, null);
   }
 
+  public static void assertTokenStreamContents(TokenStream ts, String[] output, int startOffsets[], int endOffsets[], int[] posIncrements, Integer finalOffset) throws IOException {
+    assertTokenStreamContents(ts, output, startOffsets, endOffsets, null, posIncrements, finalOffset);
+  }
   
   public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements);
+    assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, input.length());
   }
   
   public static void assertAnalyzesTo(Analyzer a, String input, String[] output) throws IOException {
@@ -179,7 +192,7 @@
   
 
   public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
-    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements);
+    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, input.length());
   }
   
   public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output) throws IOException {
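
Taken together, the test-infrastructure change is: assertTokenStreamContents
gains an optional trailing Integer finalOffset (null skips the check), and the
assertAnalyzesTo / assertAnalyzesToReuse entry points pass input.length()
automatically, so every analyzer test now exercises end() for free. A sketch of
the explicit form, reusing the values from testBigrams above:

    NGramTokenizer tokenizer = new NGramTokenizer(new StringReader("abcde"), 2, 2);
    assertTokenStreamContents(tokenizer,
        new String[] { "ab", "bc", "cd", "de" },  // expected terms
        new int[]    {  0,    1,    2,    3 },    // start offsets
        new int[]    {  2,    3,    4,    5 },    // end offsets
        5 /* finalOffset: end() must report the input length */);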