Posted to commits@lucene.apache.org by bu...@apache.org on 2011/02/22 02:01:11 UTC

svn commit: r1073192 [21/32] - in /lucene/dev/branches/realtime_search: ./ dev-tools/eclipse/ dev-tools/idea/.idea/ dev-tools/idea/lucene/contrib/ant/ dev-tools/idea/lucene/contrib/demo/ dev-tools/idea/lucene/contrib/highlighter/ dev-tools/idea/lucene/...

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.jflex?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.jflex (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.jflex Tue Feb 22 01:00:39 2011
@@ -77,6 +77,8 @@ ComplexContext = ([\p{LB:Complex_Context
 Han = ([\p{Script:Han}] | {HanSupp})
 Hiragana = ([\p{Script:Hiragana}] | {HiraganaSupp})
 
+// Script=Hangul & ALetter (intersection expressed via double negation)
+HangulEx       = (!(!\p{Script:Hangul}|!\p{WB:ALetter})) ({Format} | {Extend})*
 // UAX#29 WB4. X (Extend | Format)* --> X
 //
 ALetterEx      = {ALetter}                     ({Format} | {Extend})*
@@ -168,16 +170,16 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNam
 
 %{
   /** Alphanumeric sequences */
-  public static final String WORD_TYPE = "<ALPHANUM>";
+  public static final String WORD_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.ALPHANUM];
   
   /** Numbers */
-  public static final String NUMERIC_TYPE = "<NUM>";
+  public static final String NUMERIC_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.NUM];
   
   /** URLs with scheme: HTTP(S), FTP, or FILE; no-scheme URLs match HTTP syntax */
   public static final String URL_TYPE = "<URL>";
   
   /** E-mail addresses */
-  public static final String EMAIL_TYPE = "<EMAIL";
+  public static final String EMAIL_TYPE = "<EMAIL>";
   
   /**
    * Chars in class \p{Line_Break = Complex_Context} are from South East Asian
@@ -187,12 +189,16 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNam
    * <p>
    * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
    */
-  public static final String SOUTH_EAST_ASIAN_TYPE = "<SOUTHEAST_ASIAN>";
+  public static final String SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.SOUTHEAST_ASIAN];
   
-  public static final String IDEOGRAPHIC_TYPE = "<IDEOGRAPHIC>";
+  public static final String IDEOGRAPHIC_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.IDEOGRAPHIC];
   
-  public static final String HIRAGANA_TYPE = "<HIRAGANA>";
+  public static final String HIRAGANA_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HIRAGANA];
   
+  public static final String KATAKANA_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.KATAKANA];
+
+  public static final String HANGUL_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HANGUL];
+
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   private final PositionIncrementAttribute posIncrAtt 
@@ -316,6 +322,12 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNam
 {ExtendNumLetEx}* 
   { if (populateAttributes(NUMERIC_TYPE)) return true; }
 
+// subset of the below for typing purposes only!
+{HangulEx}+
+  { if (populateAttributes(HANGUL_TYPE)) return true; }
+
+{KatakanaEx}+
+  { if (populateAttributes(KATAKANA_TYPE)) return true; }
 
 // UAX#29 WB5.   ALetter × ALetter
 //        WB6.   ALetter × (MidLetter | MidNumLet) ALetter
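
Note on the hunks above: replacing the literal type strings with references into StandardTokenizer.TOKEN_TYPES keeps the type names identical across StandardTokenizer, UAX29URLEmailTokenizer, and the ICU tokenizer. A minimal sketch of what the shared table yields, assuming only the constants this diff already references (the class name is made up for illustration):

    import org.apache.lucene.analysis.standard.StandardTokenizer;

    // Illustrative only: every tokenizer that indexes into TOKEN_TYPES emits
    // exactly the same type strings, so "<HANGUL>" and "<KATAKANA>" cannot
    // drift apart between grammars.
    public class TokenTypeNames {
      public static void main(String[] args) {
        System.out.println(StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HANGUL]);   // prints <HANGUL>
        System.out.println(StandardTokenizer.TOKEN_TYPES[StandardTokenizer.KATAKANA]); // prints <KATAKANA>
      }
    }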

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java Tue Feb 22 01:00:39 2011
@@ -78,6 +78,7 @@ public class SynonymMap {
   }
 
 
+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder("<");
     if (synonyms!=null) {

Added: lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/util/FilteringTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/util/FilteringTokenFilter.java?rev=1073192&view=auto
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/util/FilteringTokenFilter.java (added)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/util/FilteringTokenFilter.java Tue Feb 22 01:00:39 2011
@@ -0,0 +1,96 @@
+package org.apache.lucene.analysis.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.queryParser.QueryParser; // for javadoc
+
+/**
+ * Abstract base class for TokenFilters that may remove tokens.
+ * You have to implement {@link #accept} and return a boolean if the current
+ * token should be preserved. {@link #incrementToken} uses this method
+ * to decide if a token should be passed to the caller.
+ */
+public abstract class FilteringTokenFilter extends TokenFilter {
+
+  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+  private boolean enablePositionIncrements; // no init needed, as ctor enforces setting value!
+
+  public FilteringTokenFilter(boolean enablePositionIncrements, TokenStream input){
+    super(input);
+    this.enablePositionIncrements = enablePositionIncrements;
+  }
+
+  /** Override this method and return if the current input token should be returned by {@link #incrementToken}. */
+  protected abstract boolean accept() throws IOException;
+
+  @Override
+  public final boolean incrementToken() throws IOException {
+    if (enablePositionIncrements) {
+      int skippedPositions = 0;
+      while (input.incrementToken()) {
+        if (accept()) {
+          if (skippedPositions != 0) {
+            posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement() + skippedPositions);
+          }
+          return true;
+        }
+        skippedPositions += posIncrAtt.getPositionIncrement();
+      }
+    } else {
+      while (input.incrementToken()) {
+        if (accept()) {
+          return true;
+        }
+      }
+    }
+    // reached EOS -- return false
+    return false;
+  }
+
+  /**
+   * @see #setEnablePositionIncrements(boolean)
+   */
+  public boolean getEnablePositionIncrements() {
+    return enablePositionIncrements;
+  }
+
+  /**
+   * If <code>true</code>, this TokenFilter will preserve
+   * positions of the incoming tokens (ie, accumulate and
+   * set position increments of the removed tokens).
+   * Generally, <code>true</code> is best as it does not
+   * lose information (positions of the original tokens)
+   * during indexing.
+   * 
+   * <p> When set, when a token is stopped
+   * (omitted), the position increment of the following
+   * token is incremented.
+   *
+   * <p> <b>NOTE</b>: be sure to also
+   * set {@link QueryParser#setEnablePositionIncrements} if
+   * you use QueryParser to create queries.
+   */
+  public void setEnablePositionIncrements(boolean enable) {
+    this.enablePositionIncrements = enable;
+  }
+}
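
The new FilteringTokenFilter above centralizes the accept()/position-increment pattern that token-removing filters share; the KeepWordFilter and LengthFilter test changes further down exercise exactly this behavior. A minimal sketch of a subclass, assuming only the API added here plus the standard CharTermAttribute; the class name and the three-character cutoff are invented for illustration:

    import java.io.IOException;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.util.FilteringTokenFilter;

    /** Drops tokens shorter than three characters; purely illustrative. */
    public final class ShortTokenDropFilter extends FilteringTokenFilter {
      private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

      public ShortTokenDropFilter(boolean enablePositionIncrements, TokenStream in) {
        super(enablePositionIncrements, in);
      }

      @Override
      protected boolean accept() throws IOException {
        // The base class's incrementToken() only emits tokens for which this
        // returns true, accumulating position increments of skipped tokens
        // when enablePositionIncrements is set.
        return termAtt.length() >= 3;
      }
    }

Constructed with enablePositionIncrements=true, such a filter preserves the positions of removed tokens, which is the behavior the updated LengthFilter and KeepWordFilter tests below assert.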

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java Tue Feb 22 01:00:39 2011
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 2/9/11 11:45 AM */
 
 package org.apache.lucene.analysis.wikipedia;
 
@@ -25,8 +25,8 @@ import org.apache.lucene.analysis.tokena
 /**
  * This class is a scanner generated by 
  * <a href="http://www.jflex.de/">JFlex</a> 1.5.0-SNAPSHOT
- * on 10/3/10 9:07 AM from the specification file
- * <tt>C:/Users/rmuir/workspace/lucene-clean/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex</tt>
+ * on 2/9/11 11:45 AM from the specification file
+ * <tt>C:/Users/rmuir/workspace/lucene-2911/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex</tt>
  */
 class WikipediaTokenizerImpl {
 
@@ -757,6 +757,12 @@ final int setText(StringBuilder buffer){
   
       zzState = ZZ_LEXSTATE[zzLexicalState];
 
+      // set up zzAction for empty match case:
+      int zzAttributes = zzAttrL[zzState];
+      if ( (zzAttributes & 1) == 1 ) {
+        zzAction = zzState;
+      }
+
 
       zzForAction: {
         while (true) {
@@ -789,7 +795,7 @@ final int setText(StringBuilder buffer){
           if (zzNext == -1) break zzForAction;
           zzState = zzNext;
 
-          int zzAttributes = zzAttrL[zzState];
+          zzAttributes = zzAttrL[zzState];
           if ( (zzAttributes & 1) == 1 ) {
             zzAction = zzState;
             zzMarkedPosL = zzCurrentPosL;

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java Tue Feb 22 01:00:39 2011
@@ -169,7 +169,7 @@ public class HTMLStripCharFilterTest ext
 
   public void testBufferOverflow() throws Exception {
     StringBuilder testBuilder = new StringBuilder(HTMLStripCharFilter.DEFAULT_READ_AHEAD + 50);
-    testBuilder.append("ah<?> ");
+    testBuilder.append("ah<?> ??????");
     appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500);
     processBuffer(testBuilder.toString(), "Failed on pseudo proc. instr.");//processing instructions
 

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java Tue Feb 22 01:00:39 2011
@@ -207,4 +207,16 @@ public class TestStandardAnalyzer extend
         new String[] {"𩬅", "艱", "鍟", "䇹", "愯", "瀛"},
         new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>" });
   }
+  
+  public void testKorean() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정음",
+        new String[] { "훈민정음" },
+        new String[] { "<HANGUL>" });
+  }
+  
+  public void testJapanese() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "仮名遣い カタカナ",
+        new String[] { "仮", "名", "遣", "い", "カタカナ" },
+        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<HIRAGANA>", "<KATAKANA>" });
+  }
 }

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java Tue Feb 22 01:00:39 2011
@@ -406,4 +406,16 @@ public class TestUAX29URLEmailTokenizer 
         new String[] {"𩬅", "艱", "鍟", "䇹", "愯", "瀛"},
         new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>" });
   }
+  
+  public void testKorean() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정음",
+        new String[] { "훈민정음" },
+        new String[] { "<HANGUL>" });
+  }
+  
+  public void testJapanese() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "仮名遣い カタカナ",
+        new String[] { "仮", "名", "遣", "い", "カタカナ" },
+        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<HIRAGANA>", "<KATAKANA>" });
+  }
 }

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java Tue Feb 22 01:00:39 2011
@@ -1,5 +1,22 @@
 package org.apache.lucene.analysis.el;
 
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java Tue Feb 22 01:00:39 2011
@@ -35,16 +35,26 @@ public class TestKeepWordFilter extends 
     words.add( "aaa" );
     words.add( "bbb" );
     
-    String input = "aaa BBB ccc ddd EEE";
+    String input = "xxx yyy aaa zzz BBB ccc ddd EEE";
     
     // Test Stopwords
     TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
-    stream = new KeepWordFilter(stream, new CharArraySet(TEST_VERSION_CURRENT, words, true));
-    assertTokenStreamContents(stream, new String[] { "aaa", "BBB" });
+    stream = new KeepWordFilter(true, stream, new CharArraySet(TEST_VERSION_CURRENT, words, true));
+    assertTokenStreamContents(stream, new String[] { "aaa", "BBB" }, new int[] { 3, 2 });
        
     // Now force case
     stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
-    stream = new KeepWordFilter(stream, new CharArraySet(TEST_VERSION_CURRENT,words, false));
-    assertTokenStreamContents(stream, new String[] { "aaa" });
+    stream = new KeepWordFilter(true, stream, new CharArraySet(TEST_VERSION_CURRENT,words, false));
+    assertTokenStreamContents(stream, new String[] { "aaa" }, new int[] { 3 });
+    
+    // Test Stopwords
+    stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
+    stream = new KeepWordFilter(false, stream, new CharArraySet(TEST_VERSION_CURRENT, words, true));
+    assertTokenStreamContents(stream, new String[] { "aaa", "BBB" }, new int[] { 1, 1 });
+       
+    // Now force case
+    stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
+    stream = new KeepWordFilter(false, stream, new CharArraySet(TEST_VERSION_CURRENT,words, false));
+    assertTokenStreamContents(stream, new String[] { "aaa" }, new int[] { 1 });
   }
 }

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java Tue Feb 22 01:00:39 2011
@@ -2,6 +2,7 @@ package org.apache.lucene.analysis.misce
 
 import java.io.IOException;
 import java.io.StringReader;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Locale;
 import java.util.Set;
@@ -57,6 +58,19 @@ public class TestKeywordMarkerFilter ext
             "The quIck browN LuceneFox Jumps")), set2)), output);
   }
 
+  // LUCENE-2901
+  public void testComposition() throws Exception {   
+    TokenStream ts = new LowerCaseFilterMock(
+                     new KeywordMarkerFilter(
+                     new KeywordMarkerFilter(
+                     new WhitespaceTokenizer(TEST_VERSION_CURRENT,
+                     new StringReader("Dogs Trees Birds Houses")),
+                     new HashSet<String>(Arrays.asList(new String[] { "Birds", "Houses" }))), 
+                     new HashSet<String>(Arrays.asList(new String[] { "Dogs", "Trees" }))));
+    
+    assertTokenStreamContents(ts, new String[] { "Dogs", "Trees", "Birds", "Houses" });
+  }
+  
   public static final class LowerCaseFilterMock extends TokenFilter {
 
     private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilter.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilter.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilter.java Tue Feb 22 01:00:39 2011
@@ -24,19 +24,24 @@ import java.io.StringReader;
 
 public class TestLengthFilter extends BaseTokenStreamTestCase {
   
-  public void testFilter() throws Exception {
+  public void testFilterNoPosIncr() throws Exception {
     TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, 
         new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
-    LengthFilter filter = new LengthFilter(stream, 2, 6);
-    CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
+    LengthFilter filter = new LengthFilter(false, stream, 2, 6);
+    assertTokenStreamContents(filter,
+      new String[]{"short", "ab", "foo"},
+      new int[]{1, 1, 1}
+    );
+  }
 
-    assertTrue(filter.incrementToken());
-    assertEquals("short", termAtt.toString());
-    assertTrue(filter.incrementToken());
-    assertEquals("ab", termAtt.toString());
-    assertTrue(filter.incrementToken());
-    assertEquals("foo", termAtt.toString());
-    assertFalse(filter.incrementToken());
+  public void testFilterWithPosIncr() throws Exception {
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, 
+        new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
+    LengthFilter filter = new LengthFilter(true, stream, 2, 6);
+    assertTokenStreamContents(filter,
+      new String[]{"short", "ab", "foo"},
+      new int[]{1, 4, 2}
+    );
   }
 
 }

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java Tue Feb 22 01:00:39 2011
@@ -47,6 +47,7 @@ public class TestRemoveDuplicatesTokenFi
           CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
           OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
           PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+          @Override
           public boolean incrementToken() {
             if (toks.hasNext()) {
               clearAttributes();

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java Tue Feb 22 01:00:39 2011
@@ -87,6 +87,7 @@ public class TestTrimFilter extends Base
       this(tokens.toArray(new Token[tokens.size()]));
     }
     
+    @Override
     public boolean incrementToken() throws IOException {
       if (index >= tokens.length)
         return false;

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java Tue Feb 22 01:00:39 2011
@@ -213,6 +213,7 @@ public class TestWordDelimiterFilter ext
     
     /* analyzer that uses whitespace + wdf */
     Analyzer a = new Analyzer() {
+      @Override
       public TokenStream tokenStream(String field, Reader reader) {
         return new WordDelimiterFilter(
             new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader),
@@ -239,6 +240,7 @@ public class TestWordDelimiterFilter ext
     
     /* analyzer that will consume tokens with large position increments */
     Analyzer a2 = new Analyzer() {
+      @Override
       public TokenStream tokenStream(String field, Reader reader) {
         return new WordDelimiterFilter(
             new LargePosIncTokenFilter(
@@ -271,6 +273,7 @@ public class TestWordDelimiterFilter ext
         new int[] { 1, 11, 1 });
 
     Analyzer a3 = new Analyzer() {
+      @Override
       public TokenStream tokenStream(String field, Reader reader) {
         StopFilter filter = new StopFilter(TEST_VERSION_CURRENT,
             new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), StandardAnalyzer.STOP_WORDS_SET);

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java?rev=1073192&r1=1073113&r2=1073192&view=diff
==============================================================================
    (empty)

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java Tue Feb 22 01:00:39 2011
@@ -76,7 +76,10 @@ public class QueryAutoStopWordAnalyzerTe
   private int search(Analyzer a, String queryString) throws IOException, ParseException {
     QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "repetitiveField", a);
     Query q = qp.parse(queryString);
-    return new IndexSearcher(reader).search(q, null, 1000).totalHits;
+    IndexSearcher searcher = newSearcher(reader);
+    int hits = searcher.search(q, null, 1000).totalHits;
+    searcher.close();
+    return hits;
   }
 
   public void testUninitializedAnalyzer() throws Exception {

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java Tue Feb 22 01:00:39 2011
@@ -395,6 +395,7 @@ public class TestSynonymFilter extends B
       this(tokens.toArray(new Token[tokens.size()]));
     }
     
+    @Override
     public boolean incrementToken() throws IOException {
       if (index >= tokens.length)
         return false;

Modified: lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java Tue Feb 22 01:00:39 2011
@@ -141,7 +141,7 @@ public abstract class CollationTestBase 
     writer.close();
 
     IndexReader reader = IndexReader.open(farsiIndex, true);
-    IndexSearcher search = new IndexSearcher(reader);
+    IndexSearcher search = newSearcher(reader);
         
     // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
     // orders the U+0698 character before the U+0633 character, so the single

Modified: lucene/dev/branches/realtime_search/modules/analysis/icu/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/icu/build.xml?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/icu/build.xml (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/icu/build.xml Tue Feb 22 01:00:39 2011
@@ -49,6 +49,7 @@
   <path id="test.classpath">
   	<pathelement path="${analyzers-common.jar}"/>
     <path refid="classpath"/>
+    <pathelement location="../../../lucene/build/classes/test-framework/"/>
     <pathelement location="../../../lucene/build/classes/test/"/>
   	<pathelement location="../build/common/classes/test/"/>
     <path refid="junit-path"/>

Modified: lucene/dev/branches/realtime_search/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java Tue Feb 22 01:00:39 2011
@@ -20,6 +20,8 @@ package org.apache.lucene.analysis.icu.s
 import java.io.IOException;
 import java.io.InputStream;
 
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+
 import com.ibm.icu.lang.UScript;
 import com.ibm.icu.text.BreakIterator;
 import com.ibm.icu.text.RuleBasedBreakIterator;
@@ -44,20 +46,24 @@ import com.ibm.icu.util.ULocale;
  */
 public class DefaultICUTokenizerConfig extends ICUTokenizerConfig {
   /** Token type for words containing ideographic characters */
-  public static final String WORD_IDEO = "<IDEOGRAPHIC>";
-  /** Token type for words containing Japanese kana */
-  public static final String WORD_KANA = "<KANA>";
+  public static final String WORD_IDEO = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.IDEOGRAPHIC];
+  /** Token type for words containing Japanese hiragana */
+  public static final String WORD_HIRAGANA = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HIRAGANA];
+  /** Token type for words containing Japanese katakana */
+  public static final String WORD_KATAKANA = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.KATAKANA];
+  /** Token type for words containing Korean hangul  */
+  public static final String WORD_HANGUL = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HANGUL];
   /** Token type for words that contain letters */
-  public static final String WORD_LETTER = "<ALPHANUM>";
+  public static final String WORD_LETTER = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.ALPHANUM];
   /** Token type for words that appear to be numbers */
-  public static final String WORD_NUMBER = "<NUM>";
+  public static final String WORD_NUMBER = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.NUM];
   
   /*
    * the default breakiterators in use. these can be expensive to
    * instantiate, cheap to clone.
    */  
   private static final BreakIterator rootBreakIterator = 
-    BreakIterator.getWordInstance(ULocale.ROOT);
+    readBreakIterator("Default.brk");
   private static final BreakIterator thaiBreakIterator = 
     BreakIterator.getWordInstance(new ULocale("th_TH"));
   private static final BreakIterator hebrewBreakIterator = 
@@ -87,9 +93,9 @@ public class DefaultICUTokenizerConfig e
       case RuleBasedBreakIterator.WORD_IDEO:
         return WORD_IDEO;
       case RuleBasedBreakIterator.WORD_KANA:
-        return WORD_KANA;
+        return script == UScript.HIRAGANA ? WORD_HIRAGANA : WORD_KATAKANA;
       case RuleBasedBreakIterator.WORD_LETTER:
-        return WORD_LETTER;
+        return script == UScript.HANGUL ? WORD_HANGUL : WORD_LETTER;
       case RuleBasedBreakIterator.WORD_NUMBER:
         return WORD_NUMBER;
       default: /* some other custom code */

Modified: lucene/dev/branches/realtime_search/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java Tue Feb 22 01:00:39 2011
@@ -17,8 +17,6 @@ package org.apache.lucene.analysis.icu.t
  * limitations under the License.
  */
 
-import java.io.Serializable;
-
 import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 
@@ -29,7 +27,7 @@ import com.ibm.icu.lang.UScript;
  * as an integer.
  * @lucene.experimental
  */
-public class ScriptAttributeImpl extends AttributeImpl implements ScriptAttribute, Cloneable, Serializable {
+public class ScriptAttributeImpl extends AttributeImpl implements ScriptAttribute, Cloneable {
   private int code = UScript.COMMON;
   
   public int getCode() {

Modified: lucene/dev/branches/realtime_search/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java Tue Feb 22 01:00:39 2011
@@ -128,11 +128,10 @@ public class TestICUTokenizer extends Ba
   
   /*
    * For chinese, tokenize as char (these can later form bigrams or whatever)
-   * TODO: why do full-width numerics have no word-break prop?
    */
   public void testChinese() throws Exception {
     assertAnalyzesTo(a, "我是中国人。 1234 Tests ",
-        new String[] { "我", "是", "中", "国", "人", "tests"});
+        new String[] { "我", "是", "中", "国", "人", "1234", "tests"});
   }
   
   public void testEmpty() throws Exception {
@@ -221,4 +220,16 @@ public class TestICUTokenizer extends Ba
         new String[] {"david", "has", "5000", "bones"},
         new String[] { "<ALPHANUM>", "<ALPHANUM>", "<NUM>", "<ALPHANUM>" });
   }
+  
+  public void testKorean() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정음",
+        new String[] { "훈민정음" },
+        new String[] { "<HANGUL>" });
+  }
+  
+  public void testJapanese() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a, "仮名遣い カタカナ",
+        new String[] { "仮", "名", "遣", "い", "カタカナ" },
+        new String[] { "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<IDEOGRAPHIC>", "<HIRAGANA>", "<KATAKANA>" });
+  }
 }

Modified: lucene/dev/branches/realtime_search/modules/analysis/phonetic/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/phonetic/build.xml?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/phonetic/build.xml (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/phonetic/build.xml Tue Feb 22 01:00:39 2011
@@ -48,6 +48,7 @@
   <path id="test.classpath">
   	<pathelement path="${analyzers-common.jar}"/>
     <path refid="classpath"/>
+    <pathelement location="../../../lucene/build/classes/test-framework/"/>
     <pathelement location="../../../lucene/build/classes/test/"/>
   	<pathelement location="../build/common/classes/test/"/>
     <path refid="junit-path"/>

Modified: lucene/dev/branches/realtime_search/modules/analysis/smartcn/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/smartcn/build.xml?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/smartcn/build.xml (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/smartcn/build.xml Tue Feb 22 01:00:39 2011
@@ -39,6 +39,7 @@
   <path id="test.classpath">
   	<pathelement path="${analyzers-common.jar}"/>
     <path refid="classpath"/>
+    <pathelement location="../../../lucene/build/classes/test-framework"/>
     <pathelement location="../../../lucene/build/classes/test/"/>
     <path refid="junit-path"/>
     <pathelement location="${build.dir}/classes/java"/>

Modified: lucene/dev/branches/realtime_search/modules/analysis/stempel/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/analysis/stempel/build.xml?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/analysis/stempel/build.xml (original)
+++ lucene/dev/branches/realtime_search/modules/analysis/stempel/build.xml Tue Feb 22 01:00:39 2011
@@ -38,6 +38,7 @@
 	
   <path id="test.classpath">
     <path refid="classpath"/>
+    <pathelement location="../../../lucene/build/classes/test-framework"/>
     <pathelement location="../../../lucene/build/classes/test/"/>
     <path refid="junit-path"/>
     <pathelement location="${build.dir}/classes/java"/>

Modified: lucene/dev/branches/realtime_search/modules/benchmark/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/CHANGES.txt?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/CHANGES.txt (original)
+++ lucene/dev/branches/realtime_search/modules/benchmark/CHANGES.txt Tue Feb 22 01:00:39 2011
@@ -2,6 +2,17 @@ Lucene Benchmark Contrib Change Log
 
 The Benchmark contrib package contains code for benchmarking Lucene in a variety of ways.
 
+02/05/2011
+  LUCENE-1540: Improvements to contrib.benchmark for TREC collections. 
+  ContentSource can now process plain text files, gzip files, and bzip2 files.
+  TREC doc parsing now handles the TREC gov2 collection and TREC disks 4&5-CR 
+  collection (both used by many TREC tasks). (Shai Erera, Doron Cohen)
+  
+01/26/2011
+  LUCENE-929: ExtractReuters first extracts to a tmp dir and then renames. That 
+  way, if a previous extract attempt failed, "ant extract-reuters" will still 
+  extract the files. (Shai Erera, Doron Cohen, Grant Ingersoll)
+
 01/24/2011
   LUCENE-2885: Add WaitForMerges task (calls IndexWriter.waitForMerges()).
   (Mike McCandless)

Modified: lucene/dev/branches/realtime_search/modules/benchmark/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/build.xml?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/build.xml (original)
+++ lucene/dev/branches/realtime_search/modules/benchmark/build.xml Tue Feb 22 01:00:39 2011
@@ -22,7 +22,6 @@
     <module-uptodate name="analysis/common" jarfile="${common.dir}/../modules/analysis/build/common/lucene-analyzers-common-${version}.jar"
       property="analyzers-common.uptodate" classpath.property="analyzers-common.jar"/>
     <contrib-uptodate name="memory" property="memory.uptodate" classpath.property="memory.jar"/>
-    <contrib-uptodate name="demo" property="demo.uptodate" classpath.property="demo.jar"/>
 
     <target name="check-files">
         <available file="temp/news20.tar.gz" property="news20.exists"/>
@@ -87,7 +86,6 @@
 
     </target>
     <target name="extract-reuters" depends="check-files" unless="reuters.extracted">
-        <mkdir dir="${working.dir}/reuters-out"/>
         <java classname="org.apache.lucene.benchmark.utils.ExtractReuters" maxmemory="1024M" fork="true">
             <classpath refid="run.classpath"/>
             <arg file="${working.dir}/reuters"/>
@@ -144,7 +142,6 @@
       <pathelement path="${memory.jar}"/>
       <pathelement path="${highlighter.jar}"/>
       <pathelement path="${analyzers-common.jar}"/>
-      <pathelement path="${demo.jar}"/>
       <path refid="base.classpath"/>
     	<fileset dir="lib">
     		<include name="**/*.jar"/>
@@ -233,11 +230,6 @@
       <echo>Benchmark output in JIRA table format is in file: ${shingle.jira.output.file}</echo>
     </target>
 
-    <target name="compile-demo" unless="demo.uptodate">
-      <subant target="default">
-         <fileset dir="${common.dir}/contrib/demo" includes="build.xml"/>
-      </subant>
-    </target>
     <target name="compile-highlighter" unless="highlighter.uptodate">
       <subant target="default">
          <fileset dir="${common.dir}/contrib/highlighter" includes="build.xml"/>
@@ -259,7 +251,19 @@
       </subant>
     </target>
 
-    <target name="init" depends="contrib-build.init,compile-demo,compile-memory,compile-highlighter,compile-analyzers-common"/>
+    <target name="init" depends="contrib-build.init,compile-memory,compile-highlighter,compile-analyzers-common"/>
   
+    <target name="clean-javacc">
+      <fileset dir="src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml" includes="*.java">
+	<containsregexp expression="Generated.*By.*JavaCC"/>
+      </fileset>
+    </target>
+    
+    <target name="javacc" depends="init,javacc-check" if="javacc.present">
+      <invoke-javacc target="src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml/HTMLParser.jj"
+                     outputDir="src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml"
+		     />
+    </target>
+    
     <target name="dist-maven" depends="jar-core,javadocs,contrib-build.dist-maven"/>
 </project>

Modified: lucene/dev/branches/realtime_search/modules/benchmark/conf/createLineFile.alg
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/conf/createLineFile.alg?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/conf/createLineFile.alg (original)
+++ lucene/dev/branches/realtime_search/modules/benchmark/conf/createLineFile.alg Tue Feb 22 01:00:39 2011
@@ -29,10 +29,14 @@
 #
 
 # Where to get documents from:
-content.source=org.apache.lucene.benchmark.byTask.feeds.ReutersContentSource
+content.source=org.apache.lucene.benchmark.byTask.feeds.EnwikiContentSource
 
 # Where to write the line file output:
-line.file.out=work/reuters.lines.txt
+line.file.out=/x/tmp/enwiki.out.txt
+
+docs.file=/x/lucene/data/enwiki/enwiki-20110115-pages-articles.xml
+
+keep.image.only.docs = false
 
 # Stop after processing the document feed once:
 content.source.forever=false

Added: lucene/dev/branches/realtime_search/modules/benchmark/lib/xercesImpl-2.9.1-patched-XERCESJ-1257.jar
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/lib/xercesImpl-2.9.1-patched-XERCESJ-1257.jar?rev=1073192&view=auto
==============================================================================
Binary file - no diff available.

Added: lucene/dev/branches/realtime_search/modules/benchmark/lib/xml-apis-2.9.0.jar
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/lib/xml-apis-2.9.0.jar?rev=1073192&view=auto
==============================================================================
Binary file - no diff available.

Modified: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ContentSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ContentSource.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ContentSource.java (original)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ContentSource.java Tue Feb 22 01:00:39 2011
@@ -56,11 +56,14 @@ import org.apache.lucene.benchmark.byTas
 public abstract class ContentSource {
   
   private static final int BZIP = 0;
-  private static final int OTHER = 1;
+  private static final int GZIP = 1;
+  private static final int OTHER = 2;
   private static final Map<String,Integer> extensionToType = new HashMap<String,Integer>();
   static {
     extensionToType.put(".bz2", Integer.valueOf(BZIP));
     extensionToType.put(".bzip", Integer.valueOf(BZIP));
+    extensionToType.put(".gz", Integer.valueOf(GZIP));
+    extensionToType.put(".gzip", Integer.valueOf(GZIP));
   }
   
   protected static final int BUFFER_SIZE = 1 << 16; // 64K
@@ -78,11 +81,13 @@ public abstract class ContentSource {
   
   private CompressorStreamFactory csFactory = new CompressorStreamFactory();
 
+  /** update count of bytes generated by this source */  
   protected final synchronized void addBytes(long numBytes) {
     bytesCount += numBytes;
     totalBytesCount += numBytes;
   }
   
+  /** update count of documents generated by this source */  
   protected final synchronized void addDoc() {
     ++docsCount;
     ++totalDocsCount;
@@ -130,21 +135,25 @@ public abstract class ContentSource {
         type = typeInt.intValue();
       }
     }
-    switch (type) {
-      case BZIP:
-        try {
+    
+    try {
+      switch (type) {
+        case BZIP:
           // According to BZip2CompressorInputStream's code, it reads the first 
           // two file header chars ('B' and 'Z'). It is important to wrap the
           // underlying input stream with a buffered one since
           // Bzip2CompressorInputStream uses the read() method exclusively.
           is = csFactory.createCompressorInputStream("bzip2", is);
-        } catch (CompressorException e) {
-          IOException ioe = new IOException(e.getMessage());
-          ioe.initCause(e);
-          throw ioe;
-        }
-        break;
-      default: // Do nothing, stay with FileInputStream
+          break;
+        case GZIP:
+          is = csFactory.createCompressorInputStream("gz", is);
+          break;
+        default: // Do nothing, stay with FileInputStream
+      }
+    } catch (CompressorException e) {
+      IOException ioe = new IOException(e.getMessage());
+      ioe.initCause(e);
+      throw ioe;
     }
     
     return is;
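
The reworked getInputStream above now routes both bzip2 and gzip files through Commons Compress. A standalone sketch of the same wrapping for a .gz file, assuming commons-compress is on the classpath; the file name and class name are hypothetical:

    import java.io.BufferedInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.commons.compress.compressors.CompressorException;
    import org.apache.commons.compress.compressors.CompressorStreamFactory;

    public class GzipContentExample {
      public static void main(String[] args) throws IOException {
        // Buffering matters: the compressor streams read the underlying stream
        // in small increments, as the bzip2 comment in the diff notes.
        InputStream raw = new BufferedInputStream(new FileInputStream("docs.trec.gz")); // hypothetical file
        try {
          InputStream is = new CompressorStreamFactory().createCompressorInputStream("gz", raw);
          System.out.println("first decompressed byte: " + is.read());
          is.close();
        } catch (CompressorException e) {
          // Mirror the diff: rewrap as IOException so callers see a single exception type.
          IOException ioe = new IOException(e.getMessage());
          ioe.initCause(e);
          throw ioe;
        }
      }
    }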

Modified: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java (original)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java Tue Feb 22 01:00:39 2011
@@ -29,11 +29,14 @@ import java.util.Properties;
  */
 public class DemoHTMLParser implements org.apache.lucene.benchmark.byTask.feeds.HTMLParser {
 
-  public DocData parse(DocData docData, String name, Date date, Reader reader, DateFormat dateFormat) throws IOException, InterruptedException {
-    org.apache.lucene.demo.html.HTMLParser p = new org.apache.lucene.demo.html.HTMLParser(reader);
+  public DocData parse(DocData docData, String name, Date date, String title, Reader reader, DateFormat dateFormat) throws IOException, InterruptedException {
+    org.apache.lucene.benchmark.byTask.feeds.demohtml.HTMLParser p = new org.apache.lucene.benchmark.byTask.feeds.demohtml.HTMLParser(reader);
     
     // title
-    String title = p.getTitle();
+    if (title==null) {
+      title = p.getTitle();
+    }
+    
     // properties 
     Properties props = p.getMetaTags(); 
     // body

Modified: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/HTMLParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/HTMLParser.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/HTMLParser.java (original)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/HTMLParser.java Tue Feb 22 01:00:39 2011
@@ -29,16 +29,18 @@ public interface HTMLParser {
 
   /**
    * Parse the input Reader and return DocData. 
-   * A provided name or date is used for the result, otherwise an attempt is 
-   * made to set them from the parsed data.
-   * @param dateFormat date formatter to use for extracting the date.   
-   * @param name name of the result doc data. If null, attempt to set by parsed data.
+   * The provided name, title, and date are used for the result unless they are null, 
+   * in which case an attempt is made to set them from the parsed data.
+   * @param docData result reused
+   * @param name name of the result doc data.
    * @param date date of the result doc data. If null, attempt to set by parsed data.
-   * @param reader of html text to parse.
+   * @param title title of the result doc data. If null, attempt to set by parsed data.
+   * @param reader reader of html text to parse.
+   * @param dateFormat date formatter to use for extracting the date.   
    * @return Parsed doc data.
    * @throws IOException
    * @throws InterruptedException
    */
-  public DocData parse(DocData docData, String name, Date date, Reader reader, DateFormat dateFormat) throws IOException, InterruptedException;
+  public DocData parse(DocData docData, String name, Date date, String title, Reader reader, DateFormat dateFormat) throws IOException, InterruptedException;
 
 }

Modified: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java (original)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java Tue Feb 22 01:00:39 2011
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.lucene.benchmark.byTask.feeds;
 
 import org.apache.lucene.analysis.Analyzer;

Modified: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java?rev=1073192&r1=1073191&r2=1073192&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java (original)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java Tue Feb 22 01:00:39 2011
@@ -19,8 +19,8 @@ package org.apache.lucene.benchmark.byTa
 
 import java.io.BufferedReader;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.Reader;
 import java.text.DateFormat;
@@ -29,8 +29,8 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.Locale;
-import java.util.zip.GZIPInputStream;
 
+import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser.ParsePathType;
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.benchmark.byTask.utils.StringBuilderReader;
 import org.apache.lucene.util.ThreadInterruptedException;
@@ -46,8 +46,10 @@ import org.apache.lucene.util.ThreadInte
  * <li><b>docs.dir</b> - specifies the directory where the TREC files reside.
  * Can be set to a relative path if "work.dir" is also specified
  * (<b>default=trec</b>).
+ * <li><b>trec.doc.parser</b> - specifies the {@link TrecDocParser} class to use for
+ * parsing the TREC documents content (<b>default=TrecGov2Parser</b>).
  * <li><b>html.parser</b> - specifies the {@link HTMLParser} class to use for
- * parsing the TREC documents content (<b>default=DemoHTMLParser</b>).
+ * parsing the HTML parts of the TREC documents content (<b>default=DemoHTMLParser</b>).
  * <li><b>content.source.encoding</b> - if not specified, ISO-8859-1 is used.
  * <li><b>content.source.excludeIteration</b> - if true, do not append iteration number to docname
  * </ul>
@@ -59,22 +61,24 @@ public class TrecContentSource extends C
     ParsePosition pos;
   }
 
-  private static final String DATE = "Date: ";
-  private static final String DOCHDR = "<DOCHDR>";
-  private static final String TERMINATING_DOCHDR = "</DOCHDR>";
-  private static final String DOCNO = "<DOCNO>";
-  private static final String TERMINATING_DOCNO = "</DOCNO>";
-  private static final String DOC = "<DOC>";
-  private static final String TERMINATING_DOC = "</DOC>";
+  public static final String DOCNO = "<DOCNO>";
+  public static final String TERMINATING_DOCNO = "</DOCNO>";
+  public static final String DOC = "<DOC>";
+  public static final String TERMINATING_DOC = "</DOC>";
 
-  private static final String NEW_LINE = System.getProperty("line.separator");
+  /** Separator between lines in the buffer */ 
+  public static final String NEW_LINE = System.getProperty("line.separator");
 
   private static final String DATE_FORMATS [] = {
-       "EEE, dd MMM yyyy kk:mm:ss z",	  // Tue, 09 Dec 2003 22:39:08 GMT
-       "EEE MMM dd kk:mm:ss yyyy z",  	// Tue Dec 09 16:45:08 2003 EST
-       "EEE, dd-MMM-':'y kk:mm:ss z", 	// Tue, 09 Dec 2003 22:39:08 GMT
-       "EEE, dd-MMM-yyy kk:mm:ss z", 	  // Tue, 09 Dec 2003 22:39:08 GMT
-       "EEE MMM dd kk:mm:ss yyyy",  	  // Tue Dec 09 16:45:08 2003
+       "EEE, dd MMM yyyy kk:mm:ss z",   // Tue, 09 Dec 2003 22:39:08 GMT
+       "EEE MMM dd kk:mm:ss yyyy z",    // Tue Dec 09 16:45:08 2003 EST
+       "EEE, dd-MMM-':'y kk:mm:ss z",   // Tue, 09 Dec 2003 22:39:08 GMT
+       "EEE, dd-MMM-yyy kk:mm:ss z",    // Tue, 09 Dec 2003 22:39:08 GMT
+       "EEE MMM dd kk:mm:ss yyyy",      // Tue Dec 09 16:45:08 2003
+       "dd MMM yyyy",                   // 1 March 1994
+       "MMM dd, yyyy",                  // February 3, 1994
+       "yyMMdd",                        // 910513
+       "hhmm z.z.z. MMM dd, yyyy",       // 0901 u.t.c. April 28, 1994
   };
 
   private ThreadLocal<DateFormatInfo> dateFormats = new ThreadLocal<DateFormatInfo>();
@@ -83,7 +87,7 @@ public class TrecContentSource extends C
   private File dataDir = null;
   private ArrayList<File> inputFiles = new ArrayList<File>();
   private int nextFile = 0;
-  private int rawDocSize;
+  private int rawDocSize = 0;
 
   // Use to synchronize threads on reading from the TREC documents.
   private Object lock = new Object();
@@ -92,7 +96,10 @@ public class TrecContentSource extends C
   BufferedReader reader;
   int iteration = 0;
   HTMLParser htmlParser;
+  
   private boolean excludeDocnameIteration;
+  private TrecDocParser trecDocParser = new TrecGov2Parser(); // default
+  ParsePathType currPathType; // not private for tests
   
   private DateFormatInfo getDateFormatInfo() {
     DateFormatInfo dfi = dateFormats.get();
@@ -118,7 +125,7 @@ public class TrecContentSource extends C
     return sb;
   }
   
-  private Reader getTrecDocReader(StringBuilder docBuffer) {
+  Reader getTrecDocReader(StringBuilder docBuffer) {
     StringBuilderReader r = trecDocReader.get();
     if (r == null) {
       r = new StringBuilderReader(docBuffer);
@@ -129,10 +136,21 @@ public class TrecContentSource extends C
     return r;
   }
 
-  // read until finding a line that starts with the specified prefix, or a terminating tag has been found.
-  private void read(StringBuilder buf, String prefix, boolean collectMatchLine,
-                    boolean collectAll, String terminatingTag)
-      throws IOException, NoMoreDataException {
+  HTMLParser getHtmlParser() {
+    return htmlParser;
+  }
+  
+  /**
+   * Read until a line starting with the specified <code>lineStart</code>.
+   * @param buf buffer for collecting the data, if so specified.
+   * @param lineStart line start to look for, must not be null.
+   * @param collectMatchLine whether to collect the matching line into <code>buf</code>.
+   * @param collectAll whether to collect all lines into <code>buf</code>.
+   * @throws IOException
+   * @throws NoMoreDataException
+   */
+   private void read(StringBuilder buf, String lineStart, 
+       boolean collectMatchLine, boolean collectAll) throws IOException, NoMoreDataException {
     String sep = "";
     while (true) {
       String line = reader.readLine();
@@ -144,20 +162,12 @@ public class TrecContentSource extends C
 
       rawDocSize += line.length();
 
-      if (line.startsWith(prefix)) {
+      if (lineStart!=null && line.startsWith(lineStart)) {
         if (collectMatchLine) {
           buf.append(sep).append(line);
           sep = NEW_LINE;
         }
-        break;
-      }
-
-      if (terminatingTag != null && line.startsWith(terminatingTag)) {
-        // didn't find the prefix that was asked, but the terminating
-        // tag was found. set the length to 0 to signal no match was
-        // found.
-        buf.setLength(0);
-        break;
+        return;
       }
 
       if (collectAll) {
@@ -169,7 +179,7 @@ public class TrecContentSource extends C
   
   void openNextFile() throws NoMoreDataException, IOException {
     close();
-    int retries = 0;
+    currPathType = null;
     while (true) {
       if (nextFile >= inputFiles.size()) { 
         // exhausted files, start a new round, unless forever set to false.
@@ -184,13 +194,13 @@ public class TrecContentSource extends C
         System.out.println("opening: " + f + " length: " + f.length());
       }
       try {
-        GZIPInputStream zis = new GZIPInputStream(new FileInputStream(f), BUFFER_SIZE);
-        reader = new BufferedReader(new InputStreamReader(zis, encoding), BUFFER_SIZE);
+        InputStream inputStream = getInputStream(f); // support either gzip, bzip2, or regular text file, by extension  
+        reader = new BufferedReader(new InputStreamReader(inputStream, encoding), BUFFER_SIZE);
+        currPathType = TrecDocParser.pathType(f);
         return;
       } catch (Exception e) {
-        retries++;
-        if (retries < 20 && verbose) {
-          System.out.println("Skipping 'bad' file " + f.getAbsolutePath() + "  #retries=" + retries);
+        if (verbose) {
+          System.out.println("Skipping 'bad' file " + f.getAbsolutePath()+" due to "+e.getMessage());
           continue;
         }
         throw new NoMoreDataException();
@@ -198,7 +208,7 @@ public class TrecContentSource extends C
     }
   }
 
-  Date parseDate(String dateStr) {
+  public Date parseDate(String dateStr) {
     dateStr = dateStr.trim();
     DateFormatInfo dfi = getDateFormatInfo();
     for (int i = 0; i < dfi.dfs.length; i++) {
@@ -237,70 +247,47 @@ public class TrecContentSource extends C
 
   @Override
   public DocData getNextDocData(DocData docData) throws NoMoreDataException, IOException {
-    String dateStr = null, name = null;
-    Reader r = null;
+    String name = null;
+    StringBuilder docBuf = getDocBuffer();
+    ParsePathType parsedPathType;
+    
     // protect reading from the TREC files by multiple threads. The rest of the
-    // method, i.e., parsing the content and returning the DocData can run
-    // unprotected.
+    // method, i.e., parsing the content and returning the DocData can run unprotected.
     synchronized (lock) {
       if (reader == null) {
         openNextFile();
       }
-
-      StringBuilder docBuf = getDocBuffer();
       
-      // 1. skip until doc start
+      // 1. skip until doc start - required for all TREC formats
       docBuf.setLength(0);
-      read(docBuf, DOC, false, false, null);
-
-      // 2. name
+      read(docBuf, DOC, false, false);
+      
+      // save the parsed path type for passing to trecDocParser after the sync
+      // block, in case another thread opens another file in the meantime.
+      parsedPathType = currPathType;
+      
+      // 2. name - required for all TREC formats
       docBuf.setLength(0);
-      read(docBuf, DOCNO, true, false, null);
+      read(docBuf, DOCNO, true, false);
       name = docBuf.substring(DOCNO.length(), docBuf.indexOf(TERMINATING_DOCNO,
-          DOCNO.length()));
-      if (!excludeDocnameIteration)
+          DOCNO.length())).trim();
+      
+      if (!excludeDocnameIteration) {
         name = name + "_" + iteration;
-
-      // 3. skip until doc header
-      docBuf.setLength(0);
-      read(docBuf, DOCHDR, false, false, null);
-
-      boolean findTerminatingDocHdr = false;
-
-      // 4. date - look for the date only until /DOCHDR
-      docBuf.setLength(0);
-      read(docBuf, DATE, true, false, TERMINATING_DOCHDR);
-      if (docBuf.length() != 0) {
-        // Date found.
-        dateStr = docBuf.substring(DATE.length());
-        findTerminatingDocHdr = true;
       }
 
-      // 5. skip until end of doc header
-      if (findTerminatingDocHdr) {
-        docBuf.setLength(0);
-        read(docBuf, TERMINATING_DOCHDR, false, false, null);
-      }
-
-      // 6. collect until end of doc
+      // 3. read all until end of doc
       docBuf.setLength(0);
-      read(docBuf, TERMINATING_DOC, false, true, null);
-      
-      // 7. Set up a Reader over the read content
-      r = getTrecDocReader(docBuf);
-      // Resetting the thread's reader means it will reuse the instance
-      // allocated as well as re-read from docBuf.
-      r.reset();
-      
-      // count char length of parsed html text (larger than the plain doc body text).
-      addBytes(docBuf.length()); 
+      read(docBuf, TERMINATING_DOC, false, true);
     }
+      
+    // count char length of text to be parsed (may be larger than the resulting plain doc body text).
+    addBytes(docBuf.length()); 
 
     // This code segment relies on HtmlParser being thread safe. When we get 
     // here, everything else is already private to that thread, so we're safe.
-    Date date = dateStr != null ? parseDate(dateStr) : null;
     try {
-      docData = htmlParser.parse(docData, name, date, r, null);
+      docData = trecDocParser.parse(docData, name, this, docBuf, parsedPathType);
       addDoc();
     } catch (InterruptedException ie) {
       throw new ThreadInterruptedException(ie);
@@ -322,27 +309,40 @@ public class TrecContentSource extends C
   @Override
   public void setConfig(Config config) {
     super.setConfig(config);
+    // dirs
     File workDir = new File(config.get("work.dir", "work"));
     String d = config.get("docs.dir", "trec");
     dataDir = new File(d);
     if (!dataDir.isAbsolute()) {
       dataDir = new File(workDir, d);
     }
+    // files
     collectFiles(dataDir, inputFiles);
     if (inputFiles.size() == 0) {
       throw new IllegalArgumentException("No files in dataDir: " + dataDir);
     }
+    // trec doc parser
+    try {
+      String trecDocParserClassName = config.get("trec.doc.parser", "org.apache.lucene.benchmark.byTask.feeds.TrecGov2Parser");
+      trecDocParser = Class.forName(trecDocParserClassName).asSubclass(TrecDocParser.class).newInstance();
+    } catch (Exception e) {
+      // Should not get here. Throw runtime exception.
+      throw new RuntimeException(e);
+    }
+    // html parser
     try {
-      String parserClassName = config.get("html.parser",
+      String htmlParserClassName = config.get("html.parser",
           "org.apache.lucene.benchmark.byTask.feeds.DemoHTMLParser");
-      htmlParser = Class.forName(parserClassName).asSubclass(HTMLParser.class).newInstance();
+      htmlParser = Class.forName(htmlParserClassName).asSubclass(HTMLParser.class).newInstance();
     } catch (Exception e) {
       // Should not get here. Throw runtime exception.
       throw new RuntimeException(e);
     }
+    // encoding
     if (encoding == null) {
       encoding = "ISO-8859-1";
     }
+    // iteration exclusion in doc name 
     excludeDocnameIteration = config.get("content.source.excludeIteration", false);
   }
 

Added: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java?rev=1073192&view=auto
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java (added)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java Tue Feb 22 01:00:39 2011
@@ -0,0 +1,136 @@
+package org.apache.lucene.benchmark.byTask.feeds;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+/** 
+ * Parser for TREC doc content, invoked on doc text excluding <DOC> and <DOCNO>,
+ * which are handled in TrecContentSource. Required to be stateless and hence thread safe. 
+ */
+public abstract class TrecDocParser {
+
+  /** Types of TREC parse paths. */
+  public enum ParsePathType { GOV2, FBIS, FT, FR94, LATIMES }
+  
+  /** default TREC parse path type, used when the type cannot be determined from the file path */
+  public static final ParsePathType DEFAULT_PATH_TYPE  = ParsePathType.GOV2;
+
+  static final Map<ParsePathType,TrecDocParser> pathType2parser = new HashMap<ParsePathType,TrecDocParser>();
+  static {
+    pathType2parser.put(ParsePathType.GOV2, new TrecGov2Parser());
+    pathType2parser.put(ParsePathType.FBIS, new TrecFBISParser());
+    pathType2parser.put(ParsePathType.FR94, new TrecFR94Parser());
+    pathType2parser.put(ParsePathType.FT, new TrecFTParser());
+    pathType2parser.put(ParsePathType.LATIMES, new TrecLATimesParser());
+  }
+
+  static final Map<String,ParsePathType> pathName2Type = new HashMap<String,ParsePathType>();
+  static {
+    for (ParsePathType ppt : ParsePathType.values()) {
+      pathName2Type.put(ppt.name().toUpperCase(Locale.ENGLISH),ppt);
+    }
+  }
+  
+  /** max number of levels to walk up from a file to its ancestors when looking for a known path type */ 
+  private static final int MAX_PATH_LENGTH = 10;
+  
+  /**
+   * Compute the path type of a file by inspecting name of file and its parents
+   */
+  public static ParsePathType pathType(File f) {
+    int pathLength = 0;
+    while (f != null && ++pathLength < MAX_PATH_LENGTH) {
+      ParsePathType ppt = pathName2Type.get(f.getName().toUpperCase(Locale.ENGLISH));
+      if (ppt!=null) {
+        return ppt;
+      }
+      f = f.getParentFile();
+    }
+    return DEFAULT_PATH_TYPE;
+  }
+  
+  /** 
+   * Parse the text prepared in docBuf into a result DocData; 
+   * no synchronization is required.
+   * @param docData reusable result
+   * @param name name that should be set to the result
+   * @param trecSrc calling trec content source  
+   * @param docBuf text to parse  
+   * @param pathType type of parsed file, or null if unknown - may be used by 
+   * parsers to alter their behavior according to the file path type. 
+   */  
+  public abstract DocData parse(DocData docData, String name, TrecContentSource trecSrc, 
+      StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException;
+  
+  /** 
+   * Strip tags from <code>buf</code>: each tag is replaced by a single blank.
+   * @return text obtained when stripping all tags from <code>buf</code> (the input StringBuilder is unmodified).
+   */
+  public static String stripTags(StringBuilder buf, int start) {
+    return stripTags(buf.substring(start),0);
+  }
+
+  /** 
+   * Strip tags from the input string.
+   * @see #stripTags(StringBuilder, int)
+   */
+  public static String stripTags(String buf, int start) {
+    if (start>0) {
+      buf = buf.substring(start);
+    }
+    return buf.replaceAll("<[^>]*>", " ");
+  }
+  
+  /**
+   * Extract from <code>buf</code> the text of interest within specified tags
+   * @param buf entire input text
+   * @param startTag tag marking start of text of interest 
+   * @param endTag tag marking end of text of interest
+   * @param maxPos if &ge; 0 sets a limit on start of text of interest
+   * @return text of interest or null if not found
+   */
+  public static String extract(StringBuilder buf, String startTag, String endTag, int maxPos, String noisePrefixes[]) {
+    int k1 = buf.indexOf(startTag);
+    if (k1>=0 && (maxPos<0 || k1<maxPos)) {
+      k1 += startTag.length();
+      int k2 = buf.indexOf(endTag,k1);
+      if (k2>=0 && (maxPos<0 || k2<maxPos)) { // found end tag with allowed range
+        if (noisePrefixes != null) {
+          for (String noise : noisePrefixes) {
+            int k1a = buf.indexOf(noise,k1);
+            if (k1a>=0 && k1a<k2) {
+              k1 = k1a + noise.length();
+            }
+          }          
+        }
+        return buf.substring(k1,k2).trim();
+      }
+    }
+    return null;
+  }
+
+  //public static void main(String[] args) {
+  //  System.out.println(stripTags("is it true that<space>2<<second space>><almost last space>1<one more space>?",0));
+  //}
+
+}
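
For illustration, here is a minimal sketch (not part of this patch) of the static helpers above; the sample markup and the file path are made up:

    import java.io.File;
    import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser;
    import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser.ParsePathType;

    public class TrecParserHelpersExample {
      public static void main(String[] args) {
        // The path type is taken from a known ancestor directory name (FBIS, FT, ...),
        // falling back to DEFAULT_PATH_TYPE (GOV2) when none is found.
        ParsePathType type = TrecDocParser.pathType(new File("work/trec/FBIS/fb396001"));
        System.out.println(type);  // FBIS

        StringBuilder docBuf = new StringBuilder(
            "<HEADER><DATE1>  1 March 1994  </DATE1></HEADER><TEXT>Some <B>bold</B> text.</TEXT>");
        // Text between the start and end tags, trimmed; -1 means no position limit.
        System.out.println(TrecDocParser.extract(docBuf, "<DATE1>", "</DATE1>", -1, null));
        // Each tag is replaced by a single blank.
        System.out.println(TrecDocParser.stripTags(docBuf, 0));
      }
    }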

Added: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFBISParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFBISParser.java?rev=1073192&view=auto
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFBISParser.java (added)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFBISParser.java Tue Feb 22 01:00:39 2011
@@ -0,0 +1,65 @@
+package org.apache.lucene.benchmark.byTask.feeds;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Date;
+
+/**
+ * Parser for the FBIS docs in the TREC disks 4+5 collection format
+ */
+public class TrecFBISParser extends TrecDocParser {
+
+  private static final String HEADER = "<HEADER>";
+  private static final String HEADER_END = "</HEADER>";
+  private static final int HEADER_END_LENGTH = HEADER_END.length();
+  
+  private static final String DATE1 = "<DATE1>";
+  private static final String DATE1_END = "</DATE1>";
+  
+  private static final String TI = "<TI>";
+  private static final String TI_END = "</TI>";
+
+  @Override
+  public DocData parse(DocData docData, String name, TrecContentSource trecSrc, 
+      StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException {
+    int mark = 0; // content before this position is skipped
+    // optionally skip some of the text, set date, title
+    Date date = null;
+    String title = null;
+    int h1 = docBuf.indexOf(HEADER);
+    if (h1>=0) {
+      int h2 = docBuf.indexOf(HEADER_END,h1);
+      mark = h2+HEADER_END_LENGTH;
+      // date...
+      String dateStr = extract(docBuf, DATE1, DATE1_END, h2, null);
+      if (dateStr != null) {
+        date = trecSrc.parseDate(dateStr);
+      }
+      // title...
+      title = extract(docBuf, TI, TI_END, h2, null);
+    }
+    docData.clear();
+    docData.setName(name);
+    docData.setDate(date);
+    docData.setTitle(title);
+    docData.setBody(stripTags(docBuf, mark).toString());
+    return docData;
+  }
+
+}
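
For illustration, a minimal sketch (not part of this patch) of running TrecFBISParser standalone on a tiny made-up FBIS-style fragment; normally TrecContentSource invokes the parser, and here a bare TrecContentSource instance is created only so the parser can call its parseDate():

    import org.apache.lucene.benchmark.byTask.feeds.DocData;
    import org.apache.lucene.benchmark.byTask.feeds.TrecContentSource;
    import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser.ParsePathType;
    import org.apache.lucene.benchmark.byTask.feeds.TrecFBISParser;

    public class TrecFBISParserExample {
      public static void main(String[] args) throws Exception {
        StringBuilder fbis = new StringBuilder(
            "<HEADER><DATE1> 1 March 1994 </DATE1><TI> Sample Title </TI></HEADER>" +
            "Body text of the document.");
        DocData dd = new TrecFBISParser().parse(new DocData(), "FBIS3-0001_0",
            new TrecContentSource(), fbis, ParsePathType.FBIS);
        // Title and date come from the <TI> and <DATE1> elements inside the header;
        // the body is everything after </HEADER> with tags replaced by blanks.
        System.out.println(dd.getTitle() + " : " + dd.getBody());
      }
    }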

Added: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFR94Parser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFR94Parser.java?rev=1073192&view=auto
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFR94Parser.java (added)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFR94Parser.java Tue Feb 22 01:00:39 2011
@@ -0,0 +1,66 @@
+package org.apache.lucene.benchmark.byTask.feeds;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Date;
+
+/**
+ * Parser for the FR94 docs in the TREC disks 4+5 collection format
+ */
+public class TrecFR94Parser extends TrecDocParser {
+
+  private static final String TEXT = "<TEXT>";
+  private static final int TEXT_LENGTH = TEXT.length();
+  private static final String TEXT_END = "</TEXT>";
+  
+  private static final String DATE = "<DATE>";
+  private static final String[] DATE_NOISE_PREFIXES = {
+    "DATE:",
+    "date:", //TODO improve date extraction for this format
+    "t.c.",
+  };
+  private static final String DATE_END = "</DATE>";
+  
+  //TODO can we also extract title for this format?
+  
+  @Override
+  public DocData parse(DocData docData, String name, TrecContentSource trecSrc, 
+      StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException {
+    int mark = 0; // content before this position is skipped
+    // optionally skip some of the text, set date (no title?)
+    Date date = null;
+    int h1 = docBuf.indexOf(TEXT);
+    if (h1>=0) {
+      int h2 = docBuf.indexOf(TEXT_END,h1);
+      mark = h1+TEXT_LENGTH;
+      // date...
+      String dateStr = extract(docBuf, DATE, DATE_END, h2, DATE_NOISE_PREFIXES);
+      if (dateStr != null) {
+        dateStr = stripTags(dateStr,0).toString();
+        date = trecSrc.parseDate(dateStr.trim());
+      }
+    }
+    docData.clear();
+    docData.setName(name);
+    docData.setDate(date);
+    docData.setBody(stripTags(docBuf, mark).toString());
+    return docData;
+  }
+
+}

Added: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFTParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFTParser.java?rev=1073192&view=auto
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFTParser.java (added)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFTParser.java Tue Feb 22 01:00:39 2011
@@ -0,0 +1,57 @@
+package org.apache.lucene.benchmark.byTask.feeds;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Date;
+
+/**
+ * Parser for the FT docs in the TREC disks 4+5 collection format
+ */
+public class TrecFTParser extends TrecDocParser {
+
+  private static final String DATE = "<DATE>";
+  private static final String DATE_END = "</DATE>";
+  
+  private static final String HEADLINE = "<HEADLINE>";
+  private static final String HEADLINE_END = "</HEADLINE>";
+
+  @Override
+  public DocData parse(DocData docData, String name, TrecContentSource trecSrc, 
+      StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException {
+    int mark = 0; // content before this position is skipped
+
+    // date...
+    Date date = null;
+    String dateStr = extract(docBuf, DATE, DATE_END, -1, null);
+    if (dateStr != null) {
+      date = trecSrc.parseDate(dateStr);
+    }
+     
+    // title...
+    String title = extract(docBuf, HEADLINE, HEADLINE_END, -1, null);
+
+    docData.clear();
+    docData.setName(name);
+    docData.setDate(date);
+    docData.setTitle(title);
+    docData.setBody(stripTags(docBuf, mark).toString());
+    return docData;
+  }
+
+}

Added: lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecGov2Parser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecGov2Parser.java?rev=1073192&view=auto
==============================================================================
--- lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecGov2Parser.java (added)
+++ lucene/dev/branches/realtime_search/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecGov2Parser.java Tue Feb 22 01:00:39 2011
@@ -0,0 +1,59 @@
+package org.apache.lucene.benchmark.byTask.feeds;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Date;
+
+/**
+ * Parser for the GOV2 collection format
+ */
+public class TrecGov2Parser extends TrecDocParser {
+
+  private static final String DATE = "Date: ";
+  private static final String DATE_END = TrecContentSource.NEW_LINE;
+  
+  private static final String DOCHDR = "<DOCHDR>";
+  private static final String TERMINATING_DOCHDR = "</DOCHDR>";
+  private static final int TERMINATING_DOCHDR_LENGTH = TERMINATING_DOCHDR.length();
+
+  @Override
+  public DocData parse(DocData docData, String name, TrecContentSource trecSrc, 
+      StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException {
+    // Set up a (per-thread) reused Reader over the read content, reset it to re-read from docBuf
+    Reader r = trecSrc.getTrecDocReader(docBuf);
+
+    // skip some of the text, optionally set date
+    Date date = null;
+    int h1 = docBuf.indexOf(DOCHDR);
+    if (h1>=0) {
+      int h2 = docBuf.indexOf(TERMINATING_DOCHDR,h1);
+      String dateStr = extract(docBuf, DATE, DATE_END, h2, null);
+      if (dateStr != null) {
+        date = trecSrc.parseDate(dateStr);
+      }
+      r.mark(h2+TERMINATING_DOCHDR_LENGTH);
+    }
+
+    r.reset();
+    HTMLParser htmlParser = trecSrc.getHtmlParser();
+    return htmlParser.parse(docData, name, date, null, r, null);
+  }
+  
+}
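
Finally, for illustration, a minimal sketch (not part of this patch) of a custom parser following the same pattern as the disks 4+5 parsers above; the class name and the tag names are hypothetical, and such a class would be selected via the trec.doc.parser property (e.g. trec.doc.parser=com.example.MyTrecParser):

    package com.example;

    import java.io.IOException;
    import org.apache.lucene.benchmark.byTask.feeds.DocData;
    import org.apache.lucene.benchmark.byTask.feeds.TrecContentSource;
    import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser;

    /** Hypothetical parser for a corpus whose docs carry <TITLE> tags; stateless, hence thread safe. */
    public class MyTrecParser extends TrecDocParser {
      @Override
      public DocData parse(DocData docData, String name, TrecContentSource trecSrc,
          StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException {
        docData.clear();
        docData.setName(name);
        // Title between the hypothetical <TITLE> tags, if present.
        docData.setTitle(extract(docBuf, "<TITLE>", "</TITLE>", -1, null));
        // Everything else, with tags replaced by blanks, becomes the body.
        docData.setBody(stripTags(docBuf, 0));
        return docData;
      }
    }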