Posted to java-commits@lucene.apache.org by us...@apache.org on 2009/07/04 22:08:56 UTC

svn commit: r791173 [4/4] - in /lucene/java/branches/lucene_2_4_back_compat_tests: ./ contrib/highlighter/src/test/org/apache/lucene/search/highlight/ src/java/org/apache/lucene/analysis/ src/java/org/apache/lucene/analysis/standard/ src/java/org/apach...

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java Sat Jul  4 20:08:54 2009
@@ -20,18 +20,19 @@
 import java.io.IOException;
 import java.io.Reader;
 
+import org.apache.lucene.util.LuceneTestCase;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Index;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * This testcase tests whether multi-level skipping is being used
@@ -98,19 +99,17 @@
   private static class PayloadFilter extends TokenFilter {
     static int count = 0;
     
-    PayloadAttribute payloadAtt;
-    
     protected PayloadFilter(TokenStream input) {
       super(input);
-      payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
     }
 
-    public boolean incrementToken() throws IOException {
-      boolean hasNext = input.incrementToken();
-      if (hasNext) {
-        payloadAtt.setPayload(new Payload(new byte[] { (byte) count++ }));
-      } 
-      return hasNext;
+    public Token next(final Token reusableToken) throws IOException {
+      assert reusableToken != null;
+      Token nextToken = input.next(reusableToken);
+      if (nextToken != null) {
+        nextToken.setPayload(new Payload(new byte[] { (byte) count++ }));
+      }
+      return nextToken;
     }
 
   }

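For reference, the hunk above restores the pre-attribute (Lucene 2.4-style) TokenFilter contract: the caller passes in a Token to reuse, and the filter returns a Token with its fields set, or null at end of stream. Below is a minimal, self-contained sketch of that pattern; the class name CountingPayloadFilter and its counter field are illustrative assumptions, not part of this commit.

    import java.io.IOException;

    import org.apache.lucene.analysis.Token;
    import org.apache.lucene.analysis.TokenFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.index.Payload;

    class CountingPayloadFilter extends TokenFilter {
      private int count = 0;

      CountingPayloadFilter(TokenStream input) {
        super(input);
      }

      // Old reusable-token API: ask the wrapped stream for the next token,
      // stamp a payload on it, and hand back the instance the stream returned
      // (which may or may not be the token passed in). Return null when done.
      public Token next(final Token reusableToken) throws IOException {
        assert reusableToken != null;
        Token nextToken = input.next(reusableToken);
        if (nextToken != null) {
          nextToken.setPayload(new Payload(new byte[] { (byte) count++ }));
        }
        return nextToken;
      }
    }

The key point of the reuse contract, as in the test hunk above, is to always work with the returned Token rather than assuming the passed-in instance was filled.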
Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestPayloads.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestPayloads.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestPayloads.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestPayloads.java Sat Jul  4 20:08:54 2009
@@ -27,20 +27,20 @@
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.UnicodeUtil;
+
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.UnicodeUtil;
 
 
 public class TestPayloads extends LuceneTestCase {
@@ -440,33 +440,32 @@
         private int length;
         private int offset;
         Payload payload = new Payload();
-        PayloadAttribute payloadAtt;
         
         public PayloadFilter(TokenStream in, byte[] data, int offset, int length) {
             super(in);
             this.data = data;
             this.length = length;
             this.offset = offset;
-            payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
         }
         
-        public boolean incrementToken() throws IOException {
-            boolean hasNext = input.incrementToken();
-            if (hasNext) {
+        public Token next(final Token reusableToken) throws IOException {
+            assert reusableToken != null;
+            Token nextToken = input.next(reusableToken);
+            if (nextToken != null) {
                 if (offset + length <= data.length) {
                     Payload p = null;
                     if (p == null) {
                         p = new Payload();
-                        payloadAtt.setPayload(p);
+                        nextToken.setPayload(p);
                     }
                     p.setData(data, offset, length);
                     offset += length;                
                 } else {
-                    payloadAtt.setPayload(null);
+                    nextToken.setPayload(null);
                 }
             }
             
-            return hasNext;
+            return nextToken;
         }
     }
     
@@ -528,25 +527,19 @@
         private boolean first;
         private ByteArrayPool pool;
         private String term;
-
-        TermAttribute termAtt;
-        PayloadAttribute payloadAtt;
-        
         PoolingPayloadTokenStream(ByteArrayPool pool) {
             this.pool = pool;
             payload = pool.get();
             generateRandomData(payload);
             term = pool.bytesToString(payload);
             first = true;
-            payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
-            termAtt = (TermAttribute) addAttribute(TermAttribute.class);
         }
         
-        public boolean incrementToken() throws IOException {
-            if (!first) return false;
-            termAtt.setTermBuffer(term);
-            payloadAtt.setPayload(new Payload(payload));
-            return true;
+        public Token next(final Token reusableToken) throws IOException {
+            if (!first) return null;
+            reusableToken.reinit(term, 0, 0);
+            reusableToken.setPayload(new Payload(payload));
+            return reusableToken;
         }
         
         public void close() throws IOException {

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestTermVectorsReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestTermVectorsReader.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestTermVectorsReader.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestTermVectorsReader.java Sat Jul  4 20:08:54 2009
@@ -17,23 +17,21 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.io.Reader;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.SortedSet;
-
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedSet;
+
 public class TestTermVectorsReader extends LuceneTestCase {
   //Must be lexicographically sorted, will do in setup, versus trying to maintain here
   private String[] testFields = {"f1", "f2", "f3", "f4"};
@@ -120,30 +118,17 @@
 
   private class MyTokenStream extends TokenStream {
     int tokenUpto;
-    
-    TermAttribute termAtt;
-    PositionIncrementAttribute posIncrAtt;
-    OffsetAttribute offsetAtt;
-    
-    public MyTokenStream() {
-      termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-      posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
-      offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
-    }
-    
-    public boolean incrementToken() {
+    public Token next(final Token reusableToken) {
       if (tokenUpto >= tokens.length)
-        return false;
+        return null;
       else {
         final TestToken testToken = tokens[tokenUpto++];
-        termAtt.setTermBuffer(testToken.text);
-        offsetAtt.setOffset(testToken.startOffset, testToken.endOffset);
-        if (tokenUpto > 1) {
-          posIncrAtt.setPositionIncrement(testToken.pos - tokens[tokenUpto-2].pos);
-        } else {
-          posIncrAtt.setPositionIncrement(testToken.pos+1);
-        }
-        return true;
+        reusableToken.reinit(testToken.text, testToken.startOffset, testToken.endOffset);
+        if (tokenUpto > 1)
+          reusableToken.setPositionIncrement(testToken.pos - tokens[tokenUpto-2].pos);
+        else
+          reusableToken.setPositionIncrement(testToken.pos+1);
+        return reusableToken;
       }
     }
   }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestTermdocPerf.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestTermdocPerf.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestTermdocPerf.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/index/TestTermdocPerf.java Sat Jul  4 20:08:54 2009
@@ -17,18 +17,18 @@
  */
 
 
-import java.io.IOException;
-import java.io.Reader;
-import java.util.Random;
-
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.Reader;
+import java.io.IOException;
+import java.util.Random;
 
 /**
  * @version $Id$
@@ -36,21 +36,15 @@
 
 class RepeatingTokenStream extends TokenStream {
   public int num;
-  TermAttribute termAtt;
-  String value;
+  Token t;
 
    public RepeatingTokenStream(String val) {
-     this.value = val;
-     this.termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+     t = new Token(0,val.length());
+     t.setTermBuffer(val);
    }
 
-   public boolean incrementToken() throws IOException {
-     num--;
-     if (num >= 0) {
-       termAtt.setTermBuffer(value);
-       return true;
-     }
-     return false;
+   public Token next(final Token reusableToken) throws IOException {
+     return --num<0 ? null : (Token) t.clone();
    }
 }
 

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java Sat Jul  4 20:08:54 2009
@@ -17,20 +17,17 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
 import java.io.Reader;
 
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.search.Query;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Test QueryParser's ability to deal with Analyzers that return more
@@ -143,48 +140,34 @@
 
   private final class TestFilter extends TokenFilter {
     
-    private String prevType;
-    private int prevStartOffset;
-    private int prevEndOffset;
-    
-    TermAttribute termAtt;
-    PositionIncrementAttribute posIncrAtt;
-    OffsetAttribute offsetAtt;
-    TypeAttribute typeAtt;
+    private Token prevToken;
     
     public TestFilter(TokenStream in) {
       super(in);
-      termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-      posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
-      offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
-      typeAtt = (TypeAttribute) addAttribute(TypeAttribute.class);
     }
 
-    public final boolean incrementToken() throws java.io.IOException {
+    public final Token next(final Token reusableToken) throws java.io.IOException {
       if (multiToken > 0) {
-        termAtt.setTermBuffer("multi"+(multiToken+1));
-        offsetAtt.setOffset(prevStartOffset, prevEndOffset);
-        typeAtt.setType(prevType);
-        posIncrAtt.setPositionIncrement(0);
+        reusableToken.reinit("multi"+(multiToken+1), prevToken.startOffset(), prevToken.endOffset(), prevToken.type());
+        reusableToken.setPositionIncrement(0);
         multiToken--;
-        return true;
+        return reusableToken;
       } else {
-        boolean next = input.incrementToken();
-        if (next == false) {
-          return false;
+        Token nextToken = input.next(reusableToken);
+        if (nextToken == null) {
+          prevToken = null;
+          return null;
         }
-        prevType = typeAtt.type();
-        prevStartOffset = offsetAtt.startOffset();
-        prevEndOffset = offsetAtt.endOffset();
-        String text = termAtt.term();
+        prevToken = (Token) nextToken.clone();
+        String text = nextToken.term();
         if (text.equals("triplemulti")) {
           multiToken = 2;
-          return true;
+          return nextToken;
         } else if (text.equals("multi")) {
           multiToken = 1;
-          return true;
+          return nextToken;
         } else {
-          return true;
+          return nextToken;
         }
       }
     }
@@ -209,28 +192,23 @@
 
   private final class TestPosIncrementFilter extends TokenFilter {
     
-    TermAttribute termAtt;
-    PositionIncrementAttribute posIncrAtt;
-    
     public TestPosIncrementFilter(TokenStream in) {
       super(in);
-      termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-      posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
     }
 
-    public final boolean incrementToken () throws java.io.IOException {
-      while(input.incrementToken()) {
-        if (termAtt.term().equals("the")) {
+    public final Token next(final Token reusableToken) throws java.io.IOException {
+      for (Token nextToken = input.next(reusableToken); nextToken != null; nextToken = input.next(reusableToken)) {
+        if (nextToken.term().equals("the")) {
           // stopword, do nothing
-        } else if (termAtt.term().equals("quick")) {
-          posIncrAtt.setPositionIncrement(2);
-          return true;
+        } else if (nextToken.term().equals("quick")) {
+          nextToken.setPositionIncrement(2);
+          return nextToken;
         } else {
-          posIncrAtt.setPositionIncrement(1);
-          return true;
+          nextToken.setPositionIncrement(1);
+          return nextToken;
         }
       }
-      return false;
+      return null;
     }
   }
 

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java Sat Jul  4 20:08:54 2009
@@ -22,6 +22,7 @@
 import java.util.Map;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestQueryParser.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestQueryParser.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/queryParser/TestQueryParser.java Sat Jul  4 20:08:54 2009
@@ -19,8 +19,8 @@
 
 import java.io.IOException;
 import java.io.Reader;
-import java.text.Collator;
 import java.text.DateFormat;
+import java.text.Collator;
 import java.util.Calendar;
 import java.util.Date;
 import java.util.Locale;
@@ -31,12 +31,11 @@
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.DateField;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
@@ -44,6 +43,7 @@
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreRangeQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
@@ -65,45 +65,36 @@
   public static Analyzer qpAnalyzer = new QPTestAnalyzer();
 
   public static class QPTestFilter extends TokenFilter {
-    TermAttribute termAtt;
-    OffsetAttribute offsetAtt;
-        
     /**
      * Filter which discards the token 'stop' and which expands the
      * token 'phrase' into 'phrase1 phrase2'
      */
     public QPTestFilter(TokenStream in) {
       super(in);
-      termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-      offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
     }
 
     boolean inPhrase = false;
     int savedStart = 0, savedEnd = 0;
 
-    public boolean incrementToken() throws IOException {
+    public Token next(final Token reusableToken) throws IOException {
+      assert reusableToken != null;
       if (inPhrase) {
         inPhrase = false;
-        termAtt.setTermBuffer("phrase2");
-        offsetAtt.setOffset(savedStart, savedEnd);
-        return true;
+        return reusableToken.reinit("phrase2", savedStart, savedEnd);
       } else
-        while (input.incrementToken()) {
-          if (termAtt.term().equals("phrase")) {
+        for (Token nextToken = input.next(reusableToken); nextToken != null; nextToken = input.next(reusableToken)) {
+          if (nextToken.term().equals("phrase")) {
             inPhrase = true;
-            savedStart = offsetAtt.startOffset();
-            savedEnd = offsetAtt.endOffset();
-            termAtt.setTermBuffer("phrase1");
-            offsetAtt.setOffset(savedStart, savedEnd);
-            return true;
-          } else if (!termAtt.term().equals("stop"))
-            return true;
+            savedStart = nextToken.startOffset();
+            savedEnd = nextToken.endOffset();
+            return nextToken.reinit("phrase1", savedStart, savedEnd);
+          } else if (!nextToken.term().equals("stop"))
+            return nextToken;
         }
-      return false;
+      return null;
     }
   }
 
-  
   public static class QPTestAnalyzer extends Analyzer {
 
     /** Filters LowerCaseTokenizer with StopFilter. */
@@ -424,11 +415,13 @@
 
   public void testRange() throws Exception {
     assertQueryEquals("[ a TO z]", null, "[a TO z]");
-    assertTrue(((RangeQuery)getQuery("[ a TO z]", null)).getConstantScoreRewrite());
+    // disable this test
+    //assertTrue(getQuery("[ a TO z]", null) instanceof ConstantScoreRangeQuery);
 
     QueryParser qp = new QueryParser("field", new SimpleAnalyzer());
-	  qp.setConstantScoreRewrite(false);
-    assertFalse(((RangeQuery)qp.parse("[ a TO z]")).getConstantScoreRewrite());
+    qp.setUseOldRangeQuery(true);
+    // disable this test
+    //assertTrue(qp.parse("[ a TO z]") instanceof RangeQuery);
     
     assertQueryEquals("[ a TO z ]", null, "[a TO z]");
     assertQueryEquals("{ a TO z}", null, "{a TO z}");
@@ -467,7 +460,7 @@
     // supported).
       
     // Test ConstantScoreRangeQuery
-    qp.setConstantScoreRewrite(true);
+    qp.setUseOldRangeQuery(false);
     ScoreDoc[] result = is.search(qp.parse("[ \u062F TO \u0698 ]"), null, 1000).scoreDocs;
     assertEquals("The index Term should not be included.", 0, result.length);
 
@@ -475,7 +468,7 @@
     assertEquals("The index Term should be included.", 1, result.length);
 
     // Test RangeQuery
-    qp.setConstantScoreRewrite(false);
+    qp.setUseOldRangeQuery(true);
     result = is.search(qp.parse("[ \u062F TO \u0698 ]"), null, 1000).scoreDocs;
     assertEquals("The index Term should not be included.", 0, result.length);
 
@@ -784,7 +777,7 @@
 
   public void assertParseException(String queryString) throws Exception {
     try {
-      getQuery(queryString, null);
+      Query q = getQuery(queryString, null);
     } catch (ParseException expected) {
       return;
     }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestPositionIncrement.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestPositionIncrement.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestPositionIncrement.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestPositionIncrement.java Sat Jul  4 20:08:54 2009
@@ -17,16 +17,14 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
 import java.io.Reader;
+import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -51,18 +49,14 @@
           private final int[] INCREMENTS = {1, 2, 1, 0, 1};
           private int i = 0;
 
-          PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
-          TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-          OffsetAttribute offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
-          
-          public boolean incrementToken() {
+          public Token next(final Token reusableToken) {
+            assert reusableToken != null;
             if (i == TOKENS.length)
-              return false;
-            termAtt.setTermBuffer(TOKENS[i]);
-            offsetAtt.setOffset(i, i);
-            posIncrAtt.setPositionIncrement(INCREMENTS[i]);
+              return null;
+            reusableToken.reinit(TOKENS[i], i, i);
+            reusableToken.setPositionIncrement(INCREMENTS[i]);
             i++;
-            return true;
+            return reusableToken;
           }
         };
       }
@@ -202,4 +196,18 @@
       StopFilter.setEnablePositionIncrementsDefault(dflt);
     }
   }
+
+  /**
+   * Basic analyzer behavior should be to keep sequential terms in one
+   * increment from one another.
+   */
+  public void testIncrementingPositions() throws Exception {
+    Analyzer analyzer = new WhitespaceAnalyzer();
+    TokenStream ts = analyzer.tokenStream("field",
+                                new StringReader("one two three four five"));
+    final Token reusableToken = new Token();
+    for (Token nextToken = ts.next(reusableToken); nextToken != null; nextToken = ts.next(reusableToken)) {
+      assertEquals(nextToken.term(), 1, nextToken.getPositionIncrement());
+    }
+  }
 }

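The testIncrementingPositions method added above exercises the general consumption idiom for the reusable-token API. A small stand-alone sketch of that idiom follows; the analyzer, field name, and sample text are placeholders chosen for the example, not values taken from the commit.

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.Token;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;

    public class PrintTokens {
      public static void main(String[] args) throws IOException {
        Analyzer analyzer = new WhitespaceAnalyzer();
        TokenStream ts = analyzer.tokenStream("field",
                                              new StringReader("one two three"));
        final Token reusableToken = new Token();
        // next(Token) may reuse the passed-in token or return a different one;
        // always use the returned instance and stop when it is null.
        for (Token nextToken = ts.next(reusableToken); nextToken != null;
             nextToken = ts.next(reusableToken)) {
          System.out.println(nextToken.term() + " +" + nextToken.getPositionIncrement());
        }
        ts.close();
      }
    }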
Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestRangeFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestRangeFilter.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestRangeFilter.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestRangeFilter.java Sat Jul  4 20:08:54 2009
@@ -376,46 +376,4 @@
         assertEquals("The index Term should be included.", 1, result.length());
         search.close();
     }
-
-    public void testDanish() throws Exception {
-            
-        /* build an index */
-        RAMDirectory danishIndex = new RAMDirectory();
-        IndexWriter writer = new IndexWriter
-            (danishIndex, new SimpleAnalyzer(), T, 
-             IndexWriter.MaxFieldLength.LIMITED);
-        // Danish collation orders the words below in the given order
-        // (example taken from TestSort.testInternationalSort() ).
-        String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
-        for (int docnum = 0 ; docnum < words.length ; ++docnum) {   
-            Document doc = new Document();
-            doc.add(new Field("content", words[docnum], 
-                              Field.Store.YES, Field.Index.UN_TOKENIZED));
-            doc.add(new Field("body", "body",
-                              Field.Store.YES, Field.Index.UN_TOKENIZED));
-            writer.addDocument(doc);
-        }
-        writer.optimize();
-        writer.close();
-
-        IndexReader reader = IndexReader.open(danishIndex);
-        IndexSearcher search = new IndexSearcher(reader);
-        Query q = new TermQuery(new Term("body","body"));
-
-        Collator collator = Collator.getInstance(new Locale("da", "dk"));
-        Query query = new RangeQuery
-            ("content", "H\u00D8T", "MAND", false, false, collator);
-
-        // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
-        // but Danish collation does.
-        Hits result = search.search
-            (q, new RangeFilter("content", "H\u00D8T", "MAND", F, F, collator));
-        assertEquals("The index Term should be included.", 1, result.length());
-
-        result = search.search
-            (q, new RangeFilter("content", "H\u00C5T", "MAND", F, F, collator));
-        assertEquals
-            ("The index Term should not be included.", 0, result.length());
-        search.close();
-    }
 }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestRangeQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestRangeQuery.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestRangeQuery.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/TestRangeQuery.java Sat Jul  4 20:08:54 2009
@@ -26,7 +26,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.Token;
 
 import org.apache.lucene.util.LuceneTestCase;
 import java.io.IOException;
@@ -46,7 +46,9 @@
   }
 
   public void testExclusive() throws Exception {
-    Query query = new RangeQuery("content", "A", "C", false, false);
+    Query query = new RangeQuery(new Term("content", "A"),
+                                 new Term("content", "C"),
+                                 false);
     initializeIndex(new String[] {"A", "B", "C", "D"});
     IndexSearcher searcher = new IndexSearcher(dir);
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -65,26 +67,11 @@
     assertEquals("C added, still only B in range", 1, hits.length);
     searcher.close();
   }
-  
-  //TODO: remove in Lucene 3.0
-  public void testDeprecatedCstrctors() throws IOException {
-    Query query = new RangeQuery(null, new Term("content","C"), false);
-    initializeIndex(new String[] {"A", "B", "C", "D"});
-    IndexSearcher searcher = new IndexSearcher(dir);
-    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
-    assertEquals("A,B,C,D, only B in range", 2, hits.length);
-    searcher.close();
-    
-    query = new RangeQuery(new Term("content","C"),null, false);
-    initializeIndex(new String[] {"A", "B", "C", "D"});
-    searcher = new IndexSearcher(dir);
-    hits = searcher.search(query, null, 1000).scoreDocs;
-    assertEquals("A,B,C,D, only B in range", 1, hits.length);
-    searcher.close();
-  }
 
   public void testInclusive() throws Exception {
-    Query query = new RangeQuery("content", "A", "C", true, true);
+    Query query = new RangeQuery(new Term("content", "A"),
+                                 new Term("content", "C"),
+                                 true);
 
     initializeIndex(new String[]{"A", "B", "C", "D"});
     IndexSearcher searcher = new IndexSearcher(dir);
@@ -106,10 +93,13 @@
   }
 
   public void testEqualsHashcode() {
-    Query query = new RangeQuery("content", "A", "C", true, true);
-    
+    Query query = new RangeQuery(new Term("content", "A"),
+                                 new Term("content", "C"),
+                                 true);
     query.setBoost(1.0f);
-    Query other = new RangeQuery("content", "A", "C", true, true);
+    Query other = new RangeQuery(new Term("content", "A"),
+                                 new Term("content", "C"),
+                                 true);
     other.setBoost(1.0f);
 
     assertEquals("query equals itself is true", query, query);
@@ -119,36 +109,38 @@
     other.setBoost(2.0f);
     assertFalse("Different boost queries are not equal", query.equals(other));
 
-    other = new RangeQuery("notcontent", "A", "C", true, true);
+    other = new RangeQuery(new Term("notcontent", "A"), new Term("notcontent", "C"), true);
     assertFalse("Different fields are not equal", query.equals(other));
 
-    other = new RangeQuery("content", "X", "C", true, true);
+    other = new RangeQuery(new Term("content", "X"), new Term("content", "C"), true);
     assertFalse("Different lower terms are not equal", query.equals(other));
 
-    other = new RangeQuery("content", "A", "Z", true, true);
+    other = new RangeQuery(new Term("content", "A"), new Term("content", "Z"), true);
     assertFalse("Different upper terms are not equal", query.equals(other));
 
-    query = new RangeQuery("content", null, "C", true, true);
-    other = new RangeQuery("content", null, "C", true, true);
+    query = new RangeQuery(null, new Term("content", "C"), true);
+    other = new RangeQuery(null, new Term("content", "C"), true);
     assertEquals("equivalent queries with null lowerterms are equal()", query, other);
     assertEquals("hashcode must return same value when equals is true", query.hashCode(), other.hashCode());
 
-    query = new RangeQuery("content", "C", null, true, true);
-    other = new RangeQuery("content", "C", null, true, true);
+    query = new RangeQuery(new Term("content", "C"), null, true);
+    other = new RangeQuery(new Term("content", "C"), null, true);
     assertEquals("equivalent queries with null upperterms are equal()", query, other);
     assertEquals("hashcode returns same value", query.hashCode(), other.hashCode());
 
-    query = new RangeQuery("content", null, "C", true, true);
-    other = new RangeQuery("content", "C", null, true, true);
+    query = new RangeQuery(null, new Term("content", "C"), true);
+    other = new RangeQuery(new Term("content", "C"), null, true);
     assertFalse("queries with different upper and lower terms are not equal", query.equals(other));
 
-    query = new RangeQuery("content", "A", "C", false, false);
-    other = new RangeQuery("content", "A", "C", true, true);
+    query = new RangeQuery(new Term("content", "A"), new Term("content", "C"), false);
+    other = new RangeQuery(new Term("content", "A"), new Term("content", "C"), true);
     assertFalse("queries with different inclusive are not equal", query.equals(other));
   }
 
   public void testExclusiveCollating() throws Exception {
-    Query query = new RangeQuery("content", "A", "C", false, false, Collator.getInstance(Locale.ENGLISH));
+    Query query = new RangeQuery(new Term("content", "A"),
+                                 new Term("content", "C"),
+                                 false, Collator.getInstance(Locale.ENGLISH));
     initializeIndex(new String[] {"A", "B", "C", "D"});
     IndexSearcher searcher = new IndexSearcher(dir);
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -169,7 +161,9 @@
   }
 
   public void testInclusiveCollating() throws Exception {
-    Query query = new RangeQuery("content", "A", "C",true, true, Collator.getInstance(Locale.ENGLISH));
+    Query query = new RangeQuery(new Term("content", "A"),
+                                 new Term("content", "C"),
+                                 true, Collator.getInstance(Locale.ENGLISH));
 
     initializeIndex(new String[]{"A", "B", "C", "D"});
     IndexSearcher searcher = new IndexSearcher(dir);
@@ -195,7 +189,9 @@
     // RuleBasedCollator.  However, the Arabic Locale seems to order the Farsi
     // characters properly.
     Collator collator = Collator.getInstance(new Locale("ar"));
-    Query query = new RangeQuery("content", "\u062F", "\u0698", true, true, collator);
+    Query query = new RangeQuery(new Term("content", "\u062F"),
+                                 new Term("content", "\u0698"),
+                                 true, collator);
     // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
     // orders the U+0698 character before the U+0633 character, so the single
     // index Term below should NOT be returned by a RangeQuery with a Farsi
@@ -205,56 +201,36 @@
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
     assertEquals("The index Term should not be included.", 0, hits.length);
 
-    query = new RangeQuery("content", "\u0633", "\u0638",true, true, collator);
+    query = new RangeQuery(new Term("content", "\u0633"),
+                           new Term("content", "\u0638"),
+                           true, collator);
     hits = searcher.search(query, null, 1000).scoreDocs;
     assertEquals("The index Term should be included.", 1, hits.length);
     searcher.close();
   }
-  
-  public void testDanish() throws Exception {
-    Collator collator = Collator.getInstance(new Locale("da", "dk"));
-    // Danish collation orders the words below in the given order (example taken
-    // from TestSort.testInternationalSort() ).
-    String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
-    Query query = new RangeQuery("content", "H\u00D8T", "MAND", false, false, collator);
-
-    // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
-    // but Danish collation does.
-    initializeIndex(words);
-    IndexSearcher searcher = new IndexSearcher(dir);
-    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
-    assertEquals("The index Term should be included.", 1, hits.length);
-
-    query = new RangeQuery("content", "H\u00C5T", "MAND", false, false, collator);
-    hits = searcher.search(query, null, 1000).scoreDocs;
-    assertEquals("The index Term should not be included.", 0, hits.length);
-    searcher.close();
-  }
 
   private static class SingleCharAnalyzer extends Analyzer {
 
     private static class SingleCharTokenizer extends Tokenizer {
       char[] buffer = new char[1];
       boolean done;
-      TermAttribute termAtt;
-      
+
       public SingleCharTokenizer(Reader r) {
         super(r);
-        termAtt = (TermAttribute) addAttribute(TermAttribute.class);
       }
 
-      public boolean incrementToken() throws IOException {
+      public final Token next(final Token reusableToken) throws IOException {
         int count = input.read(buffer);
         if (done)
-          return false;
+          return null;
         else {
           done = true;
           if (count == 1) {
-            termAtt.termBuffer()[0] = buffer[0];
-            termAtt.setTermLength(1);
+            reusableToken.termBuffer()[0] = buffer[0];
+            reusableToken.setTermLength(1);
           } else
-            termAtt.setTermLength(0);
-          return true;
+            reusableToken.setTermLength(0);
+          return reusableToken;
         }
       }
 

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/payloads/PayloadHelper.java Sat Jul  4 20:08:54 2009
@@ -2,7 +2,6 @@
 
 
 import org.apache.lucene.analysis.*;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.index.Payload;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.RAMDirectory;
@@ -42,36 +41,34 @@
   public class PayloadFilter extends TokenFilter {
     String fieldName;
     int numSeen = 0;
-    PayloadAttribute payloadAtt;
-    
+
     public PayloadFilter(TokenStream input, String fieldName) {
       super(input);
       this.fieldName = fieldName;
-      payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
     }
 
-    public boolean incrementToken() throws IOException {
-      
-      if (input.incrementToken()) {
+    public Token next() throws IOException {
+      Token result = input.next();
+      if (result != null) {
         if (fieldName.equals(FIELD))
         {
-          payloadAtt.setPayload(new Payload(payloadField));
+          result.setPayload(new Payload(payloadField));
         }
         else if (fieldName.equals(MULTI_FIELD))
         {
           if (numSeen  % 2 == 0)
           {
-            payloadAtt.setPayload(new Payload(payloadMultiField1));
+            result.setPayload(new Payload(payloadMultiField1));
           }
           else
           {
-            payloadAtt.setPayload(new Payload(payloadMultiField2));
+            result.setPayload(new Payload(payloadMultiField2));
           }
           numSeen++;
         }
-        return true;
+
       }
-      return false;
+      return result;
     }
   }
 

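Unlike the reusable-token hunks elsewhere in this commit, the PayloadHelper filter above uses the older no-argument next() entry point, which simply returns whatever Token the wrapped stream produced, or null at end of stream. A minimal sketch of that variant; the class name and payload bytes are assumptions made for illustration only.

    import java.io.IOException;

    import org.apache.lucene.analysis.Token;
    import org.apache.lucene.analysis.TokenFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.index.Payload;

    class MarkerPayloadFilter extends TokenFilter {
      private final byte[] data;   // payload bytes stamped on every token

      MarkerPayloadFilter(TokenStream input, byte[] data) {
        super(input);
        this.data = data;
      }

      // No-argument variant: each call returns the next Token from the
      // wrapped stream (possibly freshly allocated), or null when exhausted.
      public Token next() throws IOException {
        Token result = input.next();
        if (result != null) {
          result.setPayload(new Payload(data));
        }
        return result;
      }
    }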
Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java Sat Jul  4 20:08:54 2009
@@ -21,9 +21,9 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -66,32 +66,29 @@
   private class PayloadFilter extends TokenFilter {
     String fieldName;
     int numSeen = 0;
-    
-    PayloadAttribute payloadAtt;    
-    
+
     public PayloadFilter(TokenStream input, String fieldName) {
       super(input);
       this.fieldName = fieldName;
-      payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
     }
-    
-    public boolean incrementToken() throws IOException {
-      boolean hasNext = input.incrementToken();
-      if (hasNext) {
+
+    public Token next(final Token reusableToken) throws IOException {
+      assert reusableToken != null;
+      Token nextToken = input.next(reusableToken);
+      if (nextToken != null) {
         if (fieldName.equals("field")) {
-          payloadAtt.setPayload(new Payload(payloadField));
+          nextToken.setPayload(new Payload(payloadField));
         } else if (fieldName.equals("multiField")) {
           if (numSeen % 2 == 0) {
-            payloadAtt.setPayload(new Payload(payloadMultiField1));
+            nextToken.setPayload(new Payload(payloadMultiField1));
           } else {
-            payloadAtt.setPayload(new Payload(payloadMultiField2));
+            nextToken.setPayload(new Payload(payloadMultiField2));
           }
           numSeen++;
         }
-        return true;
-      } else {
-        return false;
+
       }
+      return nextToken;
     }
   }
 

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java Sat Jul  4 20:08:54 2009
@@ -29,9 +29,6 @@
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.CorruptIndexException;
@@ -61,7 +58,6 @@
   }
 
   protected void setUp() throws Exception {
-    super.setUp();
     PayloadHelper helper = new PayloadHelper();
     searcher = helper.setUp(similarity, 1000);
     indexReader = searcher.getIndexReader();
@@ -463,9 +459,6 @@
     Set entities = new HashSet();
     Set nopayload = new HashSet();
     int pos;
-    PayloadAttribute payloadAtt;
-    TermAttribute termAtt;
-    PositionIncrementAttribute posIncrAtt;
 
     public PayloadFilter(TokenStream input, String fieldName) {
       super(input);
@@ -475,26 +468,24 @@
       entities.add("one");
       nopayload.add("nopayload");
       nopayload.add("np");
-      termAtt = (TermAttribute) addAttribute(TermAttribute.class);
-      posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
-      payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
+
     }
 
-    public boolean incrementToken() throws IOException {
-      if (input.incrementToken()) {
-        String token = new String(termAtt.termBuffer(), 0, termAtt.termLength());
+    public Token next() throws IOException {
+      Token result = input.next();
+      if (result != null) {
+        String token = new String(result.termBuffer(), 0, result.termLength());
 
         if (!nopayload.contains(token)) {
           if (entities.contains(token)) {
-            payloadAtt.setPayload(new Payload((token + ":Entity:"+ pos ).getBytes()));
+            result.setPayload(new Payload((token + ":Entity:"+ pos ).getBytes()));
           } else {
-            payloadAtt.setPayload(new Payload((token + ":Noise:" + pos ).getBytes()));
+            result.setPayload(new Payload((token + ":Noise:" + pos ).getBytes()));
           }
         }
-        pos += posIncrAtt.getPositionIncrement();
-        return true;
+        pos += result.getPositionIncrement();
       }
-      return false;
+      return result;
     }
   }
   

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/util/LuceneTestCase.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/test/org/apache/lucene/util/LuceneTestCase.java Sat Jul  4 20:08:54 2009
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import junit.framework.TestCase;
 
@@ -43,7 +42,6 @@
 
   protected void setUp() throws Exception {
     ConcurrentMergeScheduler.setTestMode();
-    TokenStream.setUseNewAPIDefault(true);
   }
 
   protected void tearDown() throws Exception {