You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rm...@apache.org on 2010/11/30 12:22:46 UTC

svn commit: r1040463 [5/8] - in /lucene/dev/trunk: lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ lucene/contrib/db/bdb-je/src/java/org/apache/lucene/store/je/ lucene/contrib/db/bdb/src/java/org/apache/lucene/store/db/ luce...

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java Tue Nov 30 11:22:39 2010
@@ -99,9 +99,9 @@ public class TestRegexpQuery extends Luc
     AutomatonProvider myProvider = new AutomatonProvider() {
       // automaton that matches quick or brown
       private Automaton quickBrownAutomaton = BasicOperations.union(Arrays
-          .asList(new Automaton[] {BasicAutomata.makeString("quick"),
-              BasicAutomata.makeString("brown"),
-              BasicAutomata.makeString("bob")}));
+          .asList(BasicAutomata.makeString("quick"),
+          BasicAutomata.makeString("brown"),
+          BasicAutomata.makeString("bob")));
       
       public Automaton getAutomaton(String name) throws IOException {
         if (name.equals("quickBrown")) return quickBrownAutomaton;

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java Tue Nov 30 11:22:39 2010
@@ -155,9 +155,12 @@ public class TestSort extends LuceneTest
   private IndexSearcher getFullStrings() throws CorruptIndexException, LockObtainFailedException, IOException {
     Directory indexStore = newDirectory();
     dirs.add(indexStore);
-    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(4));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(97);
+    IndexWriter writer = new IndexWriter(
+        indexStore,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(4).
+            setMergePolicy(newLogMergePolicy(97))
+    );
     for (int i=0; i<NUM_STRINGS; i++) {
         Document doc = new Document();
         String num = getRandomCharString(getRandomNumber(2, 8), 48, 52);
@@ -597,7 +600,7 @@ public class TestSort extends LuceneTest
     assertMatches (full, queryG, sort, "ZYXW");
 
     // Do the same for a MultiSearcher
-    Searcher multiSearcher=new MultiSearcher (new Searchable[] { full });
+    Searcher multiSearcher=new MultiSearcher (full);
 
     sort.setSort (new SortField ("int", SortField.INT),
                                 new SortField ("string", SortField.STRING),
@@ -611,7 +614,7 @@ public class TestSort extends LuceneTest
     // Don't close the multiSearcher. it would close the full searcher too!
 
     // Do the same for a ParallelMultiSearcher
-                Searcher parallelSearcher=new ParallelMultiSearcher (new Searchable[] { full });
+                Searcher parallelSearcher=new ParallelMultiSearcher (full);
 
     sort.setSort (new SortField ("int", SortField.INT),
                                 new SortField ("string", SortField.STRING),
@@ -670,7 +673,7 @@ public class TestSort extends LuceneTest
     // Test the MultiSearcher's ability to preserve locale-sensitive ordering
     // by wrapping it around a single searcher
   public void testInternationalMultiSearcherSort() throws Exception {
-    Searcher multiSearcher = new MultiSearcher (new Searchable[] { full });
+    Searcher multiSearcher = new MultiSearcher (full);
     
     sort.setSort (new SortField ("i18n", new Locale("sv", "se")));
     assertMatches (multiSearcher, queryY, sort, "BJDFH");
@@ -684,13 +687,13 @@ public class TestSort extends LuceneTest
 
   // test a variety of sorts using more than one searcher
   public void testMultiSort() throws Exception {
-    MultiSearcher searcher = new MultiSearcher (new Searchable[] { searchX, searchY });
+    MultiSearcher searcher = new MultiSearcher (searchX, searchY);
     runMultiSorts(searcher, false);
   }
 
   // test a variety of sorts using a parallel multisearcher
   public void testParallelMultiSort() throws Exception {
-    Searcher searcher = new ParallelMultiSearcher (new Searchable[] { searchX, searchY });
+    Searcher searcher = new ParallelMultiSearcher (searchX, searchY);
     runMultiSorts(searcher, false);
   }
 
@@ -705,7 +708,7 @@ public class TestSort extends LuceneTest
 
     // we'll test searching locally, remote and multi
     
-    MultiSearcher multi  = new MultiSearcher (new Searchable[] { searchX, searchY });
+    MultiSearcher multi  = new MultiSearcher (searchX, searchY);
 
     // change sorting and make sure relevancy stays the same
 

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java Tue Nov 30 11:22:39 2010
@@ -17,6 +17,14 @@ package org.apache.lucene.search;
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.io.Reader;
+import java.text.Collator;
+import java.util.Locale;
+import java.util.Set;
+
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -24,21 +32,7 @@ import org.apache.lucene.index.MultiFiel
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-
 import org.apache.lucene.util.LuceneTestCase;
-import java.io.IOException;
-import java.io.Reader;
-import java.util.Locale;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.Arrays;
-import java.text.Collator;
 
 
 public class TestTermRangeQuery extends LuceneTestCase {
@@ -143,7 +137,7 @@ public class TestTermRangeQuery extends 
   private void checkBooleanTerms(Searcher searcher, TermRangeQuery query, String... terms) throws IOException {
     query.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));
     final BooleanQuery bq = (BooleanQuery) searcher.rewrite(query);
-    final Set<String> allowedTerms = new HashSet<String>(Arrays.asList(terms));
+    final Set<String> allowedTerms = asSet(terms);
     assertEquals(allowedTerms.size(), bq.clauses().size());
     for (BooleanClause c : bq.clauses()) {
       assertTrue(c.getQuery() instanceof TermQuery);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java Tue Nov 30 11:22:39 2010
@@ -122,7 +122,7 @@ public class TestCustomScoreQuery extend
   private static class CustomMulAddQuery extends CustomScoreQuery {
     // constructor
     CustomMulAddQuery(Query q, ValueSourceQuery qValSrc1, ValueSourceQuery qValSrc2) {
-      super(q, new ValueSourceQuery[]{qValSrc1, qValSrc2});
+      super(q, qValSrc1, qValSrc2);
     }
 
     /*(non-Javadoc) @see org.apache.lucene.search.function.CustomScoreQuery#name() */

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestValueSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestValueSource.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestValueSource.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/function/TestValueSource.java Tue Nov 30 11:22:39 2010
@@ -28,7 +28,7 @@ public class TestValueSource extends Luc
 
   public void testMultiValueSource() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     Document doc = new Document();
     Field f = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
     doc.add(f);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java Tue Nov 30 11:22:39 2010
@@ -227,7 +227,7 @@ public class TestBasics extends LuceneTe
                                            4, true);
     SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
 
-    SpanOrQuery or = new SpanOrQuery(new SpanQuery[] {term3});
+    SpanOrQuery or = new SpanOrQuery(term3);
 
     SpanNotQuery query = new SpanNotQuery(near, or);
 
@@ -249,7 +249,7 @@ public class TestBasics extends LuceneTe
     SpanTermQuery term4 = new SpanTermQuery(new Term("field", "sixty"));
     SpanTermQuery term5 = new SpanTermQuery(new Term("field", "eighty"));
 
-    SpanOrQuery or = new SpanOrQuery(new SpanQuery[] {term3, term4, term5});
+    SpanOrQuery or = new SpanOrQuery(term3, term4, term5);
 
     SpanNotQuery query = new SpanNotQuery(near, or);
 
@@ -436,7 +436,7 @@ public class TestBasics extends LuceneTe
     SpanNearQuery near2 = new SpanNearQuery(new SpanQuery[] {term3, term4},
                                             0, true);
 
-    SpanOrQuery query = new SpanOrQuery(new SpanQuery[] {near1, near2});
+    SpanOrQuery query = new SpanOrQuery(near1, near2);
 
     checkHits(query, new int[]
       {33, 47, 133, 147, 233, 247, 333, 347, 433, 447, 533, 547, 633, 647, 733,
@@ -475,8 +475,8 @@ public class TestBasics extends LuceneTe
     SpanTermQuery t5 = new SpanTermQuery(new Term("field","seven"));
     SpanTermQuery t6 = new SpanTermQuery(new Term("field","six"));
 
-    SpanOrQuery to1 = new SpanOrQuery(new SpanQuery[] {t1, t3});
-    SpanOrQuery to2 = new SpanOrQuery(new SpanQuery[] {t5, t6});
+    SpanOrQuery to1 = new SpanOrQuery(t1, t3);
+    SpanOrQuery to2 = new SpanOrQuery(t5, t6);
     
     SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {to1, to2},
                                             10, true);
@@ -505,8 +505,8 @@ public class TestBasics extends LuceneTe
     SpanTermQuery t5 = new SpanTermQuery(new Term("field","seven"));
     SpanTermQuery t6 = new SpanTermQuery(new Term("field","six"));
 
-    SpanOrQuery to1 = new SpanOrQuery(new SpanQuery[] {tt1, tt2});
-    SpanOrQuery to2 = new SpanOrQuery(new SpanQuery[] {t5, t6});
+    SpanOrQuery to1 = new SpanOrQuery(tt1, tt2);
+    SpanOrQuery to2 = new SpanOrQuery(t5, t6);
     
     SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {to1, to2},
                                             100, true);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java Tue Nov 30 11:22:39 2010
@@ -146,9 +146,8 @@ public class TestFieldMaskingSpanQuery e
       (new SpanTermQuery(new Term("last", "sally")) {
           @Override
           public Query rewrite(IndexReader reader) {
-            return new SpanOrQuery(new SpanQuery[] {
-              new SpanTermQuery(new Term("first", "sally")),
-              new SpanTermQuery(new Term("first", "james")) });
+            return new SpanOrQuery(new SpanTermQuery(new Term("first", "sally")),
+                new SpanTermQuery(new Term("first", "james")));
           }
         }, "first");
 
@@ -252,8 +251,7 @@ public class TestFieldMaskingSpanQuery e
   public void testSpans0() throws Exception {
     SpanQuery q1 = new SpanTermQuery(new Term("gender", "female"));
     SpanQuery q2 = new SpanTermQuery(new Term("first",  "james"));
-    SpanQuery q  = new SpanOrQuery(new SpanQuery[]
-      { q1, new FieldMaskingSpanQuery(q2, "gender")});
+    SpanQuery q  = new SpanOrQuery(q1, new FieldMaskingSpanQuery(q2, "gender"));
     check(q, new int[] { 0, 1, 2, 3, 4 });
   
     Spans span = q.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
@@ -291,7 +289,7 @@ public class TestFieldMaskingSpanQuery e
   public void testSpans1() throws Exception {
     SpanQuery q1 = new SpanTermQuery(new Term("first", "sally"));
     SpanQuery q2 = new SpanTermQuery(new Term("first", "james"));
-    SpanQuery qA = new SpanOrQuery(new SpanQuery[] { q1, q2 });
+    SpanQuery qA = new SpanOrQuery(q1, q2);
     SpanQuery qB = new FieldMaskingSpanQuery(qA, "id");
                                             
     check(qA, new int[] { 0, 1, 2, 4 });
@@ -311,8 +309,7 @@ public class TestFieldMaskingSpanQuery e
   public void testSpans2() throws Exception {
     SpanQuery qA1 = new SpanTermQuery(new Term("gender", "female"));
     SpanQuery qA2 = new SpanTermQuery(new Term("first",  "james"));
-    SpanQuery qA  = new SpanOrQuery(new SpanQuery[]
-      { qA1, new FieldMaskingSpanQuery(qA2, "gender")});
+    SpanQuery qA  = new SpanOrQuery(qA1, new FieldMaskingSpanQuery(qA2, "gender"));
     SpanQuery qB  = new SpanTermQuery(new Term("last",   "jones"));
     SpanQuery q   = new SpanNearQuery(new SpanQuery[]
       { new FieldMaskingSpanQuery(qA, "id"),

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java Tue Nov 30 11:22:39 2010
@@ -331,8 +331,8 @@ public class TestSpans extends LuceneTes
     Spans spans = orSpans(new String[0]);
     assertFalse("empty next", spans.next());
 
-    SpanOrQuery a = new SpanOrQuery( new SpanQuery[0] );
-    SpanOrQuery b = new SpanOrQuery( new SpanQuery[0] );
+    SpanOrQuery a = new SpanOrQuery();
+    SpanOrQuery b = new SpanOrQuery();
     assertTrue("empty should equal", a.equals(b));
   }
 

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockDirectoryWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockDirectoryWrapper.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockDirectoryWrapper.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockDirectoryWrapper.java Tue Nov 30 11:22:39 2010
@@ -97,17 +97,6 @@ public class MockDirectoryWrapper extend
     preventDoubleWrite = value;
   }
 
-  @Deprecated
-  @Override
-  public void sync(String name) throws IOException {
-    maybeYield();
-    maybeThrowDeterministicException();
-    if (crashed)
-      throw new IOException("cannot sync after crash");
-    unSyncedFiles.remove(name);
-    delegate.sync(name);
-  }
-
   @Override
   public synchronized void sync(Collection<String> names) throws IOException {
     maybeYield();

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java Tue Nov 30 11:22:39 2010
@@ -22,6 +22,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Random;
 
@@ -243,10 +244,12 @@ public class TestBufferedIndexInput exte
       File indexDir = new File(TEMP_DIR, "testSetBufferSize");
       MockFSDirectory dir = new MockFSDirectory(indexDir, random);
       try {
-        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer())
-          .setOpenMode(OpenMode.CREATE));
-        ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false);
+        IndexWriter writer = new IndexWriter(
+            dir,
+            new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+                setOpenMode(OpenMode.CREATE).
+                setMergePolicy(newLogMergePolicy(false))
+        );
         for(int i=0;i<37;i++) {
           Document doc = new Document();
           doc.add(newField("content", "aaa bbb ccc ddd" + i, Field.Store.YES, Field.Index.ANALYZED));
@@ -366,12 +369,13 @@ public class TestBufferedIndexInput exte
       {
         return dir.listAll();
       }
-
+      @Override
+      public void sync(Collection<String> names) throws IOException {
+        dir.sync(names);
+      }
       @Override
       public long fileLength(String name) throws IOException {
         return dir.fileLength(name);
       }
-
-
     }
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java Tue Nov 30 11:22:39 2010
@@ -44,9 +44,11 @@ public class TestFileSwitchDirectory ext
     Directory secondaryDir = new MockDirectoryWrapper(random, new RAMDirectory());
     
     FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true);
-    IndexWriter writer = new IndexWriter(fsd, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false);
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false);
+    IndexWriter writer = new IndexWriter(
+        fsd,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMergePolicy(newLogMergePolicy(false))
+    );
     TestIndexWriterReader.createIndexNoClose(true, "ram", writer);
     IndexReader reader = IndexReader.open(writer);
     assertEquals(100, reader.maxDoc());

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java Tue Nov 30 11:22:39 2010
@@ -17,16 +17,26 @@ package org.apache.lucene.util;
  * limitations under the License.
  */
 
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Index;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field.TermVector;
-import org.apache.lucene.index.ConcurrentMergeScheduler;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.LogDocMergePolicy;
-import org.apache.lucene.index.LogMergePolicy;
-import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.*;
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.index.codecs.mockintblock.MockFixedIntBlockCodec;
@@ -43,15 +53,7 @@ import org.apache.lucene.search.FieldCac
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
-import org.junit.Assume;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.*;
 import org.junit.rules.TestWatchman;
 import org.junit.runner.Description;
 import org.junit.runner.RunWith;
@@ -62,30 +64,6 @@ import org.junit.runners.BlockJUnit4Clas
 import org.junit.runners.model.FrameworkMethod;
 import org.junit.runners.model.InitializationError;
 
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.lang.annotation.Documented;
-import java.lang.annotation.Inherited;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.IdentityHashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Random;
-import java.util.TimeZone;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
 /**
  * Base class for all Lucene unit tests, Junit3 or Junit4 variant.
  * <p>
@@ -177,6 +155,21 @@ public abstract class LuceneTestCase ext
   /** Used to track if setUp and tearDown are called correctly from subclasses */
   private boolean setup;
 
+  /**
+   * Some tests expect the directory to contain a single segment, and want to do tests on that segment's reader.
+   * This is a utility method to help them.
+   */
+  public static SegmentReader getOnlySegmentReader(IndexReader reader) {
+    if (reader instanceof SegmentReader)
+      return (SegmentReader) reader;
+
+    IndexReader[] subReaders = reader.getSequentialSubReaders();
+    if (subReaders.length != 1)
+      throw new IllegalArgumentException(reader + " has " + subReaders.length + " segments instead of exactly one");
+
+    return (SegmentReader) subReaders[0];
+  }
+
   private static class UncaughtExceptionEntry {
     public final Thread thread;
     public final Throwable exception;
@@ -308,7 +301,7 @@ public abstract class LuceneTestCase ext
     }
   }
 
-  /** @deprecated: until we fix no-fork problems in solr tests */
+  /** @deprecated (4.0) until we fix no-fork problems in solr tests */
   @Deprecated
   private static List<String> testClassesRun = new ArrayList<String>();
   
@@ -543,8 +536,7 @@ public abstract class LuceneTestCase ext
     }
   }
   
-  // These deprecated methods should be removed soon, when all tests using no Epsilon are fixed:
-  
+  // @deprecated (4.0) These deprecated methods should be removed soon, when all tests using no Epsilon are fixed:
   @Deprecated
   static public void assertEquals(double expected, double actual) {
     assertEquals(null, expected, actual);
@@ -608,6 +600,10 @@ public abstract class LuceneTestCase ext
     Assume.assumeNoException(e == null ? null : new TestIgnoredException(msg, e));
   }
  
+  public static <T> Set<T> asSet(T... args) {
+    return new HashSet<T>(Arrays.asList(args));
+  }
+
   /**
   * Convenience method for logging an iterator.
    *
@@ -647,9 +643,6 @@ public abstract class LuceneTestCase ext
   public static IndexWriterConfig newIndexWriterConfig(Random r, Version v, Analyzer a) {
     IndexWriterConfig c = new IndexWriterConfig(v, a);
     if (r.nextBoolean()) {
-      c.setMergePolicy(new LogDocMergePolicy());
-    }
-    if (r.nextBoolean()) {
       c.setMergeScheduler(new SerialMergeScheduler());
     }
     if (r.nextBoolean()) {
@@ -665,24 +658,52 @@ public abstract class LuceneTestCase ext
     if (r.nextBoolean()) {
       c.setMaxThreadStates(_TestUtil.nextInt(r, 1, 20));
     }
-    
-    if (c.getMergePolicy() instanceof LogMergePolicy) {
-      LogMergePolicy logmp = (LogMergePolicy) c.getMergePolicy();
-      logmp.setUseCompoundDocStore(r.nextBoolean());
-      logmp.setUseCompoundFile(r.nextBoolean());
-      logmp.setCalibrateSizeByDeletes(r.nextBoolean());
-      if (r.nextInt(3) == 2) {
-        logmp.setMergeFactor(2);
-      } else {
-        logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 20));
-      }
-    }
-    
+
+    c.setMergePolicy(newLogMergePolicy(r));
+
     c.setReaderPooling(r.nextBoolean());
     c.setReaderTermsIndexDivisor(_TestUtil.nextInt(r, 1, 4));
     return c;
   }
 
+  public static LogMergePolicy newLogMergePolicy() {
+    return newLogMergePolicy(random);
+  }
+
+  public static LogMergePolicy newLogMergePolicy(Random r) {
+    LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
+    logmp.setUseCompoundDocStore(r.nextBoolean());
+    logmp.setUseCompoundFile(r.nextBoolean());
+    logmp.setCalibrateSizeByDeletes(r.nextBoolean());
+    if (r.nextInt(3) == 2) {
+      logmp.setMergeFactor(2);
+    } else {
+      logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 20));
+    }
+    return logmp;
+  }
+
+  public static LogMergePolicy newLogMergePolicy(boolean useCFS) {
+    LogMergePolicy logmp = newLogMergePolicy();
+    logmp.setUseCompoundFile(useCFS);
+    logmp.setUseCompoundDocStore(useCFS);
+    return logmp;
+  }
+
+  public static LogMergePolicy newLogMergePolicy(boolean useCFS, int mergeFactor) {
+    LogMergePolicy logmp = newLogMergePolicy();
+    logmp.setUseCompoundFile(useCFS);
+    logmp.setUseCompoundDocStore(useCFS);
+    logmp.setMergeFactor(mergeFactor);
+    return logmp;
+  }
+
+  public static LogMergePolicy newLogMergePolicy(int mergeFactor) {
+    LogMergePolicy logmp = newLogMergePolicy();
+    logmp.setMergeFactor(mergeFactor);
+    return logmp;
+  }
+
   /**
   * Returns a new Directory instance. Use this when the test does not
    * care about the specific Directory implementation (most tests).

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java Tue Nov 30 11:22:39 2010
@@ -70,7 +70,7 @@ public class TestFieldCacheSanityChecker
     wB.close();
     readerA = IndexReader.open(dirA, true);
     readerB = IndexReader.open(dirB, true);
-    readerX = new MultiReader(new IndexReader[] { readerA, readerB });
+    readerX = new MultiReader(readerA, readerB);
   }
 
   @Override

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java Tue Nov 30 11:22:39 2010
@@ -17,33 +17,10 @@ package org.apache.lucene.util;
  * limitations under the License.
  */
 
-import java.nio.CharBuffer;
-import java.nio.ByteBuffer;
-
 public class TestIndexableBinaryStringTools extends LuceneTestCase {
   private static final int NUM_RANDOM_TESTS = 2000 * RANDOM_MULTIPLIER;
   private static final int MAX_RANDOM_BINARY_LENGTH = 300 * RANDOM_MULTIPLIER;
   
-  /** @deprecated remove this test for Lucene 4.0 */
-  @Deprecated
-  public void testSingleBinaryRoundTripNIO() {
-    byte[] binary = new byte[] 
-      { (byte)0x23, (byte)0x98, (byte)0x13, (byte)0xE4, (byte)0x76, (byte)0x41,
-        (byte)0xB2, (byte)0xC9, (byte)0x7F, (byte)0x0A, (byte)0xA6, (byte)0xD8 };
-
-    ByteBuffer binaryBuf = ByteBuffer.wrap(binary);
-    CharBuffer encoded = IndexableBinaryStringTools.encode(binaryBuf);
-    ByteBuffer decoded = IndexableBinaryStringTools.decode(encoded);
-    assertEquals("Round trip decode/decode returned different results:"
-                 + System.getProperty("line.separator")
-                 + "original: " + binaryDumpNIO(binaryBuf)
-                 + System.getProperty("line.separator")
-                 + " encoded: " + charArrayDumpNIO(encoded)
-                 + System.getProperty("line.separator")
-                 + " decoded: " + binaryDumpNIO(decoded),
-                 binaryBuf, decoded);
-  }
-  
   public void testSingleBinaryRoundTrip() {
     byte[] binary = new byte[] { (byte) 0x23, (byte) 0x98, (byte) 0x13,
         (byte) 0xE4, (byte) 0x76, (byte) 0x41, (byte) 0xB2, (byte) 0xC9,
@@ -71,64 +48,6 @@ public class TestIndexableBinaryStringTo
         binaryDump(binary, binary.length), binaryDump(decoded, decoded.length));
   }
   
-  /** @deprecated remove this test for Lucene 4.0 */
-  @Deprecated
-  public void testEncodedSortabilityNIO() {
-    byte[] originalArray1 = new byte[MAX_RANDOM_BINARY_LENGTH];
-    ByteBuffer originalBuf1 = ByteBuffer.wrap(originalArray1);
-    char[] originalString1 = new char[MAX_RANDOM_BINARY_LENGTH];
-    CharBuffer originalStringBuf1 = CharBuffer.wrap(originalString1);
-    char[] encoded1 = new char[IndexableBinaryStringTools.getEncodedLength(originalBuf1)];
-    CharBuffer encodedBuf1 = CharBuffer.wrap(encoded1);
-    byte[] original2 = new byte[MAX_RANDOM_BINARY_LENGTH];
-    ByteBuffer originalBuf2 = ByteBuffer.wrap(original2);
-    char[] originalString2 = new char[MAX_RANDOM_BINARY_LENGTH];
-    CharBuffer originalStringBuf2 = CharBuffer.wrap(originalString2);
-    char[] encoded2 = new char[IndexableBinaryStringTools.getEncodedLength(originalBuf2)];
-    CharBuffer encodedBuf2 = CharBuffer.wrap(encoded2);
-    for (int testNum = 0 ; testNum < NUM_RANDOM_TESTS ; ++testNum) {
-      int numBytes1 = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1
-      originalBuf1.limit(numBytes1);
-      originalStringBuf1.limit(numBytes1);
-      
-      for (int byteNum = 0 ; byteNum < numBytes1 ; ++byteNum) {
-        int randomInt = random.nextInt(0x100);
-        originalArray1[byteNum] = (byte) randomInt;
-        originalString1[byteNum] = (char)randomInt;
-      }
-      
-      int numBytes2 = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1; // Min == 1
-      originalBuf2.limit(numBytes2);
-      originalStringBuf2.limit(numBytes2);
-      for (int byteNum = 0 ; byteNum < numBytes2 ; ++byteNum) {
-        int randomInt = random.nextInt(0x100);
-        original2[byteNum] = (byte)randomInt;
-        originalString2[byteNum] = (char)randomInt;
-      }
-      int originalComparison = originalStringBuf1.compareTo(originalStringBuf2);
-      originalComparison = originalComparison < 0 ? -1 : originalComparison > 0 ? 1 : 0;
-      
-      IndexableBinaryStringTools.encode(originalBuf1, encodedBuf1);
-      IndexableBinaryStringTools.encode(originalBuf2, encodedBuf2);
-      
-      int encodedComparison = encodedBuf1.compareTo(encodedBuf2);
-      encodedComparison = encodedComparison < 0 ? -1 : encodedComparison > 0 ? 1 : 0;
-      
-      assertEquals("Test #" + (testNum + 1) 
-                   + ": Original bytes and encoded chars compare differently:"
-                   + System.getProperty("line.separator")
-                   + " binary 1: " + binaryDumpNIO(originalBuf1)
-                   + System.getProperty("line.separator")
-                   + " binary 2: " + binaryDumpNIO(originalBuf2)
-                   + System.getProperty("line.separator")
-                   + "encoded 1: " + charArrayDumpNIO(encodedBuf1)
-                   + System.getProperty("line.separator")
-                   + "encoded 2: " + charArrayDumpNIO(encodedBuf2)
-                   + System.getProperty("line.separator"),
-                   originalComparison, encodedComparison);
-    }
-  }
-
   public void testEncodedSortability() {
     byte[] originalArray1 = new byte[MAX_RANDOM_BINARY_LENGTH];
     char[] originalString1 = new char[MAX_RANDOM_BINARY_LENGTH];
@@ -192,16 +111,6 @@ public class TestIndexableBinaryStringTo
     }
   }
 
-  /** @deprecated remove this test for Lucene 4.0 */
-  @Deprecated
-  public void testEmptyInputNIO() {
-    byte[] binary = new byte[0];
-    CharBuffer encoded = IndexableBinaryStringTools.encode(ByteBuffer.wrap(binary));
-    ByteBuffer decoded = IndexableBinaryStringTools.decode(encoded);
-    assertNotNull("decode() returned null", decoded);
-    assertEquals("decoded empty input was not empty", decoded.limit(), 0);
-  }
-  
   public void testEmptyInput() {
     byte[] binary = new byte[0];
 
@@ -220,23 +129,6 @@ public class TestIndexableBinaryStringTo
     assertEquals("decoded empty input was not empty", decoded.length, 0);
   }
   
-  /** @deprecated remove this test for Lucene 4.0 */
-  @Deprecated
-  public void testAllNullInputNIO() {
-    byte[] binary = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    ByteBuffer binaryBuf = ByteBuffer.wrap(binary);
-    CharBuffer encoded = IndexableBinaryStringTools.encode(binaryBuf);
-    assertNotNull("encode() returned null", encoded);
-    ByteBuffer decodedBuf = IndexableBinaryStringTools.decode(encoded);
-    assertNotNull("decode() returned null", decodedBuf);
-    assertEquals("Round trip decode/decode returned different results:"
-                 + System.getProperty("line.separator")
-                 + "  original: " + binaryDumpNIO(binaryBuf)
-                 + System.getProperty("line.separator")
-                 + "decodedBuf: " + binaryDumpNIO(decodedBuf),
-                 binaryBuf, decodedBuf);
-  }
-  
   public void testAllNullInput() {
     byte[] binary = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
 
@@ -260,35 +152,6 @@ public class TestIndexableBinaryStringTo
         binaryDump(binary, binary.length), binaryDump(decoded, decoded.length));
   }
   
-  /** @deprecated remove this test for Lucene 4.0 */
-  @Deprecated
-  public void testRandomBinaryRoundTripNIO() {
-    byte[] binary = new byte[MAX_RANDOM_BINARY_LENGTH];
-    ByteBuffer binaryBuf = ByteBuffer.wrap(binary);
-    char[] encoded = new char[IndexableBinaryStringTools.getEncodedLength(binaryBuf)];
-    CharBuffer encodedBuf = CharBuffer.wrap(encoded);
-    byte[] decoded = new byte[MAX_RANDOM_BINARY_LENGTH];
-    ByteBuffer decodedBuf = ByteBuffer.wrap(decoded);
-    for (int testNum = 0 ; testNum < NUM_RANDOM_TESTS ; ++testNum) {
-      int numBytes = random.nextInt(MAX_RANDOM_BINARY_LENGTH - 1) + 1 ; // Min == 1
-      binaryBuf.limit(numBytes);
-      for (int byteNum = 0 ; byteNum < numBytes ; ++byteNum) {
-        binary[byteNum] = (byte)random.nextInt(0x100);
-      }
-      IndexableBinaryStringTools.encode(binaryBuf, encodedBuf);
-      IndexableBinaryStringTools.decode(encodedBuf, decodedBuf);
-      assertEquals("Test #" + (testNum + 1) 
-                   + ": Round trip decode/decode returned different results:"
-                   + System.getProperty("line.separator")
-                   + "  original: " + binaryDumpNIO(binaryBuf)
-                   + System.getProperty("line.separator")
-                   + "encodedBuf: " + charArrayDumpNIO(encodedBuf)
-                   + System.getProperty("line.separator")
-                   + "decodedBuf: " + binaryDumpNIO(decodedBuf),
-                   binaryBuf, decodedBuf);
-    }
-  }
-
   public void testRandomBinaryRoundTrip() {
     byte[] binary = new byte[MAX_RANDOM_BINARY_LENGTH];
     char[] encoded = new char[MAX_RANDOM_BINARY_LENGTH * 10];
@@ -323,13 +186,6 @@ public class TestIndexableBinaryStringTo
     }
   }
   
-  /** @deprecated remove this method for Lucene 4.0 */
-  @Deprecated
-  public String binaryDumpNIO(ByteBuffer binaryBuf) {
-    return binaryDump(binaryBuf.array(), 
-        binaryBuf.limit() - binaryBuf.arrayOffset());
-  }
-
   public String binaryDump(byte[] binary, int numBytes) {
     StringBuilder buf = new StringBuilder();
     for (int byteNum = 0 ; byteNum < numBytes ; ++byteNum) {
@@ -344,13 +200,7 @@ public class TestIndexableBinaryStringTo
     }
     return buf.toString();
   }
-  /** @deprecated remove this method for Lucene 4.0 */
-  @Deprecated
-  public String charArrayDumpNIO(CharBuffer charBuf) {
-    return charArrayDump(charBuf.array(), 
-        charBuf.limit() - charBuf.arrayOffset());
-  }
-  
+
   public String charArrayDump(char[] charArray, int numBytes) {
     StringBuilder buf = new StringBuilder();
     for (int charNum = 0 ; charNum < numBytes ; ++charNum) {

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestVersion.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestVersion.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestVersion.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestVersion.java Tue Nov 30 11:22:39 2010
@@ -23,9 +23,9 @@ public class TestVersion extends LuceneT
     for (Version v : Version.values()) {
       assertTrue("LUCENE_CURRENT must be always onOrAfter("+v+")", Version.LUCENE_CURRENT.onOrAfter(v));
     }
-    assertTrue(Version.LUCENE_30.onOrAfter(Version.LUCENE_29));
-    assertTrue(Version.LUCENE_30.onOrAfter(Version.LUCENE_30));
-    assertFalse(Version.LUCENE_29.onOrAfter(Version.LUCENE_30));
+    assertTrue(Version.LUCENE_40.onOrAfter(Version.LUCENE_31));
+    assertTrue(Version.LUCENE_40.onOrAfter(Version.LUCENE_40));
+    assertFalse(Version.LUCENE_30.onOrAfter(Version.LUCENE_31));
   }
 
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/_TestUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/_TestUtil.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/_TestUtil.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/_TestUtil.java Tue Nov 30 11:22:39 2010
@@ -229,7 +229,7 @@ public class _TestUtil {
   // count lowish
   public static void reduceOpenFiles(IndexWriter w) {
     // keep number of open files lowish
-    LogMergePolicy lmp = (LogMergePolicy) w.getMergePolicy();
+    LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
     lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
 
     MergeScheduler ms = w.getConfig().getMergeScheduler();

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java Tue Nov 30 11:22:39 2010
@@ -62,14 +62,6 @@ public final class ArabicAnalyzer extend
   public final static String DEFAULT_STOPWORD_FILE = "stopwords.txt";
 
   /**
-   * The comment character in the stopwords file.  All lines prefixed with this will be ignored
-   * @deprecated use {@link WordlistLoader#getWordSet(File, String)} directly  
-   */
-  // TODO make this private 
-  @Deprecated
-  public static final String STOPWORDS_COMMENT = "#";
-  
-  /**
    * Returns an unmodifiable instance of the default stop-words set.
    * @return an unmodifiable instance of the default stop-words set.
    */
@@ -86,7 +78,7 @@ public final class ArabicAnalyzer extend
 
     static {
       try {
-        DEFAULT_STOP_SET = loadStopwordSet(false, ArabicAnalyzer.class, DEFAULT_STOPWORD_FILE, STOPWORDS_COMMENT);
+        DEFAULT_STOP_SET = loadStopwordSet(false, ArabicAnalyzer.class, DEFAULT_STOPWORD_FILE, "#");
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
@@ -135,33 +127,6 @@ public final class ArabicAnalyzer extend
   }
 
   /**
-   * Builds an analyzer with the given stop words.
-   * @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
-   */
-  @Deprecated
-  public ArabicAnalyzer( Version matchVersion, String... stopwords ) {
-    this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords ));
-  }
-
-  /**
-   * Builds an analyzer with the given stop words.
-   * @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
-   */
-  @Deprecated
-  public ArabicAnalyzer( Version matchVersion, Hashtable<?,?> stopwords ) {
-    this(matchVersion, stopwords.keySet());
-  }
-
-  /**
-   * Builds an analyzer with the given stop words.  Lines can be commented out using {@link #STOPWORDS_COMMENT}
-   * @deprecated use {@link #ArabicAnalyzer(Version, Set)} instead
-   */
-  @Deprecated
-  public ArabicAnalyzer( Version matchVersion, File stopwords ) throws IOException {
-    this(matchVersion, WordlistLoader.getWordSet( stopwords, STOPWORDS_COMMENT));
-  }
-
-  /**
    * Creates
    * {@link org.apache.lucene.analysis.util.ReusableAnalyzerBase.TokenStreamComponents}
    * used to tokenize all the text in the provided {@link Reader}.

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicLetterTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicLetterTokenizer.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicLetterTokenizer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicLetterTokenizer.java Tue Nov 30 11:22:39 2010
@@ -40,8 +40,6 @@ import org.apache.lucene.util.Version;
  * </ul>
  */
 public class ArabicLetterTokenizer extends LetterTokenizer {
-
-  
   /**
    * Construct a new ArabicLetterTokenizer.
    * @param matchVersion Lucene version
@@ -84,41 +82,6 @@ public class ArabicLetterTokenizer exten
   }
   
   /**
-   * Construct a new ArabicLetterTokenizer.
-   * 
-   * @deprecated use {@link #ArabicLetterTokenizer(Version, Reader)} instead. This will
-   *             be removed in Lucene 4.0.
-   */
-  @Deprecated
-  public ArabicLetterTokenizer(Reader in) {
-    super(in);
-  }
-
-  /**
-   * Construct a new ArabicLetterTokenizer using a given {@link AttributeSource}.
-   * 
-   * @deprecated use {@link #ArabicLetterTokenizer(Version, AttributeSource, Reader)}
-   *             instead. This will be removed in Lucene 4.0.
-   */
-  @Deprecated
-  public ArabicLetterTokenizer(AttributeSource source, Reader in) {
-    super(source, in);
-  }
-
-  /**
-   * Construct a new ArabicLetterTokenizer using a given
-   * {@link org.apache.lucene.util.AttributeSource.AttributeFactory}.
-   * 
-   * @deprecated use {@link #ArabicLetterTokenizer(Version, AttributeSource.AttributeFactory, Reader)}
-   *             instead. This will be removed in Lucene 4.0.
-   */
-  @Deprecated
-  public ArabicLetterTokenizer(AttributeFactory factory, Reader in) {
-    super(factory, in);
-  }
-  
-  
-  /** 
    * Allows for Letter category or NonspacingMark category
    * @see org.apache.lucene.analysis.core.LetterTokenizer#isTokenChar(int)
    */

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java Tue Nov 30 11:22:39 2010
@@ -17,7 +17,6 @@ package org.apache.lucene.analysis.bg;
  * limitations under the License.
  */
 
-import java.io.File;
 import java.io.IOException;
 import java.io.Reader;
 import java.util.Set;
@@ -32,7 +31,6 @@ import org.apache.lucene.analysis.standa
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
-import org.apache.lucene.analysis.util.WordlistLoader;
 import org.apache.lucene.util.Version;
 
 /**
@@ -44,7 +42,6 @@ import org.apache.lucene.util.Version;
  * <p>
  */
 public final class BulgarianAnalyzer extends StopwordAnalyzerBase {
-  
   /**
    * File containing default Bulgarian stopwords.
    * 
@@ -53,16 +50,7 @@ public final class BulgarianAnalyzer ext
    * BSD-Licensed.
    */
   public final static String DEFAULT_STOPWORD_FILE = "stopwords.txt";
-  
-  /**
-   * The comment character in the stopwords file. All lines prefixed with this
-   * will be ignored
-   * @deprecated use {@link WordlistLoader#getWordSet(File, String)} directly
-   */
-  //TODO make this private
-  @Deprecated
-  public static final String STOPWORDS_COMMENT = "#";
-  
+
   /**
    * Returns an unmodifiable instance of the default stop-words set.
    * 
@@ -81,7 +69,7 @@ public final class BulgarianAnalyzer ext
     
     static {
       try {
-        DEFAULT_STOP_SET = loadStopwordSet(false, BulgarianAnalyzer.class, DEFAULT_STOPWORD_FILE, STOPWORDS_COMMENT);
+        DEFAULT_STOP_SET = loadStopwordSet(false, BulgarianAnalyzer.class, DEFAULT_STOPWORD_FILE, "#");
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java Tue Nov 30 11:22:39 2010
@@ -17,20 +17,17 @@ package org.apache.lucene.analysis.br;
  * limitations under the License.
  */
 
-import java.io.File;
 import java.io.IOException;
 import java.io.Reader;
 import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.LowerCaseFilter;
 import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.miscellaneous.KeywordMarkerFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.standard.StandardFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -51,34 +48,6 @@ import org.apache.lucene.util.Version;
  * dependent settings as {@link StandardAnalyzer}.</p>
  */
 public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
-
-	/**
-	 * List of typical Brazilian Portuguese stopwords.
-	 * @deprecated use {@link #getDefaultStopSet()} instead
-	 */
-  // TODO make this private in 3.1
-	@Deprecated
-	public final static String[] BRAZILIAN_STOP_WORDS = {
-      "a","ainda","alem","ambas","ambos","antes",
-      "ao","aonde","aos","apos","aquele","aqueles",
-      "as","assim","com","como","contra","contudo",
-      "cuja","cujas","cujo","cujos","da","das","de",
-      "dela","dele","deles","demais","depois","desde",
-      "desta","deste","dispoe","dispoem","diversa",
-      "diversas","diversos","do","dos","durante","e",
-      "ela","elas","ele","eles","em","entao","entre",
-      "essa","essas","esse","esses","esta","estas",
-      "este","estes","ha","isso","isto","logo","mais",
-      "mas","mediante","menos","mesma","mesmas","mesmo",
-      "mesmos","na","nas","nao","nas","nem","nesse","neste",
-      "nos","o","os","ou","outra","outras","outro","outros",
-      "pelas","pelas","pelo","pelos","perante","pois","por",
-      "porque","portanto","proprio","propios","quais","qual",
-      "qualquer","quando","quanto","que","quem","quer","se",
-      "seja","sem","sendo","seu","seus","sob","sobre","sua",
-      "suas","tal","tambem","teu","teus","toda","todas","todo",
-      "todos","tua","tuas","tudo","um","uma","umas","uns"};
-
   /** File containing default Brazilian Portuguese stopwords. */
   public final static String DEFAULT_STOPWORD_FILE = "stopwords.txt";
   
@@ -110,7 +79,6 @@ public final class BrazilianAnalyzer ext
 	/**
 	 * Contains words that should be indexed but not stemmed.
 	 */
-	// TODO make this private in 3.1
 	private Set<?> excltable = Collections.emptySet();
 	
 	/**
@@ -147,62 +115,6 @@ public final class BrazilianAnalyzer ext
         .copy(matchVersion, stemExclusionSet));
   }
 
-	/**
-	 * Builds an analyzer with the given stop words.
-	 * @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
-	 */
-  @Deprecated
-  public BrazilianAnalyzer(Version matchVersion, String... stopwords) {
-    this(matchVersion, StopFilter.makeStopSet(matchVersion, stopwords));
-  }
-
-  /**
-   * Builds an analyzer with the given stop words. 
-   * @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
-   */
-  @Deprecated
-  public BrazilianAnalyzer(Version matchVersion, Map<?,?> stopwords) {
-    this(matchVersion, stopwords.keySet());
-  }
-
-  /**
-   * Builds an analyzer with the given stop words.
-   * @deprecated use {@link #BrazilianAnalyzer(Version, Set)} instead
-   */
-  @Deprecated
-  public BrazilianAnalyzer(Version matchVersion, File stopwords)
-      throws IOException {
-    this(matchVersion, WordlistLoader.getWordSet(stopwords));
-  }
-
-	/**
-	 * Builds an exclusionlist from an array of Strings.
-	 * @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
-	 */
-	@Deprecated
-	public void setStemExclusionTable( String... exclusionlist ) {
-		excltable = StopFilter.makeStopSet( matchVersion, exclusionlist );
-		setPreviousTokenStream(null); // force a new stemmer to be created
-	}
-	/**
-	 * Builds an exclusionlist from a {@link Map}.
-	 * @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
-	 */
-	@Deprecated
-	public void setStemExclusionTable( Map<?,?> exclusionlist ) {
-		excltable = new HashSet<Object>(exclusionlist.keySet());
-		setPreviousTokenStream(null); // force a new stemmer to be created
-	}
-	/**
-	 * Builds an exclusionlist from the words contained in the given file.
-	 * @deprecated use {@link #BrazilianAnalyzer(Version, Set, Set)} instead
-	 */
-	@Deprecated
-	public void setStemExclusionTable( File exclusionlist ) throws IOException {
-		excltable = WordlistLoader.getWordSet( exclusionlist );
-		setPreviousTokenStream(null); // force a new stemmer to be created
-	}
-
   /**
    * Creates
    * {@link org.apache.lucene.analysis.util.ReusableAnalyzerBase.TokenStreamComponents}

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java Tue Nov 30 11:22:39 2010
@@ -20,11 +20,11 @@ package org.apache.lucene.analysis.br;
 import java.io.IOException;
 import java.util.Set;
 
-import org.apache.lucene.analysis.miscellaneous.KeywordMarkerFilter; // for javadoc
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
+import org.apache.lucene.analysis.miscellaneous.KeywordMarkerFilter;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
 
 /**
  * A {@link TokenFilter} that applies {@link BrazilianStemmer}.
@@ -55,19 +55,6 @@ public final class BrazilianStemFilter e
     super(in);
   }
   
-  /**
-   * Creates a new BrazilianStemFilter 
-   * 
-   * @param in the source {@link TokenStream} 
-   * @param exclusiontable a set of terms that should be prevented from being stemmed.
-   * @deprecated use {@link KeywordAttribute} with {@link KeywordMarkerFilter} instead.
-   */
-  @Deprecated
-  public BrazilianStemFilter(TokenStream in, Set<?> exclusiontable) {
-    this(in);
-    this.exclusions = exclusiontable;
-  }
-
   @Override
   public boolean incrementToken() throws IOException {
     if (input.incrementToken()) {

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java Tue Nov 30 11:22:39 2010
@@ -17,17 +17,16 @@ package org.apache.lucene.analysis.cjk;
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Set;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.StopFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
 import org.apache.lucene.util.Version;
 
-import java.io.Reader;
-import java.util.Arrays;
-import java.util.Set;
-
 
 /**
  * An {@link Analyzer} that tokenizes text with {@link CJKTokenizer} and
@@ -35,28 +34,13 @@ import java.util.Set;
  *
  */
 public final class CJKAnalyzer extends StopwordAnalyzerBase {
-  //~ Static fields/initializers ---------------------------------------------
-
   /**
-   * An array containing some common English words that are not usually
+   * File containing default CJK stopwords.
+   * <p/>
+   * Currently it contains some common English words that are not usually
    * useful for searching and some double-byte interpunctions.
-   * @deprecated use {@link #getDefaultStopSet()} instead
    */
-  // TODO make this final in 3.1 -
-  // this might be revised and merged with StopFilter stop words too
-  @Deprecated
-  public final static String[] STOP_WORDS = {
-    "a", "and", "are", "as", "at", "be",
-    "but", "by", "for", "if", "in",
-    "into", "is", "it", "no", "not",
-    "of", "on", "or", "s", "such", "t",
-    "that", "the", "their", "then",
-    "there", "these", "they", "this",
-    "to", "was", "will", "with", "",
-    "www"
-  };
-
-  //~ Instance fields --------------------------------------------------------
+  public final static String DEFAULT_STOPWORD_FILE = "stopwords.txt";
 
   /**
    * Returns an unmodifiable instance of the default stop-words set.
@@ -67,12 +51,18 @@ public final class CJKAnalyzer extends S
   }
   
   private static class DefaultSetHolder {
-    static final Set<?> DEFAULT_STOP_SET = CharArraySet
-        .unmodifiableSet(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(STOP_WORDS),
-            false));
-  }
+    static final Set<?> DEFAULT_STOP_SET;
 
-  //~ Constructors -----------------------------------------------------------
+    static {
+      try {
+        DEFAULT_STOP_SET = loadStopwordSet(false, CJKAnalyzer.class, DEFAULT_STOPWORD_FILE, "#");
+      } catch (IOException ex) {
+        // default set should always be present as it is part of the
+        // distribution (JAR)
+        throw new RuntimeException("Unable to load default stopword set");
+      }
+    }
+  }
 
   /**
    * Builds an analyzer which removes words in {@link #STOP_WORDS}.
@@ -93,19 +83,6 @@ public final class CJKAnalyzer extends S
     super(matchVersion, stopwords);
   }
 
-  /**
-   * Builds an analyzer which removes words in the provided array.
-   *
-   * @param stopWords stop word array
-   * @deprecated use {@link #CJKAnalyzer(Version, Set)} instead
-   */
-  @Deprecated
-  public CJKAnalyzer(Version matchVersion, String... stopWords) {
-    super(matchVersion, StopFilter.makeStopSet(matchVersion, stopWords));
-  }
-
-  //~ Methods ----------------------------------------------------------------
-
   @Override
   protected TokenStreamComponents createComponents(String fieldName,
       Reader reader) {

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java Tue Nov 30 11:22:39 2010
@@ -27,8 +27,8 @@ import org.apache.lucene.analysis.Tokeni
 /**
  * An {@link Analyzer} that tokenizes text with {@link ChineseTokenizer} and
  * filters with {@link ChineseFilter}
- * @deprecated Use {@link StandardAnalyzer} instead, which has the same functionality.
- * This analyzer will be removed in Lucene 4.0
+ * @deprecated (3.1) Use {@link StandardAnalyzer} instead, which has the same functionality.
+ * This analyzer will be removed in Lucene 5.0
  */
 @Deprecated
 public final class ChineseAnalyzer extends ReusableAnalyzerBase {

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java Tue Nov 30 11:22:39 2010
@@ -42,8 +42,8 @@ import org.apache.lucene.util.Version;
  * </ol>
  * 
  * @version 1.0
- * @deprecated Use {@link StopFilter} instead, which has the same functionality.
- * This filter will be removed in Lucene 4.0
+ * @deprecated (3.1) Use {@link StopFilter} instead, which has the same functionality.
+ * This filter will be removed in Lucene 5.0
  */
 @Deprecated
 public final class ChineseFilter extends TokenFilter {

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java Tue Nov 30 11:22:39 2010
@@ -53,8 +53,8 @@ import org.apache.lucene.util.AttributeS
  * CJKTokenizer will not work.
  * </p>
  * @version 1.0
- * @deprecated Use {@link StandardTokenizer} instead, which has the same functionality.
- * This filter will be removed in Lucene 4.0
+ * @deprecated (3.1) Use {@link StandardTokenizer} instead, which has the same functionality.
+ * This filter will be removed in Lucene 5.0
  */
 @Deprecated
 public final class ChineseTokenizer extends Tokenizer {

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilter.java Tue Nov 30 11:22:39 2010
@@ -61,18 +61,6 @@ public final class CommonGramsFilter ext
   private boolean lastWasCommon;
   private State savedState;
 
-  /** @deprecated Use {@link #CommonGramsFilter(Version, TokenStream, Set)} instead */
-  @Deprecated
-  public CommonGramsFilter(TokenStream input, Set<?> commonWords) {
-    this(Version.LUCENE_29, input, commonWords);
-  }
-  
-  /** @deprecated Use {@link #CommonGramsFilter(Version, TokenStream, Set, boolean)} instead */
-  @Deprecated
-  public CommonGramsFilter(TokenStream input, Set<?> commonWords, boolean ignoreCase) {
-    this(Version.LUCENE_29, input, commonWords, ignoreCase);
-  }
-  
   /**
    * Construct a token stream filtering the given input using a Set of common
    * words to create bigrams. Outputs both unigrams with position increment and
@@ -114,66 +102,6 @@ public final class CommonGramsFilter ext
   }
 
   /**
-   * Construct a token stream filtering the given input using an Array of common
-   * words to create bigrams.
-   * 
-   * @param input Tokenstream in filter chain
-   * @param commonWords words to be used in constructing bigrams
-   * @deprecated Use {@link #CommonGramsFilter(Version, TokenStream, Set)} instead.
-   */
-  @Deprecated
-  public CommonGramsFilter(TokenStream input, String[] commonWords) {
-    this(input, commonWords, false);
-  }
-
-  /**
-   * Construct a token stream filtering the given input using an Array of common
-   * words to create bigrams and is case-sensitive if ignoreCase is false.
-   * 
-   * @param input Tokenstream in filter chain
-   * @param commonWords words to be used in constructing bigrams
-   * @param ignoreCase -Ignore case when constructing bigrams for common words.
-   * @deprecated Use {@link #CommonGramsFilter(Version, TokenStream, Set, boolean)} instead.
-   */
-  @Deprecated
-  public CommonGramsFilter(TokenStream input, String[] commonWords, boolean ignoreCase) {
-    super(input);
-    this.commonWords = makeCommonSet(commonWords, ignoreCase);
-  }
-
-  /**
-   * Build a CharArraySet from an array of common words, appropriate for passing
-   * into the CommonGramsFilter constructor. This permits this commonWords
-   * construction to be cached once when an Analyzer is constructed.
-   *
-   * @param commonWords Array of common words which will be converted into the CharArraySet
-   * @return CharArraySet of the given words, appropriate for passing into the CommonGramFilter constructor
-   * @see #makeCommonSet(java.lang.String[], boolean) passing false to ignoreCase
-   * @deprecated create a CharArraySet with CharArraySet instead
-   */
-  @Deprecated
-  public static CharArraySet makeCommonSet(String[] commonWords) {
-    return makeCommonSet(commonWords, false);
-  }
-
-  /**
-   * Build a CharArraySet from an array of common words, appropriate for passing
-   * into the CommonGramsFilter constructor,case-sensitive if ignoreCase is
-   * false.
-   * 
-   * @param commonWords Array of common words which will be converted into the CharArraySet
-   * @param ignoreCase If true, all words are lower cased first.
-   * @return a Set containing the words
-   * @deprecated create a CharArraySet with CharArraySet instead
-   */
-  @Deprecated
-  public static CharArraySet makeCommonSet(String[] commonWords, boolean ignoreCase) {
-    CharArraySet commonSet = new CharArraySet(commonWords.length, ignoreCase);
-    commonSet.addAll(Arrays.asList(commonWords));
-    return commonSet;
-  }
-
-  /**
    * Inserts bigrams for common words into a token stream. For each input token,
    * output the token. If the token and/or the following token are in the list
    * of common words also output a bigram with position increment 0 and

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java Tue Nov 30 11:22:39 2010
@@ -78,54 +78,7 @@ public abstract class CompoundWordTokenF
   private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
   
   private final Token wrapper = new Token();
-  /**
-   * @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, String[], int, int, int, boolean)} instead
-   */
-  @Deprecated
-  protected CompoundWordTokenFilterBase(TokenStream input, String[] dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
-    this(Version.LUCENE_30, input, makeDictionary(dictionary),minWordSize,minSubwordSize,maxSubwordSize, onlyLongestMatch);
-  }
-  
-  /**
-   * @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, String[], boolean)} instead
-   */
-  @Deprecated
-  protected CompoundWordTokenFilterBase(TokenStream input, String[] dictionary, boolean onlyLongestMatch) {
-    this(Version.LUCENE_30, input, makeDictionary(dictionary),DEFAULT_MIN_WORD_SIZE,DEFAULT_MIN_SUBWORD_SIZE,DEFAULT_MAX_SUBWORD_SIZE, onlyLongestMatch);
-  }
-  
-  /**
-   * @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, Set, boolean)} instead
-   */
-  @Deprecated
-  protected CompoundWordTokenFilterBase(TokenStream input, Set<?> dictionary, boolean onlyLongestMatch) {
-    this(Version.LUCENE_30, input, dictionary,DEFAULT_MIN_WORD_SIZE,DEFAULT_MIN_SUBWORD_SIZE,DEFAULT_MAX_SUBWORD_SIZE, onlyLongestMatch);
-  }
-  
-  /**
-   * @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, String[])} instead
-   */
-  @Deprecated
-  protected CompoundWordTokenFilterBase(TokenStream input, String[] dictionary) {
-    this(Version.LUCENE_30, input, makeDictionary(dictionary),DEFAULT_MIN_WORD_SIZE,DEFAULT_MIN_SUBWORD_SIZE,DEFAULT_MAX_SUBWORD_SIZE, false);
-  }
-  
-  /**
-   * @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, Set)} instead
-   */
-  @Deprecated
-  protected CompoundWordTokenFilterBase(TokenStream input, Set<?> dictionary) {
-    this(Version.LUCENE_30, input, dictionary,DEFAULT_MIN_WORD_SIZE,DEFAULT_MIN_SUBWORD_SIZE,DEFAULT_MAX_SUBWORD_SIZE, false);
-  }
 
-  /**
-   * @deprecated use {@link #CompoundWordTokenFilterBase(Version, TokenStream, Set, int, int, int, boolean)} instead
-   */
-  @Deprecated
-  protected CompoundWordTokenFilterBase(TokenStream input, Set<?> dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
-    this(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
-  }
-  
   protected CompoundWordTokenFilterBase(Version matchVersion, TokenStream input, String[] dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
     this(matchVersion, input,makeDictionary(dictionary),minWordSize,minSubwordSize,maxSubwordSize, onlyLongestMatch);
   }
@@ -170,11 +123,11 @@ public abstract class CompoundWordTokenF
    * @param dictionary 
    * @return {@link Set} of lowercased terms 
    */
-  public static final Set<?> makeDictionary(final String[] dictionary) {
+  public static Set<?> makeDictionary(final String[] dictionary) {
     return makeDictionary(Version.LUCENE_30, dictionary);
   }
   
-  public static final Set<?> makeDictionary(final Version matchVersion, final String[] dictionary) {
+  public static Set<?> makeDictionary(final Version matchVersion, final String[] dictionary) {
     if (dictionary == null) {
       return null;
     }
@@ -184,7 +137,7 @@ public abstract class CompoundWordTokenF
     return dict;
   }
   
-  private final void setToken(final Token token) throws IOException {
+  private void setToken(final Token token) throws IOException {
     clearAttributes();
     termAtt.copyBuffer(token.buffer(), 0, token.length());
     flagsAtt.setFlags(token.getFlags());
@@ -222,7 +175,7 @@ public abstract class CompoundWordTokenF
     }
   }
   
-  protected static final void addAllLowerCase(CharArraySet target, Collection<?> col) {
+  protected static void addAllLowerCase(CharArraySet target, Collection<?> col) {
     for (Object obj : col) {
       String string = (String) obj;
       target.add(string.toLowerCase(Locale.ENGLISH));

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java Tue Nov 30 11:22:39 2010
@@ -21,7 +21,7 @@ package org.apache.lucene.analysis.compo
 import java.util.Set;
 
 import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenFilter; // for javadocs
+import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.util.Version;
 
@@ -34,67 +34,6 @@ import org.apache.lucene.util.Version;
  * </p>
  */
 public class DictionaryCompoundWordTokenFilter extends CompoundWordTokenFilterBase {
-  
-  /**
-   * Creates a new {@link DictionaryCompoundWordTokenFilter}
-   * 
-   * @param input the {@link TokenStream} to process
-   * @param dictionary the word dictionary to match against
-   * @param minWordSize only words longer than this get processed
-   * @param minSubwordSize only subwords longer than this get to the output stream
-   * @param maxSubwordSize only subwords shorter than this get to the output stream
-   * @param onlyLongestMatch Add only the longest matching subword to the stream
-   * @deprecated use {@link #DictionaryCompoundWordTokenFilter(Version, TokenStream, String[], int, int, int, boolean)} instead
-   */
-  @Deprecated
-  public DictionaryCompoundWordTokenFilter(TokenStream input, String[] dictionary,
-      int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
-    super(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
-  }
-
-  /**
-   * Creates a new {@link DictionaryCompoundWordTokenFilter}
-   *  
-   * @param input the {@link TokenStream} to process
-   * @param dictionary the word dictionary to match against
-   * @deprecated use {@link #DictionaryCompoundWordTokenFilter(Version, TokenStream, String[])} instead 
-   */
-  @Deprecated
-  public DictionaryCompoundWordTokenFilter(TokenStream input, String[] dictionary) {
-    super(Version.LUCENE_30, input, dictionary);
-  }
-
-  /**
-   * Creates a new {@link DictionaryCompoundWordTokenFilter}
-   *  
-   * @param input the {@link TokenStream} to process
-   * @param dictionary the word dictionary to match against. If this is a {@link org.apache.lucene.analysis.util.CharArraySet CharArraySet} it must have set ignoreCase=false and only contain
-   *        lower case strings.
-   * @deprecated use {@link #DictionaryCompoundWordTokenFilter(Version, TokenStream, Set)} instead 
-   */
-  @Deprecated
-  public DictionaryCompoundWordTokenFilter(TokenStream input, Set dictionary) {
-    super(Version.LUCENE_30, input, dictionary);
-  }
-
-  /**
-   * Creates a new {@link DictionaryCompoundWordTokenFilter}
-   *  
-   * @param input the {@link TokenStream} to process
-   * @param dictionary the word dictionary to match against. If this is a {@link org.apache.lucene.analysis.util.CharArraySet CharArraySet} it must have set ignoreCase=false and only contain
-   *        lower case strings. 
-   * @param minWordSize only words longer than this get processed
-   * @param minSubwordSize only subwords longer than this get to the output stream
-   * @param maxSubwordSize only subwords shorter than this get to the output stream
-   * @param onlyLongestMatch Add only the longest matching subword to the stream
-   * @deprecated use {@link #DictionaryCompoundWordTokenFilter(Version, TokenStream, Set, int, int, int, boolean)} instead
-   */
-  @Deprecated
-  public DictionaryCompoundWordTokenFilter(TokenStream input, Set dictionary,
-      int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
-    super(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
-  }
-  
   /**
    * Creates a new {@link DictionaryCompoundWordTokenFilter}
    * 

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java Tue Nov 30 11:22:39 2010
@@ -18,12 +18,10 @@ package org.apache.lucene.analysis.compo
  */
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.Reader;
 import java.util.Set;
 
 import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenFilter; // for javadocs
+import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.compound.hyphenation.Hyphenation;
 import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
@@ -68,8 +66,10 @@ public class HyphenationCompoundWordToke
   public HyphenationCompoundWordTokenFilter(Version matchVersion, TokenStream input,
       HyphenationTree hyphenator, String[] dictionary, int minWordSize,
       int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
-    this(input, hyphenator, makeDictionary(dictionary), minWordSize,
-        minSubwordSize, maxSubwordSize, onlyLongestMatch);
+    super(matchVersion, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize,
+        onlyLongestMatch);
+
+    this.hyphenator = hyphenator;
   }
 
   /**
@@ -89,7 +89,7 @@ public class HyphenationCompoundWordToke
    */
   public HyphenationCompoundWordTokenFilter(Version matchVersion, TokenStream input,
       HyphenationTree hyphenator, String[] dictionary) {
-    this(input, hyphenator, makeDictionary(dictionary), DEFAULT_MIN_WORD_SIZE,
+    this(matchVersion, input, hyphenator, makeDictionary(dictionary), DEFAULT_MIN_WORD_SIZE,
         DEFAULT_MIN_SUBWORD_SIZE, DEFAULT_MAX_SUBWORD_SIZE, false);
   }
 
@@ -113,7 +113,7 @@ public class HyphenationCompoundWordToke
    */
   public HyphenationCompoundWordTokenFilter(Version matchVersion, TokenStream input,
       HyphenationTree hyphenator, Set<?> dictionary) {
-    this(input, hyphenator, dictionary, DEFAULT_MIN_WORD_SIZE,
+    this(matchVersion, input, hyphenator, dictionary, DEFAULT_MIN_WORD_SIZE,
         DEFAULT_MIN_SUBWORD_SIZE, DEFAULT_MAX_SUBWORD_SIZE, false);
   }
 
@@ -180,84 +180,6 @@ public class HyphenationCompoundWordToke
   }
 
   /**
-   * Creates a new {@link HyphenationCompoundWordTokenFilter} instance.
-   * 
-   * @param input the {@link TokenStream} to process
-   * @param hyphenator the hyphenation pattern tree to use for hyphenation
-   * @param dictionary the word dictionary to match against
-   * @param minWordSize only words longer than this get processed
-   * @param minSubwordSize only subwords longer than this get to the output
-   *        stream
-   * @param maxSubwordSize only subwords shorter than this get to the output
-   *        stream
-   * @param onlyLongestMatch Add only the longest matching subword to the stream
-   * @deprecated use {@link #HyphenationCompoundWordTokenFilter(Version, TokenStream, HyphenationTree, String[], int, int, int, boolean)} instead. 
-   */
-  @Deprecated
-  public HyphenationCompoundWordTokenFilter(TokenStream input,
-      HyphenationTree hyphenator, String[] dictionary, int minWordSize,
-      int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
-    this(Version.LUCENE_30, input, hyphenator, makeDictionary(dictionary), minWordSize,
-        minSubwordSize, maxSubwordSize, onlyLongestMatch);
-  }
-
-  /**
-   * Creates a new {@link HyphenationCompoundWordTokenFilter} instance.
-   *  
-   * @param input the {@link TokenStream} to process
-   * @param hyphenator the hyphenation pattern tree to use for hyphenation
-   * @param dictionary the word dictionary to match against
-   * @deprecated use {@link #HyphenationCompoundWordTokenFilter(Version, TokenStream, HyphenationTree, String[])} instead.
-   */
-  @Deprecated
-  public HyphenationCompoundWordTokenFilter(TokenStream input,
-      HyphenationTree hyphenator, String[] dictionary) {
-    this(Version.LUCENE_30, input, hyphenator, makeDictionary(dictionary), DEFAULT_MIN_WORD_SIZE,
-        DEFAULT_MIN_SUBWORD_SIZE, DEFAULT_MAX_SUBWORD_SIZE, false);
-  }
-
-  /**
-   * Creates a new {@link HyphenationCompoundWordTokenFilter} instance.
-   *  
-   * @param input the {@link TokenStream} to process
-   * @param hyphenator the hyphenation pattern tree to use for hyphenation
-   * @param dictionary the word dictionary to match against. If this is a {@link org.apache.lucene.analysis.util.CharArraySet CharArraySet} it must have set ignoreCase=false and only contain
-   *        lower case strings. 
-   * @deprecated use {@link #HyphenationCompoundWordTokenFilter(Version, TokenStream, HyphenationTree, Set)} instead.        
-   */
-  @Deprecated
-  public HyphenationCompoundWordTokenFilter(TokenStream input,
-      HyphenationTree hyphenator, Set<?> dictionary) {
-    this(Version.LUCENE_30, input, hyphenator, dictionary, DEFAULT_MIN_WORD_SIZE,
-        DEFAULT_MIN_SUBWORD_SIZE, DEFAULT_MAX_SUBWORD_SIZE, false);
-  }
-
-  /**
-   * Creates a new {@link HyphenationCompoundWordTokenFilter} instance.
-   *  
-   * @param input the {@link TokenStream} to process
-   * @param hyphenator the hyphenation pattern tree to use for hyphenation
-   * @param dictionary the word dictionary to match against. If this is a {@link org.apache.lucene.analysis.util.CharArraySet CharArraySet} it must have set ignoreCase=false and only contain
-   *        lower case strings. 
-   * @param minWordSize only words longer than this get processed
-   * @param minSubwordSize only subwords longer than this get to the output
-   *        stream
-   * @param maxSubwordSize only subwords shorter than this get to the output
-   *        stream
-   * @param onlyLongestMatch Add only the longest matching subword to the stream
-   * @deprecated use {@link #HyphenationCompoundWordTokenFilter(Version, TokenStream, HyphenationTree, Set, int, int, int, boolean)} instead.
-   */
-  @Deprecated
-  public HyphenationCompoundWordTokenFilter(TokenStream input,
-      HyphenationTree hyphenator, Set<?> dictionary, int minWordSize,
-      int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
-    super(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize,
-        onlyLongestMatch);
-
-    this.hyphenator = hyphenator;
-  }
-
-  /**
    * Create a hyphenator tree
    * 
    * @param hyphenationFilename the filename of the XML grammar to load
@@ -284,27 +206,6 @@ public class HyphenationCompoundWordToke
   /**
    * Create a hyphenator tree
    * 
-   * @param hyphenationReader the reader of the XML grammar to load from
-   * @return An object representing the hyphenation patterns
-   * @throws Exception
-   * @deprecated Don't use Readers with fixed charset to load XML files, unless programatically created.
-   * Use {@link #getHyphenationTree(InputSource)} instead, where you can supply default charset and input
-   * stream, if you like.
-   */
-  @Deprecated
-  public static HyphenationTree getHyphenationTree(Reader hyphenationReader)
-      throws Exception {
-    final InputSource is = new InputSource(hyphenationReader);
-    // we need this to load the DTD in very old parsers (like the one in JDK 1.4).
-    // The DTD itsself is provided via EntityResolver, so it should always load, but
-    // some parsers still want to have a base URL (Crimson).
-    is.setSystemId("urn:java:" + HyphenationTree.class.getName());
-    return getHyphenationTree(is);
-  }
-
-  /**
-   * Create a hyphenator tree
-   * 
    * @param hyphenationSource the InputSource pointing to the XML grammar
    * @return An object representing the hyphenation patterns
    * @throws Exception

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizer.java Tue Nov 30 11:22:39 2010
@@ -87,40 +87,6 @@ public class LetterTokenizer extends Cha
     super(matchVersion, factory, in);
   }
   
-  /**
-   * Construct a new LetterTokenizer.
-   * 
-   * @deprecated use {@link #LetterTokenizer(Version, Reader)} instead. This
-   *             will be removed in Lucene 4.0.
-   */
-  @Deprecated
-  public LetterTokenizer(Reader in) {
-    super(Version.LUCENE_30, in);
-  }
-  
-  /**
-   * Construct a new LetterTokenizer using a given {@link AttributeSource}. 
-   * @deprecated
-   * use {@link #LetterTokenizer(Version, AttributeSource, Reader)} instead.
-   * This will be removed in Lucene 4.0.
-   */
-  @Deprecated
-  public LetterTokenizer(AttributeSource source, Reader in) {
-    super(Version.LUCENE_30, source, in);
-  }
-  
-  /**
-   * Construct a new LetterTokenizer using a given
-   * {@link org.apache.lucene.util.AttributeSource.AttributeFactory}.
-   * 
-   * @deprecated use {@link #LetterTokenizer(Version, AttributeSource.AttributeFactory, Reader)}
-   *             instead. This will be removed in Lucene 4.0.
-   */
-  @Deprecated
-  public LetterTokenizer(AttributeFactory factory, Reader in) {
-    super(Version.LUCENE_30, factory, in);
-  }
-  
   /** Collects only characters which satisfy
    * {@link Character#isLetter(int)}.*/
   @Override

Modified: lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilter.java?rev=1040463&r1=1040462&r2=1040463&view=diff
==============================================================================
--- lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilter.java (original)
+++ lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilter.java Tue Nov 30 11:22:39 2010
@@ -49,14 +49,6 @@ public final class LowerCaseFilter exten
     charUtils = CharacterUtils.getInstance(matchVersion);
   }
   
-  /**
-   * @deprecated Use {@link #LowerCaseFilter(Version, TokenStream)} instead.
-   */
-  @Deprecated
-  public LowerCaseFilter(TokenStream in) {
-    this(Version.LUCENE_30, in);
-  }
-
   @Override
   public final boolean incrementToken() throws IOException {
     if (input.incrementToken()) {