Posted to commits@lucene.apache.org by rm...@apache.org on 2010/07/13 18:12:24 UTC

svn commit: r963780 [5/6] - in /lucene/dev/branches/branch_3x: ./ lucene/ lucene/backwards/src/ lucene/backwards/src/test/org/apache/lucene/analysis/ lucene/backwards/src/test/org/apache/lucene/document/ lucene/backwards/src/test/org/apache/lucene/inde...

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermScorer.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermScorer.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermScorer.java Tue Jul 13 16:12:21 2010
@@ -26,155 +26,155 @@ import org.apache.lucene.analysis.Whites
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 
-public class TestTermScorer extends LuceneTestCase
-{
-    protected RAMDirectory directory;
-    private static final String FIELD = "field";
-
-    protected String[] values = new String[]{"all", "dogs dogs", "like", "playing", "fetch", "all"};
-    protected IndexSearcher indexSearcher;
-    protected IndexReader indexReader;
-
-
-    public TestTermScorer(String s)
-    {
-        super(s);
+public class TestTermScorer extends LuceneTestCase {
+  protected RAMDirectory directory;
+  private static final String FIELD = "field";
+  
+  protected String[] values = new String[] {"all", "dogs dogs", "like",
+      "playing", "fetch", "all"};
+  protected IndexSearcher indexSearcher;
+  protected IndexReader indexReader;
+  
+  public TestTermScorer(String s) {
+    super(s);
+  }
+  
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    directory = new RAMDirectory();
+    
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    for (int i = 0; i < values.length; i++) {
+      Document doc = new Document();
+      doc
+          .add(new Field(FIELD, values[i], Field.Store.YES,
+              Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    indexReader = writer.getReader();
+    writer.close();
+    indexSearcher = new IndexSearcher(indexReader);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    indexSearcher.close();
+    indexReader.close();
+    directory.close();
+  }
+
+  public void test() throws IOException {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    // we have 2 documents with the term all in them, one document for all the
+    // other values
+    final List<TestHit> docs = new ArrayList<TestHit>();
+    // must call next first
+    
+    ts.score(new Collector() {
+      private int base = 0;
+      private Scorer scorer;
+      
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        this.scorer = scorer;
+      }
+      
+      @Override
+      public void collect(int doc) throws IOException {
+        float score = scorer.score();
+        doc = doc + base;
+        docs.add(new TestHit(doc, score));
+        assertTrue("score " + score + " is not greater than 0", score > 0);
+        assertTrue("Doc: " + doc + " does not equal 0 or doc does not equal 5",
+            doc == 0 || doc == 5);
+      }
+      
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) {
+        base = docBase;
+      }
+      
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
+    });
+    assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
+    TestHit doc0 = docs.get(0);
+    TestHit doc5 = docs.get(1);
+    // The scores should be the same
+    assertTrue(doc0.score + " does not equal: " + doc5.score,
+        doc0.score == doc5.score);
+    /*
+     * Score should be (based on Default Sim.: All floats are approximate tf = 1
+     * numDocs = 6 docFreq(all) = 2 idf = ln(6/3) + 1 = 1.693147 idf ^ 2 =
+     * 2.8667 boost = 1 lengthNorm = 1 //there is 1 term in every document coord
+     * = 1 sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
+     * queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 /(1.693147) = 0.590
+     * 
+     * score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
+     */
+    assertTrue(doc0.score + " does not equal: " + 1.6931472f,
+        doc0.score == 1.6931472f);
+  }
+  
+  public void testNext() throws Exception {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    assertTrue("next did not return a doc",
+        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("score is not correct", ts.score() == 1.6931472f);
+    assertTrue("next did not return a doc",
+        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("score is not correct", ts.score() == 1.6931472f);
+    assertTrue("next returned a doc and it should not have",
+        ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
+  }
+  
+  public void testAdvance() throws Exception {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+    // The next doc should be doc 5
+    assertTrue("doc should be number 5", ts.docID() == 5);
+  }
+  
+  private class TestHit {
+    public int doc;
+    public float score;
+    
+    public TestHit(int doc, float score) {
+      this.doc = doc;
+      this.score = score;
     }
-
+    
     @Override
-    protected void setUp() throws Exception {
-        super.setUp();
-        directory = new RAMDirectory();
-
-        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-        for (int i = 0; i < values.length; i++) {
-            Document doc = new Document();
-            doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.ANALYZED));
-            writer.addDocument(doc);
-        }
-        writer.close();
-        indexSearcher = new IndexSearcher(directory, false);
-        indexReader = indexSearcher.getIndexReader();
-
-
-    }
-
-    public void test() throws IOException {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        TermScorer ts = new TermScorer(weight,
-                                       indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
-                                       indexReader.norms(FIELD));
-        //we have 2 documents with the term all in them, one document for all the other values
-        final List<TestHit> docs = new ArrayList<TestHit>();
-        //must call next first
-
-
-        ts.score(new Collector() {
-            private int base = 0;
-            private Scorer scorer;
-            @Override
-            public void setScorer(Scorer scorer) throws IOException {
-              this.scorer = scorer; 
-            }
-
-            @Override
-            public void collect(int doc) throws IOException {
-              float score = scorer.score();
-              doc = doc + base;
-              docs.add(new TestHit(doc, score));
-              assertTrue("score " + score + " is not greater than 0", score > 0);
-              assertTrue("Doc: " + doc + " does not equal 0 or doc does not equal 5",
-                            doc == 0 || doc == 5);
-            }
-            @Override
-            public void setNextReader(IndexReader reader, int docBase) {
-              base = docBase;
-            }
-            @Override
-            public boolean acceptsDocsOutOfOrder() {
-              return true;
-            }
-        });
-        assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
-        TestHit doc0 =  docs.get(0);
-        TestHit doc5 =  docs.get(1);
-        //The scores should be the same
-        assertTrue(doc0.score + " does not equal: " + doc5.score, doc0.score == doc5.score);
-        /*
-        Score should be (based on Default Sim.:
-        All floats are approximate
-        tf = 1
-        numDocs = 6
-        docFreq(all) = 2
-        idf = ln(6/3) + 1 = 1.693147
-        idf ^ 2 = 2.8667
-        boost = 1
-        lengthNorm = 1 //there is 1 term in every document
-        coord = 1
-        sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
-        queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 /(1.693147) = 0.590
-
-         score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
-
-        */
-        assertTrue(doc0.score + " does not equal: " + 1.6931472f, doc0.score == 1.6931472f);
-    }
-
-    public void testNext() throws Exception {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        TermScorer ts = new TermScorer(weight,
-                                       indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
-                                       indexReader.norms(FIELD));
-        assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-        assertTrue("score is not correct", ts.score() == 1.6931472f);
-        assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-        assertTrue("score is not correct", ts.score() == 1.6931472f);
-        assertTrue("next returned a doc and it should not have", ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
-    }
-
-    public void testSkipTo() throws Exception {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        TermScorer ts = new TermScorer(weight,
-                                       indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
-                                       indexReader.norms(FIELD));
-        assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
-        //The next doc should be doc 5
-        assertTrue("doc should be number 5", ts.docID() == 5);
-    }
-
-    private class TestHit {
-        public int doc;
-        public float score;
-
-        public TestHit(int doc, float score) {
-            this.doc = doc;
-            this.score = score;
-        }
-
-        @Override
-        public String toString() {
-            return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
-        }
+    public String toString() {
+      return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
     }
-
+  }
+  
 }
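
[Note: every hunk in this commit applies the same migration: the test builds its index with a RandomIndexWriter seeded from newRandom(), takes an IndexReader from the writer via getReader(), searches that reader directly instead of opening the Directory, and closes searcher, reader, and directory in tearDown(). A minimal sketch of that pattern follows; ExampleTest, its field, and its single document are illustrative only and are not part of this commit.]

  import org.apache.lucene.analysis.WhitespaceAnalyzer;
  import org.apache.lucene.document.Document;
  import org.apache.lucene.document.Field;
  import org.apache.lucene.index.IndexReader;
  import org.apache.lucene.index.IndexWriterConfig;
  import org.apache.lucene.index.RandomIndexWriter;
  import org.apache.lucene.search.IndexSearcher;
  import org.apache.lucene.store.Directory;
  import org.apache.lucene.store.RAMDirectory;
  import org.apache.lucene.util.LuceneTestCase;

  public class ExampleTest extends LuceneTestCase {
    private Directory directory;
    private IndexReader reader;
    private IndexSearcher searcher;

    @Override
    protected void setUp() throws Exception {
      super.setUp();
      directory = new RAMDirectory();
      // RandomIndexWriter randomizes writer settings on each test run
      RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
          new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
      Document doc = new Document();
      doc.add(new Field("field", "all dogs like playing fetch", Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
      // take the reader from the writer; it stays usable after the writer is closed,
      // as in the tests above
      reader = writer.getReader();
      writer.close();
      searcher = new IndexSearcher(reader);
    }

    @Override
    protected void tearDown() throws Exception {
      searcher.close();
      reader.close();
      directory.close();
      super.tearDown();
    }
  }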

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermVectors.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermVectors.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermVectors.java Tue Jul 13 16:12:21 2010
@@ -30,11 +30,16 @@ import org.apache.lucene.util.English;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Random;
 import java.util.SortedSet;
 
 public class TestTermVectors extends LuceneTestCase {
   private IndexSearcher searcher;
+  private IndexReader reader;
   private Directory directory = new MockRAMDirectory();
+
+  private Random random;
+
   public TestTermVectors(String s) {
     super(s);
   }
@@ -42,9 +47,9 @@ public class TestTermVectors extends Luc
   @Override
   protected void setUp() throws Exception {                  
     super.setUp();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(
-        TEST_VERSION_CURRENT)));
+    random = newRandom();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
     //writer.setUseCompoundFile(true);
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
@@ -68,8 +73,17 @@ public class TestTermVectors extends Luc
           Field.Store.YES, Field.Index.ANALYZED, termVector));
       writer.addDocument(doc);
     }
+    reader = writer.getReader();
     writer.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
   }
 
   public void test() {
@@ -95,17 +109,16 @@ public class TestTermVectors extends Luc
   
   public void testTermVectorsFieldOrder() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(
-        TEST_VERSION_CURRENT)));
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
     Document doc = new Document();
     doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     doc.add(new Field("b", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
     writer.close();
-    IndexReader reader = IndexReader.open(dir, true);
     TermFreqVector[] v = reader.getTermFreqVectors(0);
     assertEquals(4, v.length);
     String[] expectedFields = new String[]{"a", "b", "c", "x"};
@@ -124,65 +137,63 @@ public class TestTermVectors extends Luc
         assertEquals(expectedPositions[j], positions[0]);
       }
     }
+    reader.close();
+    dir.close();
   }
 
-  public void testTermPositionVectors() {
+  public void testTermPositionVectors() throws IOException {
     Query query = new TermQuery(new Term("field", "zero"));
-    try {
-      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
-      assertEquals(1, hits.length);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    
+    for (int i = 0; i < hits.length; i++)
+    {
+      TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+      assertTrue(vector != null);
+      assertTrue(vector.length == 1);
       
-      for (int i = 0; i < hits.length; i++)
-      {
-        TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
-        assertTrue(vector != null);
-        assertTrue(vector.length == 1);
-        
-        boolean shouldBePosVector = (hits[i].doc % 2 == 0) ? true : false;
-        assertTrue((shouldBePosVector == false) || (shouldBePosVector == true && (vector[0] instanceof TermPositionVector == true)));
-       
-        boolean shouldBeOffVector = (hits[i].doc % 3 == 0) ? true : false;
-        assertTrue((shouldBeOffVector == false) || (shouldBeOffVector == true && (vector[0] instanceof TermPositionVector == true)));
+      boolean shouldBePosVector = (hits[i].doc % 2 == 0) ? true : false;
+      assertTrue((shouldBePosVector == false) || (shouldBePosVector == true && (vector[0] instanceof TermPositionVector == true)));
+      
+      boolean shouldBeOffVector = (hits[i].doc % 3 == 0) ? true : false;
+      assertTrue((shouldBeOffVector == false) || (shouldBeOffVector == true && (vector[0] instanceof TermPositionVector == true)));
+      
+      if(shouldBePosVector || shouldBeOffVector){
+        TermPositionVector posVec = (TermPositionVector)vector[0];
+        String [] terms = posVec.getTerms();
+        assertTrue(terms != null && terms.length > 0);
         
-        if(shouldBePosVector || shouldBeOffVector){
-          TermPositionVector posVec = (TermPositionVector)vector[0];
-          String [] terms = posVec.getTerms();
-          assertTrue(terms != null && terms.length > 0);
+        for (int j = 0; j < terms.length; j++) {
+          int [] positions = posVec.getTermPositions(j);
+          TermVectorOffsetInfo [] offsets = posVec.getOffsets(j);
           
-          for (int j = 0; j < terms.length; j++) {
-            int [] positions = posVec.getTermPositions(j);
-            TermVectorOffsetInfo [] offsets = posVec.getOffsets(j);
-            
-            if(shouldBePosVector){
-              assertTrue(positions != null);
-              assertTrue(positions.length > 0);
-            }
-            else
-              assertTrue(positions == null);
-            
-            if(shouldBeOffVector){
-              assertTrue(offsets != null);
-              assertTrue(offsets.length > 0);
-            }
-            else
-              assertTrue(offsets == null);
-          }
-        }
-        else{
-          try{
-            assertTrue(false);
-          }
-          catch(ClassCastException ignore){
-            TermFreqVector freqVec = vector[0];
-            String [] terms = freqVec.getTerms();
-            assertTrue(terms != null && terms.length > 0);
+          if(shouldBePosVector){
+            assertTrue(positions != null);
+            assertTrue(positions.length > 0);
           }
+          else
+            assertTrue(positions == null);
           
+          if(shouldBeOffVector){
+            assertTrue(offsets != null);
+            assertTrue(offsets.length > 0);
+          }
+          else
+            assertTrue(offsets == null);
         }
-       
       }
-    } catch (IOException e) {
-      assertTrue(false);
+      else{
+        try{
+          assertTrue(false);
+        }
+        catch(ClassCastException ignore){
+          TermFreqVector freqVec = vector[0];
+          String [] terms = freqVec.getTerms();
+          assertTrue(terms != null && terms.length > 0);
+        }
+        
+      }
+      
     }
   }
   
@@ -205,7 +216,7 @@ public class TestTermVectors extends Luc
     }
   }
 
-  public void testKnownSetOfDocuments() {
+  public void testKnownSetOfDocuments() throws IOException {
     String test1 = "eating chocolate in a computer lab"; //6 terms
     String test2 = "computer in a computer lab"; //5 terms
     String test3 = "a chocolate lab grows old"; //5 terms
@@ -231,114 +242,112 @@ public class TestTermVectors extends Luc
     setupDoc(testDoc3, test3);
     Document testDoc4 = new Document();
     setupDoc(testDoc4, test4);
-        
+    
     Directory dir = new MockRAMDirectory();
     
-    try {
-      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, 
-          new SimpleAnalyzer(TEST_VERSION_CURRENT))
-          .setOpenMode(OpenMode.CREATE));
-      writer.addDocument(testDoc1);
-      writer.addDocument(testDoc2);
-      writer.addDocument(testDoc3);
-      writer.addDocument(testDoc4);
-      writer.close();
-      IndexSearcher knownSearcher = new IndexSearcher(dir, true);
-      TermEnum termEnum = knownSearcher.reader.terms();
-      TermDocs termDocs = knownSearcher.reader.termDocs();
-      //System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
-      
-      //Similarity sim = knownSearcher.getSimilarity();
-      while (termEnum.next() == true)
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, 
+        new SimpleAnalyzer(TEST_VERSION_CURRENT))
+    .setOpenMode(OpenMode.CREATE));
+    writer.addDocument(testDoc1);
+    writer.addDocument(testDoc2);
+    writer.addDocument(testDoc3);
+    writer.addDocument(testDoc4);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher knownSearcher = new IndexSearcher(reader);
+    TermEnum termEnum = knownSearcher.reader.terms();
+    TermDocs termDocs = knownSearcher.reader.termDocs();
+    //System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
+    
+    //Similarity sim = knownSearcher.getSimilarity();
+    while (termEnum.next() == true)
+    {
+      Term term = termEnum.term();
+      //System.out.println("Term: " + term);
+      termDocs.seek(term);
+      while (termDocs.next())
       {
-        Term term = termEnum.term();
-        //System.out.println("Term: " + term);
-        termDocs.seek(term);
-        while (termDocs.next())
+        int docId = termDocs.doc();
+        int freq = termDocs.freq();
+        //System.out.println("Doc Id: " + docId + " freq " + freq);
+        TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
+        //float tf = sim.tf(freq);
+        //float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
+        //float qNorm = sim.queryNorm()
+        //This is fine since we don't have stop words
+        //float lNorm = sim.lengthNorm("field", vector.getTerms().length);
+        //float coord = sim.coord()
+        //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
+        assertTrue(vector != null);
+        String[] vTerms = vector.getTerms();
+        int [] freqs = vector.getTermFrequencies();
+        for (int i = 0; i < vTerms.length; i++)
         {
-          int docId = termDocs.doc();
-          int freq = termDocs.freq();
-          //System.out.println("Doc Id: " + docId + " freq " + freq);
-          TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
-          //float tf = sim.tf(freq);
-          //float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
-          //float qNorm = sim.queryNorm()
-          //This is fine since we don't have stop words
-          //float lNorm = sim.lengthNorm("field", vector.getTerms().length);
-          //float coord = sim.coord()
-          //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
-          assertTrue(vector != null);
-          String[] vTerms = vector.getTerms();
-          int [] freqs = vector.getTermFrequencies();
-          for (int i = 0; i < vTerms.length; i++)
+          if (term.text().equals(vTerms[i]))
           {
-            if (term.text().equals(vTerms[i]))
-            {
-              assertTrue(freqs[i] == freq);
-            }
+            assertTrue(freqs[i] == freq);
           }
-          
         }
-        //System.out.println("--------");
+        
       }
-      Query query = new TermQuery(new Term("field", "chocolate"));
-      ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
-      //doc 3 should be the first hit b/c it is the shortest match
-      assertTrue(hits.length == 3);
-      /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
+      //System.out.println("--------");
+    }
+    Query query = new TermQuery(new Term("field", "chocolate"));
+    ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
+    //doc 3 should be the first hit b/c it is the shortest match
+    assertTrue(hits.length == 3);
+    /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
       System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
       System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
       System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
       System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " +  hits.doc(2).toString());
       System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
-      assertTrue(hits[0].doc == 2);
-      assertTrue(hits[1].doc == 3);
-      assertTrue(hits[2].doc == 0);
-      TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
-      assertTrue(vector != null);
-      //System.out.println("Vector: " + vector);
-      String[] terms = vector.getTerms();
-      int [] freqs = vector.getTermFrequencies();
-      assertTrue(terms != null && terms.length == 10);
-      for (int i = 0; i < terms.length; i++) {
-        String term = terms[i];
-        //System.out.println("Term: " + term);
-        int freq = freqs[i];
-        assertTrue(test4.indexOf(term) != -1);
-        Integer freqInt = test4Map.get(term);
-        assertTrue(freqInt != null);
-        assertTrue(freqInt.intValue() == freq);        
-      }
-      SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
-      knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
-      SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
-      assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
-      TermVectorEntry last = null;
-      for (final TermVectorEntry tve : vectorEntrySet) {
-        if (tve != null && last != null)
-        {
-          assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
-          Integer expectedFreq =  test4Map.get(tve.getTerm());
-          //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
-          assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
-        }
-        last = tve;
-
+    assertTrue(hits[0].doc == 2);
+    assertTrue(hits[1].doc == 3);
+    assertTrue(hits[2].doc == 0);
+    TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
+    assertTrue(vector != null);
+    //System.out.println("Vector: " + vector);
+    String[] terms = vector.getTerms();
+    int [] freqs = vector.getTermFrequencies();
+    assertTrue(terms != null && terms.length == 10);
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i];
+      //System.out.println("Term: " + term);
+      int freq = freqs[i];
+      assertTrue(test4.indexOf(term) != -1);
+      Integer freqInt = test4Map.get(term);
+      assertTrue(freqInt != null);
+      assertTrue(freqInt.intValue() == freq);        
+    }
+    SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
+    SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
+    assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+    TermVectorEntry last = null;
+    for (final TermVectorEntry tve : vectorEntrySet) {
+      if (tve != null && last != null)
+      {
+        assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
+        Integer expectedFreq =  test4Map.get(tve.getTerm());
+        //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
+        assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
       }
-
-      FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
-      knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
-      Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
-      assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
-      vectorEntrySet = map.get("field");
-      assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
-      assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
-      knownSearcher.close();
-    } catch (IOException e) {
-      e.printStackTrace();
-      assertTrue(false);
+      last = tve;
+      
     }
+    
+    FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
+    Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
+    assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
+    vectorEntrySet = map.get("field");
+    assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
+    assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+    knownSearcher.close();
+    reader.close();
+    dir.close();
   } 
   
   private void setupDoc(Document doc, String text)
@@ -352,8 +361,8 @@ public class TestTermVectors extends Luc
 
   // Test only a few docs having vectors
   public void testRareVectors() throws IOException {
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
         .setOpenMode(OpenMode.CREATE));
     for (int i = 0; i < 100; i++) {
       Document doc = new Document();
@@ -368,8 +377,9 @@ public class TestTermVectors extends Luc
       writer.addDocument(doc);
     }
 
+    IndexReader reader = writer.getReader();
     writer.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     Query query = new TermQuery(new Term("field", "hundred"));
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -379,14 +389,15 @@ public class TestTermVectors extends Luc
       assertTrue(vector != null);
       assertTrue(vector.length == 1);
     }
+    reader.close();
   }
 
 
   // In a single doc, for the same field, mix the term
   // vectors up
   public void testMixedVectrosVectors() throws IOException {
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, 
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, 
         new SimpleAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE));
     Document doc = new Document();
     doc.add(new Field("field", "one",
@@ -400,9 +411,10 @@ public class TestTermVectors extends Luc
     doc.add(new Field("field", "one",
                       Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
     writer.close();
 
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     Query query = new TermQuery(new Term("field", "one"));
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -428,5 +440,6 @@ public class TestTermVectors extends Luc
       assertEquals(4*i, offsets[i].getStartOffset());
       assertEquals(4*i+3, offsets[i].getEndOffset());
     }
+    reader.close();
   }
 }

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java Tue Jul 13 16:12:21 2010
@@ -24,8 +24,8 @@ import org.apache.lucene.analysis.Whites
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.TimeLimitingCollector.TimeExceededException;
 import org.apache.lucene.store.Directory;
@@ -51,6 +51,9 @@ public class TestTimeLimitingCollector e
   private static final int N_THREADS = 50;
 
   private Searcher searcher;
+  private Directory directory;
+  private IndexReader reader;
+
   private final String FIELD_NAME = "body";
   private Query query;
 
@@ -74,14 +77,16 @@ public class TestTimeLimitingCollector e
         "blueberry strudel",
         "blueberry pizza",
     };
-    Directory directory = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    directory = new RAMDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     
     for (int i=0; i<N_DOCS; i++) {
       add(docText[i%docText.length], iw);
     }
+    reader = iw.getReader();
     iw.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     String qtxt = "one";
     // start from 1, so that the 0th doc never matches
@@ -99,10 +104,12 @@ public class TestTimeLimitingCollector e
   @Override
   protected void tearDown() throws Exception {
     searcher.close();
+    reader.close();
+    directory.close();
     super.tearDown();
   }
 
-  private void add(String value, IndexWriter iw) throws IOException {
+  private void add(String value, RandomIndexWriter iw) throws IOException {
     Document d = new Document();
     d.add(new Field(FIELD_NAME, value, Field.Store.NO, Field.Index.ANALYZED));
     iw.addDocument(d);

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java Tue Jul 13 16:12:21 2010
@@ -22,8 +22,8 @@ import java.io.IOException;
 import org.apache.lucene.analysis.KeywordAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -93,10 +93,11 @@ public class TestTopDocsCollector extend
   private static final float MAX_SCORE = 9.17561f;
   
   private Directory dir = new RAMDirectory();
+  private IndexReader reader;
 
   private TopDocsCollector<ScoreDoc> doSearch(int numResults) throws IOException {
     Query q = new MatchAllDocsQuery();
-    IndexSearcher searcher = new IndexSearcher(dir, true);
+    IndexSearcher searcher = new IndexSearcher(reader);
     TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(numResults);
     searcher.search(q, tdc);
     searcher.close();
@@ -109,15 +110,18 @@ public class TestTopDocsCollector extend
     
     // populate an index with 30 documents, this should be enough for the test.
     // The documents have no content - the test uses MatchAllDocsQuery().
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+
     for (int i = 0; i < 30; i++) {
       writer.addDocument(new Document());
     }
+    reader = writer.getReader();
     writer.close();
   }
   
   @Override
   protected void tearDown() throws Exception {
+    reader.close();
     dir.close();
     dir = null;
     super.tearDown();

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java Tue Jul 13 16:12:21 2010
@@ -18,9 +18,12 @@ package org.apache.lucene.search;
  */
 
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import java.util.Random;
+
 import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
@@ -38,12 +41,12 @@ public class TestTopScoreDocCollector ex
   public void testOutOfOrderCollection() throws Exception {
 
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    Random random = newRandom();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     for (int i = 0; i < 10; i++) {
       writer.addDocument(new Document());
     }
-    writer.commit();
-    writer.close();
     
     boolean[] inOrder = new boolean[] { false, true };
     String[] actualTSDCClass = new String[] {
@@ -58,7 +61,8 @@ public class TestTopScoreDocCollector ex
     // Set minNrShouldMatch to 1 so that BQ will not optimize rewrite to return
     // the clause instead of BQ.
     bq.setMinimumNumberShouldMatch(1);
-    IndexSearcher searcher = new IndexSearcher(dir, true);
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = new IndexSearcher(reader);
     for (int i = 0; i < inOrder.length; i++) {
       TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(3, inOrder[i]);
       assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName());
@@ -71,6 +75,10 @@ public class TestTopScoreDocCollector ex
         assertEquals("expected doc Id " + j + " found " + sd[j].doc, j, sd[j].doc);
       }
     }
+    writer.close();
+    searcher.close();
+    reader.close();
+    dir.close();
   }
   
 }

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestWildcard.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestWildcard.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestWildcard.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestWildcard.java Tue Jul 13 16:12:21 2010
@@ -24,19 +24,28 @@ import org.apache.lucene.document.Docume
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.RAMDirectory;
 
 import java.io.IOException;
+import java.util.Random;
 
 /**
  * TestWildcard tests the '*' and '?' wildcard characters.
  */
 public class TestWildcard
     extends LuceneTestCase {
+  private Random random;
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    random = newRandom();
+  }
+
   public void testEquals() {
     WildcardQuery wq1 = new WildcardQuery(new Term("field", "b*a"));
     WildcardQuery wq2 = new WildcardQuery(new Term("field", "b*a"));
@@ -212,14 +221,13 @@ public class TestWildcard
   private RAMDirectory getIndexStore(String field, String[] contents)
       throws IOException {
     RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, new IndexWriterConfig(
         TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
     for (int i = 0; i < contents.length; ++i) {
       Document doc = new Document();
       doc.add(new Field(field, contents[i], Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
-    writer.optimize();
     writer.close();
 
     return indexStore;
@@ -270,7 +278,9 @@ public class TestWildcard
 
     // prepare the index
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    RandomIndexWriter iw = new RandomIndexWriter(random, dir, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+
     for (int i = 0; i < docs.length; i++) {
       Document doc = new Document();
       doc.add(new Field(field,docs[i],Store.NO,Index.ANALYZED));

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java Tue Jul 13 16:12:21 2010
@@ -26,9 +26,10 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Payload;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.IndexSearcher;
@@ -39,6 +40,7 @@ import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
@@ -47,6 +49,8 @@ import org.apache.lucene.search.Explanat
 
 public class TestPayloadNearQuery extends LuceneTestCase {
   private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory directory;
   private BoostingSimilarity similarity = new BoostingSimilarity();
   private byte[] payload2 = new byte[]{2};
   private byte[] payload4 = new byte[]{4};
@@ -101,9 +105,10 @@ public class TestPayloadNearQuery extend
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity));
+    directory = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer())
+        .setSimilarity(similarity));
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
       Document doc = new Document();
@@ -112,13 +117,21 @@ public class TestPayloadNearQuery extend
       doc.add(new Field("field2",  txt, Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
-    writer.optimize();
+    reader = writer.getReader();
     writer.close();
 
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
     searcher.setSimilarity(similarity);
   }
 
+  @Override
+  protected void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
   public void test() throws IOException {
     PayloadNearQuery query;
     TopDocs hits;

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java Tue Jul 13 16:12:21 2010
@@ -34,9 +34,10 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.LowerCaseTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Payload;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.document.Document;
@@ -52,6 +53,7 @@ import java.io.IOException;
  **/
 public class TestPayloadTermQuery extends LuceneTestCase {
   private IndexSearcher searcher;
+  private IndexReader reader;
   private BoostingSimilarity similarity = new BoostingSimilarity();
   private byte[] payloadField = new byte[]{1};
   private byte[] payloadMultiField1 = new byte[]{2};
@@ -110,9 +112,9 @@ public class TestPayloadTermQuery extend
   protected void setUp() throws Exception {
     super.setUp();
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(
-        similarity));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer())
+        .setSimilarity(similarity));
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
       Document doc = new Document();
@@ -123,13 +125,21 @@ public class TestPayloadTermQuery extend
       doc.add(new Field("multiField", English.intToEnglish(i) + "  " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
-    writer.optimize();
+    reader = writer.getReader();
     writer.close();
 
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
     searcher.setSimilarity(similarity);
   }
 
+  @Override
+  protected void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
   public void test() throws IOException {
     PayloadTermQuery query = new PayloadTermQuery(new Term("field", "seventy"),
             new MaxPayloadFunction());

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java Tue Jul 13 16:12:21 2010
@@ -22,8 +22,9 @@ import java.io.IOException;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -33,6 +34,7 @@ import org.apache.lucene.search.PhraseQu
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryUtils;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
@@ -51,25 +53,35 @@ import org.apache.lucene.util.LuceneTest
  */
 public class TestBasics extends LuceneTestCase {
   private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory directory;
 
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+    directory = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
       Document doc = new Document();
       doc.add(new Field("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
-
+    reader = writer.getReader();
+    searcher = new IndexSearcher(reader);
     writer.close();
+  }
 
-    searcher = new IndexSearcher(directory, true);
+  @Override
+  protected void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
   }
-  
+
+
   public void testTerm() throws Exception {
     Query query = new TermQuery(new Term("field", "seventy"));
     checkHits(query, new int[]

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java Tue Jul 13 16:12:21 2010
@@ -24,13 +24,14 @@ import org.apache.lucene.analysis.Whites
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.CheckHits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -49,12 +50,15 @@ public class TestFieldMaskingSpanQuery e
   }
 
   protected IndexSearcher searcher;
+  protected Directory directory;
+  protected IndexReader reader;
   
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    directory = new RAMDirectory();
+    RandomIndexWriter writer= new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     
     writer.addDocument(doc(new Field[] { field("id", "0")
                                          ,
@@ -109,14 +113,16 @@ public class TestFieldMaskingSpanQuery e
                                          field("gender", "male"),
                                          field("first",  "bubba"),
                                          field("last",   "jones")     }));
-    
+    reader = writer.getReader();
     writer.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
   }
 
   @Override
   protected void tearDown() throws Exception {
     searcher.close();
+    reader.close();
+    directory.close();
     super.tearDown();
   }
 

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java Tue Jul 13 16:12:21 2010
@@ -20,8 +20,9 @@ package org.apache.lucene.search.spans;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.CheckHits;
@@ -29,11 +30,14 @@ import org.apache.lucene.search.Explanat
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 
 public class TestNearSpansOrdered extends LuceneTestCase {
   protected IndexSearcher searcher;
+  protected Directory directory;
+  protected IndexReader reader;
 
   public static final String FIELD = "field";
   public static final QueryParser qp =
@@ -42,21 +46,25 @@ public class TestNearSpansOrdered extend
   @Override
   protected void tearDown() throws Exception {
     searcher.close();
+    reader.close();
+    directory.close();
     super.tearDown();
   }
   
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    directory = new RAMDirectory();
+    RandomIndexWriter writer= new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     for (int i = 0; i < docFields.length; i++) {
       Document doc = new Document();
       doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
+    reader = writer.getReader();
     writer.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
   }
 
   protected String[] docFields = {

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java Tue Jul 13 16:12:21 2010
@@ -34,6 +34,7 @@ import org.apache.lucene.analysis.standa
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -43,23 +44,35 @@ import java.util.Collections;
 
 public class TestSpans extends LuceneTestCase {
   private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory directory;
 
   public static final String field = "field";
 
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    directory = new RAMDirectory();
+    RandomIndexWriter writer= new RandomIndexWriter(newRandom(), directory, 
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     for (int i = 0; i < docFields.length; i++) {
       Document doc = new Document();
       doc.add(new Field(field, docFields[i], Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
+    reader = writer.getReader();
     writer.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
   }
-
+  
+  @Override
+  protected void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+  
   private String[] docFields = {
     "w1 w2 w3 w4 w5",
     "w1 w3 w2 w3",

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java Tue Jul 13 16:12:21 2010
@@ -18,14 +18,16 @@ package org.apache.lucene.search.spans;
  */
 
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.lucene.util.LuceneTestCase;
 
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
 import org.apache.lucene.store.Directory;
@@ -34,136 +36,144 @@ import org.apache.lucene.store.RAMDirect
 /*******************************************************************************
  * Tests the span query bug in Lucene. It demonstrates that SpanTermQuerys don't
  * work correctly in a BooleanQuery.
- *
+ * 
  */
 public class TestSpansAdvanced extends LuceneTestCase {
-
-    // location to the index
-    protected Directory mDirectory;
-
-    protected IndexSearcher searcher;
-
-    // field names in the index
-    private final static String FIELD_ID = "ID";
-    protected final static String FIELD_TEXT = "TEXT";
-
-    /**
-     * Initializes the tests by adding 4 identical documents to the index.
-     */
-    @Override
-    protected void setUp() throws Exception {
-        super.setUp();
-
-        // create test index
-        mDirectory = new RAMDirectory();
-        final IndexWriter writer = new IndexWriter(mDirectory,
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT)));
-        addDocument(writer, "1", "I think it should work.");
-        addDocument(writer, "2", "I think it should work.");
-        addDocument(writer, "3", "I think it should work.");
-        addDocument(writer, "4", "I think it should work.");
-        writer.close();
-        searcher = new IndexSearcher(mDirectory, true);
-    }
-
-    @Override
-    protected void tearDown() throws Exception {
-        searcher.close();
-        mDirectory.close();
-        mDirectory = null;
-        super.tearDown();
-    }
-
-    /**
-     * Adds the document to the index.
-     *
-     * @param writer the Lucene index writer
-     * @param id the unique id of the document
-     * @param text the text of the document
-     * @throws IOException
-     */
-    protected void addDocument(final IndexWriter writer, final String id, final String text) throws IOException {
-
-        final Document document = new Document();
-        document.add(new Field(FIELD_ID, id, Field.Store.YES, Field.Index.NOT_ANALYZED));
-        document.add(new Field(FIELD_TEXT, text, Field.Store.YES, Field.Index.ANALYZED));
-        writer.addDocument(document);
-    }
-
-    /**
-     * Tests two span queries.
-     *
-     * @throws IOException
-     */
-    public void testBooleanQueryWithSpanQueries() throws IOException {
-
-        doTestBooleanQueryWithSpanQueries(searcher,0.3884282f);
+  
+  // location of the index
+  protected Directory mDirectory;
+  protected IndexReader reader;
+  protected IndexSearcher searcher;
+  protected Random random;
+  
+  // field names in the index
+  private final static String FIELD_ID = "ID";
+  protected final static String FIELD_TEXT = "TEXT";
+  
+  /**
+   * Initializes the tests by adding 4 identical documents to the index.
+   */
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    random = newRandom();
+    // create test index
+    mDirectory = new RAMDirectory();
+    final RandomIndexWriter writer = new RandomIndexWriter(random,
+        mDirectory, new IndexWriterConfig(TEST_VERSION_CURRENT,
+            new StandardAnalyzer(TEST_VERSION_CURRENT)));
+    addDocument(writer, "1", "I think it should work.");
+    addDocument(writer, "2", "I think it should work.");
+    addDocument(writer, "3", "I think it should work.");
+    addDocument(writer, "4", "I think it should work.");
+    reader = writer.getReader();
+    writer.close();
+    searcher = new IndexSearcher(reader);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    mDirectory.close();
+    mDirectory = null;
+    super.tearDown();
+  }
+  
+  /**
+   * Adds the document to the index.
+   * 
+   * @param writer the Lucene index writer
+   * @param id the unique id of the document
+   * @param text the text of the document
+   * @throws IOException
+   */
+  protected void addDocument(final RandomIndexWriter writer, final String id,
+      final String text) throws IOException {
+    
+    final Document document = new Document();
+    document.add(new Field(FIELD_ID, id, Field.Store.YES,
+        Field.Index.NOT_ANALYZED));
+    document.add(new Field(FIELD_TEXT, text, Field.Store.YES,
+        Field.Index.ANALYZED));
+    writer.addDocument(document);
+  }
+  
+  /**
+   * Tests two span queries.
+   * 
+   * @throws IOException
+   */
+  public void testBooleanQueryWithSpanQueries() throws IOException {
+    
+    doTestBooleanQueryWithSpanQueries(searcher, 0.3884282f);
+  }
+  
+  /**
+   * Tests two span queries.
+   * 
+   * @throws IOException
+   */
+  protected void doTestBooleanQueryWithSpanQueries(IndexSearcher s,
+      final float expectedScore) throws IOException {
+    
+    final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "work"));
+    final BooleanQuery query = new BooleanQuery();
+    query.add(spanQuery, BooleanClause.Occur.MUST);
+    query.add(spanQuery, BooleanClause.Occur.MUST);
+    final String[] expectedIds = new String[] {"1", "2", "3", "4"};
+    final float[] expectedScores = new float[] {expectedScore, expectedScore,
+        expectedScore, expectedScore};
+    assertHits(s, query, "two span queries", expectedIds, expectedScores);
+  }
+  
+  /**
+   * Checks to see if the hits are what we expected.
+   * 
+   * @param query the query to execute
+   * @param description the description of the search
+   * @param expectedIds the expected document ids of the hits
+   * @param expectedScores the expected scores of the hits
+   * 
+   * @throws IOException
+   */
+  protected static void assertHits(Searcher s, Query query,
+      final String description, final String[] expectedIds,
+      final float[] expectedScores) throws IOException {
+    QueryUtils.check(query, s);
+    
+    final float tolerance = 1e-5f;
+    
+    // Hits hits = searcher.search(query);
+    // hits normalizes and throws things off if one score is greater than 1.0
+    TopDocs topdocs = s.search(query, null, 10000);
+    
+    /*****
+     * // display the hits System.out.println(hits.length() +
+     * " hits for search: \"" + description + '\"'); for (int i = 0; i <
+     * hits.length(); i++) { System.out.println("  " + FIELD_ID + ':' +
+     * hits.doc(i).get(FIELD_ID) + " (score:" + hits.score(i) + ')'); }
+     *****/
+    
+    // did we get the hits we expected
+    assertEquals(expectedIds.length, topdocs.totalHits);
+    for (int i = 0; i < topdocs.totalHits; i++) {
+      // System.out.println(i + " exp: " + expectedIds[i]);
+      // System.out.println(i + " field: " + hits.doc(i).get(FIELD_ID));
+      
+      int id = topdocs.scoreDocs[i].doc;
+      float score = topdocs.scoreDocs[i].score;
+      Document doc = s.doc(id);
+      assertEquals(expectedIds[i], doc.get(FIELD_ID));
+      boolean scoreEq = Math.abs(expectedScores[i] - score) < tolerance;
+      if (!scoreEq) {
+        System.out.println(i + " warning, expected score: " + expectedScores[i]
+            + ", actual " + score);
+        System.out.println(s.explain(query, id));
+      }
+      assertEquals(expectedScores[i], score, tolerance);
+      assertEquals(s.explain(query, id).getValue(), score, tolerance);
     }
-
-    /**
-     * Tests two span queries.
-     *
-     * @throws IOException
-     */
-    protected void doTestBooleanQueryWithSpanQueries(IndexSearcher s, final float expectedScore) throws IOException {
-
-        final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "work"));
-        final BooleanQuery query = new BooleanQuery();
-        query.add(spanQuery, BooleanClause.Occur.MUST);
-        query.add(spanQuery, BooleanClause.Occur.MUST);
-        final String[] expectedIds = new String[] { "1", "2", "3", "4" };
-        final float[] expectedScores = new float[] { expectedScore, expectedScore, expectedScore, expectedScore };
-        assertHits(s, query, "two span queries", expectedIds, expectedScores);
-    }
-
-
-    /**
-     * Checks to see if the hits are what we expected.
-     *
-     * @param query the query to execute
-     * @param description the description of the search
-     * @param expectedIds the expected document ids of the hits
-     * @param expectedScores the expected scores of the hits
-     *
-     * @throws IOException
-     */
-    protected static void assertHits(Searcher s, Query query, final String description, final String[] expectedIds,
-            final float[] expectedScores) throws IOException {
-        QueryUtils.check(query,s);
-
-        final float tolerance = 1e-5f;
-
-        // Hits hits = searcher.search(query);
-        // hits normalizes and throws things off if one score is greater than 1.0
-        TopDocs topdocs = s.search(query,null,10000);
-
-        /*****
-        // display the hits
-        System.out.println(hits.length() + " hits for search: \"" + description + '\"');
-        for (int i = 0; i < hits.length(); i++) {
-            System.out.println("  " + FIELD_ID + ':' + hits.doc(i).get(FIELD_ID) + " (score:" + hits.score(i) + ')');
-        }
-        *****/
-
-        // did we get the hits we expected
-        assertEquals(expectedIds.length, topdocs.totalHits);
-        for (int i = 0; i < topdocs.totalHits; i++) {
-            //System.out.println(i + " exp: " + expectedIds[i]);
-            //System.out.println(i + " field: " + hits.doc(i).get(FIELD_ID));
-
-            int id = topdocs.scoreDocs[i].doc;
-            float score = topdocs.scoreDocs[i].score;
-            Document doc = s.doc(id);
-            assertEquals(expectedIds[i], doc.get(FIELD_ID));
-            boolean scoreEq = Math.abs(expectedScores[i] - score) < tolerance;
-            if (!scoreEq) {
-              System.out.println(i + " warning, expected score: " + expectedScores[i] + ", actual " + score);
-              System.out.println(s.explain(query,id));
-            }
-            assertEquals(expectedScores[i], score, tolerance);
-            assertEquals(s.explain(query,id).getValue(), score, tolerance);
-        }
-    }
-
-
+  }
+  
 }
\ No newline at end of file

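The reworked assertHits above checks each hit's score both against the expected
value and against the value returned by Searcher.explain(), within a small
tolerance. A minimal, hypothetical helper (not in the patch) isolating just
that consistency check might look like:

    import java.io.IOException;

    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.Searcher;
    import org.apache.lucene.search.TopDocs;

    import static org.junit.Assert.assertEquals;

    public final class ScoreConsistency {

      private ScoreConsistency() {}

      /** Asserts that each hit's score agrees with Searcher.explain(). */
      public static void assertScoresMatchExplanations(Searcher s, Query query)
          throws IOException {
        final float tolerance = 1e-5f;
        TopDocs topDocs = s.search(query, null, 10000);
        for (ScoreDoc sd : topDocs.scoreDocs) {
          // The score reported for the hit and the value produced by the
          // explanation should agree to within the tolerance.
          float explained = s.explain(query, sd.doc).getValue();
          assertEquals(explained, sd.score, tolerance);
        }
      }
    }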
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java Tue Jul 13 16:12:21 2010
@@ -21,8 +21,8 @@ import java.io.IOException;
 
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.*;
@@ -30,84 +30,96 @@ import org.apache.lucene.search.*;
 /*******************************************************************************
  * Some expanded tests to make sure my patch doesn't break other SpanTermQuery
  * functionality.
- *
+ * 
  */
 public class TestSpansAdvanced2 extends TestSpansAdvanced {
-    IndexSearcher searcher2;
-    /**
-     * Initializes the tests by adding documents to the index.
-     */
-    @Override
-    protected void setUp() throws Exception {
-        super.setUp();
-
-        // create test index
-        final IndexWriter writer = new IndexWriter(mDirectory,
-            new IndexWriterConfig(TEST_VERSION_CURRENT, 
-                new StandardAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(
-                    OpenMode.APPEND));
-        addDocument(writer, "A", "Should we, could we, would we?");
-        addDocument(writer, "B", "It should.  Should it?");
-        addDocument(writer, "C", "It shouldn't.");
-        addDocument(writer, "D", "Should we, should we, should we.");
-        writer.close();
-
-        // re-open the searcher since we added more docs
-        searcher2 = new IndexSearcher(mDirectory, true);
-    }
-
-    /**
-     * Verifies that the index has the correct number of documents.
-     *
-     * @throws Exception
-     */
-    public void testVerifyIndex() throws Exception {
-        final IndexReader reader = IndexReader.open(mDirectory, true);
-        assertEquals(8, reader.numDocs());
-        reader.close();
-    }
-
-    /**
-     * Tests a single span query that matches multiple documents.
-     *
-     * @throws IOException
-     */
-    public void testSingleSpanQuery() throws IOException {
-
-        final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
-        final String[] expectedIds = new String[] { "B", "D", "1", "2", "3", "4", "A" };
-        final float[] expectedScores = new float[] { 0.625f, 0.45927936f, 0.35355338f, 0.35355338f, 0.35355338f,
-                0.35355338f, 0.26516503f, };
-        assertHits(searcher2, spanQuery, "single span query", expectedIds, expectedScores);
-    }
-
-    /**
-     * Tests a single span query that matches multiple documents.
-     *
-     * @throws IOException
-     */
-    public void testMultipleDifferentSpanQueries() throws IOException {
-
-        final Query spanQuery1 = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
-        final Query spanQuery2 = new SpanTermQuery(new Term(FIELD_TEXT, "we"));
-        final BooleanQuery query = new BooleanQuery();
-        query.add(spanQuery1, BooleanClause.Occur.MUST);
-        query.add(spanQuery2, BooleanClause.Occur.MUST);
-        final String[] expectedIds = new String[] { "D", "A" };
-        // these values were pre LUCENE-413
-        // final float[] expectedScores = new float[] { 0.93163157f, 0.20698164f };
-        final float[] expectedScores = new float[] { 1.0191123f, 0.93163157f };
-        assertHits(searcher2, query, "multiple different span queries", expectedIds, expectedScores);
-    }
-
-    /**
-     * Tests two span queries.
-     *
-     * @throws IOException
-     */
-    @Override
-    public void testBooleanQueryWithSpanQueries() throws IOException {
-
-        doTestBooleanQueryWithSpanQueries(searcher2, 0.73500174f);
-    }
+  IndexSearcher searcher2;
+  IndexReader reader2;
+  
+  /**
+   * Initializes the tests by adding documents to the index.
+   */
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    
+    // create test index
+    final RandomIndexWriter writer = new RandomIndexWriter(random, mDirectory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT))
+            .setOpenMode(OpenMode.APPEND));
+    addDocument(writer, "A", "Should we, could we, would we?");
+    addDocument(writer, "B", "It should.  Should it?");
+    addDocument(writer, "C", "It shouldn't.");
+    addDocument(writer, "D", "Should we, should we, should we.");
+    reader2 = writer.getReader();
+    writer.close();
+    
+    // re-open the searcher since we added more docs
+    searcher2 = new IndexSearcher(reader2);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    searcher2.close();
+    reader2.close();
+    super.tearDown();
+  }
+  
+  /**
+   * Verifies that the index has the correct number of documents.
+   * 
+   * @throws Exception
+   */
+  public void testVerifyIndex() throws Exception {
+    final IndexReader reader = IndexReader.open(mDirectory, true);
+    assertEquals(8, reader.numDocs());
+    reader.close();
+  }
+  
+  /**
+   * Tests a single span query that matches multiple documents.
+   * 
+   * @throws IOException
+   */
+  public void testSingleSpanQuery() throws IOException {
+    
+    final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
+    final String[] expectedIds = new String[] {"B", "D", "1", "2", "3", "4",
+        "A"};
+    final float[] expectedScores = new float[] {0.625f, 0.45927936f,
+        0.35355338f, 0.35355338f, 0.35355338f, 0.35355338f, 0.26516503f,};
+    assertHits(searcher2, spanQuery, "single span query", expectedIds,
+        expectedScores);
+  }
+  
+  /**
+   * Tests a single span query that matches multiple documents.
+   * 
+   * @throws IOException
+   */
+  public void testMultipleDifferentSpanQueries() throws IOException {
+    
+    final Query spanQuery1 = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
+    final Query spanQuery2 = new SpanTermQuery(new Term(FIELD_TEXT, "we"));
+    final BooleanQuery query = new BooleanQuery();
+    query.add(spanQuery1, BooleanClause.Occur.MUST);
+    query.add(spanQuery2, BooleanClause.Occur.MUST);
+    final String[] expectedIds = new String[] {"D", "A"};
+    // these values were pre LUCENE-413
+    // final float[] expectedScores = new float[] { 0.93163157f, 0.20698164f };
+    final float[] expectedScores = new float[] {1.0191123f, 0.93163157f};
+    assertHits(searcher2, query, "multiple different span queries",
+        expectedIds, expectedScores);
+  }
+  
+  /**
+   * Tests two span queries.
+   * 
+   * @throws IOException
+   */
+  @Override
+  public void testBooleanQueryWithSpanQueries() throws IOException {
+    
+    doTestBooleanQueryWithSpanQueries(searcher2, 0.73500174f);
+  }
 }

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java?rev=963780&r1=963779&r2=963780&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java Tue Jul 13 16:12:21 2010
@@ -33,6 +33,7 @@ import java.io.File;
 import java.io.PrintStream;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Hashtable;
 import java.util.Iterator;
 import java.util.Random;
 import java.util.ArrayList;
@@ -322,6 +323,30 @@ public class LuceneTestCaseJ4 {
     return new Random(seed);
   }
 
+  private static Hashtable<Class<?>,Long> staticSeeds = new Hashtable<Class<?>,Long>();
+
+  /**
+   * Returns a {@link Random} instance for generating random numbers from a
+   * {@code @BeforeClass} annotated method.
+   * The random seed is logged during test execution and printed to System.out on any
+   * failure, so the test can be reproduced by calling
+   * {@link #newStaticRandom(Class, long)} with the recorded seed.
+   */
+  public static Random newStaticRandom(Class<?> clazz) {
+    return newStaticRandom(clazz, seedRnd.nextLong());
+  }
+  
+  /**
+   * Returns a {@link Random} instance for generating random numbers from a
+   * {@code @BeforeClass} annotated method.
+   * If a test failure is otherwise hard to reproduce, use this method to initialize the
+   * random number generator with the seed that was printed out during the failing run.
+   */
+  public static Random newStaticRandom(Class<?> clazz, long seed) {
+    staticSeeds.put(clazz, Long.valueOf(seed));
+    return new Random(seed);
+  }
+
   public String getName() {
     return this.name;
   }
@@ -340,6 +365,11 @@ public class LuceneTestCaseJ4 {
 
   // We get here from InterceptTestCaseEvents on the 'failed' event....
   public void reportAdditionalFailureInfo() {
+    Long staticSeed = staticSeeds.get(getClass());
+    if (staticSeed != null) {
+      System.out.println("NOTE: random static seed of testclass '" + getName() + "' was: " + staticSeed);
+    }
+    
     if (seed != null) {
       System.out.println("NOTE: random seed of testcase '" + getName() + "' was: " + seed);
     }

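A hypothetical usage sketch of the newStaticRandom(Class) hook added above,
called from a @BeforeClass method (the class, method and field names below are
illustrative; only newStaticRandom and reportAdditionalFailureInfo come from
this patch):

    import java.util.Random;

    import org.junit.BeforeClass;
    import org.junit.Test;

    import org.apache.lucene.util.LuceneTestCaseJ4;

    import static org.junit.Assert.assertTrue;

    public class ExampleStaticSeedTest extends LuceneTestCaseJ4 {

      private static Random staticRandom;
      private static int numDocs;

      @BeforeClass
      public static void beforeClassFixture() {
        // newStaticRandom records the seed for this class; if a test later
        // fails, reportAdditionalFailureInfo() prints the recorded seed so the
        // class-level fixture can be rebuilt deterministically by calling
        // newStaticRandom(ExampleStaticSeedTest.class, seed).
        staticRandom = newStaticRandom(ExampleStaticSeedTest.class);
        numDocs = 1 + staticRandom.nextInt(100);
      }

      @Test
      public void testFixtureWasBuilt() {
        assertTrue(numDocs > 0);
      }
    }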
Propchange: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jul 13 16:12:21 2010
@@ -1,4 +1,4 @@
-/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java:931298,931337,931502,932129-932131,932163,932304,932369,932374,932398,932417,932541,932576,932587,932698,932731-932749,932752,932773,932795,932828,932856-932857,932862,932864,932878,932963,933541-933575,933598,933613,933679,933879,934339,934954,935014-935048,935065,935186-935513,935521-935522,935553-935962,936522,936544,936605,936657-936726,937039,937360,938582-938646,938989,939111,939611,939649,940433,940447,940451-940452,940666,940699,940730,940878-940892,940994,941270,941363,942166,942288,942676,942719,943142,943493,943931,945057,945090,945130,945245,945343,945420,946139,946330,946338,946599,948011,948082,948429,949288,949311,949445,949976,949997,950008,950042,950458,950613,951126,951355,951397,951521,953407,953628,955547,955613,955615,956715,957465,957520,957707,960367,960371,960374,960719,962555,963372,963654
+/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java:931298,931337,931502,932129-932131,932163,932304,932369,932374,932398,932417,932541,932576,932587,932698,932731-932749,932752,932773,932795,932828,932856-932857,932862,932864,932878,932963,933541-933575,933598,933613,933679,933879,934339,934954,935014-935048,935065,935186-935513,935521-935522,935553-935962,936522,936544,936605,936657-936726,937039,937360,938582-938646,938989,939111,939611,939649,940433,940447,940451-940452,940666,940699,940730,940878-940892,940994,941270,941363,942166,942288,942676,942719,943142,943493,943931,945057,945090,945130,945245,945343,945420,946139,946330,946338,946599,948011,948082,948429,949288,949311,949445,949976,949997,950008,950042,950458,950613,951126,951355,951397,951521,953407,953628,955547,955613,955615,956715,957465,957520,957707,960367,960371,960374,960719,962555,963372,963654,963720
 /lucene/java/branches/flex_1458/src/test/org/apache/lucene/util/TestAttributeSource.java:924791,924850,930201
 /lucene/java/branches/lucene_2_9/src/test/org/apache/lucene/util/TestAttributeSource.java:896850,909334,948516
 /lucene/java/trunk/src/test/org/apache/lucene/util/TestAttributeSource.java:924483-925561

Propchange: lucene/dev/branches/branch_3x/solr/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jul 13 16:12:21 2010
@@ -1,4 +1,4 @@
-/lucene/dev/trunk/solr:931298,931337,931502,932129-932131,932163,932304,932369,932374,932398,932417,932541,932576,932587,932698,932731-932749,932752,932773,932795,932828,932856-932857,932862,932864,932878,932963,933541-933575,933598,933613,933679,933879,934339,934954,935014-935048,935065,935186-935513,935521-935522,935553-935962,936522,936544,936605,936657-936726,937039,937360,938582-938646,938989,939111,939611,939649,940433,940447,940451-940452,940666,940699,940730,940878-940892,940994,941270,941363,942166,942235,942288,942676,942719,943142,943493,943931,945057,945090,945130,945245,945343,945420,946139,946330,946338,946599,948011,948082,948429,949288,949311,949445,949976,949997,950008,950042,950125,950207,950458,950613,950723,950835,951126,951355,951397,951521,953628,953886,954336,955547,955613,955615,956715,957465,957520,957707,960367,960374,960719,961941,962555,962714,963372,963654
+/lucene/dev/trunk/solr:931298,931337,931502,932129-932131,932163,932304,932369,932374,932398,932417,932541,932576,932587,932698,932731-932749,932752,932773,932795,932828,932856-932857,932862,932864,932878,932963,933541-933575,933598,933613,933679,933879,934339,934954,935014-935048,935065,935186-935513,935521-935522,935553-935962,936522,936544,936605,936657-936726,937039,937360,938582-938646,938989,939111,939611,939649,940433,940447,940451-940452,940666,940699,940730,940878-940892,940994,941270,941363,942166,942235,942288,942676,942719,943142,943493,943931,945057,945090,945130,945245,945343,945420,946139,946330,946338,946599,948011,948082,948429,949288,949311,949445,949976,949997,950008,950042,950125,950207,950458,950613,950723,950835,951126,951355,951397,951521,953628,953886,954336,955547,955613,955615,956715,957465,957520,957707,960367,960374,960719,961941,962555,962714,963372,963654,963720
 /lucene/java/branches/lucene_2_4/solr:748824
 /lucene/java/branches/lucene_2_9/solr:817269-818600,825998,829134,829881,831036,896850,909334
 /lucene/java/branches/lucene_2_9_back_compat_tests/solr:818601-821336

Propchange: lucene/dev/branches/branch_3x/solr/example/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jul 13 16:12:21 2010
@@ -1,4 +1,4 @@
-/lucene/dev/trunk/solr/example:931298,931337,931502,932129-932131,932163,932304,932369,932374,932398,932417,932541,932576,932587,932698,932731-932749,932752,932773,932795,932828,932856-932857,932862,932864,932878,932963,933541-933575,933598,933613,933679,933879,934339,934954,935014-935048,935065,935186-935513,935521-935522,935553-935962,936522,936544,936605,936657-936726,937039,937360,938582-938646,938989,939111,939611,939649,940433,940447,940451-940452,940666,940699,940730,940878-940892,940994,941270,941363,942166,942235,942288,942676,942719,943142,943493,943931,945057,945090,945130,945245,945343,945420,946139,946330,946338,946599,948011,948082,948429,949288,949311,949445,949976,949997,950008,950042,950125,950207,950458,950613,950723,950835,951126,951355,951397,951521,953628,953886,954336,955547,955613,955615,956715,957465,957520,957707,960367,960374,961821,961941,962555,962714,963372,963654
+/lucene/dev/trunk/solr/example:931298,931337,931502,932129-932131,932163,932304,932369,932374,932398,932417,932541,932576,932587,932698,932731-932749,932752,932773,932795,932828,932856-932857,932862,932864,932878,932963,933541-933575,933598,933613,933679,933879,934339,934954,935014-935048,935065,935186-935513,935521-935522,935553-935962,936522,936544,936605,936657-936726,937039,937360,938582-938646,938989,939111,939611,939649,940433,940447,940451-940452,940666,940699,940730,940878-940892,940994,941270,941363,942166,942235,942288,942676,942719,943142,943493,943931,945057,945090,945130,945245,945343,945420,946139,946330,946338,946599,948011,948082,948429,949288,949311,949445,949976,949997,950008,950042,950125,950207,950458,950613,950723,950835,951126,951355,951397,951521,953628,953886,954336,955547,955613,955615,956715,957465,957520,957707,960367,960374,961821,961941,962555,962714,963372,963654,963720
 /lucene/java/branches/lucene_2_4/solr/example:748824
 /lucene/java/branches/lucene_2_9/solr/example:817269-818600,825998,829134,829881,831036,896850,909334
 /lucene/java/branches/lucene_2_9_back_compat_tests/solr/example:818601-821336