Posted to java-commits@lucene.apache.org by mi...@apache.org on 2009/04/09 19:17:47 UTC

svn commit: r763737 [2/2] - in /lucene/java/trunk: ./ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTas...

Added: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java?rev=763737&view=auto
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java (added)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java Thu Apr  9 17:17:46 2009
@@ -0,0 +1,793 @@
+package org.apache.lucene.index;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexWriterReader extends LuceneTestCase {
+  static PrintStream infoStream;
+  
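+  // Returns how many documents in r contain the term t, by walking the
+  // reader's TermDocs enumeration.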
+  public static int count(Term t, IndexReader r) throws IOException {
+    int count = 0;
+    TermDocs td = r.termDocs(t);
+    while (td.next()) {
+      td.doc();
+      count++;
+    }
+    td.close();
+    return count;
+  }
+
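+  // Updates a document while a getReader reader is open, then checks that a
+  // newly obtained reader (and, after close, a directory reader) sees the
+  // replacement under the new id.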
+  public void testUpdateDocument() throws Exception {
+    boolean optimize = true;
+
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+
+    // create the index
+    createIndexNoClose(!optimize, "index1", writer);
+
+    // writer.flush(false, true, true);
+
+    // get a reader
+    IndexReader r1 = writer.getReader();
+
+    String id10 = r1.document(10).getField("id").stringValue();
+    
+    Document newDoc = r1.document(10);
+    newDoc.removeField("id");
+    newDoc.add(new Field("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
+    writer.updateDocument(new Term("id", id10), newDoc);
+
+    IndexReader r2 = writer.getReader();
+    assertEquals(0, count(new Term("id", id10), r2));
+    assertEquals(1, count(new Term("id", Integer.toString(8000)), r2));
+    
+    r1.close();
+    r2.close();
+    writer.close();
+    
+    IndexReader r3 = IndexReader.open(dir1);
+    assertEquals(0, count(new Term("id", id10), r3));
+    assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
+    r3.close();
+    
+    dir1.close();
+  }
+  
+  /**
+   * Tests adding a separately built index via IW.addIndexesNoOptimize.
+   */
+  public void testAddIndexes() throws Exception {
+    boolean optimize = false;
+
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+    // create the index
+    createIndexNoClose(!optimize, "index1", writer);
+    writer.flush(false, true, true);
+
+    // create a 2nd index
+    Directory dir2 = new MockRAMDirectory();
+    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer2.setInfoStream(infoStream);
+    createIndexNoClose(!optimize, "index2", writer2);
+    writer2.close();
+
+    writer.addIndexesNoOptimize(new Directory[] { dir2 });
+
+    IndexReader r1 = writer.getReader();
+    assertEquals(200, r1.maxDoc());
+
+    int index2df = r1.docFreq(new Term("indexname", "index2"));
+
+    assertEquals(100, index2df);
+
+    // verify the docs are from different indexes
+    Document doc5 = r1.document(5);
+    assertEquals("index1", doc5.get("indexname"));
+    Document doc150 = r1.document(150);
+    assertEquals("index2", doc150.get("indexname"));
+    r1.close();
+    writer.close();
+    dir1.close();
+  }
+  
+  public void testAddIndexes2() throws Exception {
+    boolean optimize = false;
+
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+
+    // create a 2nd index
+    Directory dir2 = new MockRAMDirectory();
+    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer2.setInfoStream(infoStream);
+    createIndexNoClose(!optimize, "index2", writer2);
+    writer2.close();
+
+    writer.addIndexesNoOptimize(new Directory[] { dir2 });
+    writer.addIndexesNoOptimize(new Directory[] { dir2 });
+    writer.addIndexesNoOptimize(new Directory[] { dir2 });
+    writer.addIndexesNoOptimize(new Directory[] { dir2 });
+    writer.addIndexesNoOptimize(new Directory[] { dir2 });
+
+    IndexReader r1 = writer.getReader();
+    assertEquals(500, r1.maxDoc());
+    
+    r1.close();
+    writer.close();
+    dir1.close();
+  }
+
+  /**
+   * Tests deletes using IW.deleteDocuments.
+   */
+  public void testDeleteFromIndexWriter() throws Exception {
+    boolean optimize = true;
+
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+    // create the index
+    createIndexNoClose(!optimize, "index1", writer);
+    writer.flush(false, true, true);
+    // get a reader
+    IndexReader r1 = writer.getReader();
+
+    String id10 = r1.document(10).getField("id").stringValue();
+
+    // deleted IW docs should not show up in the next getReader
+    writer.deleteDocuments(new Term("id", id10));
+    IndexReader r2 = writer.getReader();
+    assertEquals(1, count(new Term("id", id10), r1));
+    assertEquals(0, count(new Term("id", id10), r2));
+    
+    String id50 = r1.document(50).getField("id").stringValue();
+    assertEquals(1, count(new Term("id", id50), r1));
+    
+    writer.deleteDocuments(new Term("id", id50));
+    
+    IndexReader r3 = writer.getReader();
+    assertEquals(0, count(new Term("id", id10), r3));
+    assertEquals(0, count(new Term("id", id50), r3));
+    
+    String id75 = r1.document(75).getField("id").stringValue();
+    writer.deleteDocuments(new TermQuery(new Term("id", id75)));
+    IndexReader r4 = writer.getReader();
+    assertEquals(1, count(new Term("id", id75), r3));
+    assertEquals(0, count(new Term("id", id75), r4));
+    
+    r1.close();
+    r2.close();
+    r3.close();
+    r4.close();
+    writer.close();
+        
+    // reopen the writer to verify the delete made it to the directory
+    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+    IndexReader w2r1 = writer.getReader();
+    assertEquals(0, count(new Term("id", id10), w2r1));
+    w2r1.close();
+    writer.close();
+    dir1.close();
+  }
+
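+  // Spawns AddDirectoriesThreads that concurrently addIndexes into the main
+  // writer, then checks the tracked doc count against both the writer and a
+  // freshly opened reader.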
+  public void testAddIndexesAndDoDeletesThreads() throws Throwable {
+    final int numIter = 5;
+    int numDirs = 3;
+    
+    Directory mainDir = new MockRAMDirectory();
+    IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    mainWriter.setInfoStream(infoStream);
+    AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numDirs, mainWriter);
+    addDirThreads.launchThreads(numIter);
+    addDirThreads.joinThreads();
+    
+    //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
+    //    * addDirThreads.NUM_INIT_DOCS, addDirThreads.mainWriter.numDocs());
+    assertEquals(addDirThreads.count.intValue(), addDirThreads.mainWriter.numDocs());
+
+    addDirThreads.close(true);
+    
+    assertEquals(0, addDirThreads.failures.size());
+
+    _TestUtil.checkIndex(mainDir);
+
+    IndexReader reader = IndexReader.open(mainDir);
+    assertEquals(addDirThreads.count.intValue(), reader.numDocs());
+    //assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
+    //    * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
+    reader.close();
+
+    addDirThreads.closeDir();
+    mainDir.close();
+  }
+  
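+  // Helper (not instantiated by any test in this file): threads that each
+  // pull a randomly pre-selected term and delete the matching documents from
+  // mainWriter, recording deleted terms and any failures.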
+  private class DeleteThreads {
+    final static int NUM_THREADS = 5;
+    final Thread[] threads = new Thread[NUM_THREADS];
+    IndexWriter mainWriter;
+    AtomicInteger delCount = new AtomicInteger();
+    List deletedTerms = new ArrayList();
+    LinkedList toDeleteTerms = new LinkedList();
+    Random random;
+    final List failures = new ArrayList();
+    
+    public DeleteThreads(IndexWriter mainWriter) throws IOException {
+      this.mainWriter = mainWriter;
+      IndexReader reader = mainWriter.getReader();
+      int maxDoc = reader.maxDoc();
+      random = newRandom();
+      int iter = random.nextInt(maxDoc);
+      for (int x=0; x < iter; x++) {
+        int doc = random.nextInt(iter);
+        String id = reader.document(doc).get("id");
+        toDeleteTerms.add(new Term("id", id));
+      }
+    }
+    
+    Term getDeleteTerm() {
+      synchronized (toDeleteTerms) {
+        return (Term)toDeleteTerms.removeFirst();
+      }
+    }
+    
+    void launchThreads(final int numIter) {
+      for (int i = 0; i < NUM_THREADS; i++) {
+        threads[i] = new Thread() {
+          public void run() {
+            try {
+              Term term = getDeleteTerm();
+              mainWriter.deleteDocuments(term);
+              synchronized (deletedTerms) {
+                deletedTerms.add(term);
+              }
+            } catch (Throwable t) {
+              handle(t);
+            }
+          }
+        };
+      }
+      for (int i = 0; i < NUM_THREADS; i++)
+        threads[i].start();
+    }
+    
+    void handle(Throwable t) {
+      t.printStackTrace(System.out);
+      synchronized (failures) {
+        failures.add(t);
+      }
+    }
+    
+    void joinThreads() {
+      for (int i = 0; i < NUM_THREADS; i++)
+        try {
+          threads[i].join();
+        } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
+        }
+    }
+  }
+  
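+  // Helper that builds a small source index once, then runs NUM_THREADS
+  // threads that repeatedly merge copies of it into mainWriter via the
+  // addIndexes variants in doBody, tracking the expected doc total in count.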
+  private class AddDirectoriesThreads {
+    Directory addDir;
+    final static int NUM_THREADS = 5;
+    final static int NUM_INIT_DOCS = 100;
+    int numDirs;
+    final Thread[] threads = new Thread[NUM_THREADS];
+    IndexWriter mainWriter;
+    final List failures = new ArrayList();
+    IndexReader[] readers;
+    boolean didClose = false;
+    AtomicInteger count = new AtomicInteger(0);
+    AtomicInteger numAddIndexesNoOptimize = new AtomicInteger(0);
+    
+    public AddDirectoriesThreads(int numDirs, IndexWriter mainWriter) throws Throwable {
+      this.numDirs = numDirs;
+      this.mainWriter = mainWriter;
+      addDir = new MockRAMDirectory();
+      IndexWriter writer = new IndexWriter(addDir, new WhitespaceAnalyzer(),
+          IndexWriter.MaxFieldLength.LIMITED);
+      writer.setMaxBufferedDocs(2);
+      for (int i = 0; i < NUM_INIT_DOCS; i++) {
+        Document doc = createDocument(i, "addindex", 4);
+        writer.addDocument(doc);
+      }
+        
+      writer.close();
+      
+      readers = new IndexReader[numDirs];
+      for (int i = 0; i < numDirs; i++)
+        readers[i] = IndexReader.open(addDir);
+    }
+    
+    void joinThreads() {
+      for (int i = 0; i < NUM_THREADS; i++)
+        try {
+          threads[i].join();
+        } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
+        }
+    }
+
+    void close(boolean doWait) throws Throwable {
+      didClose = true;
+      mainWriter.close(doWait);
+    }
+
+    void closeDir() throws Throwable {
+      for (int i = 0; i < numDirs; i++)
+        readers[i].close();
+      addDir.close();
+    }
+    
+    void handle(Throwable t) {
+      t.printStackTrace(System.out);
+      synchronized (failures) {
+        failures.add(t);
+      }
+    }
+    
+    void launchThreads(final int numIter) {
+      for (int i = 0; i < NUM_THREADS; i++) {
+        threads[i] = new Thread() {
+          public void run() {
+            try {
+              final Directory[] dirs = new Directory[numDirs];
+              for (int k = 0; k < numDirs; k++)
+                dirs[k] = new MockRAMDirectory(addDir);
+              for (int x = 0; x < numIter; x++) {
+                // cycle through the addIndexes variants and commit
+                doBody(x, dirs);
+              }
+            } catch (Throwable t) {
+              handle(t);
+            }
+          }
+        };
+      }
+      for (int i = 0; i < NUM_THREADS; i++)
+        threads[i].start();
+    }
+    
+    void doBody(int j, Directory[] dirs) throws Throwable {
+      switch (j % 4) {
+        case 0:
+          mainWriter.addIndexes(dirs);
+          break;
+        case 1:
+          mainWriter.addIndexesNoOptimize(dirs);
+          numAddIndexesNoOptimize.incrementAndGet();
+          break;
+        case 2:
+          mainWriter.addIndexes(readers);
+          break;
+        case 3:
+          mainWriter.commit();
+          // a commit adds no documents, so skip the count update below
+          return;
+      }
+      count.addAndGet(dirs.length*NUM_INIT_DOCS);
+    }
+  }
+
+  public void testIndexWriterReopenSegmentOptimize() throws Exception {
+    doTestIndexWriterReopenSegment(true);
+  }
+
+  public void testIndexWriterReopenSegment() throws Exception {
+    doTestIndexWriterReopenSegment(false);
+  }
+
+  /**
+   * Tests creating a segment, then checks to ensure the segment can be seen
+   * via IW.getReader.
+   */
+  public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception {
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+    DirectoryIndexReader r1 = (DirectoryIndexReader) writer.getReader();
+    assertEquals(0, r1.maxDoc());
+    createIndexNoClose(false, "index1", writer);
+    writer.flush(!optimize, true, true);
+
+    DirectoryIndexReader iwr1 = (DirectoryIndexReader) writer.getReader();
+    assertEquals(100, iwr1.maxDoc());
+
+    DirectoryIndexReader r2 = (DirectoryIndexReader) writer.getReader();
+    assertEquals(r2.maxDoc(), 100);
+    // add 100 documents
+    for (int x = 10000; x < 10000 + 100; x++) {
+      Document d = createDocument(x, "index1", 5);
+      writer.addDocument(d);
+    }
+    writer.flush(false, true, true);
+    // verify the reader was reopened internally
+    IndexReader iwr2 = writer.getReader();
+    assertTrue(iwr2 != r1);
+    assertEquals(200, iwr2.maxDoc());
+    // should have flushed out a segment
+    IndexReader r3 = writer.getReader();
+    assertTrue(r2 != r3);
+    assertEquals(200, r3.maxDoc());
+
+    // close all readers obtained from the writer; closing such a reader
+    // only releases its point-in-time view, it does not flush the writer
+    r1.close();
+    iwr1.close();
+    r2.close();
+    r3.close();
+    iwr2.close();
+    writer.close();
+
+    // test whether the changes made it to the directory
+    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    IndexReader w2r1 = writer.getReader();
+    // ensure the added documents were actually flushed to the directory
+    assertEquals(200, w2r1.maxDoc());
+    w2r1.close();
+    writer.close();
+
+    dir1.close();
+  }
+
+  
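+  // Builds a document with stored "id" and "indexname" keys plus analyzed
+  // fields: field1 holds "a<n>"; field2..field<numFields> hold "a<n> b<n>",
+  // which the field3:"b<n>" deletes in testDuringAddDelete rely on.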
+  public static Document createDocument(int n, String indexName, int numFields) {
+    StringBuffer sb = new StringBuffer();
+    Document doc = new Document();
+    doc.add(new Field("id", Integer.toString(n), Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("indexname", indexName, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    sb.append("a");
+    sb.append(n);
+    doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    sb.append(" b");
+    sb.append(n);
+    for (int i = 1; i < numFields; i++) {
+      doc.add(new Field("field" + (i + 1), sb.toString(), Store.YES,
+                        Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    }
+    return doc;
+  }
+
+  /*
+   * Commented-out helper: delete a document by term and return the doc id.
+   *
+   * public static int deleteDocument(Term term, IndexWriter writer) throws
+   * IOException { IndexReader reader = writer.getReader(); TermDocs td =
+   * reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.doc();
+   * //} //writer.deleteDocuments(term); td.close(); return doc; }
+   */
+  public static void createIndex(Directory dir1, String indexName,
+      boolean multiSegment) throws IOException {
+    IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+        IndexWriter.MaxFieldLength.LIMITED);
+    w.setMergePolicy(new LogDocMergePolicy());
+    for (int i = 0; i < 100; i++) {
+      w.addDocument(createDocument(i, indexName, 4));
+      if (multiSegment && (i % 10) == 0) {
+        w.commit(); // flush periodically so the index ends up multi-segment
+      }
+    }
+    if (!multiSegment) {
+      w.optimize();
+    }
+    w.close();
+  }
+
+  public static void createIndexNoClose(boolean multiSegment, String indexName,
+      IndexWriter w) throws IOException {
+    w.setMergePolicy(new LogDocMergePolicy());
+    for (int i = 0; i < 100; i++) {
+      w.addDocument(createDocument(i, indexName, 4));
+    }
+    if (!multiSegment) {
+      w.optimize();
+    }
+  }
+
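+  // IndexReaderWarmer installed on the writer so newly merged segments get
+  // "warmed" before use; this one just counts invocations so testMergeWarmer
+  // can assert it actually ran.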
+  private static class MyWarmer extends IndexWriter.IndexReaderWarmer {
+    int warmCount;
+    public void warm(IndexReader reader) throws IOException {
+      warmCount++;
+    }
+  }
+
+  public void testMergeWarmer() throws Exception {
+
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+                                         IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+
+    // get a reader to put writer into near real-time mode
+    IndexReader r1 = writer.getReader();
+
+    // Enroll warmer
+    MyWarmer warmer = new MyWarmer();
+    writer.setMergedSegmentWarmer(warmer);
+    writer.setMergeFactor(2);
+    writer.setMaxBufferedDocs(2);
+
+    for (int i = 0; i < 10; i++) {
+      writer.addDocument(createDocument(i, "test", 4));
+    }
+    ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
+
+    assertTrue(warmer.warmCount > 0);
+    final int count = warmer.warmCount;
+
+    writer.addDocument(createDocument(17, "test", 4));
+    writer.optimize();
+    assertTrue(warmer.warmCount > count);
+    
+    writer.close();
+    r1.close();
+    dir1.close();
+  }
+
+  public void testAfterCommit() throws Exception {
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+                                         IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+
+    // get a reader to put writer into near real-time mode
+    IndexReader r1 = writer.getReader();
+    _TestUtil.checkIndex(dir1);
+    writer.commit();
+    _TestUtil.checkIndex(dir1);
+    assertEquals(100, r1.numDocs());
+
+    for (int i = 0; i < 10; i++) {
+      writer.addDocument(createDocument(i, "test", 4));
+    }
+    ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
+
+    IndexReader r2 = r1.reopen();
+    if (r2 != r1) {
+      r1.close();
+      r1 = r2;
+    }
+    assertEquals(110, r1.numDocs());
+    writer.close();
+    r1.close();
+    dir1.close();
+  }
+
+  // Make sure reader remains usable even if IndexWriter closes
+  public void testAfterClose() throws Exception {
+    Directory dir1 = new MockRAMDirectory();
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+                                         IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+
+    IndexReader r = writer.getReader();
+    writer.close();
+
+    _TestUtil.checkIndex(dir1);
+
+    // reader should remain usable even after IndexWriter is closed:
+    assertEquals(100, r.numDocs());
+    Query q = new TermQuery(new Term("indexname", "test"));
+    assertEquals(100, new IndexSearcher(r).search(q, 10).totalHits);
+
+    try {
+      r.reopen();
+      fail("failed to hit AlreadyClosedException");
+    } catch (AlreadyClosedException ace) {
+      // expected
+    }
+    r.close();
+    dir1.close();
+  }
+
+  // Stress test reopen during addIndexes
+  public void testDuringAddIndexes() throws Exception {
+    Directory dir1 = new MockRAMDirectory();
+    final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+                                               IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+    writer.setMergeFactor(2);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+    writer.commit();
+
+    final Directory[] dirs = new Directory[10];
+    for (int i=0;i<10;i++) {
+      dirs[i] = new MockRAMDirectory(dir1);
+    }
+
+    IndexReader r = writer.getReader();
+
+    final int NUM_THREAD = 5;
+    final float SECONDS = 3;
+
+    final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
+    final List excs = Collections.synchronizedList(new ArrayList());
+
+    final Thread[] threads = new Thread[NUM_THREAD];
+    for(int i=0;i<NUM_THREAD;i++) {
+      threads[i] = new Thread() {
+          public void run() {
+            while(System.currentTimeMillis() < endTime) {
+              try {
+                writer.addIndexesNoOptimize(dirs);
+              } catch (Throwable t) {
+                excs.add(t);
+                throw new RuntimeException(t);
+              }
+            }
+          }
+        };
+      threads[i].setDaemon(true);
+      threads[i].start();
+    }
+
+    int lastCount = 0;
+    while(System.currentTimeMillis() < endTime) {
+      IndexReader r2 = r.reopen();
+      if (r2 != r) {
+        r.close();
+        r = r2;
+      }
+      Query q = new TermQuery(new Term("indexname", "test"));
+      final int count = new IndexSearcher(r).search(q, 10).totalHits;
+      assertTrue(count >= lastCount);
+      lastCount = count;
+    }
+
+    for(int i=0;i<NUM_THREAD;i++) {
+      threads[i].join();
+    }
+
+    assertEquals(0, excs.size());
+    writer.close();
+
+    _TestUtil.checkIndex(dir1);
+    r.close();
+    dir1.close();
+  }
+
+  // Stress test reopen during add/delete
+  public void testDuringAddDelete() throws Exception {
+    Directory dir1 = new MockRAMDirectory();
+    final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
+                                               IndexWriter.MaxFieldLength.LIMITED);
+    writer.setInfoStream(infoStream);
+    writer.setMergeFactor(2);
+
+    // create the index
+    createIndexNoClose(false, "test", writer);
+    writer.commit();
+
+    IndexReader r = writer.getReader();
+
+    final int NUM_THREAD = 5;
+    final float SECONDS = 3;
+
+    final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
+    final List excs = Collections.synchronizedList(new ArrayList());
+
+    final Thread[] threads = new Thread[NUM_THREAD];
+    for(int i=0;i<NUM_THREAD;i++) {
+      threads[i] = new Thread() {
+          public void run() {
+            int count = 0;
+            final Random r = new Random();
+            while(System.currentTimeMillis() < endTime) {
+              try {
+                for(int i=0;i<10;i++) {
+                  writer.addDocument(createDocument(10*count+i, "test", 4));
+                }
+                count++;
+                final int limit = count*10;
+                for(int i=0;i<5;i++) {
+                  int x = r.nextInt(limit);
+                  writer.deleteDocuments(new Term("field3", "b"+x));
+                }
+              } catch (Throwable t) {
+                excs.add(t);
+                throw new RuntimeException(t);
+              }
+            }
+          }
+        };
+      threads[i].setDaemon(true);
+      threads[i].start();
+    }
+
+    int sum = 0;
+    while(System.currentTimeMillis() < endTime) {
+      IndexReader r2 = r.reopen();
+      if (r2 != r) {
+        r.close();
+        r = r2;
+      }
+      Query q = new TermQuery(new Term("indexname", "test"));
+      sum += new IndexSearcher(r).search(q, 10).totalHits;
+    }
+
+    for(int i=0;i<NUM_THREAD;i++) {
+      threads[i].join();
+    }
+    assertTrue(sum > 0);
+
+    assertEquals(0, excs.size());
+    writer.close();
+
+    _TestUtil.checkIndex(dir1);
+    r.close();
+    dir1.close();
+  }
+}
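
The pattern these tests exercise is the near-real-time reader loop: pull a
reader from the writer with IndexWriter.getReader(), search it, and refresh
it with IndexReader.reopen() as the writer keeps changing. A minimal sketch
against the same 2.9-era API (the class name NRTSketch and the RAMDirectory
setup are illustrative, not part of this commit):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class NRTSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
            IndexWriter.MaxFieldLength.LIMITED);

        Document doc = new Document();
        doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.addDocument(doc);                 // buffered; not yet committed

        IndexReader reader = writer.getReader(); // near-real-time: sees the add
        System.out.println("numDocs=" + reader.numDocs()); // 1

        writer.addDocument(doc);                 // more uncommitted changes
        IndexReader newReader = reader.reopen(); // refresh against the writer
        if (newReader != reader) {
          reader.close();                        // drop the old point-in-time view
          reader = newReader;
        }
        System.out.println("numDocs=" + reader.numDocs()); // 2

        reader.close();
        writer.close();                          // close commits the changes
        dir.close();
      }
    }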

Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=763737&r1=763736&r2=763737&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java Thu Apr  9 17:17:46 2009
@@ -51,7 +51,21 @@
       return true;
     }
   }
-
+  
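+  // Indexes from multiple threads without extra synchronization, then
+  // verifies a reader obtained via IW.getReader against a reader opened on
+  // the directory after commit.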
+  public void testRandomIWReader() throws Throwable {
+    r = newRandom();
+    Directory dir = new MockRAMDirectory();
+    
+    // TODO: verify equals using IW.getReader
+    DocsAndWriter dw = indexRandomIWReader(10, 100, 100, dir);
+    IndexReader r = dw.writer.getReader();
+    dw.writer.commit();
+    verifyEquals(r, dir, "id");
+    r.close();
+    dw.writer.close();
+    dir.close();
+  }
+  
   public void testRandom() throws Throwable {
     r = newRandom();
     Directory dir1 = new MockRAMDirectory();
@@ -101,20 +115,69 @@
   // This test avoids using any extra synchronization in the multiple
   // indexing threads to test that IndexWriter does correctly synchronize
   // everything.
+  
+  public static class DocsAndWriter {
+    Map docs;
+    IndexWriter writer;
+  }
+  
+  public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
+    Map docs = new HashMap();
+    IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+    w.setUseCompoundFile(false);
+
+    /***
+        w.setMaxMergeDocs(Integer.MAX_VALUE);
+        w.setMaxFieldLength(10000);
+        w.setRAMBufferSizeMB(1);
+        w.setMergeFactor(10);
+    ***/
+
+    // force many merges
+    w.setMergeFactor(mergeFactor);
+    w.setRAMBufferSizeMB(.1);
+    w.setMaxBufferedDocs(maxBufferedDocs);
+
+    threads = new IndexingThread[nThreads];
+    for (int i=0; i<threads.length; i++) {
+      IndexingThread th = new IndexingThread();
+      th.w = w;
+      th.base = 1000000*i;
+      th.range = range;
+      th.iterations = iterations;
+      threads[i] = th;
+    }
+
+    for (int i=0; i<threads.length; i++) {
+      threads[i].start();
+    }
+    for (int i=0; i<threads.length; i++) {
+      threads[i].join();
+    }
+
+    // w.optimize();
+    //w.close();    
+
+    for (int i=0; i<threads.length; i++) {
+      IndexingThread th = threads[i];
+      synchronized(th) {
+        docs.putAll(th.docs);
+      }
+    }
 
+    _TestUtil.checkIndex(dir);
+    DocsAndWriter dw = new DocsAndWriter();
+    dw.docs = docs;
+    dw.writer = w;
+    return dw;
+  }
+  
   public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map docs = new HashMap();
     for(int iter=0;iter<3;iter++) {
       IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
       w.setUseCompoundFile(false);
 
-      /***
-          w.setMaxMergeDocs(Integer.MAX_VALUE);
-          w.setMaxFieldLength(10000);
-          w.setRAMBufferSizeMB(1);
-          w.setMergeFactor(10);
-      ***/
-
       // force many merges
       w.setMergeFactor(mergeFactor);
       w.setRAMBufferSizeMB(.1);
@@ -177,6 +240,12 @@
     
     w.close();
   }
+  
+  public static void verifyEquals(IndexReader r1, Directory dir2, String idField) throws Throwable {
+    IndexReader r2 = IndexReader.open(dir2);
+    verifyEquals(r1, r2, idField);
+    r2.close();
+  }
 
   public static void verifyEquals(Directory dir1, Directory dir2, String idField) throws Throwable {
     IndexReader r1 = IndexReader.open(dir1);
@@ -222,7 +291,14 @@
       r2r1[id2] = id1;
 
       // verify stored fields are equivalent
-      verifyEquals(r1.document(id1), r2.document(id2));
+      try {
+        verifyEquals(r1.document(id1), r2.document(id2));
+      } catch (Throwable t) {
+        System.out.println("FAILED id=" + term + " id1=" + id1 + " id2=" + id2 + " term="+ term);
+        System.out.println("  d1=" + r1.document(id1));
+        System.out.println("  d2=" + r2.document(id2));
+        throw t;
+      }
 
       try {
         // verify term vectors are equivalent        

Modified: lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java?rev=763737&r1=763736&r2=763737&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java Thu Apr  9 17:17:46 2009
@@ -212,7 +212,7 @@
       if (preventDoubleWrite && createdFiles.contains(name) && !name.equals("segments.gen"))
         throw new IOException("file \"" + name + "\" was already written to");
       if (noDeleteOpenFile && openFiles.containsKey(name))
-       throw new IOException("MockRAMDirectory: file \"" + name + "\" is still open: cannot overwrite");
+        throw new IOException("MockRAMDirectory: file \"" + name + "\" is still open: cannot overwrite");
     }
     RAMFile file = new RAMFile(this);
     synchronized (this) {
@@ -234,7 +234,7 @@
       }
     }
 
-    return new MockRAMOutputStream(this, file);
+    return new MockRAMOutputStream(this, file, name);
   }
 
   public IndexInput openInput(String name) throws IOException {

Modified: lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java?rev=763737&r1=763736&r2=763737&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java Thu Apr  9 17:17:46 2009
@@ -29,13 +29,15 @@
 public class MockRAMOutputStream extends RAMOutputStream {
   private MockRAMDirectory dir;
   private boolean first=true;
+  private final String name;
   
   byte[] singleByte = new byte[1];
 
   /** Construct an empty output buffer. */
-  public MockRAMOutputStream(MockRAMDirectory dir, RAMFile f) {
+  public MockRAMOutputStream(MockRAMDirectory dir, RAMFile f, String name) {
     super(f);
     this.dir = dir;
+    this.name = name;
   }
 
   public void close() throws IOException {
@@ -66,7 +68,7 @@
     // If MockRAMDir crashed since we were opened, then
     // don't write anything:
     if (dir.crashed)
-      throw new IOException("MockRAMDirectory was crashed");
+      throw new IOException("MockRAMDirectory was crashed; cannot write to " + name);
 
     // Enforce disk full:
     if (dir.maxSize != 0 && freeSpace <= len) {
@@ -84,7 +86,7 @@
       if (realUsage > dir.maxUsedSize) {
         dir.maxUsedSize = realUsage;
       }
-      throw new IOException("fake disk full at " + dir.getRecomputedActualSizeInBytes() + " bytes");
+      throw new IOException("fake disk full at " + dir.getRecomputedActualSizeInBytes() + " bytes when writing " + name);
     } else {
       super.writeBytes(b, offset, len);
     }