Posted to commits@lucene.apache.org by rm...@apache.org on 2010/11/04 21:49:47 UTC

svn commit: r1031231 [6/9] - in /lucene/dev/branches/branch_3x: ./ lucene/ lucene/backwards/src/ lucene/backwards/src/test/org/apache/lucene/analysis/ lucene/backwards/src/test/org/apache/lucene/document/ lucene/backwards/src/test/org/apache/lucene/ind...

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1031231&r1=1031230&r2=1031231&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Thu Nov  4 20:49:44 2010
@@ -18,12 +18,17 @@ package org.apache.lucene.index;
  */
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
@@ -558,4 +563,286 @@ public class TestAddIndexes extends Luce
     dir2.close();
   }
 
+  // TODO: these are also in TestIndexWriter... add a simple doc-writing method
+  // like this to LuceneTestCase?
+  private void addDoc(IndexWriter writer) throws IOException
+  {
+      Document doc = new Document();
+      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+  }
+  
+  private abstract class RunAddIndexesThreads {
+
+    Directory dir, dir2;
+    final static int NUM_INIT_DOCS = 17;
+    IndexWriter writer2;
+    final List<Throwable> failures = new ArrayList<Throwable>();
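+    // set before close()/rollback() so handle() in the subclasses can tell
+    // expected shutdown races from real failures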
+    volatile boolean didClose;
+    final IndexReader[] readers;
+    final int NUM_COPY;
+    final static int NUM_THREADS = 5;
+    final Thread[] threads = new Thread[NUM_THREADS];
+
+    public RunAddIndexesThreads(int numCopy) throws Throwable {
+      NUM_COPY = numCopy;
+      dir = new MockDirectoryWrapper(new RAMDirectory());
+      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer())
+          .setMaxBufferedDocs(2));
+      for (int i = 0; i < NUM_INIT_DOCS; i++)
+        addDoc(writer);
+      writer.close();
+
+      dir2 = new MockDirectoryWrapper(new RAMDirectory());
+      writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+      writer2.commit();
+
+      readers = new IndexReader[NUM_COPY];
+      for(int i=0;i<NUM_COPY;i++)
+        readers[i] = IndexReader.open(dir, true);
+    }
+
+    void launchThreads(final int numIter) {
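+      // numIter <= 0 means run until doBody() throws (e.g. once writer2 is
+      // closed underneath the threads); handle() decides what counts as a failure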
+
+      for(int i=0;i<NUM_THREADS;i++) {
+        threads[i] = new Thread() {
+            @Override
+            public void run() {
+              try {
+
+                final Directory[] dirs = new Directory[NUM_COPY];
+                for(int k=0;k<NUM_COPY;k++)
+                  dirs[k] = new MockDirectoryWrapper(new RAMDirectory(dir));
+
+                int j=0;
+
+                while(true) {
+                  // System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
+                  if (numIter > 0 && j == numIter)
+                    break;
+                  doBody(j++, dirs);
+                }
+              } catch (Throwable t) {
+                handle(t);
+              }
+            }
+          };
+      }
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].start();
+    }
+
+    void joinThreads() throws Exception {
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].join();
+    }
+
+    void close(boolean doWait) throws Throwable {
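+      // doWait == false asks IndexWriter.close to abort any running merges
+      // instead of waiting for them to finish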
+      didClose = true;
+      writer2.close(doWait);
+    }
+
+    void closeDir() throws Throwable {
+      for(int i=0;i<NUM_COPY;i++)
+        readers[i].close();
+      dir2.close();
+    }
+
+    abstract void doBody(int j, Directory[] dirs) throws Throwable;
+    abstract void handle(Throwable t);
+  }
+
+  private class CommitAndAddIndexes extends RunAddIndexesThreads {
+    public CommitAndAddIndexes(int numCopy) throws Throwable {
+      super(numCopy);
+    }
+
+    @Override
+    void handle(Throwable t) {
+      t.printStackTrace(System.out);
+      synchronized(failures) {
+        failures.add(t);
+      }
+    }
+
+    @Override
+    void doBody(int j, Directory[] dirs) throws Throwable {
+      switch(j%5) {
+      case 0:
+        writer2.addIndexes(dirs);
+        writer2.optimize();
+        break;
+      case 1:
+        writer2.addIndexes(dirs);
+        break;
+      case 2:
+        writer2.addIndexes(readers);
+        break;
+      case 3:
+        writer2.addIndexes(dirs);
+        writer2.maybeMerge();
+        break;
+      case 4:
+        writer2.commit();
+      }
+    }
+  }
+  
+  // LUCENE-1335: test simultaneous addIndexes & commits
+  // from multiple threads
+  public void testAddIndexesWithThreads() throws Throwable {
+
+    final int NUM_ITER = 15;
+    final int NUM_COPY = 3;
+    CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
+    c.launchThreads(NUM_ITER);
+
+    for(int i=0;i<100;i++)
+      addDoc(c.writer2);
+
+    c.joinThreads();
+
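+    // Each of the NUM_THREADS threads runs NUM_ITER iterations, and 4 of every
+    // 5 iterations call addIndexes (case 4 only commits); each addIndexes pulls
+    // in NUM_COPY copies of the NUM_INIT_DOCS-doc source index, on top of the
+    // 100 docs added directly above: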
+    int expectedNumDocs = 100+NUM_COPY*(4*NUM_ITER/5)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS;
+    assertEquals(expectedNumDocs, c.writer2.numDocs());
+
+    c.close(true);
+
+    assertTrue(c.failures.size() == 0);
+
+    _TestUtil.checkIndex(c.dir2);
+
+    IndexReader reader = IndexReader.open(c.dir2, true);
+    assertEquals(expectedNumDocs, reader.numDocs());
+    reader.close();
+
+    c.closeDir();
+  }
+
+  private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
+    public CommitAndAddIndexes2(int numCopy) throws Throwable {
+      super(numCopy);
+    }
+
+    @Override
+    void handle(Throwable t) {
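+      // Once close() races the adder threads, addIndexes may throw
+      // AlreadyClosedException (or an NPE out of a writer that is mid-close);
+      // only other throwables are recorded as failures.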
+      if (!(t instanceof AlreadyClosedException) && !(t instanceof NullPointerException)) {
+        t.printStackTrace(System.out);
+        synchronized(failures) {
+          failures.add(t);
+        }
+      }
+    }
+  }
+
+  // LUCENE-1335: test simultaneous addIndexes & close
+  public void testAddIndexesWithClose() throws Throwable {
+    final int NUM_COPY = 3;
+    CommitAndAddIndexes2 c = new CommitAndAddIndexes2(NUM_COPY);
+    //c.writer2.setInfoStream(System.out);
+    c.launchThreads(-1);
+
+    // Close w/o first stopping/joining the threads
+    c.close(true);
+    //c.writer2.close();
+
+    c.joinThreads();
+
+    _TestUtil.checkIndex(c.dir2);
+
+    c.closeDir();
+
+    assertTrue(c.failures.size() == 0);
+  }
+
+  private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
+    public CommitAndAddIndexes3(int numCopy) throws Throwable {
+      super(numCopy);
+    }
+
+    @Override
+    void doBody(int j, Directory[] dirs) throws Throwable {
+      switch(j%5) {
+      case 0:
+        writer2.addIndexes(dirs);
+        writer2.optimize();
+        break;
+      case 1:
+        writer2.addIndexes(dirs);
+        break;
+      case 2:
+        writer2.addIndexes(readers);
+        break;
+      case 3:
+        writer2.optimize();
+        break;
+      case 4:
+        writer2.commit();
+      }
+    }
+
+    @Override
+    void handle(Throwable t) {
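+      // Aborted merges and closed-writer exceptions are expected once the
+      // writer has been closed or rolled back; report them as failures only
+      // if didClose was not yet set.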
+      boolean report = true;
+
+      if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
+        report = !didClose;
+      } else if (t instanceof IOException)  {
+        Throwable t2 = t.getCause();
+        if (t2 instanceof MergePolicy.MergeAbortedException) {
+          report = !didClose;
+        }
+      }
+      if (report) {
+        t.printStackTrace(System.out);
+        synchronized(failures) {
+          failures.add(t);
+        }
+      }
+    }
+  }
+
+  // LUCENE-1335: test simultaneous addIndexes & close
+  public void testAddIndexesWithCloseNoWait() throws Throwable {
+
+    final int NUM_COPY = 50;
+    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
+    c.launchThreads(-1);
+
+    Thread.sleep(500);
+
+    // Close w/o first stopping/joining the threads
+    c.close(false);
+
+    c.joinThreads();
+
+    _TestUtil.checkIndex(c.dir2);
+
+    c.closeDir();
+
+    assertTrue(c.failures.size() == 0);
+  }
+
+  // LUCENE-1335: test simultaneous addIndexes & close
+  public void testAddIndexesWithRollback() throws Throwable {
+    
+    final int NUM_COPY = 50;
+    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
+    c.launchThreads(-1);
+
+    Thread.sleep(500);
+
+    // Rollback w/o first stopping/joining the threads
+    c.didClose = true;
+    c.writer2.rollback();
+
+    c.joinThreads();
+
+    _TestUtil.checkIndex(c.dir2);
+
+    c.closeDir();
+
+    assertTrue(c.failures.size() == 0);
+  }
 }

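For readers skimming this hunk: the new tests above all race IndexWriter.addIndexes
against commit/close/rollback from multiple threads. Below is a minimal,
self-contained sketch of that core race, using only the 3.x APIs visible in the
diff (IndexWriterConfig, addIndexes(Directory...), close(boolean)); the class
name and Version.LUCENE_31 are illustrative stand-ins for the test scaffolding.

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class AddIndexesCloseRace {
      public static void main(String[] args) throws Exception {
        // Small source index for the adder thread to copy repeatedly.
        final Directory src = new RAMDirectory();
        IndexWriter w = new IndexWriter(src, new IndexWriterConfig(
            Version.LUCENE_31, new WhitespaceAnalyzer(Version.LUCENE_31)));
        Document doc = new Document();
        doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
        w.addDocument(doc);
        w.close();

        Directory dst = new RAMDirectory();
        final IndexWriter w2 = new IndexWriter(dst, new IndexWriterConfig(
            Version.LUCENE_31, new WhitespaceAnalyzer(Version.LUCENE_31)));

        Thread adder = new Thread() {
          @Override
          public void run() {
            try {
              while (true) {
                w2.addIndexes(new Directory[] { src });
              }
            } catch (Throwable t) {
              // AlreadyClosedException (or, as the tests above tolerate, an
              // NPE or aborted-merge exception) is expected once w2 is closed
            }
          }
        };
        adder.start();
        Thread.sleep(200);       // let the adder get a few iterations in
        w2.close(false);         // false: abort running merges rather than wait
        adder.join();
      }
    }
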
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1031231&r1=1031230&r2=1031231&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Thu Nov  4 20:49:44 2010
@@ -395,117 +395,6 @@ public class TestIndexWriter extends Luc
         }
     }
 
-    // Simulate a writer that crashed while writing segments
-    // file: make sure we can still open the index (ie,
-    // gracefully fallback to the previous segments file),
-    // and that we can add to the index:
-    public void testSimulatedCrashedWriter() throws IOException {
-        MockDirectoryWrapper dir = newDirectory();
-        dir.setPreventDoubleWrite(false);
-
-        IndexWriter writer = null;
-
-        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-
-        // add 100 documents
-        for (int i = 0; i < 100; i++) {
-            addDoc(writer);
-        }
-
-        // close
-        writer.close();
-
-        long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
-        assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
-        // Make the next segments file, with last byte
-        // missing, to simulate a writer that crashed while
-        // writing segments file:
-        String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
-        String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
-                                                                   "",
-                                                                   1+gen);
-        IndexInput in = dir.openInput(fileNameIn);
-        IndexOutput out = dir.createOutput(fileNameOut);
-        long length = in.length();
-        for(int i=0;i<length-1;i++) {
-          out.writeByte(in.readByte());
-        }
-        in.close();
-        out.close();
-
-        IndexReader reader = null;
-        try {
-          reader = IndexReader.open(dir, true);
-        } catch (Exception e) {
-          fail("reader failed to open on a crashed index");
-        }
-        reader.close();
-
-        try {
-          writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE));
-        } catch (Exception e) {
-          e.printStackTrace(System.out);
-          fail("writer failed to open on a crashed index");
-        }
-
-        // add 100 documents
-        for (int i = 0; i < 100; i++) {
-            addDoc(writer);
-        }
-
-        // close
-        writer.close();
-        dir.close();
-    }
-
-    // Simulate a corrupt index by removing last byte of
-    // latest segments file and make sure we get an
-    // IOException trying to open the index:
-    public void testSimulatedCorruptIndex1() throws IOException {
-        Directory dir = newDirectory();
-
-        IndexWriter writer = null;
-
-        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-
-        // add 100 documents
-        for (int i = 0; i < 100; i++) {
-            addDoc(writer);
-        }
-
-        // close
-        writer.close();
-
-        long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
-        assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
-        String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
-        String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
-                                                                   "",
-                                                                   1+gen);
-        IndexInput in = dir.openInput(fileNameIn);
-        IndexOutput out = dir.createOutput(fileNameOut);
-        long length = in.length();
-        for(int i=0;i<length-1;i++) {
-          out.writeByte(in.readByte());
-        }
-        in.close();
-        out.close();
-        dir.deleteFile(fileNameIn);
-
-        IndexReader reader = null;
-        try {
-          reader = IndexReader.open(dir, true);
-          fail("reader did not hit IOException on opening a corrupt index");
-        } catch (Exception e) {
-        }
-        if (reader != null) {
-          reader.close();
-        }
-        dir.close();
-    }
-
     public void testChangesAfterClose() throws IOException {
         Directory dir = newDirectory();
 
@@ -524,52 +413,6 @@ public class TestIndexWriter extends Luc
         }
         dir.close();
     }
-  
-
-    // Simulate a corrupt index by removing one of the cfs
-    // files and make sure we get an IOException trying to
-    // open the index:
-    public void testSimulatedCorruptIndex2() throws IOException {
-        Directory dir = newDirectory();
-
-        IndexWriter writer = null;
-
-        writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-        ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true);
-
-        // add 100 documents
-        for (int i = 0; i < 100; i++) {
-            addDoc(writer);
-        }
-
-        // close
-        writer.close();
-
-        long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
-        assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
-        String[] files = dir.listAll();
-        boolean corrupted = false;
-        for(int i=0;i<files.length;i++) {
-          if (files[i].endsWith(".cfs")) {
-            dir.deleteFile(files[i]);
-            corrupted = true;
-            break;
-          }
-        }
-        assertTrue("failed to find cfs file to remove", corrupted);
-
-        IndexReader reader = null;
-        try {
-          reader = IndexReader.open(dir, true);
-          fail("reader did not hit IOException on opening a corrupt index");
-        } catch (Exception e) {
-        }
-        if (reader != null) {
-          reader.close();
-        }
-        dir.close();
-    }
 
     /*
      * Simple test for "commit on close": open writer then
@@ -1378,354 +1221,6 @@ public class TestIndexWriter extends Luc
     dir.close();
   }
 
-  // LUCENE-1072
-  public void testExceptionFromTokenStream() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new Analyzer() {
-
-      @Override
-      public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new TokenFilter(new StandardTokenizer(TEST_VERSION_CURRENT, reader)) {
-          private int count = 0;
-
-          @Override
-          public boolean incrementToken() throws IOException {
-            if (count++ == 5) {
-              throw new IOException();
-            }
-            return input.incrementToken();
-          }
-        };
-      }
-
-    });
-    IndexWriter writer = new IndexWriter(dir, conf);
-
-    Document doc = new Document();
-    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newField("content", contents, Field.Store.NO,
-        Field.Index.ANALYZED));
-    try {
-      writer.addDocument(doc);
-      fail("did not hit expected exception");
-    } catch (Exception e) {
-    }
-
-    // Make sure we can add another normal document
-    doc = new Document();
-    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
-        Field.Index.ANALYZED));
-    writer.addDocument(doc);
-
-    // Make sure we can add another normal document
-    doc = new Document();
-    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
-        Field.Index.ANALYZED));
-    writer.addDocument(doc);
-
-    writer.close();
-    IndexReader reader = IndexReader.open(dir, true);
-    final Term t = new Term("content", "aa");
-    assertEquals(reader.docFreq(t), 3);
-
-    // Make sure the doc that hit the exception was marked
-    // as deleted:
-    TermDocs tdocs = reader.termDocs(t);
-    int count = 0;
-    while(tdocs.next()) {
-      count++;
-    }
-    assertEquals(2, count);
-
-    assertEquals(reader.docFreq(new Term("content", "gg")), 0);
-    reader.close();
-    dir.close();
-  }
-
-  private static class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
-    boolean doFail = false;
-    int count;
-
-    @Override
-    public void setDoFail() {
-      this.doFail = true;
-    }
-    @Override
-    public void clearDoFail() {
-      this.doFail = false;
-    }
-
-    @Override
-    public void eval(MockDirectoryWrapper dir)  throws IOException {
-      if (doFail) {
-        StackTraceElement[] trace = new Exception().getStackTrace();
-        boolean sawAppend = false;
-        boolean sawFlush = false;
-        for (int i = 0; i < trace.length; i++) {
-          if ("org.apache.lucene.index.FreqProxTermsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
-            sawAppend = true;
-          if ("doFlush".equals(trace[i].getMethodName()))
-            sawFlush = true;
-        }
-
-        if (sawAppend && sawFlush && count++ >= 30) {
-          doFail = false;
-          throw new IOException("now failing during flush");
-        }
-      }
-    }
-  }
-
-  // LUCENE-1072: make sure an errant exception on flushing
-  // one segment only takes out those docs in that one flush
-  public void testDocumentsWriterAbort() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    FailOnlyOnFlush failure = new FailOnlyOnFlush();
-    failure.setDoFail();
-    dir.failOn(failure);
-
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(2));
-    Document doc = new Document();
-    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newField("content", contents, Field.Store.NO,
-        Field.Index.ANALYZED));
-    boolean hitError = false;
-    for(int i=0;i<200;i++) {
-      try {
-        writer.addDocument(doc);
-      } catch (IOException ioe) {
-        // only one flush should fail:
-        assertFalse(hitError);
-        hitError = true;
-      }
-    }
-    assertTrue(hitError);
-    writer.close();
-    IndexReader reader = IndexReader.open(dir, true);
-    assertEquals(198, reader.docFreq(new Term("content", "aa")));
-    reader.close();
-    dir.close();
-  }
-
-  private class CrashingFilter extends TokenFilter {
-    String fieldName;
-    int count;
-
-    public CrashingFilter(String fieldName, TokenStream input) {
-      super(input);
-      this.fieldName = fieldName;
-    }
-
-    @Override
-    public boolean incrementToken() throws IOException {
-      if (this.fieldName.equals("crash") && count++ >= 4)
-        throw new IOException("I'm experiencing problems");
-      return input.incrementToken();
-    }
-
-    @Override
-    public void reset() throws IOException {
-      super.reset();
-      count = 0;
-    }
-  }
-
-  public void testDocumentsWriterExceptions() throws IOException {
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
-      }
-    };
-
-    for(int i=0;i<2;i++) {
-      MockDirectoryWrapper dir = newDirectory();
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
-      //writer.setInfoStream(System.out);
-      Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      writer.addDocument(doc);
-      writer.addDocument(doc);
-      doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      doc.add(newField("other", "this will not get indexed", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      try {
-        writer.addDocument(doc);
-        fail("did not hit expected exception");
-      } catch (IOException ioe) {
-      }
-
-      if (0 == i) {
-        doc = new Document();
-        doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-        writer.addDocument(doc);
-        writer.addDocument(doc);
-      }
-      writer.close();
-
-      IndexReader reader = IndexReader.open(dir, true);
-      int expected = 3+(1-i)*2;
-      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
-      assertEquals(expected, reader.maxDoc());
-      int numDel = 0;
-      for(int j=0;j<reader.maxDoc();j++) {
-        if (reader.isDeleted(j))
-          numDel++;
-        else {
-          reader.document(j);
-          reader.getTermFreqVectors(j);
-        }
-      }
-      reader.close();
-
-      assertEquals(1, numDel);
-
-      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
-          analyzer).setMaxBufferedDocs(10));
-      doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      for(int j=0;j<17;j++)
-        writer.addDocument(doc);
-      writer.optimize();
-      writer.close();
-
-      reader = IndexReader.open(dir, true);
-      expected = 19+(1-i)*2;
-      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
-      assertEquals(expected, reader.maxDoc());
-      numDel = 0;
-      for(int j=0;j<reader.maxDoc();j++) {
-        if (reader.isDeleted(j))
-          numDel++;
-        else {
-          reader.document(j);
-          reader.getTermFreqVectors(j);
-        }
-      }
-      reader.close();
-      assertEquals(0, numDel);
-
-      dir.close();
-    }
-  }
-
-  public void testDocumentsWriterExceptionThreads() throws Exception {
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
-      }
-    };
-
-    final int NUM_THREAD = 3;
-    final int NUM_ITER = 100;
-
-    for(int i=0;i<2;i++) {
-      MockDirectoryWrapper dir = newDirectory();
-
-      {
-        final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(-1));
-        ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
-        final int finalI = i;
-
-        Thread[] threads = new Thread[NUM_THREAD];
-        for(int t=0;t<NUM_THREAD;t++) {
-          threads[t] = new Thread() {
-              @Override
-              public void run() {
-                try {
-                  for(int iter=0;iter<NUM_ITER;iter++) {
-                    Document doc = new Document();
-                    doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-                    writer.addDocument(doc);
-                    writer.addDocument(doc);
-                    doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-                    doc.add(newField("other", "this will not get indexed", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-                    try {
-                      writer.addDocument(doc);
-                      fail("did not hit expected exception");
-                    } catch (IOException ioe) {
-                    }
-
-                    if (0 == finalI) {
-                      doc = new Document();
-                      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-                      writer.addDocument(doc);
-                      writer.addDocument(doc);
-                    }
-                  }
-                } catch (Throwable t) {
-                  synchronized(this) {
-                    System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
-                    t.printStackTrace(System.out);
-                  }
-                  fail();
-                }
-              }
-            };
-          threads[t].start();
-        }
-
-        for(int t=0;t<NUM_THREAD;t++)
-          threads[t].join();
-            
-        writer.close();
-      }
-
-      IndexReader reader = IndexReader.open(dir, true);
-      int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
-      assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
-      assertEquals(expected, reader.maxDoc());
-      int numDel = 0;
-      for(int j=0;j<reader.maxDoc();j++) {
-        if (reader.isDeleted(j))
-          numDel++;
-        else {
-          reader.document(j);
-          reader.getTermFreqVectors(j);
-        }
-      }
-      reader.close();
-
-      assertEquals(NUM_THREAD*NUM_ITER, numDel);
-
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-          TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
-      Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      for(int j=0;j<17;j++)
-        writer.addDocument(doc);
-      writer.optimize();
-      writer.close();
-
-      reader = IndexReader.open(dir, true);
-      expected += 17-NUM_THREAD*NUM_ITER;
-      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
-      assertEquals(expected, reader.maxDoc());
-      numDel = 0;
-      for(int j=0;j<reader.maxDoc();j++) {
-        if (reader.isDeleted(j))
-          numDel++;
-        else {
-          reader.document(j);
-          reader.getTermFreqVectors(j);
-        }
-      }
-      reader.close();
-      assertEquals(0, numDel);
-
-      dir.close();
-    }
-  }
 
   public void testVariableSchema() throws Exception {
     Directory dir = newDirectory();
@@ -1892,45 +1387,6 @@ public class TestIndexWriter extends Luc
     dir.close();
   }
 
-  // LUCENE-1044: Simulate checksum error in segments_N
-  public void testSegmentsChecksumError() throws IOException {
-    Directory dir = newDirectory();
-
-    IndexWriter writer = null;
-
-    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-
-    // add 100 documents
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer);
-    }
-
-    // close
-    writer.close();
-
-    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
-    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
-    final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
-    IndexInput in = dir.openInput(segmentsFileName);
-    IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
-    out.copyBytes(in, in.length()-1);
-    byte b = in.readByte();
-    out.writeByte((byte) (1+b));
-    out.close();
-    in.close();
-
-    IndexReader reader = null;
-    try {
-      reader = IndexReader.open(dir, true);
-    } catch (IOException e) {
-      e.printStackTrace(System.out);
-      fail("segmentInfos failed to retry fallback to correct segments_N file");
-    }
-    reader.close();
-    dir.close();
-  }
-
   // LUCENE-1044: test writer.commit() when ac=false
   public void testForceCommit() throws IOException {
     Directory dir = newDirectory();
@@ -1968,56 +1424,6 @@ public class TestIndexWriter extends Luc
     dir.close();
   }
 
-  // Throws IOException during MockDirectoryWrapper.sync
-  private static class FailOnlyInSync extends MockDirectoryWrapper.Failure {
-    boolean didFail;
-    @Override
-    public void eval(MockDirectoryWrapper dir)  throws IOException {
-      if (doFail) {
-        StackTraceElement[] trace = new Exception().getStackTrace();
-        for (int i = 0; i < trace.length; i++) {
-          if (doFail && "org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "sync".equals(trace[i].getMethodName())) {
-            didFail = true;
-            throw new IOException("now failing on purpose during sync");
-          }
-        }
-      }
-    }
-  }
-
-  // LUCENE-1044: test exception during sync
-  public void testExceptionDuringSync() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    FailOnlyInSync failure = new FailOnlyInSync();
-    dir.failOn(failure);
-
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
-        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
-    failure.setDoFail();
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
-
-    for (int i = 0; i < 23; i++) {
-      addDoc(writer);
-      if ((i-1)%2 == 0) {
-        try {
-          writer.commit();
-        } catch (IOException ioe) {
-          // expected
-        }
-      }
-    }
-
-    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
-    assertTrue(failure.didFail);
-    failure.clearDoFail();
-    writer.close();
-
-    IndexReader reader = IndexReader.open(dir, true);
-    assertEquals(23, reader.numDocs());
-    reader.close();
-    dir.close();
-  }
-
   // LUCENE-1168
   public void testTermVectorCorruption() throws IOException {
 
@@ -2317,120 +1723,11 @@ public class TestIndexWriter extends Luc
     dir.close();
   }
 
-  // LUCENE-1198
-  private static final class MockIndexWriter extends IndexWriter {
-
-    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
-    boolean doFail;
-
-    @Override
-    boolean testPoint(String name) {
-      if (doFail && name.equals("DocumentsWriter.ThreadState.init start"))
-        throw new RuntimeException("intentionally failing");
-      return true;
-    }
-  }
-  
-
-  public void testExceptionDocumentsWriterInit() throws IOException {
-    Directory dir = newDirectory();
-    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-    Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
-    w.addDocument(doc);
-    w.doFail = true;
-    try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (RuntimeException re) {
-      // expected
-    }
-    w.close();
-    _TestUtil.checkIndex(dir);
-    dir.close();
-  }
-
-  // LUCENE-1208
-  public void testExceptionJustBeforeFlush() throws IOException {
-    Directory dir = newDirectory();
-    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(2));
-    Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
-    w.addDocument(doc);
-
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
-      }
-    };
-
-    Document crashDoc = new Document();
-    crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
-                           Field.Index.ANALYZED));
-    try {
-      w.addDocument(crashDoc, analyzer);
-      fail("did not hit expected exception");
-    } catch (IOException ioe) {
-      // expected
-    }
-    w.addDocument(doc);
-    w.close();
-    dir.close();
-  }    
-
-  private static final class MockIndexWriter2 extends IndexWriter {
-
-    public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
-    boolean doFail;
-    boolean failed;
-
-    @Override
-    boolean testPoint(String name) {
-      if (doFail && name.equals("startMergeInit")) {
-        failed = true;
-        throw new RuntimeException("intentionally failing");
-      }
-      return true;
-    }
-  }
-  
-
-  // LUCENE-1210
-  public void testExceptionOnMergeInit() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
-      .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
-    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
-    MockIndexWriter2 w = new MockIndexWriter2(dir, conf);
-    w.doFail = true;
-    Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
-    for(int i=0;i<10;i++)
-      try {
-        w.addDocument(doc);
-      } catch (RuntimeException re) {
-        break;
-      }
 
-    ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
-    assertTrue(w.failed);
-    w.close();
-    dir.close();
-  }
 
-  private static final class MockIndexWriter3 extends IndexWriter {
+  private static final class MockIndexWriter extends IndexWriter {
 
-    public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
+    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
       super(dir, conf);
     }
 
@@ -2452,7 +1749,7 @@ public class TestIndexWriter extends Luc
   // LUCENE-1222
   public void testDoBeforeAfterFlush() throws IOException {
     Directory dir = newDirectory();
-    MockIndexWriter3 w = new MockIndexWriter3(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
+    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     Document doc = new Document();
     doc.add(newField("field", "a field", Field.Store.YES,
                       Field.Index.ANALYZED));
@@ -2476,57 +1773,7 @@ public class TestIndexWriter extends Luc
     dir.close();
   }
 
-  private static class FailOnlyInCommit extends MockDirectoryWrapper.Failure {
-
-    boolean fail1, fail2;
-
-    @Override
-    public void eval(MockDirectoryWrapper dir)  throws IOException {
-      StackTraceElement[] trace = new Exception().getStackTrace();
-      boolean isCommit = false;
-      boolean isDelete = false;
-      for (int i = 0; i < trace.length; i++) {
-        if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && "prepareCommit".equals(trace[i].getMethodName()))
-          isCommit = true;
-        if ("org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName()))
-          isDelete = true;
-      }
 
-      if (isCommit) {
-        if (!isDelete) {
-          fail1 = true;
-          throw new RuntimeException("now fail first");
-        } else {
-          fail2 = true;
-          throw new IOException("now fail during delete");
-        }
-      }
-    }
-  }
-  
-
-  // LUCENE-1214
-  public void testExceptionsDuringCommit() throws Throwable {
-    MockDirectoryWrapper dir = newDirectory();
-    FailOnlyInCommit failure = new FailOnlyInCommit();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-    Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
-    w.addDocument(doc);
-    dir.failOn(failure);
-    try {
-      w.close();
-      fail();
-    } catch (IOException ioe) {
-      fail("expected only RuntimeException");
-    } catch (RuntimeException re) {
-      // Expected
-    }
-    assertTrue(failure.fail1 && failure.fail2);
-    w.rollback();
-    dir.close();
-  }
   
   final String[] utf8Data = new String[] {
     // unpaired low surrogate
@@ -2914,318 +2161,6 @@ public class TestIndexWriter extends Luc
     dir.close();
   }
 
-  private abstract class RunAddIndexesThreads {
-
-    Directory dir, dir2;
-    final static int NUM_INIT_DOCS = 17;
-    IndexWriter writer2;
-    final List<Throwable> failures = new ArrayList<Throwable>();
-    volatile boolean didClose;
-    final IndexReader[] readers;
-    final int NUM_COPY;
-    final static int NUM_THREADS = 5;
-    final Thread[] threads = new Thread[NUM_THREADS];
-
-    public RunAddIndexesThreads(int numCopy) throws Throwable {
-      NUM_COPY = numCopy;
-      dir = new MockDirectoryWrapper(new RAMDirectory());
-      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
-          .setMaxBufferedDocs(2));
-      for (int i = 0; i < NUM_INIT_DOCS; i++)
-        addDoc(writer);
-      writer.close();
-
-      dir2 = new MockDirectoryWrapper(new RAMDirectory());
-      writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-      writer2.commit();
-
-      readers = new IndexReader[NUM_COPY];
-      for(int i=0;i<NUM_COPY;i++)
-        readers[i] = IndexReader.open(dir, true);
-    }
-
-    void launchThreads(final int numIter) {
-
-      for(int i=0;i<NUM_THREADS;i++) {
-        threads[i] = new Thread() {
-            @Override
-            public void run() {
-              try {
-
-                final Directory[] dirs = new Directory[NUM_COPY];
-                for(int k=0;k<NUM_COPY;k++)
-                  dirs[k] = new MockDirectoryWrapper(new RAMDirectory(dir));
-
-                int j=0;
-
-                while(true) {
-                  // System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
-                  if (numIter > 0 && j == numIter)
-                    break;
-                  doBody(j++, dirs);
-                }
-              } catch (Throwable t) {
-                handle(t);
-              }
-            }
-          };
-      }
-
-      for(int i=0;i<NUM_THREADS;i++)
-        threads[i].start();
-    }
-
-    void joinThreads() throws Exception {
-      for(int i=0;i<NUM_THREADS;i++)
-        threads[i].join();
-    }
-
-    void close(boolean doWait) throws Throwable {
-      didClose = true;
-      writer2.close(doWait);
-    }
-
-    void closeDir() throws Throwable {
-      for(int i=0;i<NUM_COPY;i++)
-        readers[i].close();
-      dir2.close();
-    }
-
-    abstract void doBody(int j, Directory[] dirs) throws Throwable;
-    abstract void handle(Throwable t);
-  }
-
-  private class CommitAndAddIndexes extends RunAddIndexesThreads {
-    public CommitAndAddIndexes(int numCopy) throws Throwable {
-      super(numCopy);
-    }
-
-    @Override
-    void handle(Throwable t) {
-      t.printStackTrace(System.out);
-      synchronized(failures) {
-        failures.add(t);
-      }
-    }
-
-    @Override
-    void doBody(int j, Directory[] dirs) throws Throwable {
-      switch(j%5) {
-      case 0:
-        writer2.addIndexes(dirs);
-        writer2.optimize();
-        break;
-      case 1:
-        writer2.addIndexes(dirs);
-        break;
-      case 2:
-        writer2.addIndexes(readers);
-        break;
-      case 3:
-        writer2.addIndexes(dirs);
-        writer2.maybeMerge();
-        break;
-      case 4:
-        writer2.commit();
-      }
-    }
-  }
-
-  // LUCENE-1335: test simultaneous addIndexes & commits
-  // from multiple threads
-  public void testAddIndexesWithThreads() throws Throwable {
-
-    final int NUM_ITER = 15;
-    final int NUM_COPY = 3;
-    CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
-    c.launchThreads(NUM_ITER);
-
-    for(int i=0;i<100;i++)
-      addDoc(c.writer2);
-
-    c.joinThreads();
-
-    int expectedNumDocs = 100+NUM_COPY*(4*NUM_ITER/5)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS;
-    assertEquals(expectedNumDocs, c.writer2.numDocs());
-
-    c.close(true);
-
-    assertTrue(c.failures.size() == 0);
-
-    _TestUtil.checkIndex(c.dir2);
-
-    IndexReader reader = IndexReader.open(c.dir2, true);
-    assertEquals(expectedNumDocs, reader.numDocs());
-    reader.close();
-
-    c.closeDir();
-  }
-
-  private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
-    public CommitAndAddIndexes2(int numCopy) throws Throwable {
-      super(numCopy);
-    }
-
-    @Override
-    void handle(Throwable t) {
-      if (!(t instanceof AlreadyClosedException) && !(t instanceof NullPointerException)) {
-        t.printStackTrace(System.out);
-        synchronized(failures) {
-          failures.add(t);
-        }
-      }
-    }
-  }
-
-  // LUCENE-1335: test simultaneous addIndexes & close
-  public void testAddIndexesWithClose() throws Throwable {
-    final int NUM_COPY = 3;
-    CommitAndAddIndexes2 c = new CommitAndAddIndexes2(NUM_COPY);
-    //c.writer2.setInfoStream(System.out);
-    c.launchThreads(-1);
-
-    // Close w/o first stopping/joining the threads
-    c.close(true);
-    //c.writer2.close();
-
-    c.joinThreads();
-
-    _TestUtil.checkIndex(c.dir2);
-
-    c.closeDir();
-
-    assertTrue(c.failures.size() == 0);
-  }
-
-  private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
-    public CommitAndAddIndexes3(int numCopy) throws Throwable {
-      super(numCopy);
-    }
-
-    @Override
-    void doBody(int j, Directory[] dirs) throws Throwable {
-      switch(j%5) {
-      case 0:
-        writer2.addIndexes(dirs);
-        writer2.optimize();
-        break;
-      case 1:
-        writer2.addIndexes(dirs);
-        break;
-      case 2:
-        writer2.addIndexes(readers);
-        break;
-      case 3:
-        writer2.optimize();
-        break;
-      case 4:
-        writer2.commit();
-      }
-    }
-
-    @Override
-    void handle(Throwable t) {
-      boolean report = true;
-
-      if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
-        report = !didClose;
-      } else if (t instanceof IOException)  {
-        Throwable t2 = t.getCause();
-        if (t2 instanceof MergePolicy.MergeAbortedException) {
-          report = !didClose;
-        }
-      }
-      if (report) {
-        t.printStackTrace(System.out);
-        synchronized(failures) {
-          failures.add(t);
-        }
-      }
-    }
-  }
-
-  // LUCENE-1335: test simultaneous addIndexes & close
-  public void testAddIndexesWithCloseNoWait() throws Throwable {
-
-    final int NUM_COPY = 50;
-    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
-    c.launchThreads(-1);
-
-    Thread.sleep(500);
-
-    // Close w/o first stopping/joining the threads
-    c.close(false);
-
-    c.joinThreads();
-
-    _TestUtil.checkIndex(c.dir2);
-
-    c.closeDir();
-
-    assertTrue(c.failures.size() == 0);
-  }
-
-  // LUCENE-1335: test simultaneous addIndexes & close
-  public void testAddIndexesWithRollback() throws Throwable {
-    
-    final int NUM_COPY = 50;
-    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
-    c.launchThreads(-1);
-
-    Thread.sleep(500);
-
-    // Close w/o first stopping/joining the threads
-    c.didClose = true;
-    c.writer2.rollback();
-
-    c.joinThreads();
-
-    _TestUtil.checkIndex(c.dir2);
-
-    c.closeDir();
-
-    assertTrue(c.failures.size() == 0);
-  }
-
-  // LUCENE-1347
-  private static final class MockIndexWriter4 extends IndexWriter {
-
-    public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
-    boolean doFail;
-
-    @Override
-    boolean testPoint(String name) {
-      if (doFail && name.equals("rollback before checkpoint"))
-        throw new RuntimeException("intentionally failing");
-      return true;
-    }
-  }
-  
-
-  // LUCENE-1347
-  public void testRollbackExceptionHang() throws Throwable {
-    Directory dir = newDirectory();
-    MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
-
-    addDoc(w);
-    w.doFail = true;
-    try {
-      w.rollback();
-      fail("did not hit intentional RuntimeException");
-    } catch (RuntimeException re) {
-      // expected
-    }
-    
-    w.doFail = false;
-    w.rollback();
-    dir.close();
-  }
-
-
   // LUCENE-1219
   public void testBinaryFieldOffsetLength() throws IOException {
     Directory dir = newDirectory();
@@ -3294,64 +2229,6 @@ public class TestIndexWriter extends Luc
     dir.close();
   }
 
-  public void testOptimizeExceptions() throws IOException {
-    Directory startDir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(2);
-    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
-    IndexWriter w = new IndexWriter(startDir, conf);
-    for(int i=0;i<27;i++)
-      addDoc(w);
-    w.close();
-
-    for(int i=0;i<200;i++) {
-      MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory(startDir));
-      conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMergeScheduler(new ConcurrentMergeScheduler());
-      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
-      w = new IndexWriter(dir, conf);
-      dir.setRandomIOExceptionRate(0.5, 100);
-      try {
-        w.optimize();
-      } catch (IOException ioe) {
-        if (ioe.getCause() == null)
-          fail("optimize threw IOException without root cause");
-      }
-      dir.setRandomIOExceptionRate(0, 0);
-      w.close();
-      dir.close();
-    }
-    startDir.close();
-  }
-
-  // LUCENE-1429
-  public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
-
-    final List<Throwable> thrown = new ArrayList<Throwable>();
-    final Directory dir = newDirectory();
-    final IndexWriter writer = new IndexWriter(dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(
-            TEST_VERSION_CURRENT))) {
-        @Override
-        public void message(final String message) {
-          if (message.startsWith("now flush at close") && 0 == thrown.size()) {
-            thrown.add(null);
-            throw new OutOfMemoryError("fake OOME at " + message);
-          }
-        }
-      };
-
-    // need to set an info stream so message is called
-    writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
-    try {
-      writer.close();
-      fail("OutOfMemoryError expected");
-    }
-    catch (final OutOfMemoryError expected) {}
-
-    // throws IllegalStateEx w/o bug fix
-    writer.close();
-    dir.close();
-  }
-
   // LUCENE-1442
   public void testDoubleOffsetCounting() throws Exception {
     Directory dir = newDirectory();

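The fault-injection helpers removed above (FailOnlyOnFlush, FailOnlyInSync,
FailOnlyInCommit) all follow one pattern, which this commit relocates to
TestIndexWriterExceptions: subclass MockDirectoryWrapper.Failure, sniff the
current stack trace inside eval(), and throw only when the targeted method is
on the stack. A minimal sketch of that pattern, assuming only the hooks visible
in the hunks (failOn, setDoFail/clearDoFail, the inherited doFail flag); the
frame being matched is illustrative:

    import java.io.IOException;

    import org.apache.lucene.store.MockDirectoryWrapper;

    // Fails a directory operation only while IndexWriter.commit is on the
    // stack; armed and disarmed via the inherited setDoFail()/clearDoFail().
    class FailOnlyInCommitSketch extends MockDirectoryWrapper.Failure {
      @Override
      public void eval(MockDirectoryWrapper dir) throws IOException {
        if (!doFail) {
          return;
        }
        for (StackTraceElement frame : new Exception().getStackTrace()) {
          if ("org.apache.lucene.index.IndexWriter".equals(frame.getClassName())
              && "commit".equals(frame.getMethodName())) {
            throw new IOException("intentionally failing during commit");
          }
        }
      }
    }

Wired up as in the removed tests: dir.failOn(new FailOnlyInCommitSketch()),
setDoFail() before the operation under test, clearDoFail() once the expected
exception has been observed.
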
Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1031231&r1=1031230&r2=1031231&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Thu Nov  4 20:49:44 2010
@@ -17,16 +17,32 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
+import java.io.Reader;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.WhitespaceTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 
 public class TestIndexWriterExceptions extends LuceneTestCase {
 
@@ -214,4 +230,860 @@ public class TestIndexWriterExceptions e
     _TestUtil.checkIndex(dir);
     dir.close();
   }
+  
+  // LUCENE-1198
+  private static final class MockIndexWriter2 extends IndexWriter {
+
+    public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean doFail;
+
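+    // IndexWriter calls testPoint(name) at named stages of indexing; throwing
+    // here simulates a failure while a new DocumentsWriter thread state is
+    // being initialized.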
+    @Override
+    boolean testPoint(String name) {
+      if (doFail && name.equals("DocumentsWriter.ThreadState.init start"))
+        throw new RuntimeException("intentionally failing");
+      return true;
+    }
+  }
+  
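+  // TokenFilter that throws an IOException after the fourth token of any field
+  // named "crash", simulating an analyzer that fails mid-document.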
+  private class CrashingFilter extends TokenFilter {
+    String fieldName;
+    int count;
+
+    public CrashingFilter(String fieldName, TokenStream input) {
+      super(input);
+      this.fieldName = fieldName;
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (this.fieldName.equals("crash") && count++ >= 4)
+        throw new IOException("I'm experiencing problems");
+      return input.incrementToken();
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      count = 0;
+    }
+  }
+
+  public void testExceptionDocumentsWriterInit() throws IOException {
+    Directory dir = newDirectory();
+    MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    w.addDocument(doc);
+    w.doFail = true;
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (RuntimeException re) {
+      // expected
+    }
+    w.close();
+    _TestUtil.checkIndex(dir);
+    dir.close();
+  }
+
+  // LUCENE-1208
+  public void testExceptionJustBeforeFlush() throws IOException {
+    Directory dir = newDirectory();
+    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    w.addDocument(doc);
+
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
+      }
+    };
+
+    Document crashDoc = new Document();
+    crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
+                           Field.Index.ANALYZED));
+    try {
+      w.addDocument(crashDoc, analyzer);
+      fail("did not hit expected exception");
+    } catch (IOException ioe) {
+      // expected
+    }
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  private static final class MockIndexWriter3 extends IndexWriter {
+
+    public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean doFail;
+    boolean failed;
+
+    @Override
+    boolean testPoint(String name) {
+      if (doFail && name.equals("startMergeInit")) {
+        failed = true;
+        throw new RuntimeException("intentionally failing");
+      }
+      return true;
+    }
+  }
+  
+
+  // LUCENE-1210
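+  // Adds docs until the injected "startMergeInit" failure surfaces, waits for
+  // background merges via sync(), then verifies the failure fired and the
+  // writer can still close cleanly.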
+  public void testExceptionOnMergeInit() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
+      .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
+    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
+    MockIndexWriter3 w = new MockIndexWriter3(dir, conf);
+    w.doFail = true;
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    for(int i=0;i<10;i++)
+      try {
+        w.addDocument(doc);
+      } catch (RuntimeException re) {
+        break;
+      }
+
+    ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
+    assertTrue(w.failed);
+    w.close();
+    dir.close();
+  }
+  
+  // LUCENE-1072
+  public void testExceptionFromTokenStream() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new Analyzer() {
+
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new TokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader)) {
+          private int count = 0;
+
+          @Override
+          public boolean incrementToken() throws IOException {
+            if (count++ == 5) {
+              throw new IOException();
+            }
+            return input.incrementToken();
+          }
+        };
+      }
+
+    });
+    IndexWriter writer = new IndexWriter(dir, conf);
+
+    Document doc = new Document();
+    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+    doc.add(newField("content", contents, Field.Store.NO,
+        Field.Index.ANALYZED));
+    try {
+      writer.addDocument(doc);
+      fail("did not hit expected exception");
+    } catch (Exception e) {
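+      // expected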
+    }
+
+    // Make sure we can add another normal document
+    doc = new Document();
+    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
+        Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    // Make sure we can add a second normal document
+    doc = new Document();
+    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
+        Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    final Term t = new Term("content", "aa");
+    assertEquals(3, reader.docFreq(t));
+
+    // Make sure the doc that hit the exception was marked
+    // as deleted:
+    TermDocs tdocs = reader.termDocs(t);
+    int count = 0;
+    while(tdocs.next()) {
+      count++;
+    }
+    assertEquals(2, count);
+
+    assertEquals(0, reader.docFreq(new Term("content", "gg")));
+    reader.close();
+    dir.close();
+  }
+
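+  // MockDirectoryWrapper runs registered Failures during file operations; this
+  // one walks the current stack trace and throws once writing has reached
+  // FreqProxTermsWriter.appendPostings inside a flush, after 30 qualifying
+  // calls, then disarms itself so only one flush fails.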
+  private static class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
+    boolean doFail = false;
+    int count;
+
+    @Override
+    public void setDoFail() {
+      this.doFail = true;
+    }
+    @Override
+    public void clearDoFail() {
+      this.doFail = false;
+    }
+
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      if (doFail) {
+        StackTraceElement[] trace = new Exception().getStackTrace();
+        boolean sawAppend = false;
+        boolean sawFlush = false;
+        for (int i = 0; i < trace.length; i++) {
+          if ("org.apache.lucene.index.FreqProxTermsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
+            sawAppend = true;
+          if ("doFlush".equals(trace[i].getMethodName()))
+            sawFlush = true;
+        }
+
+        if (sawAppend && sawFlush && count++ >= 30) {
+          doFail = false;
+          throw new IOException("now failing during flush");
+        }
+      }
+    }
+  }
+
+  // LUCENE-1072: make sure an errant exception on flushing
+  // one segment only takes out those docs in that one flush
+  public void testDocumentsWriterAbort() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    FailOnlyOnFlush failure = new FailOnlyOnFlush();
+    failure.setDoFail();
+    dir.failOn(failure);
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+    Document doc = new Document();
+    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+    doc.add(newField("content", contents, Field.Store.NO,
+        Field.Index.ANALYZED));
+    boolean hitError = false;
+    for(int i=0;i<200;i++) {
+      try {
+        writer.addDocument(doc);
+      } catch (IOException ioe) {
+        // only one flush should fail:
+        assertFalse(hitError);
+        hitError = true;
+      }
+    }
+    assertTrue(hitError);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, true);
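+    // maxBufferedDocs is 2, so the single aborted flush discards two buffered
+    // docs: 200 - 2 = 198.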
+    assertEquals(198, reader.docFreq(new Term("content", "aa")));
+    reader.close();
+    dir.close();
+  }
+
+  public void testDocumentsWriterExceptions() throws IOException {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
+      }
+    };
+
+    for(int i=0;i<2;i++) {
+      MockDirectoryWrapper dir = newDirectory();
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
+      //writer.setInfoStream(System.out);
+      Document doc = new Document();
+      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      writer.addDocument(doc);
+      writer.addDocument(doc);
+      doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("other", "this will not get indexed", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      try {
+        writer.addDocument(doc);
+        fail("did not hit expected exception");
+      } catch (IOException ioe) {
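+        // expected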
+      }
+
+      if (0 == i) {
+        doc = new Document();
+        doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+        writer.addDocument(doc);
+        writer.addDocument(doc);
+      }
+      writer.close();
+
+      IndexReader reader = IndexReader.open(dir, true);
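+      // 2 good docs, plus the failed doc (kept but marked deleted), plus 2
+      // more good docs on the first iteration only: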
+      int expected = 3+(1-i)*2;
+      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
+      assertEquals(expected, reader.maxDoc());
+      int numDel = 0;
+      for(int j=0;j<reader.maxDoc();j++) {
+        if (reader.isDeleted(j))
+          numDel++;
+        else {
+          reader.document(j);
+          reader.getTermFreqVectors(j);
+        }
+      }
+      reader.close();
+
+      assertEquals(1, numDel);
+
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+          analyzer).setMaxBufferedDocs(10));
+      doc = new Document();
+      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      for(int j=0;j<17;j++)
+        writer.addDocument(doc);
+      writer.optimize();
+      writer.close();
+
+      reader = IndexReader.open(dir, true);
+      expected = 19+(1-i)*2;
+      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
+      assertEquals(expected, reader.maxDoc());
+      numDel = 0;
+      for(int j=0;j<reader.maxDoc();j++) {
+        if (reader.isDeleted(j))
+          numDel++;
+        else {
+          reader.document(j);
+          reader.getTermFreqVectors(j);
+        }
+      }
+      reader.close();
+      assertEquals(0, numDel);
+
+      dir.close();
+    }
+  }
+
+  public void testDocumentsWriterExceptionThreads() throws Exception {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
+      }
+    };
+
+    final int NUM_THREAD = 3;
+    final int NUM_ITER = 100;
+
+    for(int i=0;i<2;i++) {
+      MockDirectoryWrapper dir = newDirectory();
+
+      {
+        final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(-1));
+        ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
+        final int finalI = i;
+
+        Thread[] threads = new Thread[NUM_THREAD];
+        for(int t=0;t<NUM_THREAD;t++) {
+          threads[t] = new Thread() {
+              @Override
+              public void run() {
+                try {
+                  for(int iter=0;iter<NUM_ITER;iter++) {
+                    Document doc = new Document();
+                    doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    writer.addDocument(doc);
+                    writer.addDocument(doc);
+                    doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    doc.add(newField("other", "this will not get indexed", Field.Store.YES,
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    try {
+                      writer.addDocument(doc);
+                      fail("did not hit expected exception");
+                    } catch (IOException ioe) {
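+                      // expected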
+                    }
+
+                    if (0 == finalI) {
+                      doc = new Document();
+                      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                      writer.addDocument(doc);
+                      writer.addDocument(doc);
+                    }
+                  }
+                } catch (Throwable t) {
+                  synchronized(this) {
+                    System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
+                    t.printStackTrace(System.out);
+                  }
+                  fail();
+                }
+              }
+            };
+          threads[t].start();
+        }
+
+        for(int t=0;t<NUM_THREAD;t++)
+          threads[t].join();
+            
+        writer.close();
+      }
+
+      IndexReader reader = IndexReader.open(dir, true);
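+      // Each thread iteration adds the same 3-or-5 doc pattern as the
+      // single-threaded case above: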
+      int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
+      assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
+      assertEquals(expected, reader.maxDoc());
+      int numDel = 0;
+      for(int j=0;j<reader.maxDoc();j++) {
+        if (reader.isDeleted(j))
+          numDel++;
+        else {
+          reader.document(j);
+          reader.getTermFreqVectors(j);
+        }
+      }
+      reader.close();
+
+      assertEquals(NUM_THREAD*NUM_ITER, numDel);
+
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+          TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
+      Document doc = new Document();
+      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      for(int j=0;j<17;j++)
+        writer.addDocument(doc);
+      writer.optimize();
+      writer.close();
+
+      reader = IndexReader.open(dir, true);
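+      // optimize() purged the NUM_THREAD*NUM_ITER deleted docs, and 17 new
+      // docs were added: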
+      expected += 17-NUM_THREAD*NUM_ITER;
+      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
+      assertEquals(expected, reader.maxDoc());
+      numDel = 0;
+      for(int j=0;j<reader.maxDoc();j++) {
+        if (reader.isDeleted(j))
+          numDel++;
+        else {
+          reader.document(j);
+          reader.getTermFreqVectors(j);
+        }
+      }
+      reader.close();
+
+      dir.close();
+    }
+  }
+  
+  // Throws IOException during MockDirectoryWrapper.sync
+  private static class FailOnlyInSync extends MockDirectoryWrapper.Failure {
+    boolean didFail;
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      if (doFail) {
+        StackTraceElement[] trace = new Exception().getStackTrace();
+        for (int i = 0; i < trace.length; i++) {
+          if ("org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "sync".equals(trace[i].getMethodName())) {
+            didFail = true;
+            throw new IOException("now failing on purpose during sync");
+          }
+        }
+      }
+    }
+  }
+  
+  // TODO: these are also in TestIndexWriter... add a simple doc-writing method
+  // like this to LuceneTestCase?
+  private void addDoc(IndexWriter writer) throws IOException
+  {
+      Document doc = new Document();
+      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(doc);
+  }
+  
+  // LUCENE-1044: test exception during sync
+  public void testExceptionDuringSync() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    FailOnlyInSync failure = new FailOnlyInSync();
+    dir.failOn(failure);
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
+        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
+    failure.setDoFail();
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
+
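+    // Commit after every other added doc so that multiple syncs run while the
+    // failure is armed: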
+    for (int i = 0; i < 23; i++) {
+      addDoc(writer);
+      if ((i-1)%2 == 0) {
+        try {
+          writer.commit();
+        } catch (IOException ioe) {
+          // expected
+        }
+      }
+    }
+
+    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
+    assertTrue(failure.didFail);
+    failure.clearDoFail();
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
+  
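+  // Fails twice during commit: first with a RuntimeException inside
+  // SegmentInfos.prepareCommit, then with an IOException while the
+  // half-committed files are being deleted.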
+  private static class FailOnlyInCommit extends MockDirectoryWrapper.Failure {
+
+    boolean fail1, fail2;
+
+    @Override
+    public void eval(MockDirectoryWrapper dir)  throws IOException {
+      StackTraceElement[] trace = new Exception().getStackTrace();
+      boolean isCommit = false;
+      boolean isDelete = false;
+      for (int i = 0; i < trace.length; i++) {
+        if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && "prepareCommit".equals(trace[i].getMethodName()))
+          isCommit = true;
+        if ("org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName()))
+          isDelete = true;
+      }
+
+      if (isCommit) {
+        if (!isDelete) {
+          fail1 = true;
+          throw new RuntimeException("now fail first");
+        } else {
+          fail2 = true;
+          throw new IOException("now fail during delete");
+        }
+      }
+    }
+  }
+  
+  // LUCENE-1214
+  public void testExceptionsDuringCommit() throws Throwable {
+    MockDirectoryWrapper dir = newDirectory();
+    FailOnlyInCommit failure = new FailOnlyInCommit();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                      Field.Index.ANALYZED));
+    w.addDocument(doc);
+    dir.failOn(failure);
+    try {
+      w.close();
+      fail();
+    } catch (IOException ioe) {
+      fail("expected only RuntimeException");
+    } catch (RuntimeException re) {
+      // Expected
+    }
+    assertTrue(failure.fail1 && failure.fail2);
+    w.rollback();
+    dir.close();
+  }
+  
+  public void testOptimizeExceptions() throws IOException {
+    Directory startDir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
+    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
+    IndexWriter w = new IndexWriter(startDir, conf);
+    for(int i=0;i<27;i++)
+      addDoc(w);
+    w.close();
+
+    for(int i=0;i<200;i++) {
+      MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory(startDir));
+      conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergeScheduler(new ConcurrentMergeScheduler());
+      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
+      w = new IndexWriter(dir, conf);
+      dir.setRandomIOExceptionRate(0.5, 100);
+      try {
+        w.optimize();
+      } catch (IOException ioe) {
+        if (ioe.getCause() == null)
+          fail("optimize threw IOException without root cause");
+      }
+      dir.setRandomIOExceptionRate(0, 0);
+      w.close();
+      dir.close();
+    }
+    startDir.close();
+  }
+  
+  // LUCENE-1429
+  public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
+
+    final List<Throwable> thrown = new ArrayList<Throwable>();
+    final Directory dir = newDirectory();
+    final IndexWriter writer = new IndexWriter(dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())) {
+        @Override
+        public void message(final String message) {
+          if (message.startsWith("now flush at close") && 0 == thrown.size()) {
+            thrown.add(null);
+            throw new OutOfMemoryError("fake OOME at " + message);
+          }
+        }
+      };
+
+    // need to set an info stream so message is called
+    writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
+    try {
+      writer.close();
+      fail("OutOfMemoryError expected");
+    }
+    catch (final OutOfMemoryError expected) {}
+
+    // throws IllegalStateEx w/o bug fix
+    writer.close();
+    dir.close();
+  }
+  
+  // LUCENE-1347
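+  // Fails at the "rollback before checkpoint" test point so the test below can
+  // verify that rollback() throws but does not hang.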
+  private static final class MockIndexWriter4 extends IndexWriter {
+
+    public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean doFail;
+
+    @Override
+    boolean testPoint(String name) {
+      if (doFail && name.equals("rollback before checkpoint"))
+        throw new RuntimeException("intentionally failing");
+      return true;
+    }
+  }
+  
+  // LUCENE-1347
+  public void testRollbackExceptionHang() throws Throwable {
+    Directory dir = newDirectory();
+    MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+    addDoc(w);
+    w.doFail = true;
+    try {
+      w.rollback();
+      fail("did not hit intentional RuntimeException");
+    } catch (RuntimeException re) {
+      // expected
+    }
+    
+    w.doFail = false;
+    w.rollback();
+    dir.close();
+  }
+  
+  // LUCENE-1044: Simulate checksum error in segments_N
+  public void testSegmentsChecksumError() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+    // add 100 documents
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+    }
+
+    // close
+    writer.close();
+
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
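+    // Forge a next-generation segments file with its final byte flipped so its
+    // checksum is invalid; the reader must fall back to the intact segments_N: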
+    final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
+    IndexInput in = dir.openInput(segmentsFileName);
+    IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
+    out.copyBytes(in, in.length()-1);
+    byte b = in.readByte();
+    out.writeByte((byte) (1+b));
+    out.close();
+    in.close();
+
+    IndexReader reader = null;
+    try {
+      reader = IndexReader.open(dir, true);
+    } catch (IOException e) {
+      e.printStackTrace(System.out);
+      fail("segmentInfos failed to retry fallback to correct segments_N file");
+    }
+    reader.close();
+    dir.close();
+  }
+  
+  // Simulate a corrupt index by removing the last byte of
+  // the latest segments file and make sure we get an
+  // IOException trying to open the index:
+  public void testSimulatedCorruptIndex1() throws IOException {
+      Directory dir = newDirectory();
+
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+      // add 100 documents
+      for (int i = 0; i < 100; i++) {
+          addDoc(writer);
+      }
+
+      // close
+      writer.close();
+
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
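+      // Write a truncated next-generation segments file, then delete the
+      // current one so no intact segments file remains: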
+      String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
+      String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                                 "",
+                                                                 1+gen);
+      IndexInput in = dir.openInput(fileNameIn);
+      IndexOutput out = dir.createOutput(fileNameOut);
+      long length = in.length();
+      for(int i=0;i<length-1;i++) {
+        out.writeByte(in.readByte());
+      }
+      in.close();
+      out.close();
+      dir.deleteFile(fileNameIn);
+
+      IndexReader reader = null;
+      try {
+        reader = IndexReader.open(dir, true);
+        fail("reader did not hit IOException on opening a corrupt index");
+      } catch (Exception e) {
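+        // expected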
+      }
+      if (reader != null) {
+        reader.close();
+      }
+      dir.close();
+  }
+  
+  // Simulate a corrupt index by removing one of the cfs
+  // files and make sure we get an IOException trying to
+  // open the index:
+  public void testSimulatedCorruptIndex2() throws IOException {
+      Directory dir = newDirectory();
+
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+      ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true);
+
+      // add 100 documents
+      for (int i = 0; i < 100; i++) {
+          addDoc(writer);
+      }
+
+      // close
+      writer.close();
+
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+      String[] files = dir.listAll();
+      boolean corrupted = false;
+      for(int i=0;i<files.length;i++) {
+        if (files[i].endsWith(".cfs")) {
+          dir.deleteFile(files[i]);
+          corrupted = true;
+          break;
+        }
+      }
+      assertTrue("failed to find cfs file to remove", corrupted);
+
+      IndexReader reader = null;
+      try {
+        reader = IndexReader.open(dir, true);
+        fail("reader did not hit IOException on opening a corrupt index");
+      } catch (Exception e) {
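+        // expected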
+      }
+      if (reader != null) {
+        reader.close();
+      }
+      dir.close();
+  }
+  
+  // Simulate a writer that crashed while writing segments
+  // file: make sure we can still open the index (ie,
+  // gracefully fallback to the previous segments file),
+  // and that we can add to the index:
+  public void testSimulatedCrashedWriter() throws IOException {
+      MockDirectoryWrapper dir = newDirectory();
+      dir.setPreventDoubleWrite(false);
+
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+      // add 100 documents
+      for (int i = 0; i < 100; i++) {
+          addDoc(writer);
+      }
+
+      // close
+      writer.close();
+
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+      // Make the next segments file, with last byte
+      // missing, to simulate a writer that crashed while
+      // writing segments file:
+      String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
+      String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                                 "",
+                                                                 1+gen);
+      IndexInput in = dir.openInput(fileNameIn);
+      IndexOutput out = dir.createOutput(fileNameOut);
+      long length = in.length();
+      for(int i=0;i<length-1;i++) {
+        out.writeByte(in.readByte());
+      }
+      in.close();
+      out.close();
+
+      IndexReader reader = null;
+      try {
+        reader = IndexReader.open(dir, true);
+      } catch (Exception e) {
+        fail("reader failed to open on a crashed index");
+      }
+      reader.close();
+
+      try {
+        writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
+      } catch (Exception e) {
+        e.printStackTrace(System.out);
+        fail("writer failed to open on a crashed index");
+      }
+
+      // add 100 documents
+      for (int i = 0; i < 100; i++) {
+          addDoc(writer);
+      }
+
+      // close
+      writer.close();
+      dir.close();
+  }
 }