Posted to java-commits@lucene.apache.org by mi...@apache.org on 2007/03/13 10:06:23 UTC

svn commit: r517599 [2/2] - in /lucene/java/trunk: ./ src/java/org/apache/lucene/index/ src/site/src/documentation/content/xdocs/ src/test/org/apache/lucene/index/ src/test/org/apache/lucene/store/

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java Tue Mar 13 02:06:22 2007
@@ -88,16 +88,9 @@
     for (int i = 0; i < files.length; i++) {
       String file = files[i];
       if (file.startsWith(IndexFileNames.SEGMENTS) && !file.equals(IndexFileNames.SEGMENTS_GEN)) {
-        if (file.equals(IndexFileNames.SEGMENTS)) {
-          // Pre lock-less commits:
-          if (max == -1) {
-            max = 0;
-          }
-        } else {
-          long v = Long.parseLong(file.substring(prefixLen), Character.MAX_RADIX);
-          if (v > max) {
-            max = v;
-          }
+        long gen = generationFromSegmentsFileName(file);
+        if (gen > max) {
+          max = gen;
         }
       }
     }
@@ -152,6 +145,22 @@
   }
 
   /**
+   * Parse the generation off the segments file name and
+   * return it.
+   */
+  public static long generationFromSegmentsFileName(String fileName) {
+    if (fileName.equals(IndexFileNames.SEGMENTS)) {
+      return 0;
+    } else if (fileName.startsWith(IndexFileNames.SEGMENTS)) {
+      return Long.parseLong(fileName.substring(1+IndexFileNames.SEGMENTS.length()),
+                            Character.MAX_RADIX);
+    } else {
+      throw new IllegalArgumentException("fileName \"" + fileName + "\" is not a segments file");
+    }
+  }
+
+
+  /**
    * Get the next segments_N filename that will be written.
    */
   public String getNextSegmentFileName() {
@@ -181,12 +190,8 @@
 
     IndexInput input = directory.openInput(segmentFileName);
 
-    if (segmentFileName.equals(IndexFileNames.SEGMENTS)) {
-      generation = 0;
-    } else {
-      generation = Long.parseLong(segmentFileName.substring(1+IndexFileNames.SEGMENTS.length()),
-                                  Character.MAX_RADIX);
-    }
+    generation = generationFromSegmentsFileName(segmentFileName);
+
     lastGeneration = generation;
 
     try {
@@ -255,6 +260,8 @@
 
     IndexOutput output = directory.createOutput(segmentFileName);
 
+    boolean success = false;
+
     try {
       output.writeInt(FORMAT_SINGLE_NORM_FILE); // write FORMAT
       output.writeLong(++version); // every write changes
@@ -266,7 +273,16 @@
       }         
     }
     finally {
-      output.close();
+      try {
+        output.close();
+        success = true;
+      } finally {
+        if (!success) {
+          // Try not to leave a truncated segments_N file in
+          // the index:
+          directory.deleteFile(segmentFileName);
+        }
+      }
     }
 
     try {
@@ -304,6 +320,9 @@
    */
   public long getVersion() {
     return version;
+  }
+  public long getGeneration() {
+    return generation;
   }
 
   /**

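A minimal standalone sketch of the "success flag" idiom introduced in the write path above, so that a failing close() does not leave a truncated output file behind. It uses plain java.io with a hypothetical file name and payload rather than the Lucene Directory/IndexOutput API, purely as illustration:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class SafeWriteSketch {
  public static void write(File file, byte[] data) throws IOException {
    OutputStream out = new FileOutputStream(file);
    boolean success = false;
    try {
      out.write(data);
    } finally {
      try {
        out.close();
        success = true;
      } finally {
        if (!success) {
          // close() did not complete: best effort not to leave a truncated file.
          file.delete();
        }
      }
    }
  }
}
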
Modified: lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java Tue Mar 13 02:06:22 2007
@@ -77,15 +77,6 @@
 
     private void reWrite(SegmentInfo si) throws IOException {
       // NOTE: norms are re-written in regular directory, not cfs
-
-      String oldFileName = si.getNormFileName(this.number);
-      if (oldFileName != null && !oldFileName.endsWith("." + IndexFileNames.NORMS_EXTENSION)) {
-        // Mark this file for deletion.  Note that we don't
-        // actually try to delete it until the new segments files is
-        // successfully written:
-        deleter.addPendingFile(oldFileName);
-      }
-
       si.advanceNormGen(this.number);
       IndexOutput out = directory().createOutput(si.getNormFileName(this.number));
       try {
@@ -227,14 +218,6 @@
 
   protected void doCommit() throws IOException {
     if (deletedDocsDirty) {               // re-write deleted
-      String oldDelFileName = si.getDelFileName();
-      if (oldDelFileName != null) {
-        // Mark this file for deletion.  Note that we don't
-        // actually try to delete it until the new segments files is
-        // successfully written:
-        deleter.addPendingFile(oldDelFileName);
-      }
-
       si.advanceDelGen();
 
       // We can write directly to the actual name (vs to a
@@ -243,13 +226,6 @@
       deletedDocs.write(directory(), si.getDelFileName());
     }
     if (undeleteAll && si.hasDeletions()) {
-      String oldDelFileName = si.getDelFileName();
-      if (oldDelFileName != null) {
-        // Mark this file for deletion.  Note that we don't
-        // actually try to delete it until the new segments files is
-        // successfully written:
-        deleter.addPendingFile(oldDelFileName);
-      }
       si.clearDelGen();
     }
     if (normsDirty) {               // re-write norms
@@ -320,37 +296,7 @@
   }
 
   Vector files() throws IOException {
-    Vector files = new Vector(16);
-
-    if (si.getUseCompoundFile()) {
-      String name = segment + ".cfs";
-      if (directory().fileExists(name)) {
-        files.addElement(name);
-      }
-    } else {
-      for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS.length; i++) {
-        String name = segment + "." + IndexFileNames.INDEX_EXTENSIONS[i];
-        if (directory().fileExists(name))
-          files.addElement(name);
-      }
-    }
-
-    if (si.hasDeletions()) {
-      files.addElement(si.getDelFileName());
-    }
-
-    boolean addedNrm = false;
-    for (int i = 0; i < fieldInfos.size(); i++) {
-      String name = si.getNormFileName(i);
-      if (name != null && directory().fileExists(name)) {
-        if (name.endsWith("." + IndexFileNames.NORMS_EXTENSION)) {
-          if (addedNrm) continue; // add .nrm just once
-          addedNrm = true;
-        }
-        files.addElement(name);
-      }
-    }
-    return files;
+    return new Vector(si.files());
   }
 
   public TermEnum terms() {

Modified: lucene/java/trunk/src/site/src/documentation/content/xdocs/fileformats.xml
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/site/src/documentation/content/xdocs/fileformats.xml?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/site/src/documentation/content/xdocs/fileformats.xml (original)
+++ lucene/java/trunk/src/site/src/documentation/content/xdocs/fileformats.xml Tue Mar 13 02:06:22 2007
@@ -771,7 +771,9 @@
                     generation is the active one (when older
                     segments_N files are present it's because they
                     temporarily cannot be deleted, or, a writer is in
-                    the process of committing). This file lists each
+                    the process of committing, or a custom
+                    <a href="http://lucene.apache.org/java/docs/api/org/apache/lucene/index/IndexDeletionPolicy.html">IndexDeletionPolicy</a>
+		    is in use). This file lists each
                     segment by name, has details about the separate
                     norms and deletion files, and also contains the
                     size of each segment.

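The IndexDeletionPolicy mentioned above decides which commit points (segments_N files and the files they reference) may be removed. A minimal sketch, assuming the onInit/onCommit/IndexCommitPoint API exercised by the new TestDeletionPolicy further down (commits are passed oldest to newest); this is not the shipped KeepOnlyLastCommitDeletionPolicy source, just an illustration of the contract:

package org.apache.lucene.index;

import java.util.List;

class KeepNewestOnlySketch implements IndexDeletionPolicy {
  public void onInit(List commits) {
    // Apply the same rule when the writer or reader first sees the index:
    onCommit(commits);
  }
  public void onCommit(List commits) {
    // Delete every commit point except the most recent one:
    for (int i = 0; i < commits.size() - 1; i++) {
      ((IndexCommitPoint) commits.get(i)).delete();
    }
  }
}
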
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Tue Mar 13 02:06:22 2007
@@ -21,7 +21,7 @@
 import java.util.zip.*;
 
 /*
-  Verify we can read the pre-XXX file format, do searches
+  Verify we can read the pre-2.1 file format, do searches
   against it, and add documents to it.
 */
 
@@ -104,7 +104,11 @@
     for(int i=0;i<oldNames.length;i++) {
       String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
       unzip(dirName, oldNames[i]);
-      changeIndexNoAdds(oldNames[i]);
+      changeIndexNoAdds(oldNames[i], true);
+      rmDir(oldNames[i]);
+
+      unzip(dirName, oldNames[i]);
+      changeIndexNoAdds(oldNames[i], false);
       rmDir(oldNames[i]);
     }
   }
@@ -114,7 +118,11 @@
     for(int i=0;i<oldNames.length;i++) {
       String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
       unzip(dirName, oldNames[i]);
-      changeIndexWithAdds(oldNames[i]);
+      changeIndexWithAdds(oldNames[i], true);
+      rmDir(oldNames[i]);
+
+      unzip(dirName, oldNames[i]);
+      changeIndexWithAdds(oldNames[i], false);
       rmDir(oldNames[i]);
     }
   }
@@ -141,13 +149,14 @@
 
   /* Open pre-lockless index, add docs, do a delete &
    * setNorm, and search */
-  public void changeIndexWithAdds(String dirName) throws IOException {
+  public void changeIndexWithAdds(String dirName, boolean autoCommit) throws IOException {
 
     dirName = fullDir(dirName);
 
     Directory dir = FSDirectory.getDirectory(dirName);
+
     // open writer
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+    IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
 
     // add 10 docs
     for(int i=0;i<10;i++) {
@@ -166,7 +175,7 @@
     assertEquals("wrong first document", "21", d.get("id"));
     searcher.close();
 
-    // make sure we can do another delete & another setNorm against this
+    // make sure we can do a delete & setNorm against this
     // pre-lockless segment:
     IndexReader reader = IndexReader.open(dir);
     Term searchTerm = new Term("id", "6");
@@ -175,7 +184,7 @@
     reader.setNorm(22, "content", (float) 2.0);
     reader.close();
 
-    // make sure 2nd delete & 2nd norm "took":
+    // make sure they "took":
     searcher = new IndexSearcher(dir);
     hits = searcher.search(new TermQuery(new Term("content", "aaa")));
     assertEquals("wrong number of hits", 43, hits.length());
@@ -184,7 +193,7 @@
     searcher.close();
 
     // optimize
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+    writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
     writer.optimize();
     writer.close();
 
@@ -200,7 +209,7 @@
 
   /* Open pre-lockless index, add docs, do a delete &
    * setNorm, and search */
-  public void changeIndexNoAdds(String dirName) throws IOException {
+  public void changeIndexNoAdds(String dirName, boolean autoCommit) throws IOException {
 
     dirName = fullDir(dirName);
 
@@ -214,7 +223,7 @@
     assertEquals("wrong first document", "21", d.get("id"));
     searcher.close();
 
-    // make sure we can do another delete & another setNorm against this
+    // make sure we can do a delete & setNorm against this
     // pre-lockless segment:
     IndexReader reader = IndexReader.open(dir);
     Term searchTerm = new Term("id", "6");
@@ -223,7 +232,7 @@
     reader.setNorm(22, "content", (float) 2.0);
     reader.close();
 
-    // make sure 2nd delete & 2nd norm "took":
+    // make sure they "took":
     searcher = new IndexSearcher(dir);
     hits = searcher.search(new TermQuery(new Term("content", "aaa")));
     assertEquals("wrong number of hits", 33, hits.length());
@@ -232,7 +241,7 @@
     searcher.close();
 
     // optimize
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+    IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
     writer.optimize();
     writer.close();
 
@@ -273,66 +282,78 @@
 
   /* Verifies that the expected file names were produced */
 
-  // disable until hardcoded file names are fixes:
   public void testExactFileNames() throws IOException {
 
-    String outputDir = "lucene.backwardscompat0.index";
-    Directory dir = FSDirectory.getDirectory(fullDir(outputDir));
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
-    for(int i=0;i<35;i++) {
-      addDoc(writer, i);
-    }
-    assertEquals("wrong doc count", 35, writer.docCount());
-    writer.close();
+    for(int pass=0;pass<2;pass++) {
 
-    // Delete one doc so we get a .del file:
-    IndexReader reader = IndexReader.open(dir);
-    Term searchTerm = new Term("id", "7");
-    int delCount = reader.deleteDocuments(searchTerm);
-    assertEquals("didn't delete the right number of documents", 1, delCount);
+      String outputDir = "lucene.backwardscompat0.index";
 
-    // Set one norm so we get a .s0 file:
-    reader.setNorm(21, "content", (float) 1.5);
-    reader.close();
+      try {
+        Directory dir = FSDirectory.getDirectory(fullDir(outputDir));
 
-    // The numbering of fields can vary depending on which
-    // JRE is in use.  On some JREs we see content bound to
-    // field 0; on others, field 1.  So, here we have to
-    // figure out which field number corresponds to
-    // "content", and then set our expected file names below
-    // accordingly:
-    CompoundFileReader cfsReader = new CompoundFileReader(dir, "_2.cfs");
-    FieldInfos fieldInfos = new FieldInfos(cfsReader, "_2.fnm");
-    int contentFieldIndex = -1;
-    for(int i=0;i<fieldInfos.size();i++) {
-      FieldInfo fi = fieldInfos.fieldInfo(i);
-      if (fi.name.equals("content")) {
-        contentFieldIndex = i;
-        break;
+        boolean autoCommit = 0 == pass;
+      
+        IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+        //IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+        for(int i=0;i<35;i++) {
+          addDoc(writer, i);
+        }
+        assertEquals("wrong doc count", 35, writer.docCount());
+        writer.close();
+
+        // Delete one doc so we get a .del file:
+        IndexReader reader = IndexReader.open(dir);
+        Term searchTerm = new Term("id", "7");
+        int delCount = reader.deleteDocuments(searchTerm);
+        assertEquals("didn't delete the right number of documents", 1, delCount);
+
+        // Set one norm so we get a .s0 file:
+        reader.setNorm(21, "content", (float) 1.5);
+        reader.close();
+
+        // The numbering of fields can vary depending on which
+        // JRE is in use.  On some JREs we see content bound to
+        // field 0; on others, field 1.  So, here we have to
+        // figure out which field number corresponds to
+        // "content", and then set our expected file names below
+        // accordingly:
+        CompoundFileReader cfsReader = new CompoundFileReader(dir, "_2.cfs");
+        FieldInfos fieldInfos = new FieldInfos(cfsReader, "_2.fnm");
+        int contentFieldIndex = -1;
+        for(int i=0;i<fieldInfos.size();i++) {
+          FieldInfo fi = fieldInfos.fieldInfo(i);
+          if (fi.name.equals("content")) {
+            contentFieldIndex = i;
+            break;
+          }
+        }
+        cfsReader.close();
+        assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
+
+        // Now verify file names:
+        String[] expected = {"_0.cfs",
+                             "_0_1.del",
+                             "_1.cfs",
+                             "_2.cfs",
+                             "_2_1.s" + contentFieldIndex,
+                             "_3.cfs",
+                             "segments_a",
+                             "segments.gen"};
+        if (!autoCommit) {
+          expected[6] = "segments_3";
+        }
+
+        String[] actual = dir.list();
+        Arrays.sort(expected);
+        Arrays.sort(actual);
+        if (!Arrays.equals(expected, actual)) {
+          fail("incorrect filenames in index: expected:\n    " + asString(expected) + "\n  actual:\n    " + asString(actual));
+        }
+        dir.close();
+      } finally {
+        rmDir(outputDir);
       }
     }
-    cfsReader.close();
-    assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
-
-    // Now verify file names:
-    String[] expected = {"_0.cfs",
-                         "_0_1.del",
-                         "_1.cfs",
-                         "_2.cfs",
-                         "_2_1.s" + contentFieldIndex,
-                         "_3.cfs",
-                         "segments_a",
-                         "segments.gen"};
-
-    String[] actual = dir.list();
-    Arrays.sort(expected);
-    Arrays.sort(actual);
-    if (!Arrays.equals(expected, actual)) {
-      fail("incorrect filenames in index: expected:\n    " + asString(expected) + "\n  actual:\n    " + asString(actual));
-    }
-    dir.close();
-
-    rmDir(outputDir);
   }
 
   private String asString(String[] l) {

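The expected names "segments_3" and "segments_a" above fall out of the base-36 generation encoding that generationFromSegmentsFileName parses (Character.MAX_RADIX is 36). A quick illustrative check, not part of the commit:

public class GenerationNameSketch {
  public static void main(String[] args) {
    System.out.println(Long.parseLong("3", Character.MAX_RADIX)); // 3
    System.out.println(Long.parseLong("a", Character.MAX_RADIX)); // 10
    System.out.println(Long.toString(10, Character.MAX_RADIX));   // a
  }
}
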
Added: lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java?view=auto&rev=517599
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java (added)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java Tue Mar 13 02:06:22 2007
@@ -0,0 +1,618 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import junit.framework.TestCase;
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Hits;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import java.util.List;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.HashSet;
+
+/*
+  Verify that custom IndexDeletionPolicy implementations are
+  invoked as expected and that commit points they delete are removed.
+*/
+
+public class TestDeletionPolicy extends TestCase
+{
+  private void verifyCommitOrder(List commits) {
+    long last = SegmentInfos.generationFromSegmentsFileName(((IndexCommitPoint) commits.get(0)).getSegmentsFileName());
+    for(int i=1;i<commits.size();i++) {
+      long now = SegmentInfos.generationFromSegmentsFileName(((IndexCommitPoint) commits.get(i)).getSegmentsFileName());
+      assertTrue("SegmentInfos commits are out-of-order", now > last);
+      last = now;
+    }
+  }
+
+  class KeepAllDeletionPolicy implements IndexDeletionPolicy {
+    int numOnInit;
+    int numOnCommit;
+    public void onInit(List commits) {
+      verifyCommitOrder(commits);
+      numOnInit++;
+    }
+    public void onCommit(List commits) {
+      verifyCommitOrder(commits);
+      numOnCommit++;
+    }
+  }
+
+  /**
+   * This is useful for adding to a big index w/ autoCommit
+   * false when you know readers are not using it.
+   */
+  class KeepNoneOnInitDeletionPolicy implements IndexDeletionPolicy {
+    int numOnInit;
+    int numOnCommit;
+    public void onInit(List commits) {
+      verifyCommitOrder(commits);
+      numOnInit++;
+      // On init, delete all commit points:
+      Iterator it = commits.iterator();
+      while(it.hasNext()) {
+        ((IndexCommitPoint) it.next()).delete();
+      }
+    }
+    public void onCommit(List commits) {
+      verifyCommitOrder(commits);
+      int size = commits.size();
+      // Delete all but last one:
+      for(int i=0;i<size-1;i++) {
+        ((IndexCommitPoint) commits.get(i)).delete();
+      }
+      numOnCommit++;
+    }
+  }
+
+  class KeepLastNDeletionPolicy implements IndexDeletionPolicy {
+    int numOnInit;
+    int numOnCommit;
+    int numToKeep;
+    int numDelete;
+    Set seen = new HashSet();
+
+    public KeepLastNDeletionPolicy(int numToKeep) {
+      this.numToKeep = numToKeep;
+    }
+
+    public void onInit(List commits) {
+      verifyCommitOrder(commits);
+      numOnInit++;
+      // pass isCommit=false: deletions on init are not counted as commits
+      doDeletes(commits, false);
+    }
+
+    public void onCommit(List commits) {
+      verifyCommitOrder(commits);
+      doDeletes(commits, true);
+    }
+    
+    private void doDeletes(List commits, boolean isCommit) {
+
+      // Assert that we really are only called for each new
+      // commit:
+      if (isCommit) {
+        String fileName = ((IndexCommitPoint) commits.get(commits.size()-1)).getSegmentsFileName();
+        if (seen.contains(fileName)) {
+          throw new RuntimeException("onCommit was called twice on the same commit point: " + fileName);
+        }
+        seen.add(fileName);
+        numOnCommit++;
+      }
+      int size = commits.size();
+      for(int i=0;i<size-numToKeep;i++) {
+        ((IndexCommitPoint) commits.get(i)).delete();
+        numDelete++;
+      }
+    }
+  }
+
+  /*
+   * Delete a commit only once it has been obsoleted for more
+   * than N seconds.
+   */
+  class ExpirationTimeDeletionPolicy implements IndexDeletionPolicy {
+
+    Directory dir;
+    double expirationTimeSeconds;
+    int numDelete;
+
+    public ExpirationTimeDeletionPolicy(Directory dir, double seconds) {
+      this.dir = dir;
+      this.expirationTimeSeconds = seconds;
+    }
+
+    public void onInit(List commits) throws IOException {
+      verifyCommitOrder(commits);
+      onCommit(commits);
+    }
+
+    public void onCommit(List commits) throws IOException {
+      verifyCommitOrder(commits);
+
+      IndexCommitPoint lastCommit = (IndexCommitPoint) commits.get(commits.size()-1);
+
+      // Any commit older than expireTime should be deleted:
+      double expireTime = dir.fileModified(lastCommit.getSegmentsFileName())/1000.0 - expirationTimeSeconds;
+
+      Iterator it = commits.iterator();
+
+      while(it.hasNext()) {
+        IndexCommitPoint commit = (IndexCommitPoint) it.next();
+        double modTime = dir.fileModified(commit.getSegmentsFileName())/1000.0;
+        if (commit != lastCommit && modTime < expireTime) {
+          commit.delete();
+          numDelete += 1;
+        }
+      }
+    }
+  }
+
+  /*
+   * Test "by time expiration" deletion policy:
+   */
+  public void testExpirationTimeDeletionPolicy() throws IOException, InterruptedException {
+
+    final double SECONDS = 2.0;
+
+    boolean autoCommit = false;
+    boolean useCompoundFile = true;
+
+    Directory dir = new RAMDirectory();
+    ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
+    IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+    writer.setUseCompoundFile(useCompoundFile);
+    writer.close();
+
+    for(int i=0;i<7;i++) {
+      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer.setUseCompoundFile(useCompoundFile);
+      for(int j=0;j<17;j++) {
+        addDoc(writer);
+      }
+      writer.close();
+
+      // Make sure to sleep long enough so that some commit
+      // points will be deleted:
+      Thread.sleep((int) (1000.0*(SECONDS/5.0)));
+    }
+
+    // First, make sure the policy in fact deleted something:
+    assertTrue("no commits were deleted", policy.numDelete > 0);
+
+    // Then simplistic check: just verify that the
+    // segments_N's that still exist are in fact within SECONDS
+    // seconds of the last one's mod time, and, that I can
+    // open a reader on each:
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    
+    String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                            "",
+                                                            gen);
+    long newestModTime = dir.fileModified(fileName);
+      
+    while(gen > 0) {
+      try {
+        IndexReader reader = IndexReader.open(dir);
+        reader.close();
+        fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                         "",
+                                                         gen);
+        long modTime = dir.fileModified(fileName);
+        assertTrue("commit point was older than " + SECONDS + " seconds but did not get deleted", newestModTime - modTime < (SECONDS*1000));
+      } catch (IOException e) {
+        // OK
+        break;
+      }
+      
+      dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+      gen--;
+    }
+
+    dir.close();
+  }
+
+  /*
+   * Test a silly deletion policy that keeps all commits around.
+   */
+  public void testKeepAllDeletionPolicy() throws IOException {
+
+    for(int pass=0;pass<4;pass++) {
+
+      boolean autoCommit = pass < 2;
+      boolean useCompoundFile = (pass % 2) > 0;
+
+      KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy();
+
+      Directory dir = new RAMDirectory();
+
+      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      writer.setUseCompoundFile(useCompoundFile);
+      for(int i=0;i<107;i++) {
+        addDoc(writer);
+      }
+      writer.close();
+
+      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer.setUseCompoundFile(useCompoundFile);
+      writer.optimize();
+      writer.close();
+
+      assertEquals(2, policy.numOnInit);
+      if (autoCommit) {
+        assertTrue(policy.numOnCommit > 2);
+      } else {
+        // If we are not auto committing then there should
+        // be exactly 2 commits (one per close above):
+        assertEquals(2, policy.numOnCommit);
+      }
+
+      // Simplistic check: just verify all segments_N's still
+      // exist, and, I can open a reader on each:
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      while(gen > 0) {
+        IndexReader reader = IndexReader.open(dir);
+        reader.close();
+        dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+        gen--;
+
+        if (gen > 0) {
+          // Removing a commit point should have orphan'd at
+          // least one index file.
+          // Open & close a writer and assert that it
+          // actually removed something:
+          int preCount = dir.list().length;
+          writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, policy);
+          writer.close();
+          int postCount = dir.list().length;
+          assertTrue(postCount < preCount);
+        }
+      }
+
+      dir.close();
+    }
+  }
+
+  /* Test keeping NO commit points.  This is a viable and
+   * useful case eg where you want to build a big index with
+   * autoCommit false and you know there are no readers.
+   */
+  public void testKeepNoneOnInitDeletionPolicy() throws IOException {
+
+    for(int pass=0;pass<4;pass++) {
+
+      boolean autoCommit = pass < 2;
+      boolean useCompoundFile = (pass % 2) > 0;
+
+      KeepNoneOnInitDeletionPolicy policy = new KeepNoneOnInitDeletionPolicy();
+
+      Directory dir = new RAMDirectory();
+
+      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      writer.setUseCompoundFile(useCompoundFile);
+      for(int i=0;i<107;i++) {
+        addDoc(writer);
+      }
+      writer.close();
+
+      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer.setUseCompoundFile(useCompoundFile);
+      writer.optimize();
+      writer.close();
+
+      assertEquals(2, policy.numOnInit);
+      if (autoCommit) {
+        assertTrue(policy.numOnCommit > 2);
+      } else {
+        // If we are not auto committing then there should
+        // be exactly 2 commits (one per close above):
+        assertEquals(2, policy.numOnCommit);
+      }
+
+      // Simplistic check: just verify the index is in fact
+      // readable:
+      IndexReader reader = IndexReader.open(dir);
+      reader.close();
+
+      dir.close();
+    }
+  }
+
+  /*
+   * Test a deletion policy that keeps last N commits.
+   */
+  public void testKeepLastNDeletionPolicy() throws IOException {
+
+    final int N = 5;
+
+    for(int pass=0;pass<4;pass++) {
+
+      boolean autoCommit = pass < 2;
+      boolean useCompoundFile = (pass % 2) > 0;
+
+      Directory dir = new RAMDirectory();
+
+      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
+
+      for(int j=0;j<N+1;j++) {
+        IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+        writer.setUseCompoundFile(useCompoundFile);
+        for(int i=0;i<17;i++) {
+          addDoc(writer);
+        }
+        writer.optimize();
+        writer.close();
+      }
+
+      assertTrue(policy.numDelete > 0);
+      assertEquals(N+1, policy.numOnInit);
+      if (autoCommit) {
+        assertTrue(policy.numOnCommit > 1);
+      } else {
+        assertEquals(N+1, policy.numOnCommit);
+      }
+
+      // Simplistic check: just verify only the past N segments_N's still
+      // exist, and, I can open a reader on each:
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+      for(int i=0;i<N+1;i++) {
+        try {
+          IndexReader reader = IndexReader.open(dir);
+          reader.close();
+          if (i == N) {
+            fail("should have failed on commits prior to last " + N);
+          }
+        } catch (IOException e) {
+          if (i != N) {
+            throw e;
+          }
+        }
+        if (i < N) {
+          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+        }
+        gen--;
+      }
+
+      dir.close();
+    }
+  }
+
+  /*
+   * Test a deletion policy that keeps last N commits
+   * around, with reader doing deletes.
+   */
+  public void testKeepLastNDeletionPolicyWithReader() throws IOException {
+
+    final int N = 10;
+
+    for(int pass=0;pass<4;pass++) {
+
+      boolean autoCommit = pass < 2;
+      boolean useCompoundFile = (pass % 2) > 0;
+
+      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
+
+      Directory dir = new RAMDirectory();
+      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      writer.setUseCompoundFile(useCompoundFile);
+      writer.close();
+      Term searchTerm = new Term("content", "aaa");        
+      Query query = new TermQuery(searchTerm);
+
+      for(int i=0;i<N+1;i++) {
+        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+        writer.setUseCompoundFile(useCompoundFile);
+        for(int j=0;j<17;j++) {
+          addDoc(writer);
+        }
+        // this is a commit when autoCommit=false:
+        writer.close();
+        IndexReader reader = IndexReader.open(dir, policy);
+        reader.deleteDocument(3*i+1);
+        reader.setNorm(4*i+1, "content", 2.0F);
+        IndexSearcher searcher = new IndexSearcher(reader);
+        Hits hits = searcher.search(query);
+        assertEquals(16*(1+i), hits.length());
+        // this is a commit when autoCommit=false:
+        reader.close();
+        searcher.close();
+      }
+      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer.setUseCompoundFile(useCompoundFile);
+      writer.optimize();
+      // this is a commit when autoCommit=false:
+      writer.close();
+
+      assertEquals(2*(N+2), policy.numOnInit);
+      if (autoCommit) {
+        assertTrue(policy.numOnCommit > 2*(N+2)-1);
+      } else {
+        assertEquals(2*(N+2)-1, policy.numOnCommit);
+      }
+
+      IndexSearcher searcher = new IndexSearcher(dir);
+      Hits hits = searcher.search(query);
+      assertEquals(176, hits.length());
+
+      // Simplistic check: just verify only the past N segments_N's still
+      // exist, and, I can open a reader on each:
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+
+      int expectedCount = 176;
+
+      for(int i=0;i<N+1;i++) {
+        try {
+          IndexReader reader = IndexReader.open(dir);
+
+          // Work backwards in commits on what the expected
+          // count should be.  Only check this in the
+          // autoCommit false case:
+          if (!autoCommit) {
+            searcher = new IndexSearcher(reader);
+            hits = searcher.search(query);
+            if (i > 1) {
+              if (i % 2 == 0) {
+                expectedCount += 1;
+              } else {
+                expectedCount -= 17;
+              }
+            }
+            assertEquals(expectedCount, hits.length());
+            searcher.close();
+          }
+          reader.close();
+          if (i == N) {
+            fail("should have failed on commits before last 5");
+          }
+        } catch (IOException e) {
+          if (i != N) {
+            throw e;
+          }
+        }
+        if (i < N) {
+          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+        }
+        gen--;
+      }
+      
+      dir.close();
+    }
+  }
+
+  /*
+   * Test a deletion policy that keeps last N commits
+   * around, through creates.
+   */
+  public void testKeepLastNDeletionPolicyWithCreates() throws IOException {
+
+    final int N = 10;
+
+    for(int pass=0;pass<4;pass++) {
+
+      boolean autoCommit = pass < 2;
+      boolean useCompoundFile = (pass % 2) > 0;
+
+      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
+
+      Directory dir = new RAMDirectory();
+      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      writer.setUseCompoundFile(useCompoundFile);
+      writer.close();
+      Term searchTerm = new Term("content", "aaa");        
+      Query query = new TermQuery(searchTerm);
+
+      for(int i=0;i<N+1;i++) {
+
+        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+        writer.setUseCompoundFile(useCompoundFile);
+        for(int j=0;j<17;j++) {
+          addDoc(writer);
+        }
+        // this is a commit when autoCommit=false:
+        writer.close();
+        IndexReader reader = IndexReader.open(dir, policy);
+        reader.deleteDocument(3);
+        reader.setNorm(5, "content", 2.0F);
+        IndexSearcher searcher = new IndexSearcher(reader);
+        Hits hits = searcher.search(query);
+        assertEquals(16, hits.length());
+        // this is a commit when autoCommit=false:
+        reader.close();
+        searcher.close();
+
+        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+        // This will not commit: there are no changes
+        // pending because we opened for "create":
+        writer.close();
+      }
+
+      assertEquals(1+3*(N+1), policy.numOnInit);
+      if (autoCommit) {
+        assertTrue(policy.numOnCommit > 3*(N+1)-1);
+      } else {
+        assertEquals(2*(N+1), policy.numOnCommit);
+      }
+
+      IndexSearcher searcher = new IndexSearcher(dir);
+      Hits hits = searcher.search(query);
+      assertEquals(0, hits.length());
+
+      // Simplistic check: just verify only the past N segments_N's still
+      // exist, and, I can open a reader on each:
+      long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+
+      int expectedCount = 0;
+
+      for(int i=0;i<N+1;i++) {
+        try {
+          IndexReader reader = IndexReader.open(dir);
+
+          // Work backwards in commits on what the expected
+          // count should be.  Only check this in the
+          // autoCommit false case:
+          if (!autoCommit) {
+            searcher = new IndexSearcher(reader);
+            hits = searcher.search(query);
+            assertEquals(expectedCount, hits.length());
+            searcher.close();
+            if (expectedCount == 0) {
+              expectedCount = 16;
+            } else if (expectedCount == 16) {
+              expectedCount = 17;
+            } else if (expectedCount == 17) {
+              expectedCount = 0;
+            }
+          }
+          reader.close();
+          if (i == N) {
+            fail("should have failed on commits before last " + N);
+          }
+        } catch (IOException e) {
+          if (i != N) {
+            throw e;
+          }
+        }
+        if (i < N) {
+          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
+        }
+        gen--;
+      }
+      
+      dir.close();
+    }
+  }
+
+  private void addDoc(IndexWriter writer) throws IOException
+  {
+    Document doc = new Document();
+    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
+    writer.addDocument(doc);
+  }
+}

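As a usage note, the two hooks the test above relies on for installing a policy are the IndexWriter constructor that takes an IndexDeletionPolicy and IndexReader.open(Directory, IndexDeletionPolicy). A short sketch of the wiring, using the stock KeepOnlyLastCommitDeletionPolicy and a RAMDirectory purely as placeholders:

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class DeletionPolicyWiringSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexDeletionPolicy policy = new KeepOnlyLastCommitDeletionPolicy();

    // autoCommit=false: the policy is consulted when the writer commits on close.
    IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, policy);
    writer.close();

    // A reader that writes deletes or norms back also consults the policy:
    IndexReader reader = IndexReader.open(dir, policy);
    reader.close();
  }
}
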
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Tue Mar 13 02:06:22 2007
@@ -173,6 +173,8 @@
       out.writeBytes(b, len);
       remainder -= len;
     }
+    in.close();
+    out.close();
   }
 
   private void addDoc(IndexWriter writer, int id) throws IOException

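The two added close() calls fix streams that the test's copy helper left open. A standalone sketch of a slightly more defensive variant (hypothetical helper, plain java.io) that closes both streams in finally even when the copy loop throws:

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CopyAndCloseSketch {
  public static void copy(String src, String dst) throws IOException {
    InputStream in = new FileInputStream(src);
    try {
      OutputStream out = new FileOutputStream(dst);
      try {
        byte[] buffer = new byte[4096];
        int len;
        while ((len = in.read(buffer)) != -1) {
          out.write(buffer, 0, len);
        }
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
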
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java Tue Mar 13 02:06:22 2007
@@ -802,9 +802,7 @@
           String[] startFiles = dir.list();
           SegmentInfos infos = new SegmentInfos();
           infos.read(dir);
-          IndexFileDeleter d = new IndexFileDeleter(infos, dir);
-          d.findDeletableFiles();
-          d.deleteFiles();
+          IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null);
           String[] endFiles = dir.list();
 
           Arrays.sort(startFiles);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java Tue Mar 13 02:06:22 2007
@@ -71,7 +71,7 @@
         reader.close();
 
         // optimize the index and check that the new doc count is correct
-        writer = new IndexWriter(dir, new WhitespaceAnalyzer());
+        writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
         writer.optimize();
         assertEquals(60, writer.docCount());
         writer.close();
@@ -163,7 +163,7 @@
       // addIndexes will certainly run out of space &
       // fail.  Verify that when this happens, index is
       // not corrupt and index in fact has added no
-      // documents.  Then, we increase disk space by 1000
+      // documents.  Then, we increase disk space by 2000
       // bytes each iteration.  At some point there is
       // enough free disk space and addIndexes should
       // succeed and index should show all documents were
@@ -178,11 +178,14 @@
         startDiskUsage += startDir.fileLength(files[i]);
       }
 
-      for(int method=0;method<3;method++) {
+      for(int iter=0;iter<6;iter++) {
 
         // Start with 100 bytes more than we are currently using:
         long diskFree = diskUsage+100;
 
+        boolean autoCommit = iter % 2 == 0;
+        int method = iter/2;
+
         boolean success = false;
         boolean done = false;
 
@@ -195,7 +198,7 @@
           methodName = "addIndexesNoOptimize(Directory[])";
         }
 
-        String testName = "disk full test for method " + methodName + " with disk full at " + diskFree + " bytes";
+        String testName = "disk full test for method " + methodName + " with disk full at " + diskFree + " bytes with autoCommit = " + autoCommit;
 
         int cycleCount = 0;
 
@@ -205,7 +208,7 @@
 
           // Make a new dir that will enforce disk usage:
           MockRAMDirectory dir = new MockRAMDirectory(startDir);
-          writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+          writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
           IOException err = null;
 
           for(int x=0;x<2;x++) {
@@ -285,38 +288,27 @@
               }
             }
 
-            // Whether we succeeded or failed, check that all
-            // un-referenced files were in fact deleted (ie,
-            // we did not create garbage).  Just create a
-            // new IndexFileDeleter, have it delete
-            // unreferenced files, then verify that in fact
-            // no files were deleted:
-            String[] startFiles = dir.list();
-            SegmentInfos infos = new SegmentInfos();
-            infos.read(dir);
-            IndexFileDeleter d = new IndexFileDeleter(infos, dir);
-            d.findDeletableFiles();
-            d.deleteFiles();
-            String[] endFiles = dir.list();
-
-            Arrays.sort(startFiles);
-            Arrays.sort(endFiles);
-
-            /*
-              for(int i=0;i<startFiles.length;i++) {
-              System.out.println("  " + i + ": " + startFiles[i]);
-              }
-            */
+            if (autoCommit) {
+
+              // Whether we succeeded or failed, check that
+              // all un-referenced files were in fact
+              // deleted (ie, we did not create garbage).
+              // Only check this when autoCommit is true:
+              // when it's false, it's expected that there
+              // are unreferenced files (ie they won't be
+              // referenced until the "commit on close").
+              // Just create a new IndexFileDeleter, have it
+              // delete unreferenced files, then verify that
+              // in fact no files were deleted:
 
-            if (!Arrays.equals(startFiles, endFiles)) {
               String successStr;
               if (success) {
                 successStr = "success";
               } else {
                 successStr = "IOException";
-                err.printStackTrace();
               }
-              fail(methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes): before delete:\n    " + arrayToString(startFiles) + "\n  after delete:\n    " + arrayToString(endFiles));
+              String message = methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes)";
+              assertNoUnreferencedFiles(dir, message);
             }
 
             if (debug) {
@@ -335,8 +327,10 @@
             }
             int result = reader.docFreq(searchTerm);
             if (success) {
-              if (result != END_COUNT) {
+              if (autoCommit && result != END_COUNT) {
                 fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
+              } else if (!autoCommit && result != START_COUNT) {
+                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " [autoCommit = false]");
               }
             } else {
               // On hitting exception we still may have added
@@ -374,31 +368,107 @@
               System.out.println("  count is " + result);
             }
 
-            if (result == END_COUNT) {
+            if (done || result == END_COUNT) {
               break;
             }
           }
 
-          // Javadocs state that temp free Directory space
-          // required is at most 2X total input size of
-          // indices so let's make sure:
-          assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
-                     ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
-                     "starting disk usage = " + startDiskUsage + " bytes; " +
-                     "input index disk usage = " + inputDiskUsage + " bytes",
-                     (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
+          if (debug) {
+            System.out.println("  start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
+          }
+
+          if (done) {
+            // Javadocs state that temp free Directory space
+            // required is at most 2X total input size of
+            // indices so let's make sure:
+            assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
+                       ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
+                       "starting disk usage = " + startDiskUsage + " bytes; " +
+                       "input index disk usage = " + inputDiskUsage + " bytes",
+                       (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
+          }
 
           writer.close();
           dir.close();
 
-          // Try again with 1000 more bytes of free space:
-          diskFree += 1000;
+          // Try again with 2000 more bytes of free space:
+          diskFree += 2000;
         }
       }
 
       startDir.close();
     }
 
+    /*
+     * Make sure IndexWriter cleans up on hitting a disk
+     * full exception in addDocument.
+     */
+    public void testAddDocumentOnDiskFull() throws IOException {
+
+      for(int pass=0;pass<3;pass++) {
+        boolean autoCommit = pass == 0;
+        boolean doAbort = pass == 2;
+        long diskFree = 200;
+        while(true) {
+          MockRAMDirectory dir = new MockRAMDirectory();
+          dir.setMaxSizeInBytes(diskFree);
+          IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+          boolean hitError = false;
+          try {
+            for(int i=0;i<200;i++) {
+              addDoc(writer);
+            }
+          } catch (IOException e) {
+            // e.printStackTrace();
+            hitError = true;
+          }
+
+          if (hitError) {
+            if (doAbort) {
+              writer.abort();
+            } else {
+              try {
+                writer.close();
+              } catch (IOException e) {
+                // e.printStackTrace();
+                dir.setMaxSizeInBytes(0);
+                writer.close();
+              }
+            }
+
+            assertNoUnreferencedFiles(dir, "after disk full during addDocument with autoCommit=" + autoCommit);
+
+            // Make sure reader can open the index:
+            IndexReader.open(dir).close();
+
+            dir.close();
+
+            // Now try again w/ more space:
+            diskFree += 500;
+          } else {
+            dir.close();
+            break;
+          }
+        }
+      }
+    
+    }                                               
+
+    public void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
+      String[] startFiles = dir.list();
+      SegmentInfos infos = new SegmentInfos();
+      infos.read(dir);
+      IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null);
+      String[] endFiles = dir.list();
+
+      Arrays.sort(startFiles);
+      Arrays.sort(endFiles);
+
+      if (!Arrays.equals(startFiles, endFiles)) {
+        fail(message + ": before delete:\n    " + arrayToString(startFiles) + "\n  after delete:\n    " + arrayToString(endFiles));
+      }
+    }
+
     /**
      * Make sure optimize doesn't use any more than 1X
      * starting index size as its temporary free space
@@ -692,6 +762,205 @@
         if (reader != null) {
           reader.close();
         }
+    }
+
+    /*
+     * Simple test for "commit on close": open writer with
+     * autoCommit=false, so it will only commit on close,
+     * then add a bunch of docs, making sure reader does not
+     * see these docs until writer is closed.
+     */
+    public void testCommitOnClose() throws IOException {
+        Directory dir = new RAMDirectory();      
+        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+        for (int i = 0; i < 14; i++) {
+          addDoc(writer);
+        }
+        writer.close();
+
+        Term searchTerm = new Term("content", "aaa");        
+        IndexSearcher searcher = new IndexSearcher(dir);
+        Hits hits = searcher.search(new TermQuery(searchTerm));
+        assertEquals("first number of hits", 14, hits.length());
+        searcher.close();
+
+        IndexReader reader = IndexReader.open(dir);
+
+        writer = new IndexWriter(dir, false, new WhitespaceAnalyzer());
+        for(int i=0;i<3;i++) {
+          for(int j=0;j<11;j++) {
+            addDoc(writer);
+          }
+          searcher = new IndexSearcher(dir);
+          hits = searcher.search(new TermQuery(searchTerm));
+          assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
+          searcher.close();
+          assertTrue("reader should have still been current", reader.isCurrent());
+        }
+
+        // Now, close the writer:
+        writer.close();
+        assertFalse("reader should not be current now", reader.isCurrent());
+
+        searcher = new IndexSearcher(dir);
+        hits = searcher.search(new TermQuery(searchTerm));
+        assertEquals("reader did not see changes after writer was closed", 47, hits.length());
+        searcher.close();
+    }
+
+    /*
+     * Simple test for "commit on close": open writer with
+     * autoCommit=false, so it will only commit on close,
+     * then add a bunch of docs, making sure reader does not
+     * see them until writer has closed.  Then instead of
+     * closing the writer, call abort and verify reader sees
+     * nothing was added.  Then verify we can open the index
+     * and add docs to it.
+     */
+    public void testCommitOnCloseAbort() throws IOException {
+      Directory dir = new RAMDirectory();      
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      for (int i = 0; i < 14; i++) {
+        addDoc(writer);
+      }
+      writer.close();
+
+      Term searchTerm = new Term("content", "aaa");        
+      IndexSearcher searcher = new IndexSearcher(dir);
+      Hits hits = searcher.search(new TermQuery(searchTerm));
+      assertEquals("first number of hits", 14, hits.length());
+      searcher.close();
+
+      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
+      for(int j=0;j<17;j++) {
+        addDoc(writer);
+      }
+      // Delete all docs:
+      writer.deleteDocuments(searchTerm);
+
+      searcher = new IndexSearcher(dir);
+      hits = searcher.search(new TermQuery(searchTerm));
+      assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
+      searcher.close();
+
+      // Now, close the writer:
+      writer.abort();
+
+      assertNoUnreferencedFiles(dir, "unreferenced files remain after abort()");
+
+      searcher = new IndexSearcher(dir);
+      hits = searcher.search(new TermQuery(searchTerm));
+      assertEquals("saw changes after writer.abort", 14, hits.length());
+      searcher.close();
+          
+      // Now make sure we can re-open the index, add docs,
+      // and all is good:
+      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
+      for(int i=0;i<12;i++) {
+        for(int j=0;j<17;j++) {
+          addDoc(writer);
+        }
+        searcher = new IndexSearcher(dir);
+        hits = searcher.search(new TermQuery(searchTerm));
+        assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
+        searcher.close();
+      }
+
+      writer.close();
+      searcher = new IndexSearcher(dir);
+      hits = searcher.search(new TermQuery(searchTerm));
+      assertEquals("didn't see changes after close", 218, hits.length());
+      searcher.close();
+
+      dir.close();
+    }
+
+    /*
+     * Verify that a writer with "commit on close" indeed
+     * cleans up the temp segments created after opening
+     * that are not referenced by the starting segments
+     * file.  We check this by using MockRAMDirectory to
+     * measure max temp disk space used.
+     */
+    public void testCommitOnCloseDiskUsage() throws IOException {
+      MockRAMDirectory dir = new MockRAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      for(int j=0;j<30;j++) {
+        addDocWithIndex(writer, j);
+      }
+      writer.close();
+      dir.resetMaxUsedSizeInBytes();
+
+      long startDiskUsage = dir.getMaxUsedSizeInBytes();
+      writer  = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
+      for(int j=0;j<1470;j++) {
+        addDocWithIndex(writer, j);
+      }
+      long midDiskUsage = dir.getMaxUsedSizeInBytes();
+      dir.resetMaxUsedSizeInBytes();
+      writer.optimize();
+      writer.close();
+      long endDiskUsage = dir.getMaxUsedSizeInBytes();
+
+      // Ending index is 50X as large as starting index; due
+      // to 2X disk usage normally we allow 100X max
+      // transient usage.  If something is wrong w/ deleter
+      // and it doesn't delete intermediate segments then it
+      // will exceed this 100X:
+      // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
+      assertTrue("writer used to much space while adding documents when autoCommit=false",     
+                 midDiskUsage < 100*startDiskUsage);
+      assertTrue("writer used to much space after close when autoCommit=false",     
+                 endDiskUsage < 100*startDiskUsage);
+    }
+
+
+    /*
+     * Verify that calling optimize when writer is open for
+     * "commit on close" works correctly both for abort()
+     * and close().
+     */
+    public void testCommitOnCloseOptimize() throws IOException {
+      RAMDirectory dir = new RAMDirectory();      
+      IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+      for(int j=0;j<17;j++) {
+        addDocWithIndex(writer, j);
+      }
+      writer.close();
+
+      writer  = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
+      writer.optimize();
+
+      // Open a reader before closing (committing) the writer:
+      IndexReader reader = IndexReader.open(dir);
+
+      // Reader should see index as unoptimized at this
+      // point:
+      assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
+      reader.close();
+
+      // Abort the writer:
+      writer.abort();
+      assertNoUnreferencedFiles(dir, "aborted writer after optimize");
+
+      // Open a reader after aborting writer:
+      reader = IndexReader.open(dir);
+
+      // Reader should still see index as unoptimized:
+      assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
+      reader.close();
+
+      writer  = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
+      writer.optimize();
+      writer.close();
+      assertNoUnreferencedFiles(dir, "closed writer after optimize");
+
+      // Open a reader after closing the writer:
+      reader = IndexReader.open(dir);
+
+      // Reader should now see index as optimized:
+      assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
+      reader.close();
     }
 
     // Make sure that a Directory implementation that does

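The tests above call an assertNoUnreferencedFiles(dir, message) helper whose body falls outside this hunk.  Below is a hedged, minimal sketch of what such a check can look like, reusing the same IndexFileDeleter idiom that appears in TestIndexWriterDelete further down; the helper actually added by this commit may differ in detail.

// Hedged sketch, not part of the patch.  Placed in the
// org.apache.lucene.index test package so the package-private
// SegmentInfos and IndexFileDeleter classes are visible.
// Constructing the deleter with the keep-last-commit policy removes
// anything the current segments file does not reference, so the
// directory listing must be unchanged afterwards.
package org.apache.lucene.index;

import java.io.IOException;
import java.util.Arrays;
import junit.framework.Assert;
import org.apache.lucene.store.Directory;

class NoUnreferencedFilesCheck {
  static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
    String[] startFiles = dir.list();
    SegmentInfos infos = new SegmentInfos();
    infos.read(dir);
    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null);
    String[] endFiles = dir.list();
    Arrays.sort(startFiles);
    Arrays.sort(endFiles);
    Assert.assertTrue(message + ": before=" + Arrays.asList(startFiles)
                      + " after=" + Arrays.asList(endFiles),
                      Arrays.equals(startFiles, endFiles));
  }
}
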
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Tue Mar 13 02:06:22 2007
@@ -25,175 +25,259 @@
         "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };
 
-    Directory dir = new RAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(), true);
-    modifier.setUseCompoundFile(true);
-    modifier.setMaxBufferedDeleteTerms(1);
-
-    for (int i = 0; i < keywords.length; i++) {
-      Document doc = new Document();
-      doc.add(new Field("id", keywords[i], Field.Store.YES,
-          Field.Index.UN_TOKENIZED));
-      doc.add(new Field("country", unindexed[i], Field.Store.YES,
-          Field.Index.NO));
-      doc.add(new Field("contents", unstored[i], Field.Store.NO,
-          Field.Index.TOKENIZED));
-      doc
+    for(int pass=0;pass<2;pass++) {
+      boolean autoCommit = (0==pass);
+
+      Directory dir = new RAMDirectory();
+      IndexWriter modifier = new IndexWriter(dir, autoCommit,
+                                             new WhitespaceAnalyzer(), true);
+      modifier.setUseCompoundFile(true);
+      modifier.setMaxBufferedDeleteTerms(1);
+
+      for (int i = 0; i < keywords.length; i++) {
+        Document doc = new Document();
+        doc.add(new Field("id", keywords[i], Field.Store.YES,
+                          Field.Index.UN_TOKENIZED));
+        doc.add(new Field("country", unindexed[i], Field.Store.YES,
+                          Field.Index.NO));
+        doc.add(new Field("contents", unstored[i], Field.Store.NO,
+                          Field.Index.TOKENIZED));
+        doc
           .add(new Field("city", text[i], Field.Store.YES,
-              Field.Index.TOKENIZED));
-      modifier.addDocument(doc);
-    }
-    modifier.optimize();
+                         Field.Index.TOKENIZED));
+        modifier.addDocument(doc);
+      }
+      modifier.optimize();
 
-    Term term = new Term("city", "Amsterdam");
-    int hitCount = getHitCount(dir, term);
-    assertEquals(1, hitCount);
-    modifier.deleteDocuments(term);
-    hitCount = getHitCount(dir, term);
-    assertEquals(0, hitCount);
+      if (!autoCommit) {
+        modifier.close();
+      }
+
+      Term term = new Term("city", "Amsterdam");
+      int hitCount = getHitCount(dir, term);
+      assertEquals(1, hitCount);
+      if (!autoCommit) {
+        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
+        modifier.setUseCompoundFile(true);
+      }
+      modifier.deleteDocuments(term);
+      if (!autoCommit) {
+        modifier.close();
+      }
+      hitCount = getHitCount(dir, term);
+      assertEquals(0, hitCount);
 
-    modifier.close();
+      if (autoCommit) {
+        modifier.close();
+      }
+      dir.close();
+    }
   }
 
   // test when delete terms only apply to disk segments
   public void testNonRAMDelete() throws IOException {
-    Directory dir = new RAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(), true);
-    modifier.setMaxBufferedDocs(2);
-    modifier.setMaxBufferedDeleteTerms(2);
+    for(int pass=0;pass<2;pass++) {
+      boolean autoCommit = (0==pass);
 
-    int id = 0;
-    int value = 100;
+      Directory dir = new RAMDirectory();
+      IndexWriter modifier = new IndexWriter(dir, autoCommit,
+                                             new WhitespaceAnalyzer(), true);
+      modifier.setMaxBufferedDocs(2);
+      modifier.setMaxBufferedDeleteTerms(2);
 
-    for (int i = 0; i < 7; i++) {
-      addDoc(modifier, ++id, value);
-    }
-    modifier.flush();
+      int id = 0;
+      int value = 100;
+
+      for (int i = 0; i < 7; i++) {
+        addDoc(modifier, ++id, value);
+      }
+      modifier.flush();
 
-    assertEquals(0, modifier.getRamSegmentCount());
-    assertTrue(0 < modifier.getSegmentCount());
+      assertEquals(0, modifier.getRamSegmentCount());
+      assertTrue(0 < modifier.getSegmentCount());
 
-    IndexReader reader = IndexReader.open(dir);
-    assertEquals(7, reader.numDocs());
-    reader.close();
+      if (!autoCommit) {
+        modifier.close();
+      }
+
+      IndexReader reader = IndexReader.open(dir);
+      assertEquals(7, reader.numDocs());
+      reader.close();
+
+      if (!autoCommit) {
+        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
+        modifier.setMaxBufferedDocs(2);
+        modifier.setMaxBufferedDeleteTerms(2);
+      }
 
-    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
 
-    reader = IndexReader.open(dir);
-    assertEquals(0, reader.numDocs());
-    reader.close();
+      if (!autoCommit) {
+        modifier.close();
+      }
 
-    modifier.close();
+      reader = IndexReader.open(dir);
+      assertEquals(0, reader.numDocs());
+      reader.close();
+      if (autoCommit) {
+        modifier.close();
+      }
+      dir.close();
+    }
   }
 
   // test when delete terms only apply to ram segments
   public void testRAMDeletes() throws IOException {
-    Directory dir = new RAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(), true);
-    modifier.setMaxBufferedDocs(4);
-    modifier.setMaxBufferedDeleteTerms(4);
-
-    int id = 0;
-    int value = 100;
-
-    addDoc(modifier, ++id, value);
-    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-    addDoc(modifier, ++id, value);
-    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-
-    assertEquals(2, modifier.getNumBufferedDeleteTerms());
-    assertEquals(1, modifier.getBufferedDeleteTermsSize());
-
-    addDoc(modifier, ++id, value);
-    assertEquals(0, modifier.getSegmentCount());
-    modifier.flush();
-
-    IndexReader reader = IndexReader.open(dir);
-    assertEquals(1, reader.numDocs());
-
-    int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
-    assertEquals(1, hitCount);
-    reader.close();
+    for(int pass=0;pass<2;pass++) {
+      boolean autoCommit = (0==pass);
+      Directory dir = new RAMDirectory();
+      IndexWriter modifier = new IndexWriter(dir, autoCommit,
+                                             new WhitespaceAnalyzer(), true);
+      modifier.setMaxBufferedDocs(4);
+      modifier.setMaxBufferedDeleteTerms(4);
+
+      int id = 0;
+      int value = 100;
+
+      addDoc(modifier, ++id, value);
+      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+      addDoc(modifier, ++id, value);
+      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+
+      assertEquals(2, modifier.getNumBufferedDeleteTerms());
+      assertEquals(1, modifier.getBufferedDeleteTermsSize());
+
+      addDoc(modifier, ++id, value);
+      assertEquals(0, modifier.getSegmentCount());
+      modifier.flush();
 
-    modifier.close();
+      if (!autoCommit) {
+        modifier.close();
+      }
+
+      IndexReader reader = IndexReader.open(dir);
+      assertEquals(1, reader.numDocs());
+
+      int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
+      assertEquals(1, hitCount);
+      reader.close();
+      if (autoCommit) {
+        modifier.close();
+      }
+      dir.close();
+    }
   }
 
   // test when delete terms apply to both disk and ram segments
   public void testBothDeletes() throws IOException {
-    Directory dir = new RAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(), true);
-    modifier.setMaxBufferedDocs(100);
-    modifier.setMaxBufferedDeleteTerms(100);
+    for(int pass=0;pass<2;pass++) {
+      boolean autoCommit = (0==pass);
 
-    int id = 0;
-    int value = 100;
+      Directory dir = new RAMDirectory();
+      IndexWriter modifier = new IndexWriter(dir, autoCommit,
+                                             new WhitespaceAnalyzer(), true);
+      modifier.setMaxBufferedDocs(100);
+      modifier.setMaxBufferedDeleteTerms(100);
 
-    for (int i = 0; i < 5; i++) {
-      addDoc(modifier, ++id, value);
-    }
+      int id = 0;
+      int value = 100;
 
-    value = 200;
-    for (int i = 0; i < 5; i++) {
-      addDoc(modifier, ++id, value);
-    }
-    modifier.flush();
+      for (int i = 0; i < 5; i++) {
+        addDoc(modifier, ++id, value);
+      }
 
-    for (int i = 0; i < 5; i++) {
-      addDoc(modifier, ++id, value);
-    }
-    modifier.deleteDocuments(new Term("value", String.valueOf(value)));
-    modifier.flush();
+      value = 200;
+      for (int i = 0; i < 5; i++) {
+        addDoc(modifier, ++id, value);
+      }
+      modifier.flush();
 
-    IndexReader reader = IndexReader.open(dir);
-    assertEquals(5, reader.numDocs());
+      for (int i = 0; i < 5; i++) {
+        addDoc(modifier, ++id, value);
+      }
+      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
+
+      modifier.flush();
+      if (!autoCommit) {
+        modifier.close();
+      }
 
-    modifier.close();
+      IndexReader reader = IndexReader.open(dir);
+      assertEquals(5, reader.numDocs());
+      reader.close();
+      if (autoCommit) {
+        modifier.close();
+      }
+      dir.close();
+    }
   }
 
   // test that batched delete terms are flushed together
   public void testBatchDeletes() throws IOException {
-    Directory dir = new RAMDirectory();
-    IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(), true);
-    modifier.setMaxBufferedDocs(2);
-    modifier.setMaxBufferedDeleteTerms(2);
+    for(int pass=0;pass<2;pass++) {
+      boolean autoCommit = (0==pass);
+      Directory dir = new RAMDirectory();
+      IndexWriter modifier = new IndexWriter(dir, autoCommit,
+                                             new WhitespaceAnalyzer(), true);
+      modifier.setMaxBufferedDocs(2);
+      modifier.setMaxBufferedDeleteTerms(2);
 
-    int id = 0;
-    int value = 100;
+      int id = 0;
+      int value = 100;
 
-    for (int i = 0; i < 7; i++) {
-      addDoc(modifier, ++id, value);
-    }
-    modifier.flush();
+      for (int i = 0; i < 7; i++) {
+        addDoc(modifier, ++id, value);
+      }
+      modifier.flush();
+      if (!autoCommit) {
+        modifier.close();
+      }
 
-    IndexReader reader = IndexReader.open(dir);
-    assertEquals(7, reader.numDocs());
-    reader.close();
-
-    id = 0;
-    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
-    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
-
-    reader = IndexReader.open(dir);
-    assertEquals(5, reader.numDocs());
-    reader.close();
-
-    Term[] terms = new Term[3];
-    for (int i = 0; i < terms.length; i++) {
-      terms[i] = new Term("id", String.valueOf(++id));
-    }
-    modifier.deleteDocuments(terms);
+      IndexReader reader = IndexReader.open(dir);
+      assertEquals(7, reader.numDocs());
+      reader.close();
+      
+      if (!autoCommit) {
+        modifier = new IndexWriter(dir, autoCommit,
+                                   new WhitespaceAnalyzer());
+        modifier.setMaxBufferedDocs(2);
+        modifier.setMaxBufferedDeleteTerms(2);
+      }
+
+      id = 0;
+      modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
+      modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
 
-    reader = IndexReader.open(dir);
-    assertEquals(2, reader.numDocs());
-    reader.close();
+      if (!autoCommit) {
+        modifier.close();
+      }
 
-    modifier.close();
+      reader = IndexReader.open(dir);
+      assertEquals(5, reader.numDocs());
+      reader.close();
+
+      Term[] terms = new Term[3];
+      for (int i = 0; i < terms.length; i++) {
+        terms[i] = new Term("id", String.valueOf(++id));
+      }
+      if (!autoCommit) {
+        modifier = new IndexWriter(dir, autoCommit,
+                                   new WhitespaceAnalyzer());
+        modifier.setMaxBufferedDocs(2);
+        modifier.setMaxBufferedDeleteTerms(2);
+      }
+      modifier.deleteDocuments(terms);
+      if (!autoCommit) {
+        modifier.close();
+      }
+      reader = IndexReader.open(dir);
+      assertEquals(2, reader.numDocs());
+      reader.close();
+
+      if (autoCommit) {
+        modifier.close();
+      }
+      dir.close();
+    }
   }
 
   private void addDoc(IndexWriter modifier, int id, int value)
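The delete tests above lean on addDoc and getHitCount helpers whose bodies fall outside this hunk.  A hypothetical sketch, inferred only from the fields and terms the tests query ("id" and "value"); the real helpers in TestIndexWriterDelete may index additional fields:

  // Hypothetical sketches, assumed to live inside the TestIndexWriterDelete
  // test case; inferred from the calls above, not copied from the patch.
  private void addDoc(IndexWriter modifier, int id, int value) throws IOException {
    Document doc = new Document();
    doc.add(new Field("id", String.valueOf(id), Field.Store.YES, Field.Index.UN_TOKENIZED));
    doc.add(new Field("value", String.valueOf(value), Field.Store.NO, Field.Index.UN_TOKENIZED));
    modifier.addDocument(doc);
  }

  private int getHitCount(Directory dir, Term term) throws IOException {
    IndexSearcher searcher = new IndexSearcher(dir);
    int hitCount = searcher.search(new TermQuery(term)).length();
    searcher.close();
    return hitCount;
  }
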
@@ -233,201 +317,203 @@
     int START_COUNT = 157;
     int END_COUNT = 144;
 
-    // First build up a starting index:
-    RAMDirectory startDir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(),
-        true);
-    for (int i = 0; i < 157; i++) {
-      Document d = new Document();
-      d.add(new Field("id", Integer.toString(i), Field.Store.YES,
-          Field.Index.UN_TOKENIZED));
-      d.add(new Field("content", "aaa " + i, Field.Store.NO,
-          Field.Index.TOKENIZED));
-      writer.addDocument(d);
-    }
-    writer.close();
+    for(int pass=0;pass<2;pass++) {
+      boolean autoCommit = (0==pass);
 
-    long diskUsage = startDir.sizeInBytes();
-    long diskFree = diskUsage + 10;
+      // First build up a starting index:
+      RAMDirectory startDir = new RAMDirectory();
+      IndexWriter writer = new IndexWriter(startDir, autoCommit,
+                                           new WhitespaceAnalyzer(), true);
+      for (int i = 0; i < 157; i++) {
+        Document d = new Document();
+        d.add(new Field("id", Integer.toString(i), Field.Store.YES,
+                        Field.Index.UN_TOKENIZED));
+        d.add(new Field("content", "aaa " + i, Field.Store.NO,
+                        Field.Index.TOKENIZED));
+        writer.addDocument(d);
+      }
+      writer.close();
 
-    IOException err = null;
+      long diskUsage = startDir.sizeInBytes();
+      long diskFree = diskUsage + 10;
 
-    boolean done = false;
+      IOException err = null;
 
-    // Iterate w/ ever increasing free disk space:
-    while (!done) {
-      MockRAMDirectory dir = new MockRAMDirectory(startDir);
-      IndexWriter modifier = new IndexWriter(dir,
-          new WhitespaceAnalyzer(), false);
+      boolean done = false;
 
-      modifier.setMaxBufferedDocs(1000); // use flush or close
-      modifier.setMaxBufferedDeleteTerms(1000); // use flush or close
+      // Iterate w/ ever increasing free disk space:
+      while (!done) {
+        MockRAMDirectory dir = new MockRAMDirectory(startDir);
+        IndexWriter modifier = new IndexWriter(dir, autoCommit,
+                                               new WhitespaceAnalyzer());
 
-      // For each disk size, first try to commit against
-      // dir that will hit random IOExceptions & disk
-      // full; after, give it infinite disk space & turn
-      // off random IOExceptions & retry w/ same reader:
-      boolean success = false;
+        modifier.setMaxBufferedDocs(1000); // use flush or close
+        modifier.setMaxBufferedDeleteTerms(1000); // use flush or close
 
-      for (int x = 0; x < 2; x++) {
+        // For each disk size, first try to commit against
+        // dir that will hit random IOExceptions & disk
+        // full; after, give it infinite disk space & turn
+        // off random IOExceptions & retry w/ same writer:
+        boolean success = false;
 
-        double rate = 0.1;
-        double diskRatio = ((double)diskFree) / diskUsage;
-        long thisDiskFree;
-        String testName;
+        for (int x = 0; x < 2; x++) {
 
-        if (0 == x) {
-          thisDiskFree = diskFree;
-          if (diskRatio >= 2.0) {
-            rate /= 2;
-          }
-          if (diskRatio >= 4.0) {
-            rate /= 2;
-          }
-          if (diskRatio >= 6.0) {
-            rate = 0.0;
-          }
-          if (debug) {
-            System.out.println("\ncycle: " + diskFree + " bytes");
-          }
-          testName = "disk full during reader.close() @ " + thisDiskFree
+          double rate = 0.1;
+          double diskRatio = ((double)diskFree) / diskUsage;
+          long thisDiskFree;
+          String testName;
+
+          if (0 == x) {
+            thisDiskFree = diskFree;
+            if (diskRatio >= 2.0) {
+              rate /= 2;
+            }
+            if (diskRatio >= 4.0) {
+              rate /= 2;
+            }
+            if (diskRatio >= 6.0) {
+              rate = 0.0;
+            }
+            if (debug) {
+              System.out.println("\ncycle: " + diskFree + " bytes");
+            }
+            testName = "disk full during reader.close() @ " + thisDiskFree
               + " bytes";
-        } else {
-          thisDiskFree = 0;
-          rate = 0.0;
-          if (debug) {
-            System.out.println("\ncycle: same writer: unlimited disk space");
+          } else {
+            thisDiskFree = 0;
+            rate = 0.0;
+            if (debug) {
+              System.out.println("\ncycle: same writer: unlimited disk space");
+            }
+            testName = "reader re-use after disk full";
           }
-          testName = "reader re-use after disk full";
-        }
 
-        dir.setMaxSizeInBytes(thisDiskFree);
-        dir.setRandomIOExceptionRate(rate, diskFree);
+          dir.setMaxSizeInBytes(thisDiskFree);
+          dir.setRandomIOExceptionRate(rate, diskFree);
 
-        try {
-          if (0 == x) {
-            int docId = 12;
-            for (int i = 0; i < 13; i++) {
-              if (updates) {
-                Document d = new Document();
-                d.add(new Field("id", Integer.toString(i), Field.Store.YES,
-                    Field.Index.UN_TOKENIZED));
-                d.add(new Field("content", "bbb " + i, Field.Store.NO,
-                    Field.Index.TOKENIZED));
-                modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
-              } else { // deletes
-                modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
-                // modifier.setNorm(docId, "contents", (float)2.0);
+          try {
+            if (0 == x) {
+              int docId = 12;
+              for (int i = 0; i < 13; i++) {
+                if (updates) {
+                  Document d = new Document();
+                  d.add(new Field("id", Integer.toString(i), Field.Store.YES,
+                                  Field.Index.UN_TOKENIZED));
+                  d.add(new Field("content", "bbb " + i, Field.Store.NO,
+                                  Field.Index.TOKENIZED));
+                  modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
+                } else { // deletes
+                  modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
+                  // modifier.setNorm(docId, "contents", (float)2.0);
+                }
+                docId += 12;
               }
-              docId += 12;
+            }
+            modifier.close();
+            success = true;
+            if (0 == x) {
+              done = true;
             }
           }
-          modifier.close();
-          success = true;
-          if (0 == x) {
-            done = true;
+          catch (IOException e) {
+            if (debug) {
+              System.out.println("  hit IOException: " + e);
+            }
+            err = e;
+            if (1 == x) {
+              e.printStackTrace();
+              fail(testName + " hit IOException after disk space was freed up");
+            }
           }
-        }
-        catch (IOException e) {
-          if (debug) {
-            System.out.println("  hit IOException: " + e);
+
+          // Whether we succeeded or failed, check that all
+          // un-referenced files were in fact deleted (ie,
+          // we did not create garbage). Just create a
+          // new IndexFileDeleter, have it delete
+          // unreferenced files, then verify that in fact
+          // no files were deleted:
+          String[] startFiles = dir.list();
+          SegmentInfos infos = new SegmentInfos();
+          infos.read(dir);
+          IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null);
+          String[] endFiles = dir.list();
+
+          Arrays.sort(startFiles);
+          Arrays.sort(endFiles);
+
+          // for(int i=0;i<startFiles.length;i++) {
+          // System.out.println(" startFiles: " + i + ": " + startFiles[i]);
+          // }
+
+          if (!Arrays.equals(startFiles, endFiles)) {
+            String successStr;
+            if (success) {
+              successStr = "success";
+            } else {
+              successStr = "IOException";
+              err.printStackTrace();
+            }
+            fail("reader.close() failed to delete unreferenced files after "
+                 + successStr + " (" + diskFree + " bytes): before delete:\n    "
+                 + arrayToString(startFiles) + "\n  after delete:\n    "
+                 + arrayToString(endFiles));
+          }
+
+          // Finally, verify index is not corrupt, and, if
+          // we succeeded, we see all docs changed, and if
+          // we failed, we see either all docs or no docs
+          // changed (transactional semantics):
+          IndexReader newReader = null;
+          try {
+            newReader = IndexReader.open(dir);
           }
-          err = e;
-          if (1 == x) {
+          catch (IOException e) {
             e.printStackTrace();
-            fail(testName + " hit IOException after disk space was freed up");
+            fail(testName
+                 + ":exception when creating IndexReader after disk full during close: "
+                 + e);
           }
-        }
 
-        // Whether we succeeded or failed, check that all
-        // un-referenced files were in fact deleted (ie,
-        // we did not create garbage). Just create a
-        // new IndexFileDeleter, have it delete
-        // unreferenced files, then verify that in fact
-        // no files were deleted:
-        String[] startFiles = dir.list();
-        SegmentInfos infos = new SegmentInfos();
-        infos.read(dir);
-        IndexFileDeleter d = new IndexFileDeleter(infos, dir);
-        d.findDeletableFiles();
-        d.deleteFiles();
-        String[] endFiles = dir.list();
-
-        Arrays.sort(startFiles);
-        Arrays.sort(endFiles);
-
-        // for(int i=0;i<startFiles.length;i++) {
-        // System.out.println(" startFiles: " + i + ": " + startFiles[i]);
-        // }
-
-        if (!Arrays.equals(startFiles, endFiles)) {
-          String successStr;
+          IndexSearcher searcher = new IndexSearcher(newReader);
+          Hits hits = null;
+          try {
+            hits = searcher.search(new TermQuery(searchTerm));
+          }
+          catch (IOException e) {
+            e.printStackTrace();
+            fail(testName + ": exception when searching: " + e);
+          }
+          int result2 = hits.length();
           if (success) {
-            successStr = "success";
+            if (result2 != END_COUNT) {
+              fail(testName
+                   + ": method did not throw exception but hits.length for search on term 'aaa' is "
+                   + result2 + " instead of expected " + END_COUNT);
+            }
           } else {
-            successStr = "IOException";
-            err.printStackTrace();
+            // On hitting exception we still may have added
+            // all docs:
+            if (result2 != START_COUNT && result2 != END_COUNT) {
+              err.printStackTrace();
+              fail(testName
+                   + ": method did throw exception but hits.length for search on term 'aaa' is "
+                   + result2 + " instead of expected " + START_COUNT);
+            }
           }
-          fail("reader.close() failed to delete unreferenced files after "
-              + successStr + " (" + diskFree + " bytes): before delete:\n    "
-              + arrayToString(startFiles) + "\n  after delete:\n    "
-              + arrayToString(endFiles));
-        }
 
-        // Finally, verify index is not corrupt, and, if
-        // we succeeded, we see all docs changed, and if
-        // we failed, we see either all docs or no docs
-        // changed (transactional semantics):
-        IndexReader newReader = null;
-        try {
-          newReader = IndexReader.open(dir);
-        }
-        catch (IOException e) {
-          e.printStackTrace();
-          fail(testName
-              + ":exception when creating IndexReader after disk full during close: "
-              + e);
-        }
+          searcher.close();
+          newReader.close();
 
-        IndexSearcher searcher = new IndexSearcher(newReader);
-        Hits hits = null;
-        try {
-          hits = searcher.search(new TermQuery(searchTerm));
-        }
-        catch (IOException e) {
-          e.printStackTrace();
-          fail(testName + ": exception when searching: " + e);
-        }
-        int result2 = hits.length();
-        if (success) {
-          if (result2 != END_COUNT) {
-            fail(testName
-                + ": method did not throw exception but hits.length for search on term 'aaa' is "
-                + result2 + " instead of expected " + END_COUNT);
-          }
-        } else {
-          // On hitting exception we still may have added
-          // all docs:
-          if (result2 != START_COUNT && result2 != END_COUNT) {
-            err.printStackTrace();
-            fail(testName
-                + ": method did throw exception but hits.length for search on term 'aaa' is "
-                + result2 + " instead of expected " + START_COUNT);
+          if (result2 == END_COUNT) {
+            break;
           }
         }
 
-        searcher.close();
-        newReader.close();
+        dir.close();
 
-        if (result2 == END_COUNT) {
-          break;
-        }
+        // Try again with 10 more bytes of free space:
+        diskFree += 10;
       }
-
-      dir.close();
-
-      // Try again with 10 more bytes of free space:
-      diskFree += 10;
     }
   }
 

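For readers following the autoCommit changes, here is a self-contained sketch, not part of this patch, of the contract the tests above exercise: with autoCommit=false, buffered changes stay invisible to IndexReaders until close(), which commits them (abort() would discard them instead).

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;

public class AutoCommitSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();

    // Create the index with one committed document (autoCommit=true):
    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
    writer.addDocument(newDoc());
    writer.close();

    // Re-open with autoCommit=false: changes are buffered until close():
    writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
    writer.addDocument(newDoc());

    IndexReader reader = IndexReader.open(dir);
    System.out.println(reader.numDocs());   // still 1: nothing committed yet
    reader.close();

    writer.close();                         // commit on close
    reader = IndexReader.open(dir);
    System.out.println(reader.numDocs());   // now 2
    reader.close();
  }

  private static Document newDoc() {
    Document doc = new Document();
    doc.add(new Field("contents", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
    return doc;
  }
}
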
Modified: lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java?view=diff&rev=517599&r1=517598&r2=517599
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java Tue Mar 13 02:06:22 2007
@@ -68,7 +68,7 @@
       if (realUsage > dir.maxUsedSize) {
         dir.maxUsedSize = realUsage;
       }
-      throw new IOException("fake disk full at " + dir.sizeInBytes() + " bytes");
+      throw new IOException("fake disk full at " + dir.getRecomputedActualSizeInBytes() + " bytes");
     } else {
       super.flushBuffer(src, len);
     }
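
A short usage sketch of the disk-full simulation that produces this "fake disk full" message.  It uses only MockRAMDirectory methods already exercised by the tests above (setMaxSizeInBytes, setRandomIOExceptionRate); the concrete sizes and rates below are arbitrary, and the exact point of failure is not deterministic.

import java.io.IOException;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.MockRAMDirectory;

public class DiskFullSketch {
  public static void main(String[] args) {
    MockRAMDirectory dir = new MockRAMDirectory();
    dir.setMaxSizeInBytes(16 * 1024);          // simulate a very small disk
    dir.setRandomIOExceptionRate(0.1, 1000);   // also inject random IOExceptions, as the tests do
    try {
      IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true);
      for (int i = 0; i < 10000; i++) {
        Document doc = new Document();
        doc.add(new Field("contents", "aaa " + i, Field.Store.NO, Field.Index.TOKENIZED));
        writer.addDocument(doc);
      }
      writer.close();
    } catch (IOException e) {
      // MockRAMOutputStream.flushBuffer throws "fake disk full at N bytes"
      System.out.println("hit simulated disk full: " + e.getMessage());
    }
  }
}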