Posted to commits@lucene.apache.org by mi...@apache.org on 2010/12/11 12:07:02 UTC

svn commit: r1044635 [2/2] - in /lucene/dev/trunk/lucene/src: java/org/apache/lucene/index/ test/org/apache/lucene/ test/org/apache/lucene/index/ test/org/apache/lucene/search/ test/org/apache/lucene/store/ test/org/apache/lucene/util/

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java Sat Dec 11 11:07:01 2010
@@ -35,6 +35,7 @@ import org.apache.lucene.util.Bits;
 import java.io.IOException;
 import java.io.Closeable;
 import java.io.PrintStream;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.List;
 import java.util.Collection;
 import java.util.ArrayList;
@@ -201,9 +202,8 @@ public class IndexWriter implements Clos
   private final static int MERGE_READ_BUFFER_SIZE = 4096;
 
   // Used for printing messages
-  private static Object MESSAGE_ID_LOCK = new Object();
-  private static int MESSAGE_ID = 0;
-  private int messageID = -1;
+  private static final AtomicInteger MESSAGE_ID = new AtomicInteger();
+  private int messageID = MESSAGE_ID.getAndIncrement();
   volatile private boolean hitOOM;
 
   private final Directory directory;  // where this index resides
@@ -218,7 +218,7 @@ public class IndexWriter implements Clos
   volatile SegmentInfos pendingCommit;            // set when a commit is pending (after prepareCommit() & before commit())
   volatile long pendingCommitChangeCount;
 
-  private final SegmentInfos segmentInfos;       // the segments
+  final SegmentInfos segmentInfos;       // the segments
 
   private DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
@@ -245,10 +245,11 @@ public class IndexWriter implements Clos
   private long mergeGen;
   private boolean stopMerges;
 
-  private int flushCount;
-  private int flushDeletesCount;
+  private final AtomicInteger flushCount = new AtomicInteger();
+  private final AtomicInteger flushDeletesCount = new AtomicInteger();
 
   final ReaderPool readerPool = new ReaderPool();
+  final BufferedDeletes bufferedDeletes;
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -402,20 +403,26 @@ public class IndexWriter implements Clos
     /**
      * Release the segment reader (i.e. decRef it and close if there
      * are no more references).
+     * @return true if this release altered the index (eg
+     * the SegmentReader had pending changes to del docs and
+     * was closed).  Caller must call checkpoint() if so.
      * @param sr
      * @throws IOException
      */
-    public synchronized void release(SegmentReader sr) throws IOException {
-      release(sr, false);
+    public synchronized boolean release(SegmentReader sr) throws IOException {
+      return release(sr, false);
     }
     
     /**
      * Release the segment reader (i.e. decRef it and close if there
      * are no more references).
+     * @return true if this release altered the index (eg
+     * the SegmentReader had pending changes to del docs and
+     * was closed).  Caller must call checkpoint() if so.
      * @param sr
      * @throws IOException
      */
-    public synchronized void release(SegmentReader sr, boolean drop) throws IOException {
+    public synchronized boolean release(SegmentReader sr, boolean drop) throws IOException {
 
       final boolean pooled = readerMap.containsKey(sr.getSegmentInfo());
 
@@ -446,13 +453,10 @@ public class IndexWriter implements Clos
         // not pooling readers, we release it:
         readerMap.remove(sr.getSegmentInfo());
 
-        if (hasChanges) {
-          // Must checkpoint w/ deleter, because this
-          // segment reader will have created new _X_N.del
-          // file.
-          deleter.checkpoint(segmentInfos, false);
-        }
+        return hasChanges;
       }
+
+      return false;
     }
     
     /** Remove all our references to readers, and commits
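The release() methods above now return a boolean instead of checkpointing internally, so the caller becomes responsible for checkpointing when the released reader wrote new deletes. A minimal sketch of the calling pattern this implies (variable names are illustrative; the same pattern appears further down in this commit where the merged reader is released):

    // inside IndexWriter, which owns readerPool and checkpoint():
    synchronized(this) {
      if (readerPool.release(reader)) {
        // the released SegmentReader had pending deletes and wrote a new
        // _X_N.del file, so checkpoint so IndexFileDeleter tracks it:
        checkpoint();
      }
    }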
@@ -600,6 +604,8 @@ public class IndexWriter implements Clos
     }
   }
   
+  
+  
   /**
    * Obtain the number of deleted docs for a pooled reader.
    * If the reader isn't being pooled, the segmentInfo's 
@@ -646,15 +652,6 @@ public class IndexWriter implements Clos
       infoStream.println("IW " + messageID + " [" + new Date() + "; " + Thread.currentThread().getName() + "]: " + message);
   }
 
-  private synchronized void setMessageID(PrintStream infoStream) {
-    if (infoStream != null && messageID == -1) {
-      synchronized(MESSAGE_ID_LOCK) {
-        messageID = MESSAGE_ID++;
-      }
-    }
-    this.infoStream = infoStream;
-  }
-
   CodecProvider codecs;
 
   /**
@@ -690,7 +687,7 @@ public class IndexWriter implements Clos
     config = (IndexWriterConfig) conf.clone();
     directory = d;
     analyzer = conf.getAnalyzer();
-    setMessageID(defaultInfoStream);
+    infoStream = defaultInfoStream;
     maxFieldLength = conf.getMaxFieldLength();
     termIndexInterval = conf.getTermIndexInterval();
     mergePolicy = conf.getMergePolicy();
@@ -699,6 +696,8 @@ public class IndexWriter implements Clos
     mergedSegmentWarmer = conf.getMergedSegmentWarmer();
     codecs = conf.getCodecProvider();
     
+    bufferedDeletes = new BufferedDeletes(messageID);
+    bufferedDeletes.setInfoStream(infoStream);
     poolReaders = conf.getReaderPooling();
 
     OpenMode mode = conf.getOpenMode();
@@ -766,7 +765,7 @@ public class IndexWriter implements Clos
 
       setRollbackSegmentInfos(segmentInfos);
 
-      docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos());
+      docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos(), bufferedDeletes);
       docWriter.setInfoStream(infoStream);
       docWriter.setMaxFieldLength(maxFieldLength);
 
@@ -785,7 +784,6 @@ public class IndexWriter implements Clos
         segmentInfos.changed();
       }
 
-      docWriter.setMaxBufferedDeleteTerms(conf.getMaxBufferedDeleteTerms());
       docWriter.setRAMBufferSizeMB(conf.getRAMBufferSizeMB());
       docWriter.setMaxBufferedDocs(conf.getMaxBufferedDocs());
       pushMaxBufferedDocs();
@@ -896,9 +894,10 @@ public class IndexWriter implements Clos
    */
   public void setInfoStream(PrintStream infoStream) {
     ensureOpen();
-    setMessageID(infoStream);
+    this.infoStream = infoStream;
     docWriter.setInfoStream(infoStream);
     deleter.setInfoStream(infoStream);
+    bufferedDeletes.setInfoStream(infoStream);
     if (infoStream != null)
       messageState();
   }
@@ -1029,8 +1028,6 @@ public class IndexWriter implements Clos
 
   private void closeInternal(boolean waitForMerges) throws CorruptIndexException, IOException {
 
-    docWriter.pauseAllThreads();
-
     try {
       if (infoStream != null)
         message("now flush at close");
@@ -1085,8 +1082,6 @@ public class IndexWriter implements Clos
         closing = false;
         notifyAll();
         if (!closed) {
-          if (docWriter != null)
-            docWriter.resumeAllThreads();
           if (infoStream != null)
             message("hit exception while closing");
         }
@@ -1094,87 +1089,6 @@ public class IndexWriter implements Clos
     }
   }
 
-  /** Tells the docWriter to close its currently open shared
-   *  doc stores (stored fields & vectors files).
-   *  Return value specifies whether new doc store files are compound or not.
-   */
-  private synchronized boolean flushDocStores() throws IOException {
-
-    if (infoStream != null) {
-      message("flushDocStores segment=" + docWriter.getDocStoreSegment());
-    }
-
-    boolean useCompoundDocStore = false;
-    if (infoStream != null) {
-      message("closeDocStores segment=" + docWriter.getDocStoreSegment());
-    }
-
-    String docStoreSegment;
-
-    boolean success = false;
-    try {
-      docStoreSegment = docWriter.closeDocStore();
-      success = true;
-    } finally {
-      if (!success && infoStream != null) {
-        message("hit exception closing doc store segment");
-      }
-    }
-
-    if (infoStream != null) {
-      message("flushDocStores files=" + docWriter.closedFiles());
-    }
-
-    useCompoundDocStore = mergePolicy.useCompoundDocStore(segmentInfos);
-      
-    if (useCompoundDocStore && docStoreSegment != null && docWriter.closedFiles().size() != 0) {
-      // Now build compound doc store file
-
-      if (infoStream != null) {
-        message("create compound file " + IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.COMPOUND_FILE_STORE_EXTENSION));
-      }
-
-      success = false;
-
-      final int numSegments = segmentInfos.size();
-      final String compoundFileName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
-
-      try {
-        CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
-        for (final String file :  docWriter.closedFiles() ) {
-          cfsWriter.addFile(file);
-        }
-      
-        // Perform the merge
-        cfsWriter.close();
-        success = true;
-
-      } finally {
-        if (!success) {
-          if (infoStream != null)
-            message("hit exception building compound file doc store for segment " + docStoreSegment);
-          deleter.deleteFile(compoundFileName);
-          docWriter.abort();
-        }
-      }
-
-      for(int i=0;i<numSegments;i++) {
-        SegmentInfo si = segmentInfos.info(i);
-        if (si.getDocStoreOffset() != -1 &&
-            si.getDocStoreSegment().equals(docStoreSegment))
-          si.setDocStoreIsCompoundFile(true);
-      }
-
-      checkpoint();
-
-      // In case the files we just merged into a CFS were
-      // not previously checkpointed:
-      deleter.deleteNewFiles(docWriter.closedFiles());
-    }
-
-    return useCompoundDocStore;
-  }
-
   /** Returns the Directory used by this index. */
   public Directory getDirectory() {     
     // Pass false because the flush during closing calls getDirectory
@@ -1226,8 +1140,12 @@ public class IndexWriter implements Clos
 
   public synchronized boolean hasDeletions() throws IOException {
     ensureOpen();
-    if (docWriter.hasDeletes())
+    if (bufferedDeletes.any()) {
+      return true;
+    }
+    if (docWriter.anyDeletions()) {
       return true;
+    }
     for (int i = 0; i < segmentInfos.size(); i++)
       if (segmentInfos.info(i).hasDeletions())
         return true;
@@ -1321,13 +1239,14 @@ public class IndexWriter implements Clos
     boolean success = false;
     try {
       try {
-        doFlush = docWriter.addDocument(doc, analyzer);
+        doFlush = docWriter.updateDocument(doc, analyzer, null);
         success = true;
       } finally {
         if (!success) {
 
-          if (infoStream != null)
+          if (infoStream != null) {
             message("hit exception adding document");
+          }
 
           synchronized (this) {
             // If docWriter has some aborted files that were
@@ -1361,9 +1280,9 @@ public class IndexWriter implements Clos
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerm(term);
-      if (doFlush)
+      if (docWriter.deleteTerm(term, false)) {
         flush(true, false, false);
+      }
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteDocuments(Term)");
     }
@@ -1385,9 +1304,9 @@ public class IndexWriter implements Clos
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerms(terms);
-      if (doFlush)
+      if (docWriter.deleteTerms(terms)) {
         flush(true, false, false);
+      }
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteDocuments(Term..)");
     }
@@ -1406,9 +1325,13 @@ public class IndexWriter implements Clos
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQuery(query);
-    if (doFlush)
-      flush(true, false, false);
+    try {
+      if (docWriter.deleteQuery(query)) {
+        flush(true, false, false);
+      }
+    } catch (OutOfMemoryError oom) {
+      handleOOM(oom, "deleteDocuments(Query)");
+    }
   }
 
   /**
@@ -1426,9 +1349,13 @@ public class IndexWriter implements Clos
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQueries(queries);
-    if (doFlush)
-      flush(true, false, false);
+    try {
+      if (docWriter.deleteQueries(queries)) {
+        flush(true, false, false);
+      }
+    } catch (OutOfMemoryError oom) {
+      handleOOM(oom, "deleteDocuments(Query..)");
+    }
   }
 
   /**
@@ -1478,25 +1405,30 @@ public class IndexWriter implements Clos
       boolean doFlush = false;
       boolean success = false;
       try {
-        doFlush = docWriter.updateDocument(term, doc, analyzer);
+        doFlush = docWriter.updateDocument(doc, analyzer, term);
         success = true;
       } finally {
         if (!success) {
 
-          if (infoStream != null)
+          if (infoStream != null) {
             message("hit exception updating document");
+          }
 
           synchronized (this) {
             // If docWriter has some aborted files that were
             // never incref'd, then we clean them up here
-            final Collection<String> files = docWriter.abortedFiles();
-            if (files != null)
-              deleter.deleteNewFiles(files);
+            if (docWriter != null) {
+              final Collection<String> files = docWriter.abortedFiles();
+              if (files != null) {
+                deleter.deleteNewFiles(files);
+              }
+            }
           }
         }
       }
-      if (doFlush)
+      if (doFlush) {
         flush(true, false, false);
+      }
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "updateDocument");
     }
@@ -1522,13 +1454,13 @@ public class IndexWriter implements Clos
   }
 
   // for test purpose
-  final synchronized int getFlushCount() {
-    return flushCount;
+  final int getFlushCount() {
+    return flushCount.get();
   }
 
   // for test purpose
-  final synchronized int getFlushDeletesCount() {
-    return flushDeletesCount;
+  final int getFlushDeletesCount() {
+    return flushDeletesCount.get();
   }
 
   final String newSegmentName() {
@@ -1660,8 +1592,10 @@ public class IndexWriter implements Clos
     if (maxNumSegments < 1)
       throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
 
-    if (infoStream != null)
+    if (infoStream != null) {
       message("optimize: index now " + segString());
+      message("now flush at optimize");
+    }
 
     flush(true, false, true);
 
@@ -1944,8 +1878,6 @@ public class IndexWriter implements Clos
       message("rollback");
     }
 
-    docWriter.pauseAllThreads();
-
     try {
       finishMerges(false);
 
@@ -1955,6 +1887,8 @@ public class IndexWriter implements Clos
       mergePolicy.close();
       mergeScheduler.close();
 
+      bufferedDeletes.clear();
+
       synchronized(this) {
 
         if (pendingCommit != null) {
@@ -1993,7 +1927,6 @@ public class IndexWriter implements Clos
     } finally {
       synchronized(this) {
         if (!success) {
-          docWriter.resumeAllThreads();
           closing = false;
           notifyAll();
           if (infoStream != null)
@@ -2021,7 +1954,6 @@ public class IndexWriter implements Clos
    *    will receive {@link MergePolicy.MergeAbortedException}s.
    */
   public synchronized void deleteAll() throws IOException {
-    docWriter.pauseAllThreads();
     try {
 
       // Abort any running merges
@@ -2029,7 +1961,6 @@ public class IndexWriter implements Clos
 
       // Remove any buffered docs
       docWriter.abort();
-      docWriter.setFlushedDocCount(0);
 
       // Remove all segments
       segmentInfos.clear();
@@ -2047,7 +1978,6 @@ public class IndexWriter implements Clos
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteAll");
     } finally {
-      docWriter.resumeAllThreads();
       if (infoStream != null) {
         message("hit exception during deleteAll");
       }
@@ -2123,7 +2053,7 @@ public class IndexWriter implements Clos
    * the index files referenced exist (correctly) in the
    * index directory.
    */
-  private synchronized void checkpoint() throws IOException {
+  synchronized void checkpoint() throws IOException {
     changeCount++;
     segmentInfos.changed();
     deleter.checkpoint(segmentInfos, false);
@@ -2259,9 +2189,6 @@ public class IndexWriter implements Clos
       synchronized (this) {
         ensureOpen();
         segmentInfos.addAll(infos);
-        // Notify DocumentsWriter that the flushed count just increased
-        docWriter.updateFlushedDocCount(docCount);
-
         checkpoint();
       }
 
@@ -2324,10 +2251,6 @@ public class IndexWriter implements Clos
       // Register the new segment
       synchronized(this) {
         segmentInfos.add(info);
-        
-        // Notify DocumentsWriter that the flushed count just increased
-        docWriter.updateFlushedDocCount(docCount);
-        
         checkpoint();
       }
     } catch (OutOfMemoryError oom) {
@@ -2535,196 +2458,92 @@ public class IndexWriter implements Clos
 
     // We can be called during close, when closing==true, so we must pass false to ensureOpen:
     ensureOpen(false);
-    if (doFlush(flushDocStores, flushDeletes) && triggerMerge)
+    if (doFlush(flushDocStores, flushDeletes) && triggerMerge) {
       maybeMerge();
-  }
-
-  // TODO: this method should not have to be entirely
-  // synchronized, ie, merges should be allowed to commit
-  // even while a flush is happening
-  private synchronized final boolean doFlush(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
-    try {
-      try {
-        return doFlushInternal(flushDocStores, flushDeletes);
-      } finally {
-        docWriter.balanceRAM();
-      }
-    } finally {
-      docWriter.clearFlushPending();
     }
   }
 
   // TODO: this method should not have to be entirely
   // synchronized, ie, merges should be allowed to commit
   // even while a flush is happening
-  private synchronized final boolean doFlushInternal(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+  private synchronized final boolean doFlush(boolean closeDocStores, boolean applyAllDeletes) throws CorruptIndexException, IOException {
 
     if (hitOOM) {
       throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
     }
 
-    ensureOpen(false);
+    doBeforeFlush();
 
     assert testPoint("startDoFlush");
 
-    doBeforeFlush();
-    
-    flushCount++;
+    // We may be flushing because it was triggered by doc
+    // count, del count, ram usage (in which case flush
+    // pending is already set), or we may be flushing
+    // due to external event eg getReader or commit is
+    // called (in which case we now set it, and this will
+    // pause all threads):
+    flushControl.setFlushPendingNoWait("explicit flush");
 
-    // If we are flushing because too many deletes
-    // accumulated, then we should apply the deletes to free
-    // RAM:
-    flushDeletes |= docWriter.doApplyDeletes();
-
-    // Make sure no threads are actively adding a document.
-    // Returns true if docWriter is currently aborting, in
-    // which case we skip flushing this segment
-    if (infoStream != null) {
-      message("flush: now pause all indexing threads");
-    }
-    if (docWriter.pauseAllThreads()) {
-      docWriter.resumeAllThreads();
-      return false;
-    }
+    boolean success = false;
 
     try {
 
-      SegmentInfo newSegment = null;
-
-      final int numDocs = docWriter.getNumDocsInRAM();
-
-      // Always flush docs if there are any
-      boolean flushDocs = numDocs > 0;
-
-      String docStoreSegment = docWriter.getDocStoreSegment();
-
-      assert docStoreSegment != null || numDocs == 0: "dss=" + docStoreSegment + " numDocs=" + numDocs;
-
-      if (docStoreSegment == null)
-        flushDocStores = false;
-
-      int docStoreOffset = docWriter.getDocStoreOffset();
-
-      boolean docStoreIsCompoundFile = false;
-
       if (infoStream != null) {
-        message("  flush: segment=" + docWriter.getSegment() +
-                " docStoreSegment=" + docWriter.getDocStoreSegment() +
-                " docStoreOffset=" + docStoreOffset +
-                " flushDocs=" + flushDocs +
-                " flushDeletes=" + flushDeletes +
-                " flushDocStores=" + flushDocStores +
-                " numDocs=" + numDocs +
-                " numBufDelTerms=" + docWriter.getNumBufferedDeleteTerms());
+        message("  start flush: applyAllDeletes=" + applyAllDeletes + " closeDocStores=" + closeDocStores);
         message("  index before flush " + segString());
       }
-
-      // Check if the doc stores must be separately flushed
-      // because other segments, besides the one we are about
-      // to flush, reference it
-      if (flushDocStores && (!flushDocs || !docWriter.getSegment().equals(docWriter.getDocStoreSegment()))) {
-        // We must separately flush the doc store
-        if (infoStream != null)
-          message("  flush shared docStore segment " + docStoreSegment);
-      
-        docStoreIsCompoundFile = flushDocStores();
-        flushDocStores = false;
-      }
-
-      String segment = docWriter.getSegment();
-
-      // If we are flushing docs, segment must not be null:
-      assert segment != null || !flushDocs;
-
-      if (flushDocs) {
-
-        boolean success = false;
-        final int flushedDocCount;
-
-        try {
-          flushedDocCount = docWriter.flush(flushDocStores);
-          if (infoStream != null) {
-            message("flushedFiles=" + docWriter.getFlushedFiles());
-          }
-          success = true;
-        } finally {
-          if (!success) {
-            if (infoStream != null)
-              message("hit exception flushing segment " + segment);
-            deleter.refresh(segment);
-          }
-        }
-        
-        if (0 == docStoreOffset && flushDocStores) {
-          // This means we are flushing private doc stores
-          // with this segment, so it will not be shared
-          // with other segments
-          assert docStoreSegment != null;
-          assert docStoreSegment.equals(segment);
-          docStoreOffset = -1;
-          docStoreIsCompoundFile = false;
-          docStoreSegment = null;
-        }
-
-        // Create new SegmentInfo, but do not add to our
-        // segmentInfos until deletes are flushed
-        // successfully.
-        newSegment = new SegmentInfo(segment,
-                                     flushedDocCount,
-                                     directory, false, docStoreOffset,
-                                     docStoreSegment, docStoreIsCompoundFile,
-                                     docWriter.hasProx(),    
-                                     docWriter.getSegmentCodecs());
-
-        if (infoStream != null) {
-          message("flush codec=" + docWriter.getSegmentCodecs());
-        }
+    
+      final SegmentInfo newSegment = docWriter.flush(this, closeDocStores, deleter, mergePolicy, segmentInfos);
+      if (newSegment != null) {
         setDiagnostics(newSegment, "flush");
-      }
-
-      docWriter.pushDeletes();
-
-      if (flushDocs) {
         segmentInfos.add(newSegment);
         checkpoint();
       }
 
-      if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
-        // Now build compound file
-        boolean success = false;
-        try {
-          docWriter.createCompoundFile(segment);
-          success = true;
-        } finally {
-          if (!success) {
-            if (infoStream != null)
-              message("hit exception creating compound file for newly flushed segment " + segment);
-            deleter.deleteFile(IndexFileNames.segmentFileName(segment, "", IndexFileNames.COMPOUND_FILE_EXTENSION));
+      if (!applyAllDeletes) {
+        // If deletes alone are consuming > 1/2 our RAM
+        // buffer, force them all to apply now. This is to
+        // prevent too-frequent flushing of a long tail of
+        // tiny segments:
+        if (flushControl.getFlushDeletes() ||
+            (config.getRAMBufferSizeMB() != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+             bufferedDeletes.bytesUsed() > (1024*1024*config.getRAMBufferSizeMB()/2))) {
+          applyAllDeletes = true;
+          if (infoStream != null) {
+            message("force apply deletes bytesUsed=" + bufferedDeletes.bytesUsed() + " vs ramBuffer=" + (1024*1024*config.getRAMBufferSizeMB()));
           }
         }
-
-        newSegment.setUseCompoundFile(true);
-        checkpoint();
       }
 
-      if (flushDeletes) {
-        applyDeletes();
+      if (applyAllDeletes) {
+        if (infoStream != null) {
+          message("apply all deletes during flush");
+        }
+        flushDeletesCount.incrementAndGet();
+        if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, segmentInfos)) {
+          checkpoint();
+        }
+        flushControl.clearDeletes();
+      } else if (infoStream != null) {
+        message("don't apply deletes now delTermCount=" + bufferedDeletes.numTerms() + " bytesUsed=" + bufferedDeletes.bytesUsed());
       }
-      
-      if (flushDocs)
-        checkpoint();
 
       doAfterFlush();
+      flushCount.incrementAndGet();
+
+      success = true;
 
-      return flushDocs;
+      return newSegment != null;
 
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "doFlush");
       // never hit
       return false;
     } finally {
-      docWriter.clearFlushPending();
-      docWriter.resumeAllThreads();
+      flushControl.clearFlushPending();
+      if (!success && infoStream != null) {
+        message("hit exception during flush");
+      }
     }
   }
 
@@ -2733,7 +2552,7 @@ public class IndexWriter implements Clos
    */
   public final long ramSizeInBytes() {
     ensureOpen();
-    return docWriter.getRAMUsed();
+    return docWriter.bytesUsed() + bufferedDeletes.bytesUsed();
   }
 
   /** Expert:  Return the number of documents currently
@@ -2776,7 +2595,7 @@ public class IndexWriter implements Clos
    *  saves the resulting deletes file (incrementing the
    *  delete generation for merge.info).  If no deletes were
    *  flushed, no new deletes file is saved. */
-  synchronized private void commitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergeReader) throws IOException {
+  synchronized private void commitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergedReader) throws IOException {
 
     assert testPoint("startCommitMergeDeletes");
 
@@ -2815,7 +2634,7 @@ public class IndexWriter implements Clos
               assert currentDelDocs.get(j);
             else {
               if (currentDelDocs.get(j)) {
-                mergeReader.doDelete(docUpto);
+                mergedReader.doDelete(docUpto);
                 delCount++;
               }
               docUpto++;
@@ -2829,7 +2648,7 @@ public class IndexWriter implements Clos
         // does:
         for(int j=0; j<docCount; j++) {
           if (currentDelDocs.get(j)) {
-            mergeReader.doDelete(docUpto);
+            mergedReader.doDelete(docUpto);
             delCount++;
           }
           docUpto++;
@@ -2839,13 +2658,13 @@ public class IndexWriter implements Clos
         docUpto += info.docCount;
     }
 
-    assert mergeReader.numDeletedDocs() == delCount;
+    assert mergedReader.numDeletedDocs() == delCount;
 
-    mergeReader.hasChanges = delCount > 0;
+    mergedReader.hasChanges = delCount > 0;
   }
 
   /* FIXME if we want to support non-contiguous segment merges */
-  synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, int mergedDocCount, SegmentReader mergedReader) throws IOException {
+  synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, SegmentReader mergedReader) throws IOException {
 
     assert testPoint("startCommitMerge");
 
@@ -2873,7 +2692,6 @@ public class IndexWriter implements Clos
     final int start = ensureContiguousMerge(merge);
 
     commitMergedDeletes(merge, mergedReader);
-    docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
       
     // If the doc store we are using has been closed and
     // is in now compound format (but wasn't when we
@@ -2886,7 +2704,7 @@ public class IndexWriter implements Clos
     segmentInfos.subList(start, start + merge.segments.size()).clear();
     assert !segmentInfos.contains(merge.info);
     segmentInfos.add(start, merge.info);
-
+    
     closeMergeReaders(merge, false);
 
     // Must note the change to segmentInfos so any commits
@@ -2897,6 +2715,12 @@ public class IndexWriter implements Clos
     // them so that they don't bother writing them to
     // disk, updating SegmentInfo, etc.:
     readerPool.clear(merge.segments);
+    
+    // remove pending deletes of the segments 
+    // that were merged, moving them onto the segment just
+    // before the merged segment
+    // Lock order: IW -> BD
+    bufferedDeletes.commitMerge(merge);
 
     if (merge.optimize) {
       // cascade the optimize:
@@ -3056,10 +2880,17 @@ public class IndexWriter implements Clos
   final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException {
     boolean success = false;
     try {
+      // Lock order: IW -> BD
+      if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, merge.segments)) {
+        checkpoint();
+      }
       _mergeInit(merge);
       success = true;
     } finally {
       if (!success) {
+        if (infoStream != null) {
+          message("hit exception in mergeInit");
+        }
         mergeFinish(merge);
       }
     }
@@ -3082,9 +2913,7 @@ public class IndexWriter implements Clos
 
     if (merge.isAborted())
       return;
-
-    applyDeletes();
-
+    
     final SegmentInfos sourceSegments = merge.segments;
     final int end = sourceSegments.size();
 
@@ -3274,10 +3103,11 @@ public class IndexWriter implements Clos
     if (suppressExceptions) {
       // Suppress any new exceptions so we throw the
       // original cause
+      boolean anyChanges = false;
       for (int i=0;i<numSegments;i++) {
         if (merge.readers[i] != null) {
           try {
-            readerPool.release(merge.readers[i], false);
+            anyChanges |= readerPool.release(merge.readers[i], false);
           } catch (Throwable t) {
           }
           merge.readers[i] = null;
@@ -3294,6 +3124,9 @@ public class IndexWriter implements Clos
           merge.readersClone[i] = null;
         }
       }
+      if (anyChanges) {
+        checkpoint();
+      }
     } else {
       for (int i=0;i<numSegments;i++) {
         if (merge.readers[i] != null) {
@@ -3521,15 +3354,21 @@ public class IndexWriter implements Clos
           mergedSegmentWarmer.warm(mergedReader);
         }
 
-        if (!commitMerge(merge, merger, mergedDocCount, mergedReader)) {
+        if (!commitMerge(merge, merger, mergedReader)) {
           // commitMerge will return false if this merge was aborted
           return 0;
         }
       } finally {
         synchronized(this) {
-          readerPool.release(mergedReader);
+          if (readerPool.release(mergedReader)) {
+            // Must checkpoint after releasing the
+            // mergedReader since it may have written a new
+            // deletes file:
+            checkpoint();
+          }
         }
       }
+
       success = true;
 
     } finally {
@@ -3549,37 +3388,14 @@ public class IndexWriter implements Clos
       mergeExceptions.add(merge);
   }
 
-  // Apply buffered deletes to all segments.
-  private final synchronized boolean applyDeletes() throws CorruptIndexException, IOException {
-    assert testPoint("startApplyDeletes");
-    if (infoStream != null) {
-      message("applyDeletes");
-    }
-    flushDeletesCount++;
-    boolean success = false;
-    boolean changed;
-    try {
-      changed = docWriter.applyDeletes(segmentInfos);
-      success = true;
-    } finally {
-      if (!success && infoStream != null) {
-        message("hit exception flushing deletes");
-      }
-    }
-
-    if (changed)
-      checkpoint();
-    return changed;
-  }
-
   // For test purposes.
-  final synchronized int getBufferedDeleteTermsSize() {
-    return docWriter.getBufferedDeleteTerms().size();
+  final int getBufferedDeleteTermsSize() {
+    return docWriter.getPendingDeletes().terms.size();
   }
 
   // For test purposes.
-  final synchronized int getNumBufferedDeleteTerms() {
-    return docWriter.getNumBufferedDeleteTerms();
+  final int getNumBufferedDeleteTerms() {
+    return docWriter.getPendingDeletes().numTermDeletes.get();
   }
 
   // utility routines for tests
@@ -3819,14 +3635,13 @@ public class IndexWriter implements Clos
   //   finishStartCommit
   //   startCommitMergeDeletes
   //   startMergeInit
-  //   startApplyDeletes
   //   DocumentsWriter.ThreadState.init start
   boolean testPoint(String name) {
     return true;
   }
 
   synchronized boolean nrtIsCurrent(SegmentInfos infos) {
-    return infos.version == segmentInfos.version && !docWriter.anyChanges();
+    return infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedDeletes.any();
   }
 
   synchronized boolean isClosed() {
@@ -3863,7 +3678,6 @@ public class IndexWriter implements Clos
     deleter.revisitPolicy();
   }
 
-
   /**
    * Sets the {@link PayloadProcessorProvider} to use when merging payloads.
    * Note that the given <code>pcp</code> will be invoked for every segment that
@@ -3894,4 +3708,123 @@ public class IndexWriter implements Clos
     return payloadProcessorProvider;
   }
 
+  // decides when flushes happen
+  final class FlushControl {
+
+    private boolean flushPending;
+    private boolean flushDeletes;
+    private int delCount;
+    private int docCount;
+    private boolean flushing;
+
+    private synchronized boolean setFlushPending(String reason, boolean doWait) {
+      if (flushPending || flushing) {
+        if (doWait) {
+          while(flushPending || flushing) {
+            try {
+              wait();
+            } catch (InterruptedException ie) {
+              throw new ThreadInterruptedException(ie);
+            }
+          }
+        }
+        return false;
+      } else {
+        if (infoStream != null) {
+          message("now trigger flush reason=" + reason);
+        }
+        flushPending = true;
+        return flushPending;
+      }
+    }
+
+    public synchronized void setFlushPendingNoWait(String reason) {
+      setFlushPending(reason, false);
+    }
+
+    public synchronized boolean getFlushPending() {
+      return flushPending;
+    }
+
+    public synchronized boolean getFlushDeletes() {
+      return flushDeletes;
+    }
+
+    public synchronized void clearFlushPending() {
+      if (infoStream != null) {
+        message("clearFlushPending");
+      }
+      flushPending = false;
+      flushDeletes = false;
+      docCount = 0;
+      notifyAll();
+    }
+
+    public synchronized void clearDeletes() {
+      delCount = 0;
+    }
+
+    public synchronized boolean waitUpdate(int docInc, int delInc) {
+      return waitUpdate(docInc, delInc, false);
+    }
+
+    public synchronized boolean waitUpdate(int docInc, int delInc, boolean skipWait) {
+      while(flushPending) {
+        try {
+          wait();
+        } catch (InterruptedException ie) {
+          throw new ThreadInterruptedException(ie);
+        }
+      }
+
+      // skipWait is only used when a thread is BOTH adding
+      // a doc and buffering a del term, and the adding of
+      // the doc already triggered a flush
+      if (skipWait) {
+        docCount += docInc;
+        delCount += delInc;
+        return false;
+      }
+
+      final int maxBufferedDocs = config.getMaxBufferedDocs();
+      if (maxBufferedDocs != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+          (docCount+docInc) >= maxBufferedDocs) {
+        return setFlushPending("maxBufferedDocs", true);
+      }
+      docCount += docInc;
+
+      final int maxBufferedDeleteTerms = config.getMaxBufferedDeleteTerms();
+      if (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+          (delCount+delInc) >= maxBufferedDeleteTerms) {
+        flushDeletes = true;
+        return setFlushPending("maxBufferedDeleteTerms", true);
+      }
+      delCount += delInc;
+
+      return flushByRAMUsage("add delete/doc");
+    }
+
+    public synchronized boolean flushByRAMUsage(String reason) {
+      final double ramBufferSizeMB = config.getRAMBufferSizeMB();
+      if (ramBufferSizeMB != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
+        final long limit = (long) (ramBufferSizeMB*1024*1024);
+        long used = bufferedDeletes.bytesUsed() + docWriter.bytesUsed();
+        if (used >= limit) {
+          
+          // DocumentsWriter may be able to free up some
+          // RAM:
+          // Lock order: FC -> DW
+          docWriter.balanceRAM();
+
+          used = bufferedDeletes.bytesUsed() + docWriter.bytesUsed();
+          if (used >= limit) {
+            return setFlushPending("ram full: " + reason, false);
+          }
+        }
+      }
+      return false;
+    }
+  }
+
+  final FlushControl flushControl = new FlushControl();
 }
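Flush triggering (by doc count, buffered delete-term count, or RAM usage) now lives in the FlushControl class above rather than in DocumentsWriter, and doFlush() force-applies buffered deletes once they alone consume more than half of the RAM buffer. A rough numeric sketch, assuming setRAMBufferSizeMB(16.0) (the 16 MB figure is illustrative, not part of the commit):

    // FlushControl.flushByRAMUsage: a flush becomes pending once
    //   bufferedDeletes.bytesUsed() + docWriter.bytesUsed() >= 16 * 1024 * 1024   (16 MB)
    // IndexWriter.doFlush: deletes are force-applied (applyAllDeletes = true) once
    //   bufferedDeletes.bytesUsed() > 16 * 1024 * 1024 / 2                        (8 MB)
    // which prevents too-frequent flushing of a long tail of tiny segments
    // done solely to free delete RAM.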

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/ParallelPostingsArray.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/ParallelPostingsArray.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/ParallelPostingsArray.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/ParallelPostingsArray.java Sat Dec 11 11:07:01 2010
@@ -1,7 +1,5 @@
 package org.apache.lucene.index;
 
-import org.apache.lucene.util.ArrayUtil;
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -19,9 +17,11 @@ import org.apache.lucene.util.ArrayUtil;
  * limitations under the License.
  */
 
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
 
 class ParallelPostingsArray {
-  final static int BYTES_PER_POSTING = 3 * DocumentsWriter.INT_NUM_BYTE;
+  final static int BYTES_PER_POSTING = 3 * RamUsageEstimator.NUM_BYTES_INT;
 
   final int size;
   final int[] textStarts;

Added: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java?rev=1044635&view=auto
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java (added)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java Sat Dec 11 11:07:01 2010
@@ -0,0 +1,188 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/** Holds buffered deletes, by docID, term or query for a
+ *  single segment. This is used to hold buffered pending
+ *  deletes against the to-be-flushed segment as well as
+ *  per-segment deletes for each segment in the index. */
+
+// NOTE: we are sync'd by BufferedDeletes, ie, all access to
+// instances of this class is via sync'd methods on
+// BufferedDeletes
+class SegmentDeletes {
+
+  /* Rough logic: HashMap has an array[Entry] w/ varying
+     load factor (say 2 * POINTER).  Entry is object w/ Term
+     key, Integer val, int hash, Entry next
+     (OBJ_HEADER + 3*POINTER + INT).  Term is object w/
+     String field and String text (OBJ_HEADER + 2*POINTER).
+     We don't count Term's field since it's interned.
+     Term's text is String (OBJ_HEADER + 4*INT + POINTER +
+     OBJ_HEADER + string.length*CHAR).  Integer is
+     OBJ_HEADER + INT. */
+  final static int BYTES_PER_DEL_TERM = 8*RamUsageEstimator.NUM_BYTES_OBJ_REF + 5*RamUsageEstimator.NUM_BYTES_OBJ_HEADER + 6*RamUsageEstimator.NUM_BYTES_INT;
+
+  /* Rough logic: del docIDs are List<Integer>.  Say list
+     allocates ~2X size (2*POINTER).  Integer is OBJ_HEADER
+     + int */
+  final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJ_REF + RamUsageEstimator.NUM_BYTES_OBJ_HEADER + RamUsageEstimator.NUM_BYTES_INT;
+
+  /* Rough logic: HashMap has an array[Entry] w/ varying
+     load factor (say 2 * POINTER).  Entry is object w/
+     Query key, Integer val, int hash, Entry next
+     (OBJ_HEADER + 3*POINTER + INT).  Query we often
+     undercount (say 24 bytes).  Integer is OBJ_HEADER + INT. */
+  final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJ_REF + 2*RamUsageEstimator.NUM_BYTES_OBJ_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24;
+
+  // TODO: many of the deletes stored here will map to
+  // Integer.MAX_VALUE; we could be more efficient for this
+  // case ie use a SortedSet not a SortedMap.  But: Java's
+  // SortedSet impls are simply backed by a Map so we won't
+  // save anything unless we do something custom...
+  final AtomicInteger numTermDeletes = new AtomicInteger();
+  final SortedMap<Term,Integer> terms = new TreeMap<Term,Integer>();
+  final Map<Query,Integer> queries = new HashMap<Query,Integer>();
+  final List<Integer> docIDs = new ArrayList<Integer>();
+
+  public static final Integer MAX_INT = Integer.valueOf(Integer.MAX_VALUE);
+
+  final AtomicLong bytesUsed = new AtomicLong();
+
+  private final static boolean VERBOSE_DELETES = false;
+
+  @Override
+  public String toString() {
+    if (VERBOSE_DELETES) {
+      return "SegmentDeletes [numTerms=" + numTermDeletes + ", terms=" + terms
+        + ", queries=" + queries + ", docIDs=" + docIDs + ", bytesUsed="
+        + bytesUsed + "]";
+    } else {
+      String s = "";
+      if (numTermDeletes.get() != 0) {
+        s += " " + numTermDeletes.get() + " deleted terms (unique count=" + terms.size() + ")";
+      }
+      if (queries.size() != 0) {
+        s += " " + queries.size() + " deleted queries";
+      }
+      if (docIDs.size() != 0) {
+        s += " " + docIDs.size() + " deleted docIDs";
+      }
+      if (bytesUsed.get() != 0) {
+        s += " bytesUsed=" + bytesUsed.get();
+      }
+
+      return s;
+    }
+  }
+  
+  void update(SegmentDeletes in, boolean noLimit) {
+    numTermDeletes.addAndGet(in.numTermDeletes.get());
+    for (Map.Entry<Term,Integer> ent : in.terms.entrySet()) {
+      final Term term = ent.getKey();
+      if (!terms.containsKey(term)) {
+        // only incr bytesUsed if this term wasn't already buffered:
+        bytesUsed.addAndGet(BYTES_PER_DEL_TERM);
+      }
+      final Integer limit;
+      if (noLimit) {
+        limit = MAX_INT;
+      } else {
+        limit = ent.getValue();
+      }
+      terms.put(term, limit);
+    }
+
+    for (Map.Entry<Query,Integer> ent : in.queries.entrySet()) {
+      final Query query = ent.getKey();
+      if (!queries.containsKey(query)) {
+        // only incr bytesUsed if this query wasn't already buffered:
+        bytesUsed.addAndGet(BYTES_PER_DEL_QUERY);
+      }
+      final Integer limit;
+      if (noLimit) {
+        limit = MAX_INT;
+      } else {
+        limit = ent.getValue();
+      }
+      queries.put(query, limit);
+    }
+
+    // docIDs never move across segments and the docIDs
+    // should already be cleared
+  }
+
+  public void addQuery(Query query, int docIDUpto) {
+    queries.put(query, docIDUpto);
+    bytesUsed.addAndGet(BYTES_PER_DEL_QUERY);
+  }
+
+  public void addDocID(int docID) {
+    docIDs.add(Integer.valueOf(docID));
+    bytesUsed.addAndGet(BYTES_PER_DEL_DOCID);
+  }
+
+  public void addTerm(Term term, int docIDUpto) {
+    Integer current = terms.get(term);
+    if (current != null && docIDUpto < current) {
+      // Only record the new number if it's greater than the
+      // current one.  This is important because if multiple
+      // threads are replacing the same doc at nearly the
+      // same time, it's possible that one thread that got a
+      // higher docID is scheduled before the other
+      // threads.  If we blindly replace then we can get
+      // double-doc in the segment.
+      return;
+    }
+
+    terms.put(term, Integer.valueOf(docIDUpto));
+    numTermDeletes.incrementAndGet();
+    if (current == null) {
+      bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length);
+    }
+  }
+    
+  void clear() {
+    terms.clear();
+    queries.clear();
+    docIDs.clear();
+    numTermDeletes.set(0);
+    bytesUsed.set(0);
+  }
+  
+  void clearDocIDs() {
+    bytesUsed.addAndGet(-docIDs.size()*BYTES_PER_DEL_DOCID);
+    docIDs.clear();
+  }
+  
+  boolean any() {
+    return terms.size() > 0 || docIDs.size() > 0 || queries.size() > 0;
+  }
+}
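To make the guard in addTerm concrete, here is a small invented scenario (thread labels, docIDs and the local variables are made up for illustration): two threads update a document with the same id term; thread A's replacement is assigned docID 17, thread B's docID 18, but B buffers its delete first.

    SegmentDeletes deletes = new SegmentDeletes();
    Term idTerm = new Term("id", "5");
    deletes.addTerm(idTerm, 18);  // B: delete older docs with id:5 (docID < 18)
    deletes.addTerm(idTerm, 17);  // A: arrives late; 17 < current 18, so it is ignored
    // Keeping the larger limit means A's doc 17 is still deleted when the
    // buffered deletes are applied, so only B's doc 18 survives; blindly
    // storing 17 would leave both 17 and 18 alive -- the double-doc the
    // comment above warns about.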

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentInfo.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentInfo.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentInfo.java Sat Dec 11 11:07:01 2010
@@ -361,6 +361,10 @@ public final class SegmentInfo {
     return docStoreSegment;
   }
   
+  public void setDocStoreSegment(String segment) {
+    docStoreSegment = segment;
+  }
+  
   void setDocStoreOffset(int offset) {
     docStoreOffset = offset;
     clearFiles();
@@ -534,6 +538,12 @@ public final class SegmentInfo {
     
     if (docStoreOffset != -1) {
       s.append("->").append(docStoreSegment);
+      if (docStoreIsCompoundFile) {
+        s.append('c');
+      } else {
+        s.append('C');
+      }
+      s.append('+').append(docStoreOffset);
     }
 
     return s.toString();
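For example (segment names and offset are invented), a segment whose stored fields live in shared doc store _0, written as a compound file and starting at doc offset 50, would now render the doc-store suffix as "->_0c+50" instead of just "->_0".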

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java Sat Dec 11 11:07:01 2010
@@ -24,6 +24,7 @@ import org.apache.lucene.document.Fielda
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.RamUsageEstimator;
 
 final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {
 
@@ -298,7 +299,7 @@ final class TermVectorsTermsWriterPerFie
 
     @Override
     int bytesPerPosting() {
-      return super.bytesPerPosting() + 3 * DocumentsWriter.INT_NUM_BYTE;
+      return super.bytesPerPosting() + 3 * RamUsageEstimator.NUM_BYTES_INT;
     }
   }
 }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestDemo.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestDemo.java Sat Dec 11 11:07:01 2010
@@ -50,6 +50,7 @@ public class TestDemo extends LuceneTest
     // To store an index on disk, use this instead:
     //Directory directory = FSDirectory.open("/tmp/testindex");
     RandomIndexWriter iwriter = new RandomIndexWriter(random, directory);
+    iwriter.w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java Sat Dec 11 11:07:01 2010
@@ -84,6 +84,10 @@ public class TestSearchForDuplicates ext
       lmp.setUseCompoundFile(useCompoundFiles);
       lmp.setUseCompoundDocStore(useCompoundFiles);
       IndexWriter writer = new IndexWriter(directory, conf);
+      if (VERBOSE) {
+        System.out.println("TEST: now build index");
+        writer.setInfoStream(System.out);
+      }
 
       final int MAX_DOCS = 225;
 

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Sat Dec 11 11:07:01 2010
@@ -57,9 +57,9 @@ public class TestConcurrentMergeSchedule
             isClose = true;
           }
         }
-        if (isDoFlush && !isClose) {
+        if (isDoFlush && !isClose && random.nextBoolean()) {
           hitExc = true;
-          throw new IOException("now failing during flush");
+          throw new IOException(Thread.currentThread().getName() + ": now failing during flush");
         }
       }
     }
@@ -73,12 +73,17 @@ public class TestConcurrentMergeSchedule
     directory.failOn(failure);
 
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+    writer.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
     int extraCount = 0;
 
     for(int i=0;i<10;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + i);
+      }
+
       for(int j=0;j<20;j++) {
         idField.setValue(Integer.toString(i*20+j));
         writer.addDocument(doc);
@@ -97,10 +102,14 @@ public class TestConcurrentMergeSchedule
           }
           extraCount++;
         } catch (IOException ioe) {
+          if (VERBOSE) {
+            ioe.printStackTrace(System.out);
+          }
           failure.clearDoFail();
           break;
         }
       }
+      assertEquals(20*(i+1)+extraCount, writer.numDocs());
     }
 
     writer.close();
@@ -155,8 +164,12 @@ public class TestConcurrentMergeSchedule
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer())
         .setMaxBufferedDocs(2));
+    writer.setInfoStream(VERBOSE ? System.out : null);
 
     for(int iter=0;iter<7;iter++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter);
+      }
 
       for(int j=0;j<21;j++) {
         Document doc = new Document();

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Sat Dec 11 11:07:01 2010
@@ -1083,6 +1083,9 @@ public class TestIndexWriter extends Luc
     doc.add(idField);
 
     for(int pass=0;pass<2;pass++) {
+      if (VERBOSE) {
+        System.out.println("TEST: pass=" + pass);
+      }
 
       IndexWriter writer = new IndexWriter(
           directory,
@@ -1094,10 +1097,12 @@ public class TestIndexWriter extends Luc
               // backed directory:
               setMergePolicy(newLogMergePolicy(false, 10))
       );
+      writer.setInfoStream(VERBOSE ? System.out : null);
 
-      //System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
       for(int iter=0;iter<10;iter++) {
-        //System.out.println("TEST: iter=" + iter);
+        if (VERBOSE) {
+          System.out.println("TEST: iter=" + iter);
+        }
         for(int j=0;j<199;j++) {
           idField.setValue(Integer.toString(iter*201+j));
           writer.addDocument(doc);
@@ -1142,8 +1147,9 @@ public class TestIndexWriter extends Luc
             }
           };
 
-        if (failure.size() > 0)
+        if (failure.size() > 0) {
           throw failure.get(0);
+        }
 
         t1.start();
 
@@ -1156,6 +1162,7 @@ public class TestIndexWriter extends Luc
 
         // Reopen
         writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
+        writer.setInfoStream(VERBOSE ? System.out : null);
       }
       writer.close();
     }
@@ -2575,7 +2582,7 @@ public class TestIndexWriter extends Luc
 
     Directory dir = newDirectory();
     FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
-    //w.setInfoStream(System.out);
+    w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
     int num = 6 * RANDOM_MULTIPLIER;
@@ -2583,6 +2590,9 @@ public class TestIndexWriter extends Luc
       int count = 0;
 
       final boolean doIndexing = r.nextBoolean();
+      if (VERBOSE) {
+        System.out.println("TEST: iter doIndexing=" + doIndexing);
+      }
       if (doIndexing) {
         // Add docs until a flush is triggered
         final int startFlushCount = w.flushCount;

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Sat Dec 11 11:07:01 2010
@@ -114,6 +114,9 @@ public class TestIndexWriterDelete exten
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1));
+
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.addDocument(new Document());
     writer.deleteDocuments(new Term("foobar", "1"));
     writer.deleteDocuments(new Term("foobar", "1"));
     writer.deleteDocuments(new Term("foobar", "1"));
@@ -125,11 +128,14 @@ public class TestIndexWriterDelete exten
   // test when delete terms only apply to ram segments
   public void testRAMDeletes() throws IOException {
     for(int t=0;t<2;t++) {
+      if (VERBOSE) {
+        System.out.println("TEST: t=" + t);
+      }
       Directory dir = newDirectory();
       IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(4)
           .setMaxBufferedDeleteTerms(4));
-
+      modifier.setInfoStream(VERBOSE ? System.out : null);
       int id = 0;
       int value = 100;
 
@@ -439,6 +445,9 @@ public class TestIndexWriterDelete exten
 
     // Iterate w/ ever increasing free disk space:
     while (!done) {
+      if (VERBOSE) {
+        System.out.println("TEST: cycle");
+      }
       MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
       dir.setPreventDoubleWrite(false);
       IndexWriter modifier = new IndexWriter(dir,
@@ -448,6 +457,7 @@ public class TestIndexWriterDelete exten
                                              .setMaxBufferedDeleteTerms(1000)
                                              .setMergeScheduler(new ConcurrentMergeScheduler()));
       ((ConcurrentMergeScheduler) modifier.getConfig().getMergeScheduler()).setSuppressExceptions();
+      modifier.setInfoStream(VERBOSE ? System.out : null);
 
       // For each disk size, first try to commit against
       // dir that will hit random IOExceptions & disk
@@ -456,6 +466,9 @@ public class TestIndexWriterDelete exten
       boolean success = false;
 
       for (int x = 0; x < 2; x++) {
+        if (VERBOSE) {
+          System.out.println("TEST: x=" + x);
+        }
 
         double rate = 0.1;
         double diskRatio = ((double)diskFree) / diskUsage;

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Sat Dec 11 11:07:01 2010
@@ -51,7 +51,7 @@ public class TestIndexWriterExceptions e
     IndexWriter writer;
 
     final Random r = new java.util.Random(47);
-    Throwable failure;
+    volatile Throwable failure;
 
     public IndexerThread(int i, IndexWriter writer) {
       setName("Indexer " + i);
@@ -79,6 +79,9 @@ public class TestIndexWriterExceptions e
       final long stopTime = System.currentTimeMillis() + 500;
 
       do {
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": TEST: IndexerThread: cycle");
+        }
         doFail.set(this);
         final String id = ""+r.nextInt(50);
         idField.setValue(id);
@@ -136,7 +139,7 @@ public class TestIndexWriterExceptions e
       if (doFail.get() != null && !name.equals("startDoFlush") && r.nextInt(20) == 17) {
         if (VERBOSE) {
           System.out.println(Thread.currentThread().getName() + ": NOW FAIL: " + name);
-          //new Throwable().printStackTrace(System.out);
+          new Throwable().printStackTrace(System.out);
         }
         throw new RuntimeException(Thread.currentThread().getName() + ": intentionally failing at " + name);
       }
@@ -145,16 +148,23 @@ public class TestIndexWriterExceptions e
   }
 
   public void testRandomExceptions() throws Throwable {
+    if (VERBOSE) {
+      System.out.println("\nTEST: start testRandomExceptions");
+    }
     MockDirectoryWrapper dir = newDirectory();
 
     MockIndexWriter writer  = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
         .setRAMBufferSizeMB(0.1).setMergeScheduler(new ConcurrentMergeScheduler()));
     ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
     //writer.setMaxBufferedDocs(10);
+    if (VERBOSE) {
+      System.out.println("TEST: initial commit");
+    }
     writer.commit();
 
-    if (VERBOSE)
+    if (VERBOSE) {
       writer.setInfoStream(System.out);
+    }
 
     IndexerThread thread = new IndexerThread(0, writer);
     thread.run();
@@ -163,6 +173,9 @@ public class TestIndexWriterExceptions e
       fail("thread " + thread.getName() + ": hit unexpected failure");
     }
 
+    if (VERBOSE) {
+      System.out.println("TEST: commit after thread start");
+    }
     writer.commit();
 
     try {
@@ -192,8 +205,9 @@ public class TestIndexWriterExceptions e
     //writer.setMaxBufferedDocs(10);
     writer.commit();
 
-    if (VERBOSE)
+    if (VERBOSE) {
       writer.setInfoStream(System.out);
+    }
 
     final int NUM_THREADS = 4;
 
@@ -294,6 +308,7 @@ public class TestIndexWriterExceptions e
   public void testExceptionJustBeforeFlush() throws IOException {
     Directory dir = newDirectory();
     MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+    w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     doc.add(newField("field", "a field", Field.Store.YES,
                       Field.Index.ANALYZED));

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java Sat Dec 11 11:07:01 2010
@@ -47,29 +47,35 @@ public class TestIndexWriterOnDiskFull e
   public void testAddDocumentOnDiskFull() throws IOException {
 
     for(int pass=0;pass<2;pass++) {
-      if (VERBOSE)
+      if (VERBOSE) {
         System.out.println("TEST: pass=" + pass);
+      }
       boolean doAbort = pass == 1;
       long diskFree = 200;
       while(true) {
-        if (VERBOSE)
+        if (VERBOSE) {
           System.out.println("TEST: cycle: diskFree=" + diskFree);
+        }
         MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
         dir.setMaxSizeInBytes(diskFree);
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
         writer.setInfoStream(VERBOSE ? System.out : null);
         MergeScheduler ms = writer.getConfig().getMergeScheduler();
-        if (ms instanceof ConcurrentMergeScheduler)
+        if (ms instanceof ConcurrentMergeScheduler) {
           // This test intentionally produces exceptions
           // in the threads that CMS launches; we don't
           // want to pollute test output with these.
           ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
+        }
 
         boolean hitError = false;
         try {
           for(int i=0;i<200;i++) {
             addDoc(writer);
           }
+          if (VERBOSE) {
+            System.out.println("TEST: done adding docs; now commit");
+          }
           writer.commit();
         } catch (IOException e) {
           if (VERBOSE) {
@@ -81,13 +87,19 @@ public class TestIndexWriterOnDiskFull e
 
         if (hitError) {
           if (doAbort) {
+            if (VERBOSE) {
+              System.out.println("TEST: now rollback");
+            }
             writer.rollback();
           } else {
             try {
+              if (VERBOSE) {
+                System.out.println("TEST: now close");
+              }
               writer.close();
             } catch (IOException e) {
               if (VERBOSE) {
-                System.out.println("TEST: exception on close");
+                System.out.println("TEST: exception on close; retry w/ no disk space limit");
                 e.printStackTrace(System.out);
               }
               dir.setMaxSizeInBytes(0);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java Sat Dec 11 11:07:01 2010
@@ -106,6 +106,9 @@ public class TestIndexWriterWithThreads 
     int NUM_THREADS = 3;
 
     for(int iter=0;iter<10;iter++) {
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
       MockDirectoryWrapper dir = newDirectory();
       IndexWriter writer = new IndexWriter(
           dir,
@@ -116,6 +119,7 @@ public class TestIndexWriterWithThreads 
       );
       ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
       dir.setMaxSizeInBytes(4*1024+20*iter);
+      writer.setInfoStream(VERBOSE ? System.out : null);
 
       IndexerThread[] threads = new IndexerThread[NUM_THREADS];
 

Added: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java?rev=1044635&view=auto
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java (added)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java Sat Dec 11 11:07:01 2010
@@ -0,0 +1,299 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
+
+public class TestPerSegmentDeletes extends LuceneTestCase {
+  public void testDeletes1() throws Exception {
+    //IndexWriter.debug2 = System.out;
+    Directory dir = new MockDirectoryWrapper(new Random(), new RAMDirectory());
+    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_CURRENT,
+        new MockAnalyzer());
+    iwc.setMergeScheduler(new SerialMergeScheduler());
+    iwc.setMaxBufferedDocs(5000);
+    iwc.setRAMBufferSizeMB(100);
+    RangeMergePolicy fsmp = new RangeMergePolicy(false);
+    iwc.setMergePolicy(fsmp);
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    for (int x = 0; x < 5; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "1", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    //System.out.println("commit1");
+    writer.commit();
+    assertEquals(1, writer.segmentInfos.size());
+    for (int x = 5; x < 10; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "2", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    //System.out.println("commit2");
+    writer.commit();
+    assertEquals(2, writer.segmentInfos.size());
+
+    for (int x = 10; x < 15; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "3", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    
+    writer.deleteDocuments(new Term("id", "1"));
+    
+    writer.deleteDocuments(new Term("id", "11"));
+
+    // flushing without applying deletes means
+    // the buffered deletes are still pending
+    writer.flush(false, false, false);
+    assertTrue(writer.bufferedDeletes.any());
+    
+    // getReader applies the pending deletes,
+    // so none should remain buffered
+    IndexReader r1 = writer.getReader();
+    assertFalse(writer.bufferedDeletes.any());
+    r1.close();
+    
+    // delete id:2 from the first segment,
+    // then merge segments 0 and 1,
+    // which should apply the delete of id:2
+    writer.deleteDocuments(new Term("id", "2"));
+    writer.flush(false, false, false);
+    fsmp.doMerge = true;
+    fsmp.start = 0;
+    fsmp.length = 2;
+    writer.maybeMerge();
+    
+    assertEquals(2, writer.segmentInfos.size());
+    
+    // id:2 shouldn't exist anymore because
+    // the delete was applied during the merge
+    IndexReader r2 = writer.getReader();
+    int[] id2docs = toDocsArray(new Term("id", "2"), null, r2);
+    assertTrue(id2docs == null);
+    r2.close();
+    
+    /**
+    // added docs are in the ram buffer
+    for (int x = 15; x < 20; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "4", 2));
+      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    assertTrue(writer.numRamDocs() > 0);
+    // delete from the ram buffer
+    writer.deleteDocuments(new Term("id", Integer.toString(13)));
+    
+    Term id3 = new Term("id", Integer.toString(3));
+    
+    // delete from the 1st segment
+    writer.deleteDocuments(id3);
+    
+    assertTrue(writer.numRamDocs() > 0);
+    
+    //System.out
+    //    .println("segdels1:" + writer.docWriter.deletesToString());
+    
+    //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    // we cause a merge to happen
+    fsmp.doMerge = true;
+    fsmp.start = 0;
+    fsmp.length = 2;
+    System.out.println("maybeMerge "+writer.segmentInfos);
+    
+    SegmentInfo info0 = writer.segmentInfos.get(0);
+    SegmentInfo info1 = writer.segmentInfos.get(1);
+    
+    writer.maybeMerge();
+    System.out.println("maybeMerge after "+writer.segmentInfos);
+    // there should be docs in RAM
+    assertTrue(writer.numRamDocs() > 0);
+    
+    // assert we've merged the 1 and 2 segments
+    // and still have a segment leftover == 2
+    assertEquals(2, writer.segmentInfos.size());
+    assertFalse(segThere(info0, writer.segmentInfos));
+    assertFalse(segThere(info1, writer.segmentInfos));
+    
+    //System.out.println("segdels2:" + writer.docWriter.deletesToString());
+    
+    //assertTrue(writer.docWriter.segmentDeletes.size() > 0);
+    
+    IndexReader r = writer.getReader();
+    IndexReader r1 = r.getSequentialSubReaders()[0];
+    printDelDocs(r1.getDeletedDocs());
+    int[] docs = toDocsArray(id3, null, r);
+    System.out.println("id3 docs:"+Arrays.toString(docs));
+    // there shouldn't be any docs for id:3
+    assertTrue(docs == null);
+    r.close();
+    
+    part2(writer, fsmp);
+    **/
+    // System.out.println("segdels2:"+writer.docWriter.segmentDeletes.toString());
+    //System.out.println("close");
+    writer.close();
+    dir.close();
+  }
+  
+  /**
+  static boolean hasPendingDeletes(SegmentInfos infos) {
+    for (SegmentInfo info : infos) {
+      if (info.deletes.any()) {
+        return true;
+      }
+    }
+    return false;
+  }
+  **/
+  void part2(IndexWriter writer, RangeMergePolicy fsmp) throws Exception {
+    for (int x = 20; x < 25; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "5", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    writer.flush(false, true, false);
+    for (int x = 25; x < 30; x++) {
+      writer.addDocument(TestIndexWriterReader.createDocument(x, "5", 2));
+      //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
+    }
+    writer.flush(false, true, false);
+    
+    //System.out.println("infos3:"+writer.segmentInfos);
+    
+    Term delterm = new Term("id", "8");
+    writer.deleteDocuments(delterm);
+    //System.out.println("segdels3:" + writer.docWriter.deletesToString());
+    
+    fsmp.doMerge = true;
+    fsmp.start = 1;
+    fsmp.length = 2;
+    writer.maybeMerge();
+    
+    // info1, the segment newly created by the merge, should have
+    // no deletes because they were applied during the merge
+    //SegmentInfo info1 = writer.segmentInfos.get(1);
+    //assertFalse(exists(info1, writer.docWriter.segmentDeletes));
+    
+    //System.out.println("infos4:"+writer.segmentInfos);
+    //System.out.println("segdels4:" + writer.docWriter.deletesToString());
+  }
+  
+  boolean segThere(SegmentInfo info, SegmentInfos infos) {
+    for (SegmentInfo si : infos) {
+      if (si.name.equals(info.name)) return true; 
+    }
+    return false;
+  }
+  
+  public static void printDelDocs(Bits bits) {
+    if (bits == null) return;
+    for (int x = 0; x < bits.length(); x++) {
+      System.out.println(x + ":" + bits.get(x));
+    }
+  }
+  
+  public static int[] toDocsArray(Term term, Bits bits, IndexReader reader)
+      throws IOException {
+    Fields fields = MultiFields.getFields(reader);
+    Terms cterms = fields.terms(term.field);
+    TermsEnum ctermsEnum = cterms.iterator();
+    SeekStatus ss = ctermsEnum.seek(new BytesRef(term.text()), false);
+    if (ss.equals(SeekStatus.FOUND)) {
+      DocsEnum docsEnum = ctermsEnum.docs(bits, null);
+      return toArray(docsEnum);
+    }
+    return null;
+  }
+  
+  public static int[] toArray(DocsEnum docsEnum) throws IOException {
+    List<Integer> docs = new ArrayList<Integer>();
+    while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      int docID = docsEnum.docID();
+      docs.add(docID);
+    }
+    return ArrayUtil.toIntArray(docs);
+  }
+  
+  public class RangeMergePolicy extends MergePolicy {
+    boolean doMerge = false;
+    int start;
+    int length;
+    
+    private final boolean useCompoundFile;
+    
+    private RangeMergePolicy(boolean useCompoundFile) {
+      this.useCompoundFile = useCompoundFile;
+    }
+    
+    @Override
+    public void close() {}
+    
+    public MergeSpecification findMerges(SegmentInfos segmentInfos)
+        throws CorruptIndexException, IOException {
+      MergeSpecification ms = new MergeSpecification();
+      if (doMerge) {
+        SegmentInfos mergeInfos = new SegmentInfos();
+        for (int x=start; x < (start+length); x++) {
+          mergeInfos.add(segmentInfos.get(x));
+        }
+        OneMerge om = new OneMerge(mergeInfos);
+        ms.add(om);
+        doMerge = false;
+        return ms;
+      }
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
+        int maxSegmentCount, Set<SegmentInfo> segmentsToOptimize)
+        throws CorruptIndexException, IOException {
+      return null;
+    }
+    
+    @Override
+    public MergeSpecification findMergesToExpungeDeletes(
+        SegmentInfos segmentInfos) throws CorruptIndexException, IOException {
+      return null;
+    }
+    
+    @Override
+    public boolean useCompoundDocStore(SegmentInfos segments) {
+      return useCompoundFile;
+    }
+    
+    @Override
+    public boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) {
+      return useCompoundFile;
+    }
+  }
+}

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java Sat Dec 11 11:07:01 2010
@@ -95,6 +95,9 @@ public class TestStressIndexing2 extends
 
     int num = 3 * RANDOM_MULTIPLIER;
     for (int i = 0; i < num; i++) { // increase iterations for better testing
+      if (VERBOSE) {
+        System.out.println("\n\nTEST: top iter=" + i);
+      }
       sameFieldOrder=random.nextBoolean();
       mergeFactor=random.nextInt(3)+2;
       maxBufferedDocs=random.nextInt(3)+2;
@@ -107,10 +110,17 @@ public class TestStressIndexing2 extends
       int range=random.nextInt(20)+1;
       Directory dir1 = newDirectory();
       Directory dir2 = newDirectory();
+      if (VERBOSE) {
+        System.out.println("  nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor);
+      }
       Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
-      //System.out.println("TEST: index serial");
+      if (VERBOSE) {
+        System.out.println("TEST: index serial");
+      }
       indexSerial(random, docs, dir2);
-      //System.out.println("TEST: verify");
+      if (VERBOSE) {
+        System.out.println("TEST: verify");
+      }
       verifyEquals(dir1, dir2, "id");
       dir1.close();
       dir2.close();
@@ -140,6 +150,7 @@ public class TestStressIndexing2 extends
     IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(
         0.1).setMaxBufferedDocs(maxBufferedDocs));
+    w.setInfoStream(VERBOSE ? System.out : null);
     w.commit();
     LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
     lmp.setUseCompoundFile(false);
@@ -190,10 +201,14 @@ public class TestStressIndexing2 extends
                                           boolean doReaderPooling) throws IOException, InterruptedException {
     Map<String,Document> docs = new HashMap<String,Document>();
     for(int iter=0;iter<3;iter++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter);
+      }
       IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE)
                .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setMaxThreadStates(maxThreadStates)
                .setReaderPooling(doReaderPooling));
+      w.setInfoStream(VERBOSE ? System.out : null);
       LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
       lmp.setUseCompoundFile(false);
       lmp.setUseCompoundDocStore(false);
@@ -272,9 +287,33 @@ public class TestStressIndexing2 extends
     r2.close();
   }
 
+  private static void printDocs(IndexReader r) throws Throwable {
+    IndexReader[] subs = r.getSequentialSubReaders();
+    for(IndexReader sub : subs) {
+      Bits delDocs = sub.getDeletedDocs();
+      System.out.println("  " + ((SegmentReader) sub).getSegmentInfo());
+      for(int docID=0;docID<sub.maxDoc();docID++) {
+        Document doc = sub.document(docID);
+        if (delDocs == null || !delDocs.get(docID)) {
+          System.out.println("    docID=" + docID + " id:" + doc.get("id"));
+        } else {
+          System.out.println("    DEL docID=" + docID + " id:" + doc.get("id"));
+        }
+      }
+    }
+  }
+
 
   public static void verifyEquals(IndexReader r1, IndexReader r2, String idField) throws Throwable {
-    assertEquals(r1.numDocs(), r2.numDocs());
+    if (VERBOSE) {
+      System.out.println("\nr1 docs:");
+      printDocs(r1);
+      System.out.println("\nr2 docs:");
+      printDocs(r2);
+    }
+    if (r1.numDocs() != r2.numDocs()) {
+      assert false: "r1.numDocs()=" + r1.numDocs() + " vs r2.numDocs()=" + r2.numDocs();
+    }
     boolean hasDeletes = !(r1.maxDoc()==r2.maxDoc() && r1.numDocs()==r1.maxDoc());
 
     int[] r2r1 = new int[r2.maxDoc()];   // r2 id to r1 id mapping
@@ -692,19 +731,28 @@ public class TestStressIndexing2 extends
       for (int i=0; i<fields.size(); i++) {
         d.add(fields.get(i));
       }
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": indexing id:" + idString);
+      }
       w.updateDocument(idTerm.createTerm(idString), d);
-      // System.out.println("indexing "+d);
+      //System.out.println(Thread.currentThread().getName() + ": indexing "+d);
       docs.put(idString, d);
     }
 
     public void deleteDoc() throws IOException {
       String idString = getIdString();
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": del id:" + idString);
+      }
       w.deleteDocuments(idTerm.createTerm(idString));
       docs.remove(idString);
     }
 
     public void deleteByQuery() throws IOException {
       String idString = getIdString();
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": del query id:" + idString);
+      }
       w.deleteDocuments(new TermQuery(idTerm.createTerm(idString)));
       docs.remove(idString);
     }

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermVectors.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermVectors.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermVectors.java Sat Dec 11 11:07:01 2010
@@ -353,12 +353,19 @@ public class TestTermVectors extends Luc
     RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
         .setOpenMode(OpenMode.CREATE));
+    writer.w.setInfoStream(VERBOSE ? System.out : null);
+    if (VERBOSE) {
+      System.out.println("TEST: now add non-vectors");
+    }
     for (int i = 0; i < 100; i++) {
       Document doc = new Document();
       doc.add(new Field("field", English.intToEnglish(i),
                         Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
       writer.addDocument(doc);
     }
+    if (VERBOSE) {
+      System.out.println("TEST: now add vectors");
+    }
     for(int i=0;i<10;i++) {
       Document doc = new Document();
       doc.add(new Field("field", English.intToEnglish(100+i),
@@ -366,6 +373,9 @@ public class TestTermVectors extends Luc
       writer.addDocument(doc);
     }
 
+    if (VERBOSE) {
+      System.out.println("TEST: now getReader");
+    }
     IndexReader reader = writer.getReader();
     writer.close();
     searcher = new IndexSearcher(reader);
@@ -374,6 +384,7 @@ public class TestTermVectors extends Luc
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
     assertEquals(10, hits.length);
     for (int i = 0; i < hits.length; i++) {
+
       TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
       assertTrue(vector != null);
       assertTrue(vector.length == 1);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java Sat Dec 11 11:07:01 2010
@@ -19,6 +19,8 @@ package org.apache.lucene.store;
 
 import java.io.IOException;
 
+import org.apache.lucene.util.LuceneTestCase;
+
 /**
  * Used by MockRAMDirectory to create an output stream that
  * will throw an IOException on fake disk full, track max
@@ -102,6 +104,9 @@ public class MockIndexOutputWrapper exte
         message += "; wrote " + freeSpace + " of " + len + " bytes";
       }
       message += ")";
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": MDW: now throw fake disk full");
+      }
       throw new IOException(message);
     } else {
       if (dir.randomState.nextBoolean()) {

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java?rev=1044635&r1=1044634&r2=1044635&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java Sat Dec 11 11:07:01 2010
@@ -143,7 +143,7 @@ public abstract class LuceneTestCase ext
   public static final boolean TEST_NIGHTLY = Boolean.parseBoolean(System.getProperty("tests.nightly", "false"));
   /** the line file used by LineFileDocs */
   public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", "europarl.lines.txt.gz");
-  
+
   private static final Pattern codecWithParam = Pattern.compile("(.*)\\(\\s*(\\d+)\\s*\\)");
 
   /**