Posted to commits@lucene.apache.org by mi...@apache.org on 2010/12/17 10:52:51 UTC

svn commit: r1050331 [2/2] - in /lucene/dev/branches/branch_3x: ./ lucene/ lucene/src/java/org/apache/lucene/index/ lucene/src/test/org/apache/lucene/ lucene/src/test/org/apache/lucene/index/ lucene/src/test/org/apache/lucene/search/ lucene/src/test/or...

Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java Fri Dec 17 09:52:50 2010
@@ -35,6 +35,7 @@ import org.apache.lucene.util.Version;
 import java.io.IOException;
 import java.io.Closeable;
 import java.io.PrintStream;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.List;
 import java.util.Collection;
 import java.util.ArrayList;
@@ -257,9 +258,8 @@ public class IndexWriter implements Clos
   private final static int MERGE_READ_BUFFER_SIZE = 4096;
 
   // Used for printing messages
-  private static Object MESSAGE_ID_LOCK = new Object();
-  private static int MESSAGE_ID = 0;
-  private int messageID = -1;
+  private static final AtomicInteger MESSAGE_ID = new AtomicInteger();
+  private int messageID = MESSAGE_ID.getAndIncrement();
   volatile private boolean hitOOM;
 
   private final Directory directory;  // where this index resides
@@ -277,7 +277,7 @@ public class IndexWriter implements Clos
   volatile SegmentInfos pendingCommit;            // set when a commit is pending (after prepareCommit() & before commit())
   volatile long pendingCommitChangeCount;
 
-  private SegmentInfos segmentInfos = new SegmentInfos();       // the segments
+  final SegmentInfos segmentInfos = new SegmentInfos();       // the segments
 
   private DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
@@ -306,10 +306,11 @@ public class IndexWriter implements Clos
   private long mergeGen;
   private boolean stopMerges;
 
-  private int flushCount;
-  private int flushDeletesCount;
+  private final AtomicInteger flushCount = new AtomicInteger();
+  private final AtomicInteger flushDeletesCount = new AtomicInteger();
 
   final ReaderPool readerPool = new ReaderPool();
+  final BufferedDeletes bufferedDeletes;
   
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
@@ -481,20 +482,26 @@ public class IndexWriter implements Clos
     /**
      * Release the segment reader (i.e. decRef it and close if there
      * are no more references.
+     * @return true if this release altered the index (eg
+     * the SegmentReader had pending changes to del docs and
+     * was closed).  Caller must call checkpoint() if so.
      * @param sr
      * @throws IOException
      */
-    public synchronized void release(SegmentReader sr) throws IOException {
-      release(sr, false);
+    public synchronized boolean release(SegmentReader sr) throws IOException {
+      return release(sr, false);
     }
     
     /**
      * Release the segment reader (i.e. decRef it and close if there
      * are no more references.
+     * @return true if this release altered the index (eg
+     * the SegmentReader had pending changes to del docs and
+     * was closed).  Caller must call checkpoint() if so.
      * @param sr
      * @throws IOException
      */
-    public synchronized void release(SegmentReader sr, boolean drop) throws IOException {
+    public synchronized boolean release(SegmentReader sr, boolean drop) throws IOException {
 
       final boolean pooled = readerMap.containsKey(sr.getSegmentInfo());
 
@@ -525,13 +532,10 @@ public class IndexWriter implements Clos
         // not pooling readers, we release it:
         readerMap.remove(sr.getSegmentInfo());
 
-        if (hasChanges) {
-          // Must checkpoint w/ deleter, because this
-          // segment reader will have created new _X_N.del
-          // file.
-          deleter.checkpoint(segmentInfos, false);
-        }
+        return hasChanges;
       }
+
+      return false;
     }
     
     /** Remove all our references to readers, and commits
@@ -679,6 +683,8 @@ public class IndexWriter implements Clos
     }
   }
   
+  
+  
   /**
    * Obtain the number of deleted docs for a pooled reader.
    * If the reader isn't being pooled, the segmentInfo's 
@@ -725,15 +731,6 @@ public class IndexWriter implements Clos
       infoStream.println("IW " + messageID + " [" + new Date() + "; " + Thread.currentThread().getName() + "]: " + message);
   }
 
-  private synchronized void setMessageID(PrintStream infoStream) {
-    if (infoStream != null && messageID == -1) {
-      synchronized(MESSAGE_ID_LOCK) {
-        messageID = MESSAGE_ID++;
-      }
-    }
-    this.infoStream = infoStream;
-  }
-
   /**
    * Casts current mergePolicy to LogMergePolicy, and throws
    * an exception if the mergePolicy is not a LogMergePolicy.
@@ -1048,7 +1045,7 @@ public class IndexWriter implements Clos
     config = (IndexWriterConfig) conf.clone();
     directory = d;
     analyzer = conf.getAnalyzer();
-    setMessageID(defaultInfoStream);
+    infoStream = defaultInfoStream;
     maxFieldLength = conf.getMaxFieldLength();
     termIndexInterval = conf.getTermIndexInterval();
     writeLockTimeout = conf.getWriteLockTimeout();
@@ -1057,6 +1054,8 @@ public class IndexWriter implements Clos
     mergePolicy.setIndexWriter(this);
     mergeScheduler = conf.getMergeScheduler();
     mergedSegmentWarmer = conf.getMergedSegmentWarmer();
+    bufferedDeletes = new BufferedDeletes(messageID);
+    bufferedDeletes.setInfoStream(infoStream);
     poolReaders = conf.getReaderPooling();
 
     OpenMode mode = conf.getOpenMode();
@@ -1124,7 +1123,7 @@ public class IndexWriter implements Clos
 
       setRollbackSegmentInfos(segmentInfos);
 
-      docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos());
+      docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos(), bufferedDeletes);
       docWriter.setInfoStream(infoStream);
       docWriter.setMaxFieldLength(maxFieldLength);
 
@@ -1143,7 +1142,6 @@ public class IndexWriter implements Clos
         segmentInfos.changed();
       }
 
-      docWriter.setMaxBufferedDeleteTerms(conf.getMaxBufferedDeleteTerms());
       docWriter.setRAMBufferSizeMB(conf.getRAMBufferSizeMB());
       docWriter.setMaxBufferedDocs(conf.getMaxBufferedDocs());
       pushMaxBufferedDocs();
@@ -1542,7 +1540,6 @@ public class IndexWriter implements Clos
         && maxBufferedDeleteTerms < 1)
       throw new IllegalArgumentException(
           "maxBufferedDeleteTerms must at least be 1 when enabled");
-    docWriter.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
     if (infoStream != null)
       message("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
     // Required so config.getMaxBufferedDeleteTerms returns the right value. But
@@ -1559,7 +1556,7 @@ public class IndexWriter implements Clos
   @Deprecated
   public int getMaxBufferedDeleteTerms() {
     ensureOpen();
-    return docWriter.getMaxBufferedDeleteTerms();
+    return config.getMaxBufferedDeleteTerms();
   }
 
   /** Determines how often segment indices are merged by addDocument().  With
@@ -1624,9 +1621,10 @@ public class IndexWriter implements Clos
    */
   public void setInfoStream(PrintStream infoStream) {
     ensureOpen();
-    setMessageID(infoStream);
+    this.infoStream = infoStream;
     docWriter.setInfoStream(infoStream);
     deleter.setInfoStream(infoStream);
+    bufferedDeletes.setInfoStream(infoStream);
     if (infoStream != null)
       messageState();
   }
@@ -1803,8 +1801,6 @@ public class IndexWriter implements Clos
 
   private void closeInternal(boolean waitForMerges) throws CorruptIndexException, IOException {
 
-    docWriter.pauseAllThreads();
-
     try {
       if (infoStream != null)
         message("now flush at close");
@@ -1859,8 +1855,6 @@ public class IndexWriter implements Clos
         closing = false;
         notifyAll();
         if (!closed) {
-          if (docWriter != null)
-            docWriter.resumeAllThreads();
           if (infoStream != null)
             message("hit exception while closing");
         }
@@ -1868,87 +1862,6 @@ public class IndexWriter implements Clos
     }
   }
 
-  /** Tells the docWriter to close its currently open shared
-   *  doc stores (stored fields & vectors files).
-   *  Return value specifices whether new doc store files are compound or not.
-   */
-  private synchronized boolean flushDocStores() throws IOException {
-
-    if (infoStream != null) {
-      message("flushDocStores segment=" + docWriter.getDocStoreSegment());
-    }
-
-    boolean useCompoundDocStore = false;
-    if (infoStream != null) {
-      message("closeDocStores segment=" + docWriter.getDocStoreSegment());
-    }
-
-    String docStoreSegment;
-
-    boolean success = false;
-    try {
-      docStoreSegment = docWriter.closeDocStore();
-      success = true;
-    } finally {
-      if (!success && infoStream != null) {
-        message("hit exception closing doc store segment");
-      }
-    }
-
-    if (infoStream != null) {
-      message("flushDocStores files=" + docWriter.closedFiles());
-    }
-
-    useCompoundDocStore = mergePolicy.useCompoundDocStore(segmentInfos);
-      
-    if (useCompoundDocStore && docStoreSegment != null && docWriter.closedFiles().size() != 0) {
-      // Now build compound doc store file
-
-      if (infoStream != null) {
-        message("create compound file " + IndexFileNames.segmentFileName(docStoreSegment, IndexFileNames.COMPOUND_FILE_STORE_EXTENSION));
-      }
-
-      success = false;
-
-      final int numSegments = segmentInfos.size();
-      final String compoundFileName = IndexFileNames.segmentFileName(docStoreSegment, IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
-
-      try {
-        CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
-        for (final String file :  docWriter.closedFiles() ) {
-          cfsWriter.addFile(file);
-        }
-      
-        // Perform the merge
-        cfsWriter.close();
-        success = true;
-
-      } finally {
-        if (!success) {
-          if (infoStream != null)
-            message("hit exception building compound file doc store for segment " + docStoreSegment);
-          deleter.deleteFile(compoundFileName);
-          docWriter.abort();
-        }
-      }
-
-      for(int i=0;i<numSegments;i++) {
-        SegmentInfo si = segmentInfos.info(i);
-        if (si.getDocStoreOffset() != -1 &&
-            si.getDocStoreSegment().equals(docStoreSegment))
-          si.setDocStoreIsCompoundFile(true);
-      }
-
-      checkpoint();
-
-      // In case the files we just merged into a CFS were
-      // not previously checkpointed:
-      deleter.deleteNewFiles(docWriter.closedFiles());
-    }
-
-    return useCompoundDocStore;
-  }
-
   /** Returns the Directory used by this index. */
   public Directory getDirectory() {     
     // Pass false because the flush during closing calls getDirectory
@@ -2000,8 +1913,12 @@ public class IndexWriter implements Clos
 
   public synchronized boolean hasDeletions() throws IOException {
     ensureOpen();
-    if (docWriter.hasDeletes())
+    if (bufferedDeletes.any()) {
+      return true;
+    }
+    if (docWriter.anyDeletions()) {
       return true;
+    }
     for (int i = 0; i < segmentInfos.size(); i++)
       if (segmentInfos.info(i).hasDeletions())
         return true;
@@ -2095,13 +2012,14 @@ public class IndexWriter implements Clos
     boolean success = false;
     try {
       try {
-        doFlush = docWriter.addDocument(doc, analyzer);
+        doFlush = docWriter.updateDocument(doc, analyzer, null);
         success = true;
       } finally {
         if (!success) {
 
-          if (infoStream != null)
+          if (infoStream != null) {
             message("hit exception adding document");
+          }
 
           synchronized (this) {
             // If docWriter has some aborted files that were
@@ -2135,9 +2053,9 @@ public class IndexWriter implements Clos
   public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerm(term);
-      if (doFlush)
+      if (docWriter.deleteTerm(term, false)) {
         flush(true, false, false);
+      }
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteDocuments(Term)");
     }
@@ -2159,9 +2077,9 @@ public class IndexWriter implements Clos
   public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
-      boolean doFlush = docWriter.bufferDeleteTerms(terms);
-      if (doFlush)
+      if (docWriter.deleteTerms(terms)) {
         flush(true, false, false);
+      }
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteDocuments(Term..)");
     }
@@ -2180,9 +2098,13 @@ public class IndexWriter implements Clos
    */
   public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQuery(query);
-    if (doFlush)
-      flush(true, false, false);
+    try {
+      if (docWriter.deleteQuery(query)) {
+        flush(true, false, false);
+      }
+    } catch (OutOfMemoryError oom) {
+      handleOOM(oom, "deleteDocuments(Query)");
+    }
   }
 
   /**
@@ -2200,9 +2122,13 @@ public class IndexWriter implements Clos
    */
   public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
-    boolean doFlush = docWriter.bufferDeleteQueries(queries);
-    if (doFlush)
-      flush(true, false, false);
+    try {
+      if (docWriter.deleteQueries(queries)) {
+        flush(true, false, false);
+      }
+    } catch (OutOfMemoryError oom) {
+      handleOOM(oom, "deleteDocuments(Query..)");
+    }
   }
 
   /**
@@ -2252,25 +2178,30 @@ public class IndexWriter implements Clos
       boolean doFlush = false;
       boolean success = false;
       try {
-        doFlush = docWriter.updateDocument(term, doc, analyzer);
+        doFlush = docWriter.updateDocument(doc, analyzer, term);
         success = true;
       } finally {
         if (!success) {
 
-          if (infoStream != null)
+          if (infoStream != null) {
             message("hit exception updating document");
+          }
 
           synchronized (this) {
             // If docWriter has some aborted files that were
             // never incref'd, then we clean them up here
-            final Collection<String> files = docWriter.abortedFiles();
-            if (files != null)
-              deleter.deleteNewFiles(files);
+            if (docWriter != null) {
+              final Collection<String> files = docWriter.abortedFiles();
+              if (files != null) {
+                deleter.deleteNewFiles(files);
+              }
+            }
           }
         }
       }
-      if (doFlush)
+      if (doFlush) {
         flush(true, false, false);
+      }
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "updateDocument");
     }
@@ -2296,13 +2227,13 @@ public class IndexWriter implements Clos
   }
 
   // for test purpose
-  final synchronized int getFlushCount() {
-    return flushCount;
+  final int getFlushCount() {
+    return flushCount.get();
   }
 
   // for test purpose
-  final synchronized int getFlushDeletesCount() {
-    return flushDeletesCount;
+  final int getFlushDeletesCount() {
+    return flushDeletesCount.get();
   }
 
   final String newSegmentName() {
@@ -2434,8 +2365,10 @@ public class IndexWriter implements Clos
     if (maxNumSegments < 1)
       throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
 
-    if (infoStream != null)
+    if (infoStream != null) {
       message("optimize: index now " + segString());
+      message("now flush at optimize");
+    }
 
     flush(true, false, true);
 
@@ -2718,8 +2651,6 @@ public class IndexWriter implements Clos
       message("rollback");
     }
 
-    docWriter.pauseAllThreads();
-
     try {
       finishMerges(false);
 
@@ -2729,6 +2660,8 @@ public class IndexWriter implements Clos
       mergePolicy.close();
       mergeScheduler.close();
 
+      bufferedDeletes.clear();
+
       synchronized(this) {
 
         if (pendingCommit != null) {
@@ -2767,7 +2700,6 @@ public class IndexWriter implements Clos
     } finally {
       synchronized(this) {
         if (!success) {
-          docWriter.resumeAllThreads();
           closing = false;
           notifyAll();
           if (infoStream != null)
@@ -2795,7 +2727,6 @@ public class IndexWriter implements Clos
    *    will receive {@link MergePolicy.MergeAbortedException}s.
    */
   public synchronized void deleteAll() throws IOException {
-    docWriter.pauseAllThreads();
     try {
 
       // Abort any running merges
@@ -2803,7 +2734,6 @@ public class IndexWriter implements Clos
 
       // Remove any buffered docs
       docWriter.abort();
-      docWriter.setFlushedDocCount(0);
 
       // Remove all segments
       segmentInfos.clear();
@@ -2821,7 +2751,6 @@ public class IndexWriter implements Clos
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "deleteAll");
     } finally {
-      docWriter.resumeAllThreads();
       if (infoStream != null) {
         message("hit exception during deleteAll");
       }
@@ -2897,7 +2826,7 @@ public class IndexWriter implements Clos
    * the index files referenced exist (correctly) in the
    * index directory.
    */
-  private synchronized void checkpoint() throws IOException {
+  synchronized void checkpoint() throws IOException {
     changeCount++;
     segmentInfos.changed();
     deleter.checkpoint(segmentInfos, false);
@@ -2986,10 +2915,6 @@ public class IndexWriter implements Clos
       // Register the new segment
       synchronized(this) {
         segmentInfos.add(info);
-        
-        // Notify DocumentsWriter that the flushed count just increased
-        docWriter.updateFlushedDocCount(docCount);
-
         checkpoint();
       }
       
@@ -3117,9 +3042,6 @@ public class IndexWriter implements Clos
       synchronized (this) {
         ensureOpen();
         segmentInfos.addAll(infos);
-        // Notify DocumentsWriter that the flushed count just increased
-        docWriter.updateFlushedDocCount(docCount);
-        
         checkpoint();
       }
       
@@ -3328,192 +3250,92 @@ public class IndexWriter implements Clos
 
     // We can be called during close, when closing==true, so we must pass false to ensureOpen:
     ensureOpen(false);
-    if (doFlush(flushDocStores, flushDeletes) && triggerMerge)
+    if (doFlush(flushDocStores, flushDeletes) && triggerMerge) {
       maybeMerge();
-  }
-
-  // TODO: this method should not have to be entirely
-  // synchronized, ie, merges should be allowed to commit
-  // even while a flush is happening
-  private synchronized final boolean doFlush(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
-    try {
-      try {
-        return doFlushInternal(flushDocStores, flushDeletes);
-      } finally {
-        docWriter.balanceRAM();
-      }
-    } finally {
-      docWriter.clearFlushPending();
     }
   }
 
   // TODO: this method should not have to be entirely
   // synchronized, ie, merges should be allowed to commit
   // even while a flush is happening
-  private synchronized final boolean doFlushInternal(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+  private synchronized final boolean doFlush(boolean closeDocStores, boolean applyAllDeletes) throws CorruptIndexException, IOException {
 
     if (hitOOM) {
       throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
     }
 
-    ensureOpen(false);
+    doBeforeFlush();
 
     assert testPoint("startDoFlush");
 
-    doBeforeFlush();
-    
-    flushCount++;
+    // We may be flushing because it was triggered by doc
+    // count, del count, ram usage (in which case flush
+    // pending is already set), or we may be flushing
+    // due to external event eg getReader or commit is
+    // called (in which case we now set it, and this will
+    // pause all threads):
+    flushControl.setFlushPendingNoWait("explicit flush");
 
-    // If we are flushing because too many deletes
-    // accumulated, then we should apply the deletes to free
-    // RAM:
-    flushDeletes |= docWriter.doApplyDeletes();
-
-    // Make sure no threads are actively adding a document.
-    // Returns true if docWriter is currently aborting, in
-    // which case we skip flushing this segment
-    if (infoStream != null) {
-      message("flush: now pause all indexing threads");
-    }
-    if (docWriter.pauseAllThreads()) {
-      docWriter.resumeAllThreads();
-      return false;
-    }
+    boolean success = false;
 
     try {
 
-      SegmentInfo newSegment = null;
-
-      final int numDocs = docWriter.getNumDocsInRAM();
-
-      // Always flush docs if there are any
-      boolean flushDocs = numDocs > 0;
-
-      String docStoreSegment = docWriter.getDocStoreSegment();
-
-      assert docStoreSegment != null || numDocs == 0: "dss=" + docStoreSegment + " numDocs=" + numDocs;
-
-      if (docStoreSegment == null)
-        flushDocStores = false;
-
-      int docStoreOffset = docWriter.getDocStoreOffset();
-
-      boolean docStoreIsCompoundFile = false;
-
       if (infoStream != null) {
-        message("  flush: segment=" + docWriter.getSegment() +
-                " docStoreSegment=" + docWriter.getDocStoreSegment() +
-                " docStoreOffset=" + docStoreOffset +
-                " flushDocs=" + flushDocs +
-                " flushDeletes=" + flushDeletes +
-                " flushDocStores=" + flushDocStores +
-                " numDocs=" + numDocs +
-                " numBufDelTerms=" + docWriter.getNumBufferedDeleteTerms());
+        message("  start flush: applyAllDeletes=" + applyAllDeletes + " closeDocStores=" + closeDocStores);
         message("  index before flush " + segString());
       }
-
-      // Check if the doc stores must be separately flushed
-      // because other segments, besides the one we are about
-      // to flush, reference it
-      if (flushDocStores && (!flushDocs || !docWriter.getSegment().equals(docWriter.getDocStoreSegment()))) {
-        // We must separately flush the doc store
-        if (infoStream != null)
-          message("  flush shared docStore segment " + docStoreSegment);
-      
-        docStoreIsCompoundFile = flushDocStores();
-        flushDocStores = false;
-      }
-
-      String segment = docWriter.getSegment();
-
-      // If we are flushing docs, segment must not be null:
-      assert segment != null || !flushDocs;
-
-      if (flushDocs) {
-
-        boolean success = false;
-        final int flushedDocCount;
-
-        try {
-          flushedDocCount = docWriter.flush(flushDocStores);
-          if (infoStream != null) {
-            message("flushedFiles=" + docWriter.getFlushedFiles());
-          }
-          success = true;
-        } finally {
-          if (!success) {
-            if (infoStream != null)
-              message("hit exception flushing segment " + segment);
-            deleter.refresh(segment);
-          }
-        }
-        
-        if (0 == docStoreOffset && flushDocStores) {
-          // This means we are flushing private doc stores
-          // with this segment, so it will not be shared
-          // with other segments
-          assert docStoreSegment != null;
-          assert docStoreSegment.equals(segment);
-          docStoreOffset = -1;
-          docStoreIsCompoundFile = false;
-          docStoreSegment = null;
-        }
-
-        // Create new SegmentInfo, but do not add to our
-        // segmentInfos until deletes are flushed
-        // successfully.
-        newSegment = new SegmentInfo(segment,
-                                     flushedDocCount,
-                                     directory, false, true,
-                                     docStoreOffset, docStoreSegment,
-                                     docStoreIsCompoundFile,    
-                                     docWriter.hasProx());
+    
+      final SegmentInfo newSegment = docWriter.flush(this, closeDocStores, deleter, mergePolicy, segmentInfos);
+      if (newSegment != null) {
         setDiagnostics(newSegment, "flush");
-      }
-
-      docWriter.pushDeletes();
-
-      if (flushDocs) {
         segmentInfos.add(newSegment);
         checkpoint();
       }
 
-      if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
-        // Now build compound file
-        boolean success = false;
-        try {
-          docWriter.createCompoundFile(segment);
-          success = true;
-        } finally {
-          if (!success) {
-            if (infoStream != null)
-              message("hit exception creating compound file for newly flushed segment " + segment);
-            deleter.deleteFile(IndexFileNames.segmentFileName(segment, IndexFileNames.COMPOUND_FILE_EXTENSION));
+      if (!applyAllDeletes) {
+        // If deletes alone are consuming > 1/2 our RAM
+        // buffer, force them all to apply now. This is to
+        // prevent too-frequent flushing of a long tail of
+        // tiny segments:
+        if (flushControl.getFlushDeletes() ||
+            (config.getRAMBufferSizeMB() != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+             bufferedDeletes.bytesUsed() > (1024*1024*config.getRAMBufferSizeMB()/2))) {
+          applyAllDeletes = true;
+          if (infoStream != null) {
+            message("force apply deletes bytesUsed=" + bufferedDeletes.bytesUsed() + " vs ramBuffer=" + (1024*1024*config.getRAMBufferSizeMB()));
           }
         }
-
-        newSegment.setUseCompoundFile(true);
-        checkpoint();
       }
 
-      if (flushDeletes) {
-        applyDeletes();
+      if (applyAllDeletes) {
+        if (infoStream != null) {
+          message("apply all deletes during flush");
+        }
+        flushDeletesCount.incrementAndGet();
+        if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, segmentInfos)) {
+          checkpoint();
+        }
+        flushControl.clearDeletes();
+      } else if (infoStream != null) {
+        message("don't apply deletes now delTermCount=" + bufferedDeletes.numTerms() + " bytesUsed=" + bufferedDeletes.bytesUsed());
       }
-      
-      if (flushDocs)
-        checkpoint();
 
       doAfterFlush();
+      flushCount.incrementAndGet();
+
+      success = true;
 
-      return flushDocs;
+      return newSegment != null;
 
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "doFlush");
       // never hit
       return false;
     } finally {
-      docWriter.clearFlushPending();
-      docWriter.resumeAllThreads();
+      flushControl.clearFlushPending();
+      if (!success && infoStream != null) {
+        message("hit exception during flush");
+      }
     }
   }
 
@@ -3522,7 +3344,7 @@ public class IndexWriter implements Clos
    */
   public final long ramSizeInBytes() {
     ensureOpen();
-    return docWriter.getRAMUsed();
+    return docWriter.bytesUsed() + bufferedDeletes.bytesUsed();
   }
 
   /** Expert:  Return the number of documents currently
@@ -3565,7 +3387,7 @@ public class IndexWriter implements Clos
    *  saves the resulting deletes file (incrementing the
    *  delete generation for merge.info).  If no deletes were
    *  flushed, no new deletes file is saved. */
-  synchronized private void commitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergeReader) throws IOException {
+  synchronized private void commitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergedReader) throws IOException {
 
     assert testPoint("startCommitMergeDeletes");
 
@@ -3602,7 +3424,7 @@ public class IndexWriter implements Clos
               assert currentReader.isDeleted(j);
             else {
               if (currentReader.isDeleted(j)) {
-                mergeReader.doDelete(docUpto);
+                mergedReader.doDelete(docUpto);
                 delCount++;
               }
               docUpto++;
@@ -3616,7 +3438,7 @@ public class IndexWriter implements Clos
         // does:
         for(int j=0; j<docCount; j++) {
           if (currentReader.isDeleted(j)) {
-            mergeReader.doDelete(docUpto);
+            mergedReader.doDelete(docUpto);
             delCount++;
           }
           docUpto++;
@@ -3626,13 +3448,13 @@ public class IndexWriter implements Clos
         docUpto += info.docCount;
     }
 
-    assert mergeReader.numDeletedDocs() == delCount;
+    assert mergedReader.numDeletedDocs() == delCount;
 
-    mergeReader.hasChanges = delCount > 0;
+    mergedReader.hasChanges = delCount > 0;
   }
 
   /* FIXME if we want to support non-contiguous segment merges */
-  synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, int mergedDocCount, SegmentReader mergedReader) throws IOException {
+  synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, SegmentReader mergedReader) throws IOException {
 
     assert testPoint("startCommitMerge");
 
@@ -3660,7 +3482,6 @@ public class IndexWriter implements Clos
     final int start = ensureContiguousMerge(merge);
 
     commitMergedDeletes(merge, mergedReader);
-    docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
       
     // If the doc store we are using has been closed and
     // is in now compound format (but wasn't when we
@@ -3673,7 +3494,7 @@ public class IndexWriter implements Clos
     segmentInfos.subList(start, start + merge.segments.size()).clear();
     assert !segmentInfos.contains(merge.info);
     segmentInfos.add(start, merge.info);
-
+    
     closeMergeReaders(merge, false);
 
     // Must note the change to segmentInfos so any commits
@@ -3684,6 +3505,12 @@ public class IndexWriter implements Clos
     // them so that they don't bother writing them to
     // disk, updating SegmentInfo, etc.:
     readerPool.clear(merge.segments);
+    
+    // remove pending deletes of the segments 
+    // that were merged, moving them onto the segment just
+    // before the merged segment
+    // Lock order: IW -> BD
+    bufferedDeletes.commitMerge(merge);
 
     if (merge.optimize) {
       // cascade the optimize:
@@ -3843,10 +3670,17 @@ public class IndexWriter implements Clos
   final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException {
     boolean success = false;
     try {
+      // Lock order: IW -> BD
+      if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, merge.segments)) {
+        checkpoint();
+      }
       _mergeInit(merge);
       success = true;
     } finally {
       if (!success) {
+        if (infoStream != null) {
+          message("hit exception in mergeInit");
+        }
         mergeFinish(merge);
       }
     }
@@ -3869,9 +3703,7 @@ public class IndexWriter implements Clos
 
     if (merge.isAborted())
       return;
-
-    applyDeletes();
-
+    
     final SegmentInfos sourceSegments = merge.segments;
     final int end = sourceSegments.size();
 
@@ -4061,10 +3893,11 @@ public class IndexWriter implements Clos
     if (suppressExceptions) {
       // Suppress any new exceptions so we throw the
       // original cause
+      boolean anyChanges = false;
       for (int i=0;i<numSegments;i++) {
         if (merge.readers[i] != null) {
           try {
-            readerPool.release(merge.readers[i], false);
+            anyChanges |= readerPool.release(merge.readers[i], false);
           } catch (Throwable t) {
           }
           merge.readers[i] = null;
@@ -4081,6 +3914,9 @@ public class IndexWriter implements Clos
           merge.readersClone[i] = null;
         }
       }
+      if (anyChanges) {
+        checkpoint();
+      }
     } else {
       for (int i=0;i<numSegments;i++) {
         if (merge.readers[i] != null) {
@@ -4296,15 +4132,21 @@ public class IndexWriter implements Clos
           mergedSegmentWarmer.warm(mergedReader);
         }
 
-        if (!commitMerge(merge, merger, mergedDocCount, mergedReader)) {
+        if (!commitMerge(merge, merger, mergedReader)) {
           // commitMerge will return false if this merge was aborted
           return 0;
         }
       } finally {
         synchronized(this) {
-          readerPool.release(mergedReader);
+          if (readerPool.release(mergedReader)) {
+            // Must checkpoint after releasing the
+            // mergedReader since it may have written a new
+            // deletes file:
+            checkpoint();
+          }
         }
       }
+
       success = true;
 
     } finally {
@@ -4324,37 +4166,14 @@ public class IndexWriter implements Clos
       mergeExceptions.add(merge);
   }
 
-  // Apply buffered deletes to all segments.
-  private final synchronized boolean applyDeletes() throws CorruptIndexException, IOException {
-    assert testPoint("startApplyDeletes");
-    if (infoStream != null) {
-      message("applyDeletes");
-    }
-    flushDeletesCount++;
-    boolean success = false;
-    boolean changed;
-    try {
-      changed = docWriter.applyDeletes(segmentInfos);
-      success = true;
-    } finally {
-      if (!success && infoStream != null) {
-        message("hit exception flushing deletes");
-      }
-    }
-
-    if (changed)
-      checkpoint();
-    return changed;
-  }
-
   // For test purposes.
-  final synchronized int getBufferedDeleteTermsSize() {
-    return docWriter.getBufferedDeleteTerms().size();
+  final int getBufferedDeleteTermsSize() {
+    return docWriter.getPendingDeletes().terms.size();
   }
 
   // For test purposes.
-  final synchronized int getNumBufferedDeleteTerms() {
-    return docWriter.getNumBufferedDeleteTerms();
+  final int getNumBufferedDeleteTerms() {
+    return docWriter.getPendingDeletes().numTermDeletes.get();
   }
 
   // utility routines for tests
@@ -4676,14 +4495,13 @@ public class IndexWriter implements Clos
   //   finishStartCommit
   //   startCommitMergeDeletes
   //   startMergeInit
-  //   startApplyDeletes
   //   DocumentsWriter.ThreadState.init start
   boolean testPoint(String name) {
     return true;
   }
 
   synchronized boolean nrtIsCurrent(SegmentInfos infos) {
-    return infos.version == segmentInfos.version && !docWriter.anyChanges();
+    return infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedDeletes.any();
   }
 
   synchronized boolean isClosed() {
@@ -4750,4 +4568,123 @@ public class IndexWriter implements Clos
     return payloadProcessorProvider;
   }
 
+  // decides when flushes happen
+  final class FlushControl {
+
+    private boolean flushPending;
+    private boolean flushDeletes;
+    private int delCount;
+    private int docCount;
+    private boolean flushing;
+
+    private synchronized boolean setFlushPending(String reason, boolean doWait) {
+      if (flushPending || flushing) {
+        if (doWait) {
+          while(flushPending || flushing) {
+            try {
+              wait();
+            } catch (InterruptedException ie) {
+              throw new ThreadInterruptedException(ie);
+            }
+          }
+        }
+        return false;
+      } else {
+        if (infoStream != null) {
+          message("now trigger flush reason=" + reason);
+        }
+        flushPending = true;
+        return flushPending;
+      }
+    }
+
+    public synchronized void setFlushPendingNoWait(String reason) {
+      setFlushPending(reason, false);
+    }
+
+    public synchronized boolean getFlushPending() {
+      return flushPending;
+    }
+
+    public synchronized boolean getFlushDeletes() {
+      return flushDeletes;
+    }
+
+    public synchronized void clearFlushPending() {
+      if (infoStream != null) {
+        message("clearFlushPending");
+      }
+      flushPending = false;
+      flushDeletes = false;
+      docCount = 0;
+      notifyAll();
+    }
+
+    public synchronized void clearDeletes() {
+      delCount = 0;
+    }
+
+    public synchronized boolean waitUpdate(int docInc, int delInc) {
+      return waitUpdate(docInc, delInc, false);
+    }
+
+    public synchronized boolean waitUpdate(int docInc, int delInc, boolean skipWait) {
+      while(flushPending) {
+        try {
+          wait();
+        } catch (InterruptedException ie) {
+          throw new ThreadInterruptedException(ie);
+        }
+      }
+
+      // skipWait is only used when a thread is BOTH adding
+      // a doc and buffering a del term, and, the adding of
+      // the doc already triggered a flush
+      if (skipWait) {
+        docCount += docInc;
+        delCount += delInc;
+        return false;
+      }
+
+      final int maxBufferedDocs = config.getMaxBufferedDocs();
+      if (maxBufferedDocs != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+          (docCount+docInc) >= maxBufferedDocs) {
+        return setFlushPending("maxBufferedDocs", true);
+      }
+      docCount += docInc;
+
+      final int maxBufferedDeleteTerms = config.getMaxBufferedDeleteTerms();
+      if (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
+          (delCount+delInc) >= maxBufferedDeleteTerms) {
+        flushDeletes = true;
+        return setFlushPending("maxBufferedDeleteTerms", true);
+      }
+      delCount += delInc;
+
+      return flushByRAMUsage("add delete/doc");
+    }
+
+    public synchronized boolean flushByRAMUsage(String reason) {
+      final double ramBufferSizeMB = config.getRAMBufferSizeMB();
+      if (ramBufferSizeMB != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
+        final long limit = (long) (ramBufferSizeMB*1024*1024);
+        long used = bufferedDeletes.bytesUsed() + docWriter.bytesUsed();
+        if (used >= limit) {
+          
+          // DocumentsWriter may be able to free up some
+          // RAM:
+          // Lock order: FC -> DW
+          docWriter.balanceRAM();
+
+          used = bufferedDeletes.bytesUsed() + docWriter.bytesUsed();
+          if (used >= limit) {
+            return setFlushPending("ram full: " + reason, false);
+          }
+        }
+      }
+      return false;
+    }
+  }
+
+  final FlushControl flushControl = new FlushControl();
 }
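
For context, a minimal standalone sketch (not part of this commit) of the two RAM thresholds the new FlushControl and doFlush() above apply: a flush becomes pending once DocumentsWriter plus buffered-delete RAM reaches the configured buffer size, and buffered deletes are force-applied once they alone exceed half of it. The class and the DISABLE_AUTO_FLUSH constant below are illustrative stand-ins, not Lucene APIs:

  public class FlushTriggerSketch {
    // Assumed stand-in for IndexWriterConfig.DISABLE_AUTO_FLUSH.
    static final double DISABLE_AUTO_FLUSH = -1;

    // Mirrors FlushControl.flushByRAMUsage(): total buffered RAM at or over the limit.
    static boolean ramFull(double ramBufferSizeMB, long docWriterBytes, long deleteBytes) {
      if (ramBufferSizeMB == DISABLE_AUTO_FLUSH) {
        return false;
      }
      final long limit = (long) (ramBufferSizeMB * 1024 * 1024);
      return docWriterBytes + deleteBytes >= limit;
    }

    // Mirrors the check in doFlush(): deletes alone consuming more than half the
    // RAM buffer force applyAllDeletes, to avoid a long tail of tiny segments.
    static boolean forceApplyDeletes(double ramBufferSizeMB, long deleteBytes) {
      return ramBufferSizeMB != DISABLE_AUTO_FLUSH
          && deleteBytes > (long) (1024 * 1024 * ramBufferSizeMB / 2);
    }

    public static void main(String[] args) {
      // Example: 16 MB buffer, 10 MB of buffered docs, 9 MB of buffered deletes.
      System.out.println(ramFull(16.0, 10L << 20, 9L << 20));       // true: 19 MB >= 16 MB
      System.out.println(forceApplyDeletes(16.0, 9L << 20));        // true: 9 MB > 8 MB
    }
  }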

Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/ParallelPostingsArray.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/ParallelPostingsArray.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/ParallelPostingsArray.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/ParallelPostingsArray.java Fri Dec 17 09:52:50 2010
@@ -1,7 +1,5 @@
 package org.apache.lucene.index;
 
-import org.apache.lucene.util.ArrayUtil;
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -19,9 +17,11 @@ import org.apache.lucene.util.ArrayUtil;
  * limitations under the License.
  */
 
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
 
 class ParallelPostingsArray {
-  final static int BYTES_PER_POSTING = 3 * DocumentsWriter.INT_NUM_BYTE;
+  final static int BYTES_PER_POSTING = 3 * RamUsageEstimator.NUM_BYTES_INT;
 
   final int size;
   final int[] textStarts;

Copied: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java (from r1044635, lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java?p2=lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java&p1=lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java&r1=1044635&r2=1050331&rev=1050331&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java Fri Dec 17 09:52:50 2010
@@ -48,19 +48,19 @@ class SegmentDeletes {
      Term's text is String (OBJ_HEADER + 4*INT + POINTER +
      OBJ_HEADER + string.length*CHAR).  Integer is
      OBJ_HEADER + INT. */
-  final static int BYTES_PER_DEL_TERM = 8*RamUsageEstimator.NUM_BYTES_OBJ_REF + 5*RamUsageEstimator.NUM_BYTES_OBJ_HEADER + 6*RamUsageEstimator.NUM_BYTES_INT;
+  final static int BYTES_PER_DEL_TERM = 8*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 5*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 6*RamUsageEstimator.NUM_BYTES_INT;
 
   /* Rough logic: del docIDs are List<Integer>.  Say list
      allocates ~2X size (2*POINTER).  Integer is OBJ_HEADER
      + int */
-  final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJ_REF + RamUsageEstimator.NUM_BYTES_OBJ_HEADER + RamUsageEstimator.NUM_BYTES_INT;
+  final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT;
 
   /* Rough logic: HashMap has an array[Entry] w/ varying
      load factor (say 2 * POINTER).  Entry is object w/
      Query key, Integer val, int hash, Entry next
      (OBJ_HEADER + 3*POINTER + INT).  Query we often
      undercount (say 24 bytes).  Integer is OBJ_HEADER + INT. */
-  final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJ_REF + 2*RamUsageEstimator.NUM_BYTES_OBJ_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24;
+  final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24;
 
   // TODO: many of the deletes stored here will map to
   // Integer.MAX_VALUE; we could be more efficient for this
@@ -165,7 +165,7 @@ class SegmentDeletes {
     terms.put(term, Integer.valueOf(docIDUpto));
     numTermDeletes.incrementAndGet();
     if (current == null) {
-      bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length);
+      bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.text.length() * RamUsageEstimator.NUM_BYTES_CHAR);
     }
   }
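
For a sense of scale, a rough standalone sketch (not part of this commit) of the per-term accounting the hunk above changes; the reference, header, int, and char sizes are assumptions for a plain 64-bit JVM, standing in for the RamUsageEstimator constants the real code reads:

  public class DeleteTermRamSketch {
    // Assumed sizes; the real code uses RamUsageEstimator.NUM_BYTES_OBJECT_REF, etc.
    static final int REF = 8, HEADER = 16, INT = 4, CHAR = 2;

    // Same formula as BYTES_PER_DEL_TERM above.
    static final int BYTES_PER_DEL_TERM = 8 * REF + 5 * HEADER + 6 * INT;  // 168 under these assumptions

    public static void main(String[] args) {
      String termText = "lucene";
      // The 3.x backport counts the Term's chars, where trunk counts term.bytes.length:
      long bytes = BYTES_PER_DEL_TERM + (long) termText.length() * CHAR;
      System.out.println(bytes);  // 168 + 6*2 = 180
    }
  }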
     

Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentInfo.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentInfo.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentInfo.java Fri Dec 17 09:52:50 2010
@@ -512,6 +512,10 @@ public final class SegmentInfo {
     return docStoreSegment;
   }
   
+  public void setDocStoreSegment(String segment) {
+    docStoreSegment = segment;
+  }
+  
   void setDocStoreOffset(int offset) {
     docStoreOffset = offset;
     clearFiles();
@@ -728,6 +732,12 @@ public final class SegmentInfo {
     
     if (docStoreOffset != -1) {
       s.append("->").append(docStoreSegment);
+      if (docStoreIsCompoundFile) {
+        s.append('c');
+      } else {
+        s.append('C');
+      }
+      s.append('+').append(docStoreOffset);
     }
 
     return s.toString();

Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java Fri Dec 17 09:52:50 2010
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util.RamUsageEstimator;
 
 final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {
 
@@ -301,7 +302,7 @@ final class TermVectorsTermsWriterPerFie
 
     @Override
     int bytesPerPosting() {
-      return super.bytesPerPosting() + 3 * DocumentsWriter.INT_NUM_BYTE;
+      return super.bytesPerPosting() + 3 * RamUsageEstimator.NUM_BYTES_INT;
     }
   }
 }

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestDemo.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestDemo.java Fri Dec 17 09:52:50 2010
@@ -49,6 +49,7 @@ public class TestDemo extends LuceneTest
     // To store an index on disk, use this instead:
     //Directory directory = FSDirectory.open("/tmp/testindex");
     RandomIndexWriter iwriter = new RandomIndexWriter(random, directory);
+    iwriter.w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     String text = "This is the text to be indexed.";
     doc.add(newField("fieldname", text, Field.Store.YES,

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java Fri Dec 17 09:52:50 2010
@@ -84,6 +84,10 @@ public class TestSearchForDuplicates ext
       lmp.setUseCompoundFile(useCompoundFiles);
       lmp.setUseCompoundDocStore(useCompoundFiles);
       IndexWriter writer = new IndexWriter(directory, conf);
+      if (VERBOSE) {
+        System.out.println("TEST: now build index");
+        writer.setInfoStream(System.out);
+      }
 
       final int MAX_DOCS = 225;
 

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Fri Dec 17 09:52:50 2010
@@ -57,9 +57,9 @@ public class TestConcurrentMergeSchedule
             isClose = true;
           }
         }
-        if (isDoFlush && !isClose) {
+        if (isDoFlush && !isClose && random.nextBoolean()) {
           hitExc = true;
-          throw new IOException("now failing during flush");
+          throw new IOException(Thread.currentThread().getName() + ": now failing during flush");
         }
       }
     }
@@ -73,12 +73,17 @@ public class TestConcurrentMergeSchedule
     directory.failOn(failure);
 
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(2));
+    writer.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
     int extraCount = 0;
 
     for(int i=0;i<10;i++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + i);
+      }
+
       for(int j=0;j<20;j++) {
         idField.setValue(Integer.toString(i*20+j));
         writer.addDocument(doc);
@@ -97,10 +102,14 @@ public class TestConcurrentMergeSchedule
           }
           extraCount++;
         } catch (IOException ioe) {
+          if (VERBOSE) {
+            ioe.printStackTrace(System.out);
+          }
           failure.clearDoFail();
           break;
         }
       }
+      assertEquals(20*(i+1)+extraCount, writer.numDocs());
     }
 
     writer.close();
@@ -155,8 +164,12 @@ public class TestConcurrentMergeSchedule
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
         .setMaxBufferedDocs(2));
+    writer.setInfoStream(VERBOSE ? System.out : null);
 
     for(int iter=0;iter<7;iter++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter);
+      }
 
       for(int j=0;j<21;j++) {
         Document doc = new Document();

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Fri Dec 17 09:52:50 2010
@@ -1235,6 +1235,9 @@ public class TestIndexWriter extends Luc
     doc.add(idField);
 
     for(int pass=0;pass<2;pass++) {
+      if (VERBOSE) {
+        System.out.println("TEST: pass=" + pass);
+      }
 
       IndexWriterConfig conf = newIndexWriterConfig(
           TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE)
@@ -1244,15 +1247,17 @@ public class TestIndexWriter extends Luc
       }
       IndexWriter writer = new IndexWriter(directory, conf);
       ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
+      writer.setInfoStream(VERBOSE ? System.out : null);
 
       // have to use compound file to prevent running out of
       // descripters when newDirectory returns a file-system
       // backed directory:
       ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true);
       
-      //System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
       for(int iter=0;iter<10;iter++) {
-        //System.out.println("TEST: iter=" + iter);
+        if (VERBOSE) {
+          System.out.println("TEST: iter=" + iter);
+        }
         for(int j=0;j<199;j++) {
           idField.setValue(Integer.toString(iter*201+j));
           writer.addDocument(doc);
@@ -1297,8 +1302,9 @@ public class TestIndexWriter extends Luc
             }
           };
 
-        if (failure.size() > 0)
+        if (failure.size() > 0) {
           throw failure.get(0);
+        }
 
         t1.start();
 
@@ -1311,6 +1317,7 @@ public class TestIndexWriter extends Luc
 
         // Reopen
         writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.APPEND));
+        writer.setInfoStream(VERBOSE ? System.out : null);
       }
       writer.close();
     }
@@ -2563,7 +2570,7 @@ public class TestIndexWriter extends Luc
 
     Directory dir = newDirectory();
     FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
-    //w.setInfoStream(System.out);
+    w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
     int num = 6*RANDOM_MULTIPLIER;
@@ -2571,6 +2578,9 @@ public class TestIndexWriter extends Luc
       int count = 0;
 
       final boolean doIndexing = r.nextBoolean();
+      if (VERBOSE) {
+        System.out.println("TEST: iter doIndexing=" + doIndexing);
+      }
       if (doIndexing) {
         // Add docs until a flush is triggered
         final int startFlushCount = w.flushCount;

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Fri Dec 17 09:52:50 2010
@@ -114,6 +114,9 @@ public class TestIndexWriterDelete exten
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDeleteTerms(1));
+
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.addDocument(new Document());
     writer.deleteDocuments(new Term("foobar", "1"));
     writer.deleteDocuments(new Term("foobar", "1"));
     writer.deleteDocuments(new Term("foobar", "1"));
@@ -125,11 +128,14 @@ public class TestIndexWriterDelete exten
   // test when delete terms only apply to ram segments
   public void testRAMDeletes() throws IOException {
     for(int t=0;t<2;t++) {
+      if (VERBOSE) {
+        System.out.println("TEST: t=" + t);
+      }
       Directory dir = newDirectory();
       IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(4)
           .setMaxBufferedDeleteTerms(4));
-
+      modifier.setInfoStream(VERBOSE ? System.out : null);
       int id = 0;
       int value = 100;
 
@@ -439,12 +445,16 @@ public class TestIndexWriterDelete exten
 
     // Iterate w/ ever increasing free disk space:
     while (!done) {
+      if (VERBOSE) {
+        System.out.println("TEST: cycle");
+      }
       MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
       dir.setPreventDoubleWrite(false);
       IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setMaxBufferedDocs(1000)
           .setMaxBufferedDeleteTerms(1000).setMergeScheduler(new ConcurrentMergeScheduler()));
       ((ConcurrentMergeScheduler) modifier.getConfig().getMergeScheduler()).setSuppressExceptions();
+      modifier.setInfoStream(VERBOSE ? System.out : null);
 
       // For each disk size, first try to commit against
       // dir that will hit random IOExceptions & disk
@@ -453,6 +463,9 @@ public class TestIndexWriterDelete exten
       boolean success = false;
 
       for (int x = 0; x < 2; x++) {
+        if (VERBOSE) {
+          System.out.println("TEST: x=" + x);
+        }
 
         double rate = 0.1;
         double diskRatio = ((double)diskFree) / diskUsage;

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Fri Dec 17 09:52:50 2010
@@ -50,7 +50,7 @@ public class TestIndexWriterExceptions e
     IndexWriter writer;
 
     final Random r = new Random(random.nextLong());
-    Throwable failure;
+    volatile Throwable failure;
 
     public IndexerThread(int i, IndexWriter writer) {
       setName("Indexer " + i);
@@ -78,6 +78,9 @@ public class TestIndexWriterExceptions e
       final long stopTime = System.currentTimeMillis() + 500;
 
       do {
+        if (VERBOSE) {
+          System.out.println(Thread.currentThread().getName() + ": TEST: IndexerThread: cycle");
+        }
         doFail.set(this);
         final String id = ""+r.nextInt(50);
         idField.setValue(id);
@@ -135,7 +138,7 @@ public class TestIndexWriterExceptions e
       if (doFail.get() != null && !name.equals("startDoFlush") && r.nextInt(20) == 17) {
         if (VERBOSE) {
           System.out.println(Thread.currentThread().getName() + ": NOW FAIL: " + name);
-          //new Throwable().printStackTrace(System.out);
+          new Throwable().printStackTrace(System.out);
         }
         throw new RuntimeException(Thread.currentThread().getName() + ": intentionally failing at " + name);
       }
@@ -144,16 +147,23 @@ public class TestIndexWriterExceptions e
   }
 
   public void testRandomExceptions() throws Throwable {
+    if (VERBOSE) {
+      System.out.println("\nTEST: start testRandomExceptions");
+    }
     MockDirectoryWrapper dir = newDirectory();
 
     MockIndexWriter writer  = new MockIndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
         .setRAMBufferSizeMB(0.1).setMergeScheduler(new ConcurrentMergeScheduler()));
     ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
     //writer.setMaxBufferedDocs(10);
+    if (VERBOSE) {
+      System.out.println("TEST: initial commit");
+    }
     writer.commit();
 
-    if (VERBOSE)
+    if (VERBOSE) {
       writer.setInfoStream(System.out);
+    }
 
     IndexerThread thread = new IndexerThread(0, writer);
     thread.run();
@@ -162,6 +172,9 @@ public class TestIndexWriterExceptions e
       fail("thread " + thread.getName() + ": hit unexpected failure");
     }
 
+    if (VERBOSE) {
+      System.out.println("TEST: commit after thread start");
+    }
     writer.commit();
 
     try {
@@ -191,8 +204,9 @@ public class TestIndexWriterExceptions e
     //writer.setMaxBufferedDocs(10);
     writer.commit();
 
-    if (VERBOSE)
+    if (VERBOSE) {
       writer.setInfoStream(System.out);
+    }
 
     final int NUM_THREADS = 4;
 
@@ -293,6 +307,7 @@ public class TestIndexWriterExceptions e
   public void testExceptionJustBeforeFlush() throws IOException {
     Directory dir = newDirectory();
     MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+    w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     doc.add(newField("field", "a field", Field.Store.YES,
                       Field.Index.ANALYZED));
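
The IndexerThread.failure field above is made volatile because it is written by the indexer thread and inspected by the test's main thread; without that, the main thread is not guaranteed to observe the failure. A small sketch of the same hand-off using an AtomicReference instead (an alternative shown for illustration, not what the patch does):

import java.util.concurrent.atomic.AtomicReference;

public class WorkerFailureSketch {
  // Publishes the first failure from the worker to whichever thread checks it later.
  static final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();

  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread() {
      @Override
      public void run() {
        try {
          throw new RuntimeException("simulated indexing failure");
        } catch (Throwable t) {
          failure.compareAndSet(null, t);   // keep only the first failure seen
        }
      }
    };
    worker.start();
    worker.join();
    if (failure.get() != null) {
      System.out.println("worker failed: " + failure.get());
    }
  }
}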

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java Fri Dec 17 09:52:50 2010
@@ -47,29 +47,35 @@ public class TestIndexWriterOnDiskFull e
   public void testAddDocumentOnDiskFull() throws IOException {
 
     for(int pass=0;pass<2;pass++) {
-      if (VERBOSE)
+      if (VERBOSE) {
         System.out.println("TEST: pass=" + pass);
+      }
       boolean doAbort = pass == 1;
       long diskFree = 200;
       while(true) {
-        if (VERBOSE)
+        if (VERBOSE) {
           System.out.println("TEST: cycle: diskFree=" + diskFree);
+        }
         MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
         dir.setMaxSizeInBytes(diskFree);
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
         writer.setInfoStream(VERBOSE ? System.out : null);
         MergeScheduler ms = writer.getConfig().getMergeScheduler();
-        if (ms instanceof ConcurrentMergeScheduler)
+        if (ms instanceof ConcurrentMergeScheduler) {
           // This test intentionally produces exceptions
           // in the threads that CMS launches; we don't
           // want to pollute test output with these.
           ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
+        }
 
         boolean hitError = false;
         try {
           for(int i=0;i<200;i++) {
             addDoc(writer);
           }
+          if (VERBOSE) {
+            System.out.println("TEST: done adding docs; now commit");
+          }
           writer.commit();
         } catch (IOException e) {
           if (VERBOSE) {
@@ -81,13 +87,19 @@ public class TestIndexWriterOnDiskFull e
 
         if (hitError) {
           if (doAbort) {
+            if (VERBOSE) {
+              System.out.println("TEST: now rollback");
+            }
             writer.rollback();
           } else {
             try {
+              if (VERBOSE) {
+                System.out.println("TEST: now close");
+              }
               writer.close();
             } catch (IOException e) {
               if (VERBOSE) {
-                System.out.println("TEST: exception on close");
+                System.out.println("TEST: exception on close; retry w/ no disk space limit");
                 e.printStackTrace(System.out);
               }
               dir.setMaxSizeInBytes(0);
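
TestIndexWriterOnDiskFull leans on MockDirectoryWrapper's size cap: once setMaxSizeInBytes is exceeded, writes fail with a fake disk-full IOException (see the MockIndexOutputWrapper change at the end of this commit). A rough sketch of driving that failure path; the cap value and document content are chosen arbitrarily, and MockDirectoryWrapper/MockAnalyzer are test-framework classes:

import java.io.IOException;
import java.util.Random;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class FakeDiskFullSketch {
  public static void main(String[] args) throws IOException {
    MockDirectoryWrapper dir = new MockDirectoryWrapper(new Random(), new RAMDirectory());
    dir.setMaxSizeInBytes(200);   // tiny cap so the fake disk full triggers almost immediately
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
        Version.LUCENE_CURRENT, new MockAnalyzer()));
    try {
      for (int i = 0; i < 200; i++) {
        Document doc = new Document();
        doc.add(new Field("content", "aaa bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
        w.addDocument(doc);
      }
      w.commit();
    } catch (IOException e) {
      // Expected: the exceeded cap surfaces as an IOException from the mock output.
      System.out.println("hit fake disk full: " + e.getMessage());
    } finally {
      w.rollback();   // discard partial changes, as the doAbort pass of the test does
      dir.close();
    }
  }
}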

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java Fri Dec 17 09:52:50 2010
@@ -105,6 +105,9 @@ public class TestIndexWriterWithThreads 
     int NUM_THREADS = 3;
 
     for(int iter=0;iter<10;iter++) {
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter);
+      }
       MockDirectoryWrapper dir = newDirectory();
       IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
         .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
@@ -113,6 +116,7 @@ public class TestIndexWriterWithThreads 
       IndexWriter writer = new IndexWriter(dir, conf);
       ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
       dir.setMaxSizeInBytes(4*1024+20*iter);
+      writer.setInfoStream(VERBOSE ? System.out : null);
 
       IndexerThread[] threads = new IndexerThread[NUM_THREADS];
 

Copied: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java (from r1044635, lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java?p2=lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java&p1=lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java&r1=1044635&r2=1050331&rev=1050331&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java Fri Dec 17 09:52:50 2010
@@ -24,14 +24,10 @@ import java.util.Random;
 import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Version;
 
@@ -97,7 +93,7 @@ public class TestPerSegmentDeletes exten
     // id:2 shouldn't exist anymore because
     // it's been applied in the merge and now it's gone
     IndexReader r2 = writer.getReader();
-    int[] id2docs = toDocsArray(new Term("id", "2"), null, r2);
+    int[] id2docs = toDocsArray(new Term("id", "2"), r2);
     assertTrue(id2docs == null);
     r2.close();
     
@@ -214,33 +210,23 @@ public class TestPerSegmentDeletes exten
     return false;
   }
   
-  public static void printDelDocs(Bits bits) {
-    if (bits == null) return;
-    for (int x = 0; x < bits.length(); x++) {
-      System.out.println(x + ":" + bits.get(x));
-    }
-  }
-  
-  public static int[] toDocsArray(Term term, Bits bits, IndexReader reader)
+  public static int[] toDocsArray(Term term, IndexReader reader)
       throws IOException {
-    Fields fields = MultiFields.getFields(reader);
-    Terms cterms = fields.terms(term.field);
-    TermsEnum ctermsEnum = cterms.iterator();
-    SeekStatus ss = ctermsEnum.seek(new BytesRef(term.text()), false);
-    if (ss.equals(SeekStatus.FOUND)) {
-      DocsEnum docsEnum = ctermsEnum.docs(bits, null);
-      return toArray(docsEnum);
-    }
-    return null;
+    TermDocs termDocs = reader.termDocs();
+    termDocs.seek(term);
+    return toArray(termDocs);
   }
   
-  public static int[] toArray(DocsEnum docsEnum) throws IOException {
+  public static int[] toArray(TermDocs termDocs) throws IOException {
     List<Integer> docs = new ArrayList<Integer>();
-    while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
-      int docID = docsEnum.docID();
-      docs.add(docID);
+    while (termDocs.next()) {
+      docs.add(termDocs.doc());
+    }
+    if (docs.size() == 0) {
+      return null;
+    } else {
+      return ArrayUtil.toIntArray(docs);
     }
-    return ArrayUtil.toIntArray(docs);
   }
   
   public class RangeMergePolicy extends MergePolicy {
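
The backported toDocsArray above replaces the flex TermsEnum/DocsEnum calls with the pre-4.0 TermDocs API, which is what branch_3x provides. A self-contained sketch of that iteration against a throwaway index (the index contents and the "id" field are assumptions for illustration):

import java.io.IOException;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class TermDocsSketch {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
        Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT)));
    for (int i = 0; i < 3; i++) {
      Document doc = new Document();
      doc.add(new Field("id", "" + (i % 2), Field.Store.NO, Field.Index.NOT_ANALYZED));
      w.addDocument(doc);
    }
    w.close();

    IndexReader r = IndexReader.open(dir, true);
    TermDocs termDocs = r.termDocs();
    termDocs.seek(new Term("id", "0"));   // position on the term; here it matches docs 0 and 2
    while (termDocs.next()) {             // next() returns false once the postings are exhausted
      System.out.println("docID=" + termDocs.doc() + " freq=" + termDocs.freq());
    }
    termDocs.close();
    r.close();
    dir.close();
  }
}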

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java Fri Dec 17 09:52:50 2010
@@ -96,6 +96,9 @@ public class TestStressIndexing2 extends
     // test lots of smaller different params together
     int num = 3 * RANDOM_MULTIPLIER;
     for (int i = 0; i < num; i++) { // increase iterations for better testing
+      if (VERBOSE) {
+        System.out.println("\n\nTEST: top iter=" + i);
+      }
       sameFieldOrder=random.nextBoolean();
       mergeFactor=random.nextInt(3)+2;
       maxBufferedDocs=random.nextInt(3)+2;
@@ -108,8 +111,17 @@ public class TestStressIndexing2 extends
       int range=random.nextInt(20)+1;
       Directory dir1 = newDirectory();
       Directory dir2 = newDirectory();
+      if (VERBOSE) {
+        System.out.println("  nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor);
+      }
       Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
+      if (VERBOSE) {
+        System.out.println("TEST: index serial");
+      }
       indexSerial(random, docs, dir2);
+      if (VERBOSE) {
+        System.out.println("TEST: verify");
+      }
       verifyEquals(dir1, dir2, "id");
       dir1.close();
       dir2.close();
@@ -139,6 +151,7 @@ public class TestStressIndexing2 extends
     IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(
         0.1).setMaxBufferedDocs(maxBufferedDocs));
+    w.setInfoStream(VERBOSE ? System.out : null);
     w.commit();
     LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
     lmp.setUseCompoundFile(false);
@@ -189,10 +202,14 @@ public class TestStressIndexing2 extends
                                           boolean doReaderPooling) throws IOException, InterruptedException {
     Map<String,Document> docs = new HashMap<String,Document>();
     for(int iter=0;iter<3;iter++) {
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter);
+      }
       IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE)
                .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setMaxThreadStates(maxThreadStates)
                .setReaderPooling(doReaderPooling));
+      w.setInfoStream(VERBOSE ? System.out : null);
       LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
       lmp.setUseCompoundFile(false);
       lmp.setUseCompoundDocStore(false);
@@ -270,9 +287,32 @@ public class TestStressIndexing2 extends
     r2.close();
   }
 
+  private static void printDocs(IndexReader r) throws Throwable {
+    IndexReader[] subs = r.getSequentialSubReaders();
+    for(IndexReader sub : subs) {
+      System.out.println("  " + ((SegmentReader) sub).getSegmentInfo());
+      for(int docID=0;docID<sub.maxDoc();docID++) {
+        Document doc = sub.document(docID);
+        if (!sub.isDeleted(docID)) {
+          System.out.println("    docID=" + docID + " id:" + doc.get("id"));
+        } else {
+          System.out.println("    DEL docID=" + docID + " id:" + doc.get("id"));
+        }
+      }
+    }
+  }
+
 
   public static void verifyEquals(IndexReader r1, IndexReader r2, String idField) throws Throwable {
-    assertEquals(r1.numDocs(), r2.numDocs());
+    if (VERBOSE) {
+      System.out.println("\nr1 docs:");
+      printDocs(r1);
+      System.out.println("\nr2 docs:");
+      printDocs(r2);
+    }
+    if (r1.numDocs() != r2.numDocs()) {
+      assert false: "r1.numDocs()=" + r1.numDocs() + " vs r2.numDocs()=" + r2.numDocs();
+    }
     boolean hasDeletes = !(r1.maxDoc()==r2.maxDoc() && r1.numDocs()==r1.maxDoc());
 
     int[] r2r1 = new int[r2.maxDoc()];   // r2 id to r1 id mapping
@@ -621,19 +661,28 @@ public class TestStressIndexing2 extends
       for (int i=0; i<fields.size(); i++) {
         d.add(fields.get(i));
       }
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": indexing id:" + idString);
+      }
       w.updateDocument(idTerm.createTerm(idString), d);
-      // System.out.println("indexing "+d);
+      //System.out.println(Thread.currentThread().getName() + ": indexing "+d);
       docs.put(idString, d);
     }
 
     public void deleteDoc() throws IOException {
       String idString = getIdString();
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": del id:" + idString);
+      }
       w.deleteDocuments(idTerm.createTerm(idString));
       docs.remove(idString);
     }
 
     public void deleteByQuery() throws IOException {
       String idString = getIdString();
+      if (VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": del query id:" + idString);
+      }
       w.deleteDocuments(new TermQuery(idTerm.createTerm(idString)));
       docs.remove(idString);
     }
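
The deleteDoc and deleteByQuery methods above exercise the two delete paths IndexWriter offers: deleteDocuments(Term) and deleteDocuments(Query) with an equivalent TermQuery. A minimal sketch of both against a throwaway index (directory, analyzer, and the "id" value are illustrative assumptions):

import java.io.IOException;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class DeletePathsSketch {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
        Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT)));
    Document d = new Document();
    d.add(new Field("id", "7", Field.Store.YES, Field.Index.NOT_ANALYZED));
    w.updateDocument(new Term("id", "7"), d);   // add-or-replace keyed on the id term

    w.deleteDocuments(new Term("id", "7"));                  // delete-by-term
    w.deleteDocuments(new TermQuery(new Term("id", "7")));   // delete-by-query, same effect here
    w.close();
    dir.close();
  }
}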

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermVectors.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermVectors.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/search/TestTermVectors.java Fri Dec 17 09:52:50 2010
@@ -354,12 +354,19 @@ public class TestTermVectors extends Luc
     RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
         newIndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
         .setOpenMode(OpenMode.CREATE));
+    writer.w.setInfoStream(VERBOSE ? System.out : null);
+    if (VERBOSE) {
+      System.out.println("TEST: now add non-vectors");
+    }
     for (int i = 0; i < 100; i++) {
       Document doc = new Document();
       doc.add(new Field("field", English.intToEnglish(i),
                         Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
       writer.addDocument(doc);
     }
+    if (VERBOSE) {
+      System.out.println("TEST: now add vectors");
+    }
     for(int i=0;i<10;i++) {
       Document doc = new Document();
       doc.add(new Field("field", English.intToEnglish(100+i),
@@ -367,6 +374,9 @@ public class TestTermVectors extends Luc
       writer.addDocument(doc);
     }
 
+    if (VERBOSE) {
+      System.out.println("TEST: now getReader");
+    }
     IndexReader reader = writer.getReader();
     writer.close();
     searcher = new IndexSearcher(reader);
@@ -375,6 +385,7 @@ public class TestTermVectors extends Luc
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
     assertEquals(10, hits.length);
     for (int i = 0; i < hits.length; i++) {
+
       TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
       assertTrue(vector != null);
       assertTrue(vector.length == 1);

Modified: lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java?rev=1050331&r1=1050330&r2=1050331&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/store/MockIndexOutputWrapper.java Fri Dec 17 09:52:50 2010
@@ -19,6 +19,8 @@ package org.apache.lucene.store;
 
 import java.io.IOException;
 
+import org.apache.lucene.util.LuceneTestCase;
+
 /**
  * Used by MockRAMDirectory to create an output stream that
  * will throw an IOException on fake disk full, track max
@@ -102,6 +104,9 @@ public class MockIndexOutputWrapper exte
         message += "; wrote " + freeSpace + " of " + len + " bytes";
       }
       message += ")";
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println(Thread.currentThread().getName() + ": MDW: now throw fake disk full");
+      }
       throw new IOException(message);
     } else {
       if (dir.randomState.nextBoolean()) {