Posted to commits@lucene.apache.org by rm...@apache.org on 2010/12/19 01:24:06 UTC

svn commit: r1050738 [1/3] - in /lucene/dev/branches/bulkpostings: ./ lucene/ lucene/contrib/ant/src/java/org/apache/lucene/ant/ lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/ lucene/contrib/wordnet/src/java/org/apache/lucene/wo...

Author: rmuir
Date: Sun Dec 19 00:24:04 2010
New Revision: 1050738

URL: http://svn.apache.org/viewvc?rev=1050738&view=rev
Log:
sync branch to trunk (1049186:1050737)

Added:
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/index.31.cfs.zip
      - copied unchanged from r1050733, lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/index.31.cfs.zip
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/index.31.nocfs.zip
      - copied unchanged from r1050733, lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/index.31.nocfs.zip
Modified:
    lucene/dev/branches/bulkpostings/   (props changed)
    lucene/dev/branches/bulkpostings/lucene/   (props changed)
    lucene/dev/branches/bulkpostings/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
    lucene/dev/branches/bulkpostings/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java
    lucene/dev/branches/bulkpostings/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java
    lucene/dev/branches/bulkpostings/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocConsumer.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocInverter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldInfos.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldsWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexReader.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/InvertedDocConsumer.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/InvertedDocEndConsumer.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/MergePolicy.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/MultiFields.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/NormsWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/SegmentInfos.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/TermsHash.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/TermsHashConsumer.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/codecs/DefaultSegmentInfosReader.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/codecs/DefaultSegmentInfosWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/store/MMapDirectory.java
    lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/util/IOUtils.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/TestSearch.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestCodecs.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestDoc.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestLazyBug.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestNorms.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/search/TestMultiSearcher.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/search/TestParallelMultiSearcher.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/search/TestSort.java
    lucene/dev/branches/bulkpostings/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java
    lucene/dev/branches/bulkpostings/solr/   (props changed)
    lucene/dev/branches/bulkpostings/solr/CHANGES.txt
    lucene/dev/branches/bulkpostings/solr/src/java/org/apache/solr/analysis/ClassicTokenizerFactory.java
    lucene/dev/branches/bulkpostings/solr/src/java/org/apache/solr/analysis/StandardTokenizerFactory.java
    lucene/dev/branches/bulkpostings/solr/src/java/org/apache/solr/analysis/UAX29URLEmailTokenizerFactory.java
    lucene/dev/branches/bulkpostings/solr/src/test/org/apache/solr/analysis/TestStandardFactories.java
    lucene/dev/branches/bulkpostings/solr/src/test/org/apache/solr/analysis/TestUAX29URLEmailTokenizerFactory.java

Modified: lucene/dev/branches/bulkpostings/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java Sun Dec 19 00:24:04 2010
@@ -288,7 +288,6 @@ public class IndexTask extends Task {
           create ? OpenMode.CREATE : OpenMode.APPEND);
       LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
       lmp.setUseCompoundFile(useCompoundIndex);
-      lmp.setUseCompoundDocStore(useCompoundIndex);
       lmp.setMergeFactor(mergeFactor);
       IndexWriter writer = new IndexWriter(dir, conf);
       int totalFiles = 0;

Modified: lucene/dev/branches/bulkpostings/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java Sun Dec 19 00:24:04 2010
@@ -138,7 +138,6 @@ public class TestAppendingCodec extends 
     
     cfg.setCodecProvider(new AppendingCodecProvider());
     ((LogMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false);
-    ((LogMergePolicy)cfg.getMergePolicy()).setUseCompoundDocStore(false);
     IndexWriter writer = new IndexWriter(dir, cfg);
     Document doc = new Document();
     doc.add(newField("f", text, Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));

Modified: lucene/dev/branches/bulkpostings/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java Sun Dec 19 00:24:04 2010
@@ -251,7 +251,6 @@ public class Syns2Index
           IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
               Version.LUCENE_CURRENT, ana).setOpenMode(OpenMode.CREATE));
           ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true); // why?
-          ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(true); // why?
           Iterator<String> i1 = word2Nums.keySet().iterator();
           while (i1.hasNext()) // for each word
           {

Modified: lucene/dev/branches/bulkpostings/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java Sun Dec 19 00:24:04 2010
@@ -20,6 +20,8 @@ import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.util.LuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -38,23 +40,17 @@ import org.apache.lucene.util.LuceneTest
  */
 
 public class TestParser extends LuceneTestCase {
-
-	CoreParser builder;
-	static Directory dir;
-  // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT):
-	Analyzer analyzer=new MockAnalyzer(MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false); 
-	IndexReader reader;
-	private IndexSearcher searcher;
-
-	/*
-	 * @see TestCase#setUp()
-	 */
-	@Override
-	public void setUp() throws Exception {
-		super.setUp();
-		
-		//initialize the parser
-		builder=new CorePlusExtensionsParser("contents",analyzer);
+	private static CoreParser builder;
+	private static Directory dir;
+	private static IndexReader reader;
+	private static IndexSearcher searcher;
+
+	@BeforeClass
+	public static void beforeClass() throws Exception {
+	  // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT):
+	  Analyzer analyzer=new MockAnalyzer(MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false); 
+    //initialize the parser
+	  builder=new CorePlusExtensionsParser("contents",analyzer);
 		
 			BufferedReader d = new BufferedReader(new InputStreamReader(TestParser.class.getResourceAsStream("reuters21578.txt"))); 
 			dir=newDirectory();
@@ -84,13 +80,17 @@ public class TestParser extends LuceneTe
 	
 	
 	
-	@Override
-	public void tearDown() throws Exception {
+	@AfterClass
+	public static void afterClass() throws Exception {
 		reader.close();
 		searcher.close();
 		dir.close();
-		super.tearDown();
+		reader = null;
+		searcher = null;
+		dir = null;
+		builder = null;
 	}
+	
 	public void testSimpleXML() throws ParserException, IOException
 	{
 			Query q=parse("TermQuery.xml");

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java Sun Dec 19 00:24:04 2010
@@ -188,7 +188,14 @@ class BufferedDeletes {
 
       if (segIdx <= lastIdx && hasDeletes) {
 
-        any |= applyDeletes(readerPool, info, coalescedDeletes, deletes);
+        final long delCountInc = applyDeletes(readerPool, info, coalescedDeletes, deletes);
+
+        if (delCountInc != 0) {
+          any = true;
+        }
+        if (infoStream != null) {
+          message("deletes touched " + delCountInc + " docIDs");
+        }
       
         if (deletes != null) {
           // we've applied doc ids, and they're only applied
@@ -259,7 +266,7 @@ class BufferedDeletes {
     return any;
   }
   
-  private synchronized boolean applyDeletes(IndexWriter.ReaderPool readerPool,
+  private synchronized long applyDeletes(IndexWriter.ReaderPool readerPool,
                                             SegmentInfo info, 
                                             SegmentDeletes coalescedDeletes,
                                             SegmentDeletes segmentDeletes) throws IOException {    
@@ -267,25 +274,26 @@ class BufferedDeletes {
     
     assert coalescedDeletes == null || coalescedDeletes.docIDs.size() == 0;
     
-    boolean any = false;
+    long delCount = 0;
 
     // Lock order: IW -> BD -> RP
     SegmentReader reader = readerPool.get(info, false);
     try {
       if (coalescedDeletes != null) {
-        any |= applyDeletes(coalescedDeletes, reader);
+        delCount += applyDeletes(coalescedDeletes, reader);
       }
       if (segmentDeletes != null) {
-        any |= applyDeletes(segmentDeletes, reader);
+        delCount += applyDeletes(segmentDeletes, reader);
       }
     } finally {
       readerPool.release(reader);
     }
-    return any;
+    return delCount;
   }
   
-  private synchronized boolean applyDeletes(SegmentDeletes deletes, SegmentReader reader) throws IOException {
-    boolean any = false;
+  private synchronized long applyDeletes(SegmentDeletes deletes, SegmentReader reader) throws IOException {
+
+    long delCount = 0;
 
     assert checkDeleteTerm(null);
     
@@ -293,7 +301,7 @@ class BufferedDeletes {
       Fields fields = reader.fields();
       if (fields == null) {
         // This reader has no postings
-        return false;
+        return 0;
       }
 
       TermsEnum termsEnum = null;
@@ -334,7 +342,12 @@ class BufferedDeletes {
                 break;
               }
               reader.deleteDocument(docID);
-              any = true;
+              // TODO: we could/should change
+              // reader.deleteDocument to return boolean
+              // true if it did in fact delete, because here
+              // we could be deleting an already-deleted doc
+              // which makes this an upper bound:
+              delCount++;
             }
           }
         }
@@ -345,7 +358,7 @@ class BufferedDeletes {
     for (Integer docIdInt : deletes.docIDs) {
       int docID = docIdInt.intValue();
       reader.deleteDocument(docID);
-      any = true;
+      delCount++;
     }
 
     // Delete by query
@@ -362,8 +375,14 @@ class BufferedDeletes {
               int doc = scorer.nextDoc();
               if (doc >= limit)
                 break;
+
               reader.deleteDocument(doc);
-              any = true;
+              // TODO: we could/should change
+              // reader.deleteDocument to return boolean
+              // true if it did in fact delete, because here
+              // we could be deleting an already-deleted doc
+              // which makes this an upper bound:
+              delCount++;
             }
           }
         }
@@ -371,7 +390,8 @@ class BufferedDeletes {
         searcher.close();
       }
     }
-    return any;
+
+    return delCount;
   }
   
   public synchronized SegmentDeletes getDeletes(SegmentInfo info) {
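
[Editor's aside: the BufferedDeletes hunks above change applyDeletes from OR-ing a boolean into summing a long delete count, which the infoStream message then reports; as the TODO notes, reader.deleteDocument may hit an already-deleted doc, so the count is an upper bound. A minimal sketch of the counting pattern (hypothetical types, not the Lucene API):

    import java.util.BitSet;
    import java.util.List;

    class DeleteCounter {
      // Sum a count instead of OR-ing booleans, so callers can report
      // how many docIDs a delete pass touched.
      static long applyAll(List<Integer> docIDs, BitSet deleted) {
        long delCount = 0;
        for (int docID : docIDs) {
          deleted.set(docID); // may re-delete an already-deleted doc,
          delCount++;         // which makes this count an upper bound
        }
        return delCount;      // the old boolean is simply delCount != 0
      }
    }
]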

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java Sun Dec 19 00:24:04 2010
@@ -69,13 +69,6 @@ public class ConcurrentMergeScheduler ex
   protected IndexWriter writer;
   protected int mergeThreadCount;
 
-  public ConcurrentMergeScheduler() {
-    if (allInstances != null) {
-      // Only for testing
-      addMyself();
-    }
-  }
-
   /** Sets the max # simultaneous merge threads that should
    *  be running at once.  This must be <= {@link
    *  #setMaxMergeCount}. */
@@ -431,7 +424,6 @@ public class ConcurrentMergeScheduler ex
           if (!suppressExceptions) {
             // suppressExceptions is normally only set during
             // testing.
-            anyExceptions = true;
             handleMergeException(exc);
           }
         }
@@ -471,48 +463,6 @@ public class ConcurrentMergeScheduler ex
     throw new MergePolicy.MergeException(exc, dir);
   }
 
-  static boolean anyExceptions = false;
-
-  /** Used for testing */
-  public static boolean anyUnhandledExceptions() {
-    if (allInstances == null) {
-      throw new RuntimeException("setTestMode() was not called; often this is because your test case's setUp method fails to call super.setUp in LuceneTestCase");
-    }
-    synchronized(allInstances) {
-      final int count = allInstances.size();
-      // Make sure all outstanding threads are done so we see
-      // any exceptions they may produce:
-      for(int i=0;i<count;i++)
-        allInstances.get(i).sync();
-      boolean v = anyExceptions;
-      anyExceptions = false;
-      return v;
-    }
-  }
-
-  public static void clearUnhandledExceptions() {
-    synchronized(allInstances) {
-      anyExceptions = false;
-    }
-  }
-
-  /** Used for testing */
-  private void addMyself() {
-    synchronized(allInstances) {
-      final int size = allInstances.size();
-      int upto = 0;
-      for(int i=0;i<size;i++) {
-        final ConcurrentMergeScheduler other = allInstances.get(i);
-        if (!(other.closed && 0 == other.mergeThreadCount()))
-          // Keep this one for now: it still has threads or
-          // may spawn new threads
-          allInstances.set(upto++, other);
-      }
-      allInstances.subList(upto, allInstances.size()).clear();
-      allInstances.add(this);
-    }
-  }
-
   private boolean suppressExceptions;
 
   /** Used for testing */
@@ -524,10 +474,4 @@ public class ConcurrentMergeScheduler ex
   void clearSuppressExceptions() {
     suppressExceptions = false;
   }
-
-  /** Used for testing */
-  private static List<ConcurrentMergeScheduler> allInstances;
-  public static void setTestMode() {
-    allInstances = new ArrayList<ConcurrentMergeScheduler>();
-  }
 }

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DirectoryReader.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DirectoryReader.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DirectoryReader.java Sun Dec 19 00:24:04 2010
@@ -765,7 +765,7 @@ class DirectoryReader extends IndexReade
       // KeepOnlyLastCommitDeleter:
       IndexFileDeleter deleter = new IndexFileDeleter(directory,
                                                       deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
-                                                      segmentInfos, null, null, codecs);
+                                                      segmentInfos, null, codecs);
       segmentInfos.updateGeneration(deleter.getLastSegmentInfos());
       segmentInfos.changed();
 

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocConsumer.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocConsumer.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocConsumer.java Sun Dec 19 00:24:04 2010
@@ -23,7 +23,6 @@ import java.util.Collection;
 abstract class DocConsumer {
   abstract DocConsumerPerThread addThread(DocumentsWriterThreadState perThread) throws IOException;
   abstract void flush(final Collection<DocConsumerPerThread> threads, final SegmentWriteState state) throws IOException;
-  abstract void closeDocStore(final SegmentWriteState state) throws IOException;
   abstract void abort();
   abstract boolean freeRAM();
 }

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java Sun Dec 19 00:24:04 2010
@@ -29,10 +29,6 @@ abstract class DocFieldConsumer {
    *  segment */
   abstract void flush(Map<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException;
 
-  /** Called when DocumentsWriter decides to close the doc
-   *  stores */
-  abstract void closeDocStore(SegmentWriteState state) throws IOException;
-  
   /** Called when an aborting exception is hit */
   abstract void abort();
 

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java Sun Dec 19 00:24:04 2010
@@ -47,12 +47,6 @@ final class DocFieldProcessor extends Do
   }
 
   @Override
-  public void closeDocStore(SegmentWriteState state) throws IOException {
-    consumer.closeDocStore(state);
-    fieldsWriter.closeDocStore(state);
-  }
-
-  @Override
   public void flush(Collection<DocConsumerPerThread> threads, SegmentWriteState state) throws IOException {
 
     Map<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>> childThreadsAndFields = new HashMap<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>>();

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocInverter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocInverter.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocInverter.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocInverter.java Sun Dec 19 00:24:04 2010
@@ -74,12 +74,6 @@ final class DocInverter extends DocField
   }
 
   @Override
-  public void closeDocStore(SegmentWriteState state) throws IOException {
-    consumer.closeDocStore(state);
-    endConsumer.closeDocStore(state);
-  }
-
-  @Override
   void abort() {
     consumer.abort();
     endConsumer.abort();

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Sun Dec 19 00:24:04 2010
@@ -114,12 +114,9 @@ final class DocumentsWriter {
   Directory directory;
 
   String segment;                         // Current segment we are working on
-  private String docStoreSegment;         // Current doc-store segment we are writing
-  private int docStoreOffset;                     // Current starting doc-store offset of current segment
 
-  private int nextDocID;                          // Next docID to be added
-  private int numDocsInRAM;                       // # docs buffered in RAM
-  int numDocsInStore;                     // # docs written to doc stores
+  private int nextDocID;                  // Next docID to be added
+  private int numDocs;                    // # of docs added, but not yet flushed
 
   // Max # ThreadState instances; if there are more threads
   // than this they share ThreadStates
@@ -129,8 +126,6 @@ final class DocumentsWriter {
   boolean bufferIsFull;                   // True when it's time to write segment
   private boolean aborting;               // True if an abort is pending
 
-  private DocFieldProcessor docFieldProcessor;
-
   PrintStream infoStream;
   int maxFieldLength = IndexWriterConfig.UNLIMITED_FIELD_LENGTH;
   Similarity similarity;
@@ -139,8 +134,6 @@ final class DocumentsWriter {
   // this, they wait for others to finish first
   private final int maxThreadStates;
 
-  List<String> newFiles;
-
   // Deletes for our still-in-RAM (to be flushed next) segment
   private SegmentDeletes pendingDeletes = new SegmentDeletes();
   
@@ -299,9 +292,6 @@ final class DocumentsWriter {
     flushControl = writer.flushControl;
 
     consumer = indexingChain.getChain(this);
-    if (consumer instanceof DocFieldProcessor) {
-      docFieldProcessor = (DocFieldProcessor) consumer;
-    }
   }
 
   // Buffer a specific docID for deletion.  Currently only
@@ -323,7 +313,7 @@ final class DocumentsWriter {
     final boolean doFlush = flushControl.waitUpdate(0, queries.length);
     synchronized(this) {
       for (Query query : queries) {
-        pendingDeletes.addQuery(query, numDocsInRAM);
+        pendingDeletes.addQuery(query, numDocs);
       }
     }
     return doFlush;
@@ -332,7 +322,7 @@ final class DocumentsWriter {
   boolean deleteQuery(Query query) { 
     final boolean doFlush = flushControl.waitUpdate(0, 1);
     synchronized(this) {
-      pendingDeletes.addQuery(query, numDocsInRAM);
+      pendingDeletes.addQuery(query, numDocs);
     }
     return doFlush;
   }
@@ -341,7 +331,7 @@ final class DocumentsWriter {
     final boolean doFlush = flushControl.waitUpdate(0, terms.length);
     synchronized(this) {
       for (Term term : terms) {
-        pendingDeletes.addTerm(term, numDocsInRAM);
+        pendingDeletes.addTerm(term, numDocs);
       }
     }
     return doFlush;
@@ -350,7 +340,7 @@ final class DocumentsWriter {
   boolean deleteTerm(Term term, boolean skipWait) {
     final boolean doFlush = flushControl.waitUpdate(0, 1, skipWait);
     synchronized(this) {
-      pendingDeletes.addTerm(term, numDocsInRAM);
+      pendingDeletes.addTerm(term, numDocs);
     }
     return doFlush;
   }
@@ -359,31 +349,27 @@ final class DocumentsWriter {
     return fieldInfos;
   }
 
-  /** Returns true if any of the fields in the current
-   *  buffered docs have omitTermFreqAndPositions==false */
-  boolean hasProx() {
-    return (docFieldProcessor != null) ? fieldInfos.hasProx()
-                                       : true;
-  }
-
   /** If non-null, various details of indexing are printed
    *  here. */
   synchronized void setInfoStream(PrintStream infoStream) {
     this.infoStream = infoStream;
-    for(int i=0;i<threadStates.length;i++)
+    for(int i=0;i<threadStates.length;i++) {
       threadStates[i].docState.infoStream = infoStream;
+    }
   }
 
   synchronized void setMaxFieldLength(int maxFieldLength) {
     this.maxFieldLength = maxFieldLength;
-    for(int i=0;i<threadStates.length;i++)
+    for(int i=0;i<threadStates.length;i++) {
       threadStates[i].docState.maxFieldLength = maxFieldLength;
+    }
   }
 
   synchronized void setSimilarity(Similarity similarity) {
     this.similarity = similarity;
-    for(int i=0;i<threadStates.length;i++)
+    for(int i=0;i<threadStates.length;i++) {
       threadStates[i].docState.similarity = similarity;
+    }
   }
 
   /** Set how much RAM we can use before flushing. */
@@ -424,126 +410,14 @@ final class DocumentsWriter {
   }
 
   /** Returns how many docs are currently buffered in RAM. */
-  synchronized int getNumDocsInRAM() {
-    return numDocsInRAM;
-  }
-
-  /** Returns the current doc store segment we are writing
-   *  to. */
-  synchronized String getDocStoreSegment() {
-    return docStoreSegment;
-  }
-
-  /** Returns the doc offset into the shared doc store for
-   *  the current buffered docs. */
-  synchronized int getDocStoreOffset() {
-    return docStoreOffset;
-  }
-
-  /** Closes the current open doc stores an sets the
-   *  docStoreSegment and docStoreUseCFS on the provided
-   *  SegmentInfo. */
-  synchronized void closeDocStore(SegmentWriteState flushState, IndexWriter writer, IndexFileDeleter deleter, SegmentInfo newSegment, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
-    
-    final boolean isSeparate = numDocsInRAM == 0 || !segment.equals(docStoreSegment);
-
-    assert docStoreSegment != null;
-
-    if (infoStream != null) {
-      message("closeDocStore: openFiles=" + openFiles + "; segment=" + docStoreSegment + "; docStoreOffset=" + docStoreOffset + "; numDocsInStore=" + numDocsInStore + "; isSeparate=" + isSeparate);
-    }
-
-    closedFiles.clear();
-    consumer.closeDocStore(flushState);
-    flushState.numDocsInStore = 0;
-    assert 0 == openFiles.size();
-
-    if (isSeparate) {
-      flushState.flushedFiles.clear();
-
-      if (mergePolicy.useCompoundDocStore(segmentInfos)) {
-
-        final String compoundFileName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
-
-        if (infoStream != null) {
-          message("closeDocStore: create compound file " + compoundFileName);
-        }
-
-        boolean success = false;
-        try {
-
-          CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
-          for (final String file : closedFiles) {
-            cfsWriter.addFile(file);
-          }
-      
-          // Perform the merge
-          cfsWriter.close();
-
-          success = true;
-        } finally {
-          if (!success) {
-            deleter.deleteFile(compoundFileName);
-          }
-        }
-
-        // In case the files we just merged into a CFS were
-        // not registered w/ IFD:
-        deleter.deleteNewFiles(closedFiles);
-
-        final int numSegments = segmentInfos.size();
-        for(int i=0;i<numSegments;i++) {
-          SegmentInfo si = segmentInfos.info(i);
-          if (si.getDocStoreOffset() != -1 &&
-              si.getDocStoreSegment().equals(docStoreSegment)) {
-            si.setDocStoreIsCompoundFile(true);
-          }
-        }
-
-        newSegment.setDocStoreIsCompoundFile(true);
-        if (infoStream != null) {
-          message("closeDocStore: after compound file index=" + segmentInfos);
-        }
-
-        writer.checkpoint();
-      }
-    }
-
-    docStoreSegment = null;
-    docStoreOffset = 0;
-    numDocsInStore = 0;
-  }
-
-  private Collection<String> abortedFiles;               // List of files that were written before last abort()
-
-  Collection<String> abortedFiles() {
-    return abortedFiles;
+  synchronized int getNumDocs() {
+    return numDocs;
   }
 
   void message(String message) {
-    if (infoStream != null)
+    if (infoStream != null) {
       writer.message("DW: " + message);
-  }
-
-  final List<String> openFiles = new ArrayList<String>();
-  final List<String> closedFiles = new ArrayList<String>();
-
-  /* Returns Collection of files in use by this instance,
-   * including any flushed segments. */
-  @SuppressWarnings("unchecked")
-  synchronized List<String> openFiles() {
-    return (List<String>) ((ArrayList<String>) openFiles).clone();
-  }
-
-  synchronized void addOpenFile(String name) {
-    assert !openFiles.contains(name);
-    openFiles.add(name);
-  }
-
-  synchronized void removeOpenFile(String name) {
-    assert openFiles.contains(name);
-    openFiles.remove(name);
-    closedFiles.add(name);
+    }
   }
 
   synchronized void setAborting() {
@@ -558,7 +432,6 @@ final class DocumentsWriter {
    *  currently buffered docs.  This resets our state,
    *  discarding any docs added since last flush. */
   synchronized void abort() throws IOException {
-
     if (infoStream != null) {
       message("docWriter: abort");
     }
@@ -582,19 +455,11 @@ final class DocumentsWriter {
 
       waitQueue.waitingBytes = 0;
 
-      try {
-        abortedFiles = openFiles();
-      } catch (Throwable t) {
-        abortedFiles = null;
-      }
-
       pendingDeletes.clear();
-        
-      openFiles.clear();
 
-      for(int i=0;i<threadStates.length;i++)
+      for (DocumentsWriterThreadState threadState : threadStates)
         try {
-          threadStates[i].consumer.abort();
+          threadState.consumer.abort();
         } catch (Throwable t) {
         }
 
@@ -603,10 +468,6 @@ final class DocumentsWriter {
       } catch (Throwable t) {
       }
 
-      docStoreSegment = null;
-      numDocsInStore = 0;
-      docStoreOffset = 0;
-
       // Reset all postings data
       doAfterFlush();
       success = true;
@@ -614,7 +475,7 @@ final class DocumentsWriter {
       aborting = false;
       notifyAll();
       if (infoStream != null) {
-        message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
+        message("docWriter: done abort; success=" + success);
       }
     }
   }
@@ -626,22 +487,25 @@ final class DocumentsWriter {
     threadBindings.clear();
     waitQueue.reset();
     segment = null;
-    numDocsInRAM = 0;
+    numDocs = 0;
     nextDocID = 0;
     bufferIsFull = false;
-    for(int i=0;i<threadStates.length;i++)
+    for(int i=0;i<threadStates.length;i++) {
       threadStates[i].doAfterFlush();
+    }
   }
 
   private synchronized boolean allThreadsIdle() {
-    for(int i=0;i<threadStates.length;i++)
-      if (!threadStates[i].isIdle)
+    for(int i=0;i<threadStates.length;i++) {
+      if (!threadStates[i].isIdle) {
         return false;
+      }
+    }
     return true;
   }
 
   synchronized boolean anyChanges() {
-    return numDocsInRAM != 0 || pendingDeletes.any();
+    return numDocs != 0 || pendingDeletes.any();
   }
 
   // for testing
@@ -680,14 +544,14 @@ final class DocumentsWriter {
 
   /** Flush all pending docs to a new segment */
   // Lock order: IW -> DW
-  synchronized SegmentInfo flush(IndexWriter writer, boolean closeDocStore, IndexFileDeleter deleter, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
+  synchronized SegmentInfo flush(IndexWriter writer, IndexFileDeleter deleter, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
 
     // We change writer's segmentInfos:
     assert Thread.holdsLock(writer);
 
     waitIdle();
 
-    if (numDocsInRAM == 0 && numDocsInStore == 0) {
+    if (numDocs == 0) {
       // nothing to do!
       if (infoStream != null) {
         message("flush: no docs; skipping");
@@ -709,101 +573,60 @@ final class DocumentsWriter {
     SegmentInfo newSegment;
 
     try {
-
+      assert nextDocID == numDocs;
+      assert waitQueue.numWaiting == 0;
       assert waitQueue.waitingBytes == 0;
 
-      assert docStoreSegment != null || numDocsInRAM == 0: "dss=" + docStoreSegment + " numDocsInRAM=" + numDocsInRAM;
-
-      assert numDocsInStore >= numDocsInRAM: "numDocsInStore=" + numDocsInStore + " numDocsInRAM=" + numDocsInRAM;
+      if (infoStream != null) {
+        message("flush postings as segment " + segment + " numDocs=" + numDocs);
+      }
 
       final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
-                                                                 docStoreSegment, numDocsInRAM, numDocsInStore, writer.getConfig().getTermIndexInterval(),
+                                                                 numDocs, writer.getConfig().getTermIndexInterval(),
                                                                  SegmentCodecs.build(fieldInfos, writer.codecs));
 
-      newSegment = new SegmentInfo(segment, numDocsInRAM, directory, false, -1, null, false, hasProx(), flushState.segmentCodecs, false);
+      newSegment = new SegmentInfo(segment, numDocs, directory, false, fieldInfos.hasProx(), flushState.segmentCodecs, false);
 
-      if (!closeDocStore || docStoreOffset != 0) {
-        newSegment.setDocStoreSegment(docStoreSegment);
-        newSegment.setDocStoreOffset(docStoreOffset);
+      Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
+      for (DocumentsWriterThreadState threadState : threadStates) {
+        threads.add(threadState.consumer);
       }
-      
-      boolean hasVectors = false;
-
-      if (closeDocStore) {
-        closeDocStore(flushState, writer, deleter, newSegment, mergePolicy, segmentInfos);
-      }
-
-      hasVectors |= flushState.hasVectors;
-
-      if (numDocsInRAM > 0) {
 
-        assert nextDocID == numDocsInRAM;
-        assert waitQueue.numWaiting == 0;
-        assert waitQueue.waitingBytes == 0;
+      long startNumBytesUsed = bytesUsed();
 
-        if (infoStream != null) {
-          message("flush postings as segment " + segment + " numDocs=" + numDocsInRAM);
-        }
-    
-        final Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
-        for(int i=0;i<threadStates.length;i++) {
-          threads.add(threadStates[i].consumer);
-        }
+      consumer.flush(threads, flushState);
+      newSegment.setHasVectors(flushState.hasVectors);
 
-        final long startNumBytesUsed = bytesUsed();
-        consumer.flush(threads, flushState);
+      if (infoStream != null) {
+        message("new segment has " + (flushState.hasVectors ? "vectors" : "no vectors"));
+        message("flushedFiles=" + flushState.flushedFiles);
+        message("flushed codecs=" + newSegment.getSegmentCodecs());
+      }
 
-        hasVectors |= flushState.hasVectors;
-
-        if (hasVectors) {
-          if (infoStream != null) {
-            message("new segment has vectors");
-          }
-          newSegment.setHasVectors(true);
-        } else {
-          if (infoStream != null) {
-            message("new segment has no vectors");
-          }
-        }
+      if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
+        final String cfsFileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.COMPOUND_FILE_EXTENSION);
 
         if (infoStream != null) {
-          message("flushedFiles=" + flushState.flushedFiles);
-          message("flushed codecs=" + newSegment.getSegmentCodecs());
+          message("flush: create compound file \"" + cfsFileName + "\"");
         }
 
-        if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
-
-          final String cfsFileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.COMPOUND_FILE_EXTENSION);
-
-          if (infoStream != null) {
-            message("flush: create compound file \"" + cfsFileName + "\"");
-          }
-
-          CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
-          for(String fileName : flushState.flushedFiles) {
-            cfsWriter.addFile(fileName);
-          }
-          cfsWriter.close();
-          deleter.deleteNewFiles(flushState.flushedFiles);
-
-          newSegment.setUseCompoundFile(true);
+        CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
+        for(String fileName : flushState.flushedFiles) {
+          cfsWriter.addFile(fileName);
         }
+        cfsWriter.close();
+        deleter.deleteNewFiles(flushState.flushedFiles);
 
-        if (infoStream != null) {
-          message("flush: segment=" + newSegment);
-          final long newSegmentSize = newSegment.sizeInBytes();
-          String message = "  ramUsed=" + nf.format(startNumBytesUsed/1024./1024.) + " MB" +
-            " newFlushedSize=" + nf.format(newSegmentSize/1024/1024) + " MB" +
-            " docs/MB=" + nf.format(numDocsInRAM/(newSegmentSize/1024./1024.)) +
-            " new/old=" + nf.format(100.0*newSegmentSize/startNumBytesUsed) + "%";
-          message(message);
-        }
+        newSegment.setUseCompoundFile(true);
+      }
 
-      } else {
-        if (infoStream != null) {
-          message("skip flushing segment: no docs");
-        }
-        newSegment = null;
+      if (infoStream != null) {
+        message("flush: segment=" + newSegment);
+        final long newSegmentSize = newSegment.sizeInBytes();
+        message("  ramUsed=" + nf.format(startNumBytesUsed / 1024. / 1024.) + " MB" +
+            " newFlushedSize=" + nf.format(newSegmentSize / 1024 / 1024) + " MB" +
+            " docs/MB=" + nf.format(numDocs / (newSegmentSize / 1024. / 1024.)) +
+            " new/old=" + nf.format(100.0 * newSegmentSize / startNumBytesUsed) + "%");
       }
 
       success = true;
@@ -822,8 +645,6 @@ final class DocumentsWriter {
     // Lock order: IW -> DW -> BD
     pushDeletes(newSegment, segmentInfos);
 
-    docStoreOffset = numDocsInStore;
-
     return newSegment;
   }
 
@@ -832,17 +653,6 @@ final class DocumentsWriter {
     notifyAll();
   }
 
-  synchronized void initSegmentName(boolean onlyDocStore) {
-    if (segment == null && (!onlyDocStore || docStoreSegment == null)) {
-      segment = writer.newSegmentName();
-      assert numDocsInRAM == 0;
-    }
-    if (docStoreSegment == null) {
-      docStoreSegment = segment;
-      assert numDocsInStore == 0;
-    }
-  }
-
   /** Returns a free (idle) ThreadState that may be used for
    * indexing this one document.  This call also pauses if a
    * flush is pending.  If delTerm is non-null then we
@@ -864,8 +674,9 @@ final class DocumentsWriter {
       DocumentsWriterThreadState minThreadState = null;
       for(int i=0;i<threadStates.length;i++) {
         DocumentsWriterThreadState ts = threadStates[i];
-        if (minThreadState == null || ts.numThreads < minThreadState.numThreads)
+        if (minThreadState == null || ts.numThreads < minThreadState.numThreads) {
           minThreadState = ts;
+        }
       }
       if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.length >= maxThreadStates)) {
         state = minThreadState;
@@ -873,8 +684,9 @@ final class DocumentsWriter {
       } else {
         // Just create a new "private" thread state
         DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1+threadStates.length];
-        if (threadStates.length > 0)
+        if (threadStates.length > 0) {
           System.arraycopy(threadStates, 0, newArray, 0, threadStates.length);
+        }
         state = newArray[threadStates.length] = new DocumentsWriterThreadState(this);
         threadStates = newArray;
       }
@@ -888,7 +700,10 @@ final class DocumentsWriter {
 
     // Allocate segment name if this is the first doc since
     // last flush:
-    initSegmentName(false);
+    if (segment == null) {
+      segment = writer.newSegmentName();
+      assert numDocs == 0;
+    }
 
     state.docState.docID = nextDocID++;
 
@@ -896,7 +711,7 @@ final class DocumentsWriter {
       pendingDeletes.addTerm(delTerm, state.docState.docID);
     }
 
-    numDocsInRAM++;
+    numDocs++;
     state.isIdle = false;
     return state;
   }
@@ -1040,15 +855,16 @@ final class DocumentsWriter {
 
       final boolean doPause;
 
-      if (docWriter != null)
+      if (docWriter != null) {
         doPause = waitQueue.add(docWriter);
-      else {
+      } else {
         skipDocWriter.docID = perThread.docState.docID;
         doPause = waitQueue.add(skipDocWriter);
       }
 
-      if (doPause)
+      if (doPause) {
         waitForWaitQueue();
+      }
 
       perThread.isIdle = true;
 
@@ -1097,7 +913,7 @@ final class DocumentsWriter {
   final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
   final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
 
-  private ArrayList<int[]> freeIntBlocks = new ArrayList<int[]>();
+  private List<int[]> freeIntBlocks = new ArrayList<int[]>();
 
   /* Allocate another int[] from the shared pool */
   synchronized int[] getIntBlock() {
@@ -1106,8 +922,9 @@ final class DocumentsWriter {
     if (0 == size) {
       b = new int[INT_BLOCK_SIZE];
       bytesUsed.addAndGet(INT_BLOCK_SIZE*RamUsageEstimator.NUM_BYTES_INT);
-    } else
+    } else {
       b = freeIntBlocks.remove(size-1);
+    }
     return b;
   }
 
@@ -1160,12 +977,13 @@ final class DocumentsWriter {
 
     if (doBalance) {
 
-      if (infoStream != null)
+      if (infoStream != null) {
         message("  RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
                 " vs trigger=" + toMB(ramBufferSize) +
                 " deletesMB=" + toMB(deletesRAMUsed) +
                 " byteBlockFree=" + toMB(byteBlockAllocator.bytesUsed()) +
                 " perDocFree=" + toMB(perDocAllocator.bytesUsed()));
+      }
 
       final long startBytesUsed = bytesUsed() + deletesRAMUsed;
 
@@ -1186,10 +1004,11 @@ final class DocumentsWriter {
             // Nothing else to free -- must flush now.
             bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
             if (infoStream != null) {
-              if (bytesUsed()+deletesRAMUsed > ramBufferSize)
+              if (bytesUsed()+deletesRAMUsed > ramBufferSize) {
                 message("    nothing to free; set bufferIsFull");
-              else
+              } else {
                 message("    nothing to free");
+              }
             }
             break;
           }
@@ -1206,15 +1025,17 @@ final class DocumentsWriter {
           }
         }
 
-        if ((3 == iter % 4) && any)
+        if ((3 == iter % 4) && any) {
           // Ask consumer to free any recycled state
           any = consumer.freeRAM();
+        }
 
         iter++;
       }
 
-      if (infoStream != null)
+      if (infoStream != null) {
         message("    after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
+      }
     }
   }
 
@@ -1267,11 +1088,11 @@ final class DocumentsWriter {
       try {
         doc.finish();
         nextWriteDocID++;
-        numDocsInStore++;
         nextWriteLoc++;
         assert nextWriteLoc <= waiting.length;
-        if (nextWriteLoc == waiting.length)
+        if (nextWriteLoc == waiting.length) {
           nextWriteLoc = 0;
+        }
         success = true;
       } finally {
         if (!success) {
@@ -1318,8 +1139,9 @@ final class DocumentsWriter {
         }
 
         int loc = nextWriteLoc + gap;
-        if (loc >= waiting.length)
+        if (loc >= waiting.length) {
           loc -= waiting.length;
+        }
 
         // We should only wrap one time
         assert loc < waiting.length;
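
[Editor's aside: the DocumentsWriter hunks above drop the shared doc-store machinery (docStoreSegment, docStoreOffset, numDocsInStore, closeDocStore), so a flush now only consults the single in-RAM doc counter, flushes the consumer chain, and optionally packs the flushed files into a compound file. A rough self-contained sketch of that control flow (hypothetical interfaces, not the real internals):

    import java.io.IOException;
    import java.util.Set;

    class FlushSketch {
      interface Consumer { Set<String> flush() throws IOException; }

      private final Consumer consumer;
      private int numDocs; // docs added since the last flush

      FlushSketch(Consumer consumer) { this.consumer = consumer; }

      Set<String> flush(boolean useCompoundFile) throws IOException {
        if (numDocs == 0) {
          return null;               // nothing buffered: skip (no doc-store special case anymore)
        }
        Set<String> flushedFiles = consumer.flush();
        if (useCompoundFile) {
          // pack flushedFiles into a single .cfs and delete the originals
        }
        numDocs = 0;                 // reset per-flush state, as doAfterFlush() does
        return flushedFiles;
      }
    }
]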

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldInfos.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldInfos.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldInfos.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldInfos.java Sun Dec 19 00:24:04 2010
@@ -224,6 +224,13 @@ public final class FieldInfos {
     return fi;
   }
 
+  synchronized public FieldInfo add(FieldInfo fi) {
+    return add(fi.name, fi.isIndexed, fi.storeTermVector,
+               fi.storePositionWithTermVector, fi.storeOffsetWithTermVector,
+               fi.omitNorms, fi.storePayloads,
+               fi.omitTermFreqAndPositions);
+  }
+
   private FieldInfo addInternal(String name, boolean isIndexed,
                                 boolean storeTermVector, boolean storePositionWithTermVector, 
                                 boolean storeOffsetWithTermVector, boolean omitNorms, boolean storePayloads, boolean omitTermFreqAndPositions) {
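
[Editor's aside: the new FieldInfos.add(FieldInfo) overload forwards every flag of an existing FieldInfo to the long-form add. A hedged usage sketch (hypothetical helper; assumes the index-based size()/fieldInfo(int) accessors of this era's FieldInfos), e.g. folding one segment's field metadata into a combined FieldInfos:

    import org.apache.lucene.index.FieldInfos;

    class FieldInfosMergeSketch {
      // Hypothetical helper: fold one segment's field metadata into a
      // combined FieldInfos using the new copy-all-flags overload.
      static FieldInfos mergeInto(FieldInfos merged, FieldInfos segmentFieldInfos) {
        for (int i = 0; i < segmentFieldInfos.size(); i++) {
          merged.add(segmentFieldInfos.fieldInfo(i)); // one call copies every flag
        }
        return merged;
      }
    }
]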

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldsWriter.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldsWriter.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FieldsWriter.java Sun Dec 19 00:24:04 2010
@@ -25,9 +25,9 @@ import org.apache.lucene.store.Directory
 import org.apache.lucene.store.RAMOutputStream;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.IOUtils;
 
-final class FieldsWriter
-{
+final class FieldsWriter {
   static final byte FIELD_IS_TOKENIZED = 0x1;
   static final byte FIELD_IS_BINARY = 0x2;
   
@@ -41,191 +41,147 @@ final class FieldsWriter
   
   // when removing support for old versions, leave the last supported version here
   static final int FORMAT_MINIMUM = FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS;
-  
-    private FieldInfos fieldInfos;
-
-    private IndexOutput fieldsStream;
-
-    private IndexOutput indexStream;
 
-    private boolean doClose;
-
-    FieldsWriter(Directory d, String segment, FieldInfos fn) throws IOException {
-        fieldInfos = fn;
-
-        boolean success = false;
-        final String fieldsName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_EXTENSION);
-        try {
-          fieldsStream = d.createOutput(fieldsName);
-          fieldsStream.writeInt(FORMAT_CURRENT);
-          success = true;
-        } finally {
-          if (!success) {
-            try {
-              close();
-            } catch (Throwable t) {
-              // Suppress so we keep throwing the original exception
-            }
-            try {
-              d.deleteFile(fieldsName);
-            } catch (Throwable t) {
-              // Suppress so we keep throwing the original exception
-            }
-          }
-        }
-
-        success = false;
-        final String indexName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION);
-        try {
-          indexStream = d.createOutput(indexName);
-          indexStream.writeInt(FORMAT_CURRENT);
-          success = true;
-        } finally {
-          if (!success) {
-            try {
-              close();
-            } catch (IOException ioe) {
-            }
-            try {
-              d.deleteFile(fieldsName);
-            } catch (Throwable t) {
-              // Suppress so we keep throwing the original exception
-            }
-            try {
-              d.deleteFile(indexName);
-            } catch (Throwable t) {
-              // Suppress so we keep throwing the original exception
-            }
-          }
-        }
-
-        doClose = true;
-    }
-
-    FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn) {
-        fieldInfos = fn;
-        fieldsStream = fdt;
-        indexStream = fdx;
-        doClose = false;
-    }
-
-    void setFieldsStream(IndexOutput stream) {
-      this.fieldsStream = stream;
-    }
-
-    // Writes the contents of buffer into the fields stream
-    // and adds a new entry for this document into the index
-    // stream.  This assumes the buffer was already written
-    // in the correct fields format.
-    void flushDocument(int numStoredFields, RAMOutputStream buffer) throws IOException {
-      indexStream.writeLong(fieldsStream.getFilePointer());
-      fieldsStream.writeVInt(numStoredFields);
-      buffer.writeTo(fieldsStream);
-    }
-
-    void skipDocument() throws IOException {
-      indexStream.writeLong(fieldsStream.getFilePointer());
-      fieldsStream.writeVInt(0);
-    }
-
-    void flush() throws IOException {
-      indexStream.flush();
-      fieldsStream.flush();
-    }
-
-    final void close() throws IOException {
-      if (doClose) {
-        try {
-          if (fieldsStream != null) {
-            try {
-              fieldsStream.close();
-            } finally {
-              fieldsStream = null;
-            }
-          }
-        } catch (IOException ioe) {
-          try {
-            if (indexStream != null) {
-              try {
-                indexStream.close();
-              } finally {
-                indexStream = null;
-              }
-            }
-          } catch (IOException ioe2) {
-            // Ignore so we throw only first IOException hit
-          }
-          throw ioe;
-        } finally {
-          if (indexStream != null) {
-            try {
-              indexStream.close();
-            } finally {
-              indexStream = null;
-            }
-          }
-        }
+  // If directory is null, we were supplied with streams; if non-null, we manage them ourselves
+  private Directory directory;
+  private String segment;
+  private FieldInfos fieldInfos;
+  private IndexOutput fieldsStream;
+  private IndexOutput indexStream;
+
+  FieldsWriter(Directory directory, String segment, FieldInfos fn) throws IOException {
+    this.directory = directory;
+    this.segment = segment;
+    fieldInfos = fn;
+
+    boolean success = false;
+    try {
+      fieldsStream = directory.createOutput(IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_EXTENSION));
+      indexStream = directory.createOutput(IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION));
+
+      fieldsStream.writeInt(FORMAT_CURRENT);
+      indexStream.writeInt(FORMAT_CURRENT);
+
+      success = true;
+    } finally {
+      if (!success) {
+        abort();
       }
     }
+  }
 
-    final void writeField(FieldInfo fi, Fieldable field) throws IOException {
-      fieldsStream.writeVInt(fi.number);
-      byte bits = 0;
-      if (field.isTokenized())
-        bits |= FieldsWriter.FIELD_IS_TOKENIZED;
-      if (field.isBinary())
-        bits |= FieldsWriter.FIELD_IS_BINARY;
-                
-      fieldsStream.writeByte(bits);
-                
-      if (field.isBinary()) {
-        final byte[] data;
-        final int len;
-        final int offset;
-        data = field.getBinaryValue();
-        len = field.getBinaryLength();
-        offset =  field.getBinaryOffset();
-
-        fieldsStream.writeVInt(len);
-        fieldsStream.writeBytes(data, offset, len);
-      }
-      else {
-        fieldsStream.writeString(field.stringValue());
+  FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn) {
+    directory = null;
+    segment = null;
+    fieldInfos = fn;
+    fieldsStream = fdt;
+    indexStream = fdx;
+  }
+
+  void setFieldsStream(IndexOutput stream) {
+    this.fieldsStream = stream;
+  }
+
+  // Writes the contents of buffer into the fields stream
+  // and adds a new entry for this document into the index
+  // stream.  This assumes the buffer was already written
+  // in the correct fields format.
+  void flushDocument(int numStoredFields, RAMOutputStream buffer) throws IOException {
+    indexStream.writeLong(fieldsStream.getFilePointer());
+    fieldsStream.writeVInt(numStoredFields);
+    buffer.writeTo(fieldsStream);
+  }
+
+  void skipDocument() throws IOException {
+    indexStream.writeLong(fieldsStream.getFilePointer());
+    fieldsStream.writeVInt(0);
+  }
+
+  void close() throws IOException {
+    if (directory != null) {
+      try {
+        IOUtils.closeSafely(fieldsStream, indexStream);
+      } finally {
+        fieldsStream = indexStream = null;
       }
     }
+  }
 
-    /** Bulk write a contiguous series of documents.  The
-     *  lengths array is the length (in bytes) of each raw
-     *  document.  The stream IndexInput is the
-     *  fieldsStream from which we should bulk-copy all
-     *  bytes. */
-    final void addRawDocuments(IndexInput stream, int[] lengths, int numDocs) throws IOException {
-      long position = fieldsStream.getFilePointer();
-      long start = position;
-      for(int i=0;i<numDocs;i++) {
-        indexStream.writeLong(position);
-        position += lengths[i];
+  void abort() {
+    if (directory != null) {
+      try {
+        close();
+      } catch (IOException ignored) {
+      }
+      try {
+        directory.deleteFile(IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_EXTENSION));
+      } catch (IOException ignored) {
+      }
+      try {
+        directory.deleteFile(IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION));
+      } catch (IOException ignored) {
       }
-      fieldsStream.copyBytes(stream, position-start);
-      assert fieldsStream.getFilePointer() == position;
     }
+  }
+
+  final void writeField(FieldInfo fi, Fieldable field) throws IOException {
+    fieldsStream.writeVInt(fi.number);
+    byte bits = 0;
+    if (field.isTokenized())
+      bits |= FieldsWriter.FIELD_IS_TOKENIZED;
+    if (field.isBinary())
+      bits |= FieldsWriter.FIELD_IS_BINARY;
+
+    fieldsStream.writeByte(bits);
+
+    if (field.isBinary()) {
+      final byte[] data;
+      final int len;
+      final int offset;
+      data = field.getBinaryValue();
+      len = field.getBinaryLength();
+      offset =  field.getBinaryOffset();
+
+      fieldsStream.writeVInt(len);
+      fieldsStream.writeBytes(data, offset, len);
+    }
+    else {
+      fieldsStream.writeString(field.stringValue());
+    }
+  }
+
+  /** Bulk write a contiguous series of documents.  The
+   *  lengths array is the length (in bytes) of each raw
+   *  document.  The stream IndexInput is the
+   *  fieldsStream from which we should bulk-copy all
+   *  bytes. */
+  final void addRawDocuments(IndexInput stream, int[] lengths, int numDocs) throws IOException {
+    long position = fieldsStream.getFilePointer();
+    long start = position;
+    for(int i=0;i<numDocs;i++) {
+      indexStream.writeLong(position);
+      position += lengths[i];
+    }
+    fieldsStream.copyBytes(stream, position-start);
+    assert fieldsStream.getFilePointer() == position;
+  }
+
+  final void addDocument(Document doc) throws IOException {
+    indexStream.writeLong(fieldsStream.getFilePointer());
+
+    int storedCount = 0;
+    List<Fieldable> fields = doc.getFields();
+    for (Fieldable field : fields) {
+      if (field.isStored())
+          storedCount++;
+    }
+    fieldsStream.writeVInt(storedCount);
+
 
-    final void addDocument(Document doc) throws IOException {
-        indexStream.writeLong(fieldsStream.getFilePointer());
 
-        int storedCount = 0;
-        List<Fieldable> fields = doc.getFields();
-        for (Fieldable field : fields) {
-            if (field.isStored())
-                storedCount++;
-        }
-        fieldsStream.writeVInt(storedCount);
-
-        
-
-        for (Fieldable field : fields) {
-            if (field.isStored())
-              writeField(fieldInfos.fieldInfo(field.name()), field);
-        }
+    for (Fieldable field : fields) {
+      if (field.isStored())
+        writeField(fieldInfos.fieldInfo(field.name()), field);
     }
+  }
 }

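The FieldsWriter change above replaces the hand-rolled close-and-suppress blocks in the old constructor with a single call to IOUtils.closeSafely, plus an abort() that deletes any partially written .fdt/.fdx files. A minimal sketch of the close-all-but-rethrow-first pattern that this relies on (CloseUtil and closeAll are hypothetical names for illustration, not the actual IOUtils source):

    import java.io.Closeable;
    import java.io.IOException;

    final class CloseUtil {
      // Close every stream, remembering only the first exception so
      // that the original failure is the one that propagates; later
      // failures while closing the remaining streams are suppressed.
      static void closeAll(Closeable... streams) throws IOException {
        IOException first = null;
        for (Closeable c : streams) {
          try {
            if (c != null) {
              c.close();
            }
          } catch (IOException e) {
            if (first == null) {
              first = e; // keep the first failure
            }            // suppress subsequent ones
          }
        }
        if (first != null) {
          throw first;
        }
      }
    }

Keeping only the first exception matters because the initial failure is usually the root cause; a secondary close() failure on an already-broken stream would otherwise mask it.
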
Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java Sun Dec 19 00:24:04 2010
@@ -20,7 +20,6 @@ package org.apache.lucene.index;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Comparator;
@@ -39,9 +38,6 @@ final class FreqProxTermsWriter extends 
   }
 
   @Override
-  void closeDocStore(SegmentWriteState state) {}
-
-  @Override
   void abort() {}
 
   private int flushedDocCount;

Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java Sun Dec 19 00:24:04 2010
@@ -21,14 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.store.Directory;
@@ -101,7 +94,6 @@ final class IndexFileDeleter {
   private PrintStream infoStream;
   private Directory directory;
   private IndexDeletionPolicy policy;
-  private DocumentsWriter docWriter;
 
   final boolean startingCommitDeleted;
   private SegmentInfos lastSegmentInfos;
@@ -112,8 +104,9 @@ final class IndexFileDeleter {
 
   void setInfoStream(PrintStream infoStream) {
     this.infoStream = infoStream;
-    if (infoStream != null)
+    if (infoStream != null) {
       message("setInfoStream deletionPolicy=" + policy);
+    }
   }
   
   private void message(String message) {
@@ -130,17 +123,14 @@ final class IndexFileDeleter {
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter,
-                          CodecProvider codecs)
-    throws CorruptIndexException, IOException {
-
-    this.docWriter = docWriter;
+  public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, CodecProvider codecs) throws CorruptIndexException, IOException {
     this.infoStream = infoStream;
 
     final String currentSegmentsFile = segmentInfos.getCurrentSegmentFileName();
 
-    if (infoStream != null)
+    if (infoStream != null) {
       message("init: current segments file is \"" + currentSegmentsFile + "\"; deletionPolicy=" + policy);
+    }
 
     this.policy = policy;
     this.directory = directory;
@@ -229,8 +219,9 @@ final class IndexFileDeleter {
       } catch (IOException e) {
         throw new CorruptIndexException("failed to locate current segments_N file");
       }
-      if (infoStream != null)
+      if (infoStream != null) {
         message("forced open of current segments file " + segmentInfos.getCurrentSegmentFileName());
+      }
       currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
       commits.add(currentCommitPoint);
       incRef(sis, true);
@@ -360,8 +351,9 @@ final class IndexFileDeleter {
     // DecRef old files from the last checkpoint, if any:
     int size = lastFiles.size();
     if (size > 0) {
-      for(int i=0;i<size;i++)
+      for(int i=0;i<size;i++) {
         decRef(lastFiles.get(i));
+      }
       lastFiles.clear();
     }
 
@@ -394,8 +386,9 @@ final class IndexFileDeleter {
       deletable = null;
       int size = oldDeletable.size();
       for(int i=0;i<size;i++) {
-        if (infoStream != null)
+        if (infoStream != null) {
           message("delete pending file " + oldDeletable.get(i));
+        }
         deleteFile(oldDeletable.get(i));
       }
     }
@@ -444,37 +437,20 @@ final class IndexFileDeleter {
       // Decref files for commits that were deleted by the policy:
       deleteCommits();
     } else {
-
-      final List<String> docWriterFiles;
-      if (docWriter != null) {
-        docWriterFiles = docWriter.openFiles();
-        if (docWriterFiles != null)
-          // We must incRef these files before decRef'ing
-          // last files to make sure we don't accidentally
-          // delete them:
-          incRef(docWriterFiles);
-      } else
-        docWriterFiles = null;
-
       // DecRef old files from the last checkpoint, if any:
-      int size = lastFiles.size();
-      if (size > 0) {
-        for(int i=0;i<size;i++)
-          decRef(lastFiles.get(i));
-        lastFiles.clear();
+      for (Collection<String> lastFile : lastFiles) {
+        decRef(lastFile);
       }
+      lastFiles.clear();
 
       // Save files so we can decr on next checkpoint/commit:
       lastFiles.add(segmentInfos.files(directory, false));
-
-      if (docWriterFiles != null)
-        lastFiles.add(docWriterFiles);
     }
   }
 
   void incRef(SegmentInfos segmentInfos, boolean isCommit) throws IOException {
-     // If this is a commit point, also incRef the
-     // segments_N file:
+    // If this is a commit point, also incRef the
+    // segments_N file:
     for( final String fileName: segmentInfos.files(directory, isCommit) ) {
       incRef(fileName);
     }
@@ -539,8 +515,9 @@ final class IndexFileDeleter {
   }
 
   void deleteFiles(List<String> files) throws IOException {
-    for(final String file: files)
+    for(final String file: files) {
       deleteFile(file);
+    }
   }
 
   /** Deletes the specified files, but only if they are new
@@ -699,6 +676,5 @@ final class IndexFileDeleter {
     public boolean isDeleted() {
       return deleted;
     }
-
   }
 }

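With the DocumentsWriter coupling removed above, IndexFileDeleter now tracks only the files referenced by commit points and the last checkpoint, using incRef/decRef. A sketch of the underlying refcounting idea (RefCounts is a hypothetical stand-in; the real class also manages commit points and pending deletes, and assumes every decRef is matched by a prior incRef):

    import java.util.HashMap;
    import java.util.Map;

    final class RefCounts {
      private final Map<String, Integer> counts = new HashMap<String, Integer>();

      void incRef(String fileName) {
        Integer c = counts.get(fileName);
        counts.put(fileName, c == null ? 1 : c + 1);
      }

      // Returns true when the last reference is dropped and the
      // file may be physically deleted from the directory.
      boolean decRef(String fileName) {
        int c = counts.get(fileName) - 1;
        if (c == 0) {
          counts.remove(fileName);
          return true;
        }
        counts.put(fileName, c);
        return false;
      }
    }

A file becomes deletable exactly when its count reaches zero, which is why checkpoint code incRefs the new segment files before decRef'ing the old ones: a file present in both sets must never transiently hit zero.
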
Modified: lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexReader.java?rev=1050738&r1=1050737&r2=1050738&view=diff
==============================================================================
--- lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexReader.java (original)
+++ lucene/dev/branches/bulkpostings/lucene/src/java/org/apache/lucene/index/IndexReader.java Sun Dec 19 00:24:04 2010
@@ -1257,6 +1257,8 @@ public abstract class IndexReader implem
    *  method should return null when there are no deleted
    *  docs.
    *
+   *  The returned instance has been safely published for use by
+   *  multiple threads without additional synchronization.
    * @lucene.experimental */
   public abstract Bits getDeletedDocs();
 
@@ -1434,7 +1436,7 @@ public abstract class IndexReader implem
   }
 
 
-  private Fields fields;
+  private volatile Fields fields;
 
   /** @lucene.internal */
   void storeFields(Fields fields) {
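The volatile qualifier added to the fields member above is what backs the new getDeletedDocs javadoc wording about safe publication. A minimal illustration of the idiom (SafeHolder is a hypothetical class, not IndexReader itself):

    // The volatile write in publish() happens-before any subsequent
    // read in get() under the Java memory model, so reader threads
    // always observe a fully constructed array without locking.
    final class SafeHolder {
      private volatile String[] data;     // publication point

      void publish(String[] fullyBuilt) { // writer thread
        data = fullyBuilt;
      }

      String[] get() {                    // reader threads, no lock
        return data;                      // may be null before publish
      }
    }

Because the happens-before edge covers all writes the publisher made before the volatile store, readers can never see a partially initialized object through this field.
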