Posted to commits@lucene.apache.org by mi...@apache.org on 2011/01/05 18:34:00 UTC
svn commit: r1055547 [1/3] - in /lucene/dev/branches/branch_3x: ./ lucene/
lucene/backwards/src/test/org/apache/lucene/index/
lucene/contrib/ant/src/java/org/apache/lucene/ant/
lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/
lucene/src/java/...
Author: mikemccand
Date: Wed Jan 5 17:33:58 2011
New Revision: 1055547
URL: http://svn.apache.org/viewvc?rev=1055547&view=rev
Log:
LUCENE-2814: stop writing shared doc stores
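Background: before this change, segments flushed during one IndexWriter session could share a single set of doc-store files (stored fields .fdt/.fdx and term vectors .tvx/.tvd/.tvf), with each SegmentInfo recording a docStoreSegment name and a docStoreOffset into those shared files. After this commit every flushed segment writes its own private doc stores, so DocumentsWriter drops the docStoreSegment/docStoreOffset/numDocsInStore bookkeeping and closeDocStore disappears from the whole consumer chain. The shape of the change shows in the SegmentInfo construction, condensed from the DocumentsWriter hunk below (a sketch of the two call sites, not the full method):

    // before: doc-store arguments passed explicitly (offset -1, shared segment name null)
    newSegment = new SegmentInfo(segment, numDocsInRAM, directory, false, true, -1, null, false, fieldInfos.hasProx(), false);
    // after: no doc-store arguments at all
    newSegment = new SegmentInfo(segment, numDocs, directory, false, true, fieldInfos.hasProx(), false);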
Modified:
lucene/dev/branches/branch_3x/ (props changed)
lucene/dev/branches/branch_3x/lucene/ (props changed)
lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReader.java
lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriter.java
lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
lucene/dev/branches/branch_3x/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
lucene/dev/branches/branch_3x/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocConsumer.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocInverter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FieldsWriter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/InvertedDocConsumer.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/InvertedDocEndConsumer.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/MergePolicy.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/NormsWriter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/TermsHash.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/TermsHashConsumer.java
lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/util/IOUtils.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestSearch.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/Test2BTerms.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestDoc.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestLazyBug.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestNorms.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
lucene/dev/branches/branch_3x/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java
lucene/dev/branches/branch_3x/solr/ (props changed)
Modified: lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReader.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexReader.java Wed Jan 5 17:33:58 2011
@@ -21,7 +21,6 @@ package org.apache.lucene.index;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
@@ -39,7 +38,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.SetBasedFieldSelector;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader.FieldOption;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
@@ -988,29 +986,8 @@ public class TestIndexReader extends Luc
// new IndexFileDeleter, have it delete
// unreferenced files, then verify that in fact
// no files were deleted:
- String[] startFiles = dir.listAll();
- SegmentInfos infos = new SegmentInfos();
- infos.read(dir);
- new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
- String[] endFiles = dir.listAll();
-
- Arrays.sort(startFiles);
- Arrays.sort(endFiles);
-
- //for(int i=0;i<startFiles.length;i++) {
- // System.out.println(" startFiles: " + i + ": " + startFiles[i]);
- //}
-
- if (!Arrays.equals(startFiles, endFiles)) {
- String successStr;
- if (success) {
- successStr = "success";
- } else {
- successStr = "IOException";
- err.printStackTrace();
- }
- fail("reader.close() failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes): before delete:\n " + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
- }
+ IndexWriter.unlock(dir);
+ TestIndexWriter.assertNoUnreferencedFiles(dir, "reader.close() failed to delete unreferenced files after");
// Finally, verify index is not corrupt, and, if
// we succeeded, we see all docs changed, and if
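The block deleted above moves into a shared helper; a minimal sketch of what TestIndexWriter.assertNoUnreferencedFiles amounts to, inferred from this hunk and the TestIndexWriter hunk below (the committed body may differ in detail):

    static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
      String[] startFiles = dir.listAll();
      SegmentInfos infos = new SegmentInfos();
      infos.read(dir);
      // opening a writer and rolling back runs IndexFileDeleter, pruning unreferenced files
      new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED).rollback();
      String[] endFiles = dir.listAll();
      Arrays.sort(startFiles);
      Arrays.sort(endFiles);
      assertTrue(message + ": before delete:\n  " + Arrays.toString(startFiles) +
                 "\nafter delete:\n  " + Arrays.toString(endFiles),
                 Arrays.equals(startFiles, endFiles));
    }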
Modified: lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriter.java Wed Jan 5 17:33:58 2011
@@ -48,8 +48,6 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
@@ -68,7 +66,6 @@ import org.apache.lucene.store.RAMDirect
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util.ThreadInterruptedException;
public class TestIndexWriter extends LuceneTestCase {
@@ -530,7 +527,7 @@ public class TestIndexWriter extends Luc
String[] startFiles = dir.listAll();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
- new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
+ new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED).rollback();
String[] endFiles = dir.listAll();
Arrays.sort(startFiles);
@@ -2533,47 +2530,6 @@ public class TestIndexWriter extends Luc
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
}
- // Throws IOException during DocumentsWriter.closeDocStore
- private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
- private boolean onlyOnce;
- public FailOnlyInCloseDocStore(boolean onlyOnce) {
- this.onlyOnce = onlyOnce;
- }
- @Override
- public void eval(MockRAMDirectory dir) throws IOException {
- if (doFail) {
- StackTraceElement[] trace = new Exception().getStackTrace();
- for (int i = 0; i < trace.length; i++) {
- if ("closeDocStore".equals(trace[i].getMethodName())) {
- if (onlyOnce)
- doFail = false;
- throw new IOException("now failing on purpose");
- }
- }
- }
- }
- }
-
- // LUCENE-1130: test IOException in closeDocStore
- public void testIOExceptionDuringCloseDocStore() throws IOException {
- _testSingleThreadFailure(new FailOnlyInCloseDocStore(false));
- }
-
- // LUCENE-1130: test IOException in closeDocStore
- public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
- _testSingleThreadFailure(new FailOnlyInCloseDocStore(true));
- }
-
- // LUCENE-1130: test IOException in closeDocStore, with threads
- public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
- _testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false));
- }
-
- // LUCENE-1130: test IOException in closeDocStore, with threads
- public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
- _testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true));
- }
-
// Throws IOException during DocumentsWriter.writeSegment
private static class FailOnlyInWriteSegment extends MockRAMDirectory.Failure {
private boolean onlyOnce;
Modified: lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/dev/branches/branch_3x/lucene/backwards/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Wed Jan 5 17:33:58 2011
@@ -18,12 +18,10 @@ package org.apache.lucene.index;
*/
import java.io.IOException;
-import java.util.Arrays;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
@@ -785,20 +783,9 @@ public class TestIndexWriterDelete exten
}
}
- String[] startFiles = dir.listAll();
- SegmentInfos infos = new SegmentInfos();
- infos.read(dir);
- new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
- String[] endFiles = dir.listAll();
-
- if (!Arrays.equals(startFiles, endFiles)) {
- fail("docswriter abort() failed to delete unreferenced files:\n before delete:\n "
- + arrayToString(startFiles) + "\n after delete:\n "
- + arrayToString(endFiles));
- }
-
modifier.close();
+ TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files");
}
private String arrayToString(String[] l) {
Modified: lucene/dev/branches/branch_3x/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java (original)
+++ lucene/dev/branches/branch_3x/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java Wed Jan 5 17:33:58 2011
@@ -288,7 +288,6 @@ public class IndexTask extends Task {
create ? OpenMode.CREATE : OpenMode.APPEND);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setUseCompoundFile(useCompoundIndex);
- lmp.setUseCompoundDocStore(useCompoundIndex);
lmp.setMergeFactor(mergeFactor);
IndexWriter writer = new IndexWriter(dir, conf);
int totalFiles = 0;
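Migration for code configured like IndexTask is mechanical: drop the doc-store setter and keep the rest, since compound doc stores no longer exist as a separate knob. A hedged before/after sketch:

    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
    lmp.setUseCompoundFile(useCompoundIndex);
    // lmp.setUseCompoundDocStore(useCompoundIndex);  // removed by LUCENE-2814, no replacement needed
    lmp.setMergeFactor(mergeFactor);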
Modified: lucene/dev/branches/branch_3x/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (original)
+++ lucene/dev/branches/branch_3x/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java Wed Jan 5 17:33:58 2011
@@ -251,7 +251,6 @@ public class Syns2Index
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
Version.LUCENE_CURRENT, ana).setOpenMode(OpenMode.CREATE));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true); // why?
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(true); // why?
Iterator<String> i1 = word2Nums.keySet().iterator();
while (i1.hasNext()) // for each word
{
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DirectoryReader.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DirectoryReader.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DirectoryReader.java Wed Jan 5 17:33:58 2011
@@ -760,7 +760,7 @@ class DirectoryReader extends IndexReade
// KeepOnlyLastCommitDeleter:
IndexFileDeleter deleter = new IndexFileDeleter(directory,
deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
- segmentInfos, null, null);
+ segmentInfos, null);
segmentInfos.updateGeneration(deleter.getLastSegmentInfos());
segmentInfos.changed();
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocConsumer.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocConsumer.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocConsumer.java Wed Jan 5 17:33:58 2011
@@ -23,7 +23,6 @@ import java.util.Collection;
abstract class DocConsumer {
abstract DocConsumerPerThread addThread(DocumentsWriterThreadState perThread) throws IOException;
abstract void flush(final Collection<DocConsumerPerThread> threads, final SegmentWriteState state) throws IOException;
- abstract void closeDocStore(final SegmentWriteState state) throws IOException;
abstract void abort();
abstract boolean freeRAM();
}
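With closeDocStore gone, the consumer contract shrinks to addThread/flush/abort/freeRAM. The resulting abstract class, reconstructed from the context lines of this hunk:

    abstract class DocConsumer {
      abstract DocConsumerPerThread addThread(DocumentsWriterThreadState perThread) throws IOException;
      abstract void flush(final Collection<DocConsumerPerThread> threads, final SegmentWriteState state) throws IOException;
      abstract void abort();
      abstract boolean freeRAM();
    }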
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldConsumer.java Wed Jan 5 17:33:58 2011
@@ -29,10 +29,6 @@ abstract class DocFieldConsumer {
* segment */
abstract void flush(Map<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException;
- /** Called when DocumentsWriter decides to close the doc
- * stores */
- abstract void closeDocStore(SegmentWriteState state) throws IOException;
-
/** Called when an aborting exception is hit */
abstract void abort();
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java Wed Jan 5 17:33:58 2011
@@ -47,12 +47,6 @@ final class DocFieldProcessor extends Do
}
@Override
- public void closeDocStore(SegmentWriteState state) throws IOException {
- consumer.closeDocStore(state);
- fieldsWriter.closeDocStore(state);
- }
-
- @Override
public void flush(Collection<DocConsumerPerThread> threads, SegmentWriteState state) throws IOException {
Map<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>> childThreadsAndFields = new HashMap<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>>();
@@ -68,7 +62,7 @@ final class DocFieldProcessor extends Do
// consumer can alter the FieldInfo* if necessary. EG,
// FreqProxTermsWriter does this with
// FieldInfo.storePayload.
- final String fileName = state.segmentFileName(IndexFileNames.FIELD_INFOS_EXTENSION);
+ final String fileName = IndexFileNames.segmentFileName(state.segmentName, IndexFileNames.FIELD_INFOS_EXTENSION);
fieldInfos.write(state.directory, fileName);
state.flushedFiles.add(fileName);
}
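SegmentWriteState no longer composes file names itself; callers go through the static IndexFileNames.segmentFileName(segmentName, extension), which in 3.x joins the two parts with a dot. For illustration:

    String fileName = IndexFileNames.segmentFileName("_0", IndexFileNames.FIELD_INFOS_EXTENSION);
    // -> "_0.fnm"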
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java Wed Jan 5 17:33:58 2011
@@ -114,8 +114,8 @@ final class DocFieldProcessorPerThread e
else
lastPerField.next = perField.next;
- if (state.docWriter.infoStream != null)
- state.docWriter.infoStream.println(" purge field=" + perField.fieldInfo.name);
+ if (state.infoStream != null)
+ state.infoStream.println(" purge field=" + perField.fieldInfo.name);
totalFieldCount--;
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocInverter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocInverter.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocInverter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocInverter.java Wed Jan 5 17:33:58 2011
@@ -74,12 +74,6 @@ final class DocInverter extends DocField
}
@Override
- public void closeDocStore(SegmentWriteState state) throws IOException {
- consumer.closeDocStore(state);
- endConsumer.closeDocStore(state);
- }
-
- @Override
void abort() {
consumer.abort();
endConsumer.abort();
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Wed Jan 5 17:33:58 2011
@@ -111,12 +111,9 @@ final class DocumentsWriter {
Directory directory;
String segment; // Current segment we are working on
- private String docStoreSegment; // Current doc-store segment we are writing
- private int docStoreOffset; // Current starting doc-store offset of current segment
- private int nextDocID; // Next docID to be added
- private int numDocsInRAM; // # docs buffered in RAM
- int numDocsInStore; // # docs written to doc stores
+ private int nextDocID; // Next docID to be added
+ private int numDocs; // # of docs added, but not yet flushed
// Max # ThreadState instances; if there are more threads
// than this they share ThreadStates
@@ -134,8 +131,6 @@ final class DocumentsWriter {
// this, they wait for others to finish first
private final int maxThreadStates;
- List<String> newFiles;
-
// Deletes for our still-in-RAM (to be flushed next) segment
private SegmentDeletes pendingDeletes = new SegmentDeletes();
@@ -311,7 +306,7 @@ final class DocumentsWriter {
final boolean doFlush = flushControl.waitUpdate(0, queries.length);
synchronized(this) {
for (Query query : queries) {
- pendingDeletes.addQuery(query, numDocsInRAM);
+ pendingDeletes.addQuery(query, numDocs);
}
}
return doFlush;
@@ -320,7 +315,7 @@ final class DocumentsWriter {
boolean deleteQuery(Query query) {
final boolean doFlush = flushControl.waitUpdate(0, 1);
synchronized(this) {
- pendingDeletes.addQuery(query, numDocsInRAM);
+ pendingDeletes.addQuery(query, numDocs);
}
return doFlush;
}
@@ -329,7 +324,7 @@ final class DocumentsWriter {
final boolean doFlush = flushControl.waitUpdate(0, terms.length);
synchronized(this) {
for (Term term : terms) {
- pendingDeletes.addTerm(term, numDocsInRAM);
+ pendingDeletes.addTerm(term, numDocs);
}
}
return doFlush;
@@ -338,7 +333,7 @@ final class DocumentsWriter {
boolean deleteTerm(Term term, boolean skipWait) {
final boolean doFlush = flushControl.waitUpdate(0, 1, skipWait);
synchronized(this) {
- pendingDeletes.addTerm(term, numDocsInRAM);
+ pendingDeletes.addTerm(term, numDocs);
}
return doFlush;
}
@@ -351,20 +346,23 @@ final class DocumentsWriter {
* here. */
synchronized void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
- for(int i=0;i<threadStates.length;i++)
+ for(int i=0;i<threadStates.length;i++) {
threadStates[i].docState.infoStream = infoStream;
+ }
}
synchronized void setMaxFieldLength(int maxFieldLength) {
this.maxFieldLength = maxFieldLength;
- for(int i=0;i<threadStates.length;i++)
+ for(int i=0;i<threadStates.length;i++) {
threadStates[i].docState.maxFieldLength = maxFieldLength;
+ }
}
synchronized void setSimilarity(Similarity similarity) {
this.similarity = similarity;
- for(int i=0;i<threadStates.length;i++)
+ for(int i=0;i<threadStates.length;i++) {
threadStates[i].docState.similarity = similarity;
+ }
}
/** Set how much RAM we can use before flushing. */
@@ -405,126 +403,14 @@ final class DocumentsWriter {
}
/** Returns how many docs are currently buffered in RAM. */
- synchronized int getNumDocsInRAM() {
- return numDocsInRAM;
- }
-
- /** Returns the current doc store segment we are writing
- * to. */
- synchronized String getDocStoreSegment() {
- return docStoreSegment;
- }
-
- /** Returns the doc offset into the shared doc store for
- * the current buffered docs. */
- synchronized int getDocStoreOffset() {
- return docStoreOffset;
- }
-
- /** Closes the current open doc stores an sets the
- * docStoreSegment and docStoreUseCFS on the provided
- * SegmentInfo. */
- synchronized void closeDocStore(SegmentWriteState flushState, IndexWriter writer, IndexFileDeleter deleter, SegmentInfo newSegment, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
-
- final boolean isSeparate = numDocsInRAM == 0 || !segment.equals(docStoreSegment);
-
- assert docStoreSegment != null;
-
- if (infoStream != null) {
- message("closeDocStore: openFiles=" + openFiles + "; segment=" + docStoreSegment + "; docStoreOffset=" + docStoreOffset + "; numDocsInStore=" + numDocsInStore + "; isSeparate=" + isSeparate);
- }
-
- closedFiles.clear();
- consumer.closeDocStore(flushState);
- flushState.numDocsInStore = 0;
- assert 0 == openFiles.size();
-
- if (isSeparate) {
- flushState.flushedFiles.clear();
-
- if (mergePolicy.useCompoundDocStore(segmentInfos)) {
-
- final String compoundFileName = IndexFileNames.segmentFileName(docStoreSegment, IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
-
- if (infoStream != null) {
- message("closeDocStore: create compound file " + compoundFileName);
- }
-
- boolean success = false;
- try {
-
- CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
- for (final String file : closedFiles) {
- cfsWriter.addFile(file);
- }
-
- // Perform the merge
- cfsWriter.close();
-
- success = true;
- } finally {
- if (!success) {
- deleter.deleteFile(compoundFileName);
- }
- }
-
- // In case the files we just merged into a CFS were
- // not registered w/ IFD:
- deleter.deleteNewFiles(closedFiles);
-
- final int numSegments = segmentInfos.size();
- for(int i=0;i<numSegments;i++) {
- SegmentInfo si = segmentInfos.info(i);
- if (si.getDocStoreOffset() != -1 &&
- si.getDocStoreSegment().equals(docStoreSegment)) {
- si.setDocStoreIsCompoundFile(true);
- }
- }
-
- newSegment.setDocStoreIsCompoundFile(true);
- if (infoStream != null) {
- message("closeDocStore: after compound file index=" + segmentInfos);
- }
-
- writer.checkpoint();
- }
- }
-
- docStoreSegment = null;
- docStoreOffset = 0;
- numDocsInStore = 0;
- }
-
- private Collection<String> abortedFiles; // List of files that were written before last abort()
-
- Collection<String> abortedFiles() {
- return abortedFiles;
+ synchronized int getNumDocs() {
+ return numDocs;
}
void message(String message) {
- if (infoStream != null)
+ if (infoStream != null) {
writer.message("DW: " + message);
- }
-
- final List<String> openFiles = new ArrayList<String>();
- final List<String> closedFiles = new ArrayList<String>();
-
- /* Returns Collection of files in use by this instance,
- * including any flushed segments. */
- @SuppressWarnings("unchecked")
- synchronized List<String> openFiles() {
- return (List<String>) ((ArrayList<String>) openFiles).clone();
- }
-
- synchronized void addOpenFile(String name) {
- assert !openFiles.contains(name);
- openFiles.add(name);
- }
-
- synchronized void removeOpenFile(String name) {
- assert openFiles.contains(name);
- openFiles.remove(name);
- closedFiles.add(name);
+ }
}
synchronized void setAborting() {
@@ -539,7 +425,6 @@ final class DocumentsWriter {
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
-
if (infoStream != null) {
message("docWriter: abort");
}
@@ -563,19 +448,11 @@ final class DocumentsWriter {
waitQueue.waitingBytes = 0;
- try {
- abortedFiles = openFiles();
- } catch (Throwable t) {
- abortedFiles = null;
- }
-
pendingDeletes.clear();
-
- openFiles.clear();
- for(int i=0;i<threadStates.length;i++)
+ for (DocumentsWriterThreadState threadState : threadStates)
try {
- threadStates[i].consumer.abort();
+ threadState.consumer.abort();
} catch (Throwable t) {
}
@@ -584,10 +461,6 @@ final class DocumentsWriter {
} catch (Throwable t) {
}
- docStoreSegment = null;
- numDocsInStore = 0;
- docStoreOffset = 0;
-
// Reset all postings data
doAfterFlush();
success = true;
@@ -595,7 +468,7 @@ final class DocumentsWriter {
aborting = false;
notifyAll();
if (infoStream != null) {
- message("docWriter: done abort; abortedFiles=" + abortedFiles + " success=" + success);
+ message("docWriter: done abort; success=" + success);
}
}
}
@@ -607,22 +480,25 @@ final class DocumentsWriter {
threadBindings.clear();
waitQueue.reset();
segment = null;
- numDocsInRAM = 0;
+ numDocs = 0;
nextDocID = 0;
bufferIsFull = false;
- for(int i=0;i<threadStates.length;i++)
+ for(int i=0;i<threadStates.length;i++) {
threadStates[i].doAfterFlush();
+ }
}
private synchronized boolean allThreadsIdle() {
- for(int i=0;i<threadStates.length;i++)
- if (!threadStates[i].isIdle)
+ for(int i=0;i<threadStates.length;i++) {
+ if (!threadStates[i].isIdle) {
return false;
+ }
+ }
return true;
}
synchronized boolean anyChanges() {
- return numDocsInRAM != 0 || pendingDeletes.any();
+ return numDocs != 0 || pendingDeletes.any();
}
// for testing
@@ -661,14 +537,14 @@ final class DocumentsWriter {
/** Flush all pending docs to a new segment */
// Lock order: IW -> DW
- synchronized SegmentInfo flush(IndexWriter writer, boolean closeDocStore, IndexFileDeleter deleter, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
+ synchronized SegmentInfo flush(IndexWriter writer, IndexFileDeleter deleter, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
// We change writer's segmentInfos:
assert Thread.holdsLock(writer);
waitIdle();
- if (numDocsInRAM == 0 && numDocsInStore == 0) {
+ if (numDocs == 0) {
// nothing to do!
if (infoStream != null) {
message("flush: no docs; skipping");
@@ -690,98 +566,60 @@ final class DocumentsWriter {
SegmentInfo newSegment;
try {
-
+ assert nextDocID == numDocs;
+ assert waitQueue.numWaiting == 0;
assert waitQueue.waitingBytes == 0;
- assert docStoreSegment != null || numDocsInRAM == 0: "dss=" + docStoreSegment + " numDocsInRAM=" + numDocsInRAM;
-
- assert numDocsInStore >= numDocsInRAM: "numDocsInStore=" + numDocsInStore + " numDocsInRAM=" + numDocsInRAM;
+ if (infoStream != null) {
+ message("flush postings as segment " + segment + " numDocs=" + numDocs);
+ }
- final SegmentWriteState flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.getConfig().getTermIndexInterval());
+ final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
+ numDocs, writer.getConfig().getTermIndexInterval());
- newSegment = new SegmentInfo(segment, numDocsInRAM, directory, false, true, -1, null, false, fieldInfos.hasProx(), false);
+ newSegment = new SegmentInfo(segment, numDocs, directory, false, true, fieldInfos.hasProx(), false);
- if (!closeDocStore || docStoreOffset != 0) {
- newSegment.setDocStoreSegment(docStoreSegment);
- newSegment.setDocStoreOffset(docStoreOffset);
- }
-
- if (closeDocStore) {
- closeDocStore(flushState, writer, deleter, newSegment, mergePolicy, segmentInfos);
+ Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
+ for (DocumentsWriterThreadState threadState : threadStates) {
+ threads.add(threadState.consumer);
}
- boolean hasVectors = flushState.hasVectors;
+ double startMBUsed = bytesUsed()/1024./1024.;
- if (numDocsInRAM > 0) {
+ consumer.flush(threads, flushState);
+ newSegment.setHasVectors(flushState.hasVectors);
- assert nextDocID == numDocsInRAM;
- assert waitQueue.numWaiting == 0;
- assert waitQueue.waitingBytes == 0;
-
- if (infoStream != null) {
- message("flush postings as segment " + segment + " numDocs=" + numDocsInRAM);
- }
-
- final Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
- for(int i=0;i<threadStates.length;i++) {
- threads.add(threadStates[i].consumer);
- }
-
- final double startMBUsed = bytesUsed()/1024./1024.;
- consumer.flush(threads, flushState);
-
- hasVectors |= flushState.hasVectors;
+ if (infoStream != null) {
+ message("new segment has " + (flushState.hasVectors ? "vectors" : "no vectors"));
+ message("flushedFiles=" + flushState.flushedFiles);
+ }
- if (hasVectors) {
- if (infoStream != null) {
- message("new segment has vectors");
- }
- newSegment.setHasVectors(true);
- } else {
- if (infoStream != null) {
- message("new segment has no vectors");
- }
- }
+ if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
+ final String cfsFileName = IndexFileNames.segmentFileName(segment, IndexFileNames.COMPOUND_FILE_EXTENSION);
if (infoStream != null) {
- message("flushedFiles=" + flushState.flushedFiles);
+ message("flush: create compound file \"" + cfsFileName + "\"");
}
- if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
-
- final String cfsFileName = IndexFileNames.segmentFileName(segment, IndexFileNames.COMPOUND_FILE_EXTENSION);
-
- if (infoStream != null) {
- message("flush: create compound file \"" + cfsFileName + "\"");
- }
-
- CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
- for(String fileName : flushState.flushedFiles) {
- cfsWriter.addFile(fileName);
- }
- cfsWriter.close();
- deleter.deleteNewFiles(flushState.flushedFiles);
-
- newSegment.setUseCompoundFile(true);
+ CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, cfsFileName);
+ for(String fileName : flushState.flushedFiles) {
+ cfsWriter.addFile(fileName);
}
+ cfsWriter.close();
+ deleter.deleteNewFiles(flushState.flushedFiles);
- if (infoStream != null) {
- message("flush: segment=" + newSegment);
- final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
- final double newSegmentSize = newSegment.sizeInBytes(true)/1024./1024.;
- String message = " ramUsed=" + nf.format(startMBUsed) + " MB" +
- " newFlushedSize=" + nf.format(newSegmentSize) + " MB" +
- " (" + nf.format(newSegmentSizeNoStore) + " MB w/o doc stores)" +
- " docs/MB=" + nf.format(numDocsInRAM/newSegmentSize) +
- " new/old=" + nf.format(100.0*newSegmentSize/startMBUsed) + "%";
- message(message);
- }
+ newSegment.setUseCompoundFile(true);
+ }
- } else {
- if (infoStream != null) {
- message("skip flushing segment: no docs");
- }
- newSegment = null;
+ if (infoStream != null) {
+ message("flush: segment=" + newSegment);
+ final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
+ final double newSegmentSize = newSegment.sizeInBytes(true)/1024./1024.;
+ message(" ramUsed=" + nf.format(startMBUsed) + " MB" +
+ " newFlushedSize=" + nf.format(newSegmentSize) + " MB" +
+ " (" + nf.format(newSegmentSizeNoStore) + " MB w/o doc stores)" +
+ " docs/MB=" + nf.format(numDocs / newSegmentSize) +
+ " new/old=" + nf.format(100.0 * newSegmentSizeNoStore / startMBUsed) + "%");
}
success = true;
@@ -800,8 +638,6 @@ final class DocumentsWriter {
// Lock order: IW -> DW -> BD
pushDeletes(newSegment, segmentInfos);
- docStoreOffset = numDocsInStore;
-
return newSegment;
}
@@ -810,17 +646,6 @@ final class DocumentsWriter {
notifyAll();
}
- synchronized void initSegmentName(boolean onlyDocStore) {
- if (segment == null && (!onlyDocStore || docStoreSegment == null)) {
- segment = writer.newSegmentName();
- assert numDocsInRAM == 0;
- }
- if (docStoreSegment == null) {
- docStoreSegment = segment;
- assert numDocsInStore == 0;
- }
- }
-
/** Returns a free (idle) ThreadState that may be used for
* indexing this one document. This call also pauses if a
* flush is pending. If delTerm is non-null then we
@@ -842,8 +667,9 @@ final class DocumentsWriter {
DocumentsWriterThreadState minThreadState = null;
for(int i=0;i<threadStates.length;i++) {
DocumentsWriterThreadState ts = threadStates[i];
- if (minThreadState == null || ts.numThreads < minThreadState.numThreads)
+ if (minThreadState == null || ts.numThreads < minThreadState.numThreads) {
minThreadState = ts;
+ }
}
if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.length >= maxThreadStates)) {
state = minThreadState;
@@ -851,8 +677,9 @@ final class DocumentsWriter {
} else {
// Just create a new "private" thread state
DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1+threadStates.length];
- if (threadStates.length > 0)
+ if (threadStates.length > 0) {
System.arraycopy(threadStates, 0, newArray, 0, threadStates.length);
+ }
state = newArray[threadStates.length] = new DocumentsWriterThreadState(this);
threadStates = newArray;
}
@@ -866,7 +693,10 @@ final class DocumentsWriter {
// Allocate segment name if this is the first doc since
// last flush:
- initSegmentName(false);
+ if (segment == null) {
+ segment = writer.newSegmentName();
+ assert numDocs == 0;
+ }
state.docState.docID = nextDocID++;
@@ -874,7 +704,7 @@ final class DocumentsWriter {
pendingDeletes.addTerm(delTerm, state.docState.docID);
}
- numDocsInRAM++;
+ numDocs++;
state.isIdle = false;
return state;
}
@@ -1018,15 +848,16 @@ final class DocumentsWriter {
final boolean doPause;
- if (docWriter != null)
+ if (docWriter != null) {
doPause = waitQueue.add(docWriter);
- else {
+ } else {
skipDocWriter.docID = perThread.docState.docID;
doPause = waitQueue.add(skipDocWriter);
}
- if (doPause)
+ if (doPause) {
waitForWaitQueue();
+ }
perThread.isIdle = true;
@@ -1122,7 +953,7 @@ final class DocumentsWriter {
final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
- private ArrayList<int[]> freeIntBlocks = new ArrayList<int[]>();
+ private List<int[]> freeIntBlocks = new ArrayList<int[]>();
/* Allocate another int[] from the shared pool */
synchronized int[] getIntBlock() {
@@ -1131,8 +962,9 @@ final class DocumentsWriter {
if (0 == size) {
b = new int[INT_BLOCK_SIZE];
bytesUsed.addAndGet(INT_BLOCK_SIZE*RamUsageEstimator.NUM_BYTES_INT);
- } else
+ } else {
b = freeIntBlocks.remove(size-1);
+ }
return b;
}
@@ -1227,13 +1059,14 @@ final class DocumentsWriter {
if (doBalance) {
- if (infoStream != null)
+ if (infoStream != null) {
message(" RAM: balance allocations: usedMB=" + toMB(bytesUsed()) +
" vs trigger=" + toMB(ramBufferSize) +
" deletesMB=" + toMB(deletesRAMUsed) +
" byteBlockFree=" + toMB(byteBlockAllocator.freeByteBlocks.size()*BYTE_BLOCK_SIZE) +
" perDocFree=" + toMB(perDocAllocator.freeByteBlocks.size()*PER_DOC_BLOCK_SIZE) +
" charBlockFree=" + toMB(freeCharBlocks.size()*CHAR_BLOCK_SIZE*RamUsageEstimator.NUM_BYTES_CHAR));
+ }
final long startBytesUsed = bytesUsed() + deletesRAMUsed;
@@ -1256,10 +1089,11 @@ final class DocumentsWriter {
// Nothing else to free -- must flush now.
bufferIsFull = bytesUsed()+deletesRAMUsed > ramBufferSize;
if (infoStream != null) {
- if (bytesUsed()+deletesRAMUsed > ramBufferSize)
+ if (bytesUsed()+deletesRAMUsed > ramBufferSize) {
message(" nothing to free; set bufferIsFull");
- else
+ } else {
message(" nothing to free");
+ }
}
break;
}
@@ -1291,15 +1125,17 @@ final class DocumentsWriter {
}
}
- if ((4 == iter % 5) && any)
+ if ((4 == iter % 5) && any) {
// Ask consumer to free any recycled state
any = consumer.freeRAM();
+ }
iter++;
}
- if (infoStream != null)
+ if (infoStream != null) {
message(" after free: freedMB=" + nf.format((startBytesUsed-bytesUsed()-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((bytesUsed()+deletesRAMUsed)/1024./1024.));
+ }
}
}
@@ -1352,11 +1188,11 @@ final class DocumentsWriter {
try {
doc.finish();
nextWriteDocID++;
- numDocsInStore++;
nextWriteLoc++;
assert nextWriteLoc <= waiting.length;
- if (nextWriteLoc == waiting.length)
+ if (nextWriteLoc == waiting.length) {
nextWriteLoc = 0;
+ }
success = true;
} finally {
if (!success) {
@@ -1403,8 +1239,9 @@ final class DocumentsWriter {
}
int loc = nextWriteLoc + gap;
- if (loc >= waiting.length)
+ if (loc >= waiting.length) {
loc -= waiting.length;
+ }
// We should only wrap one time
assert loc < waiting.length;
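Net effect of the DocumentsWriter hunks: flush() no longer branches on closeDocStore, never produces a doc-store-only flush, and always builds a self-contained segment. Roughly, assembled from the added lines above (a sketch, not the verbatim method):

    synchronized SegmentInfo flush(IndexWriter writer, IndexFileDeleter deleter,
                                   MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException {
      waitIdle();
      if (numDocs == 0) return null;                    // nothing buffered, nothing to flush
      SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
                                                           numDocs, writer.getConfig().getTermIndexInterval());
      SegmentInfo newSegment = new SegmentInfo(segment, numDocs, directory, false, true,
                                               fieldInfos.hasProx(), false);
      Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
      for (DocumentsWriterThreadState ts : threadStates) {
        threads.add(ts.consumer);
      }
      consumer.flush(threads, flushState);              // every consumer writes private per-segment files
      newSegment.setHasVectors(flushState.hasVectors);
      if (mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
        // pack flushState.flushedFiles into one .cfs and mark the segment as compound
      }
      pushDeletes(newSegment, segmentInfos);            // lock order: IW -> DW -> BD
      return newSegment;
    }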
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FieldsWriter.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FieldsWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FieldsWriter.java Wed Jan 5 17:33:58 2011
@@ -25,9 +25,9 @@ import org.apache.lucene.store.Directory
import org.apache.lucene.store.RAMOutputStream;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.IOUtils;
-final class FieldsWriter
-{
+final class FieldsWriter {
static final byte FIELD_IS_TOKENIZED = 0x1;
static final byte FIELD_IS_BINARY = 0x2;
@@ -49,190 +49,147 @@ final class FieldsWriter
// switch to a new format!
static final int FORMAT_CURRENT = FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS;
- private FieldInfos fieldInfos;
+ private FieldInfos fieldInfos;
- private IndexOutput fieldsStream;
-
- private IndexOutput indexStream;
-
- private boolean doClose;
-
- FieldsWriter(Directory d, String segment, FieldInfos fn) throws IOException {
- fieldInfos = fn;
-
- boolean success = false;
- final String fieldsName = IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_EXTENSION);
- try {
- fieldsStream = d.createOutput(fieldsName);
- fieldsStream.writeInt(FORMAT_CURRENT);
- success = true;
- } finally {
- if (!success) {
- try {
- close();
- } catch (Throwable t) {
- // Suppress so we keep throwing the original exception
- }
- try {
- d.deleteFile(fieldsName);
- } catch (Throwable t) {
- // Suppress so we keep throwing the original exception
- }
- }
- }
-
- success = false;
- final String indexName = IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_INDEX_EXTENSION);
- try {
- indexStream = d.createOutput(indexName);
- indexStream.writeInt(FORMAT_CURRENT);
- success = true;
- } finally {
- if (!success) {
- try {
- close();
- } catch (IOException ioe) {
- }
- try {
- d.deleteFile(fieldsName);
- } catch (Throwable t) {
- // Suppress so we keep throwing the original exception
- }
- try {
- d.deleteFile(indexName);
- } catch (Throwable t) {
- // Suppress so we keep throwing the original exception
- }
- }
- }
-
- doClose = true;
- }
-
- FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn) {
- fieldInfos = fn;
- fieldsStream = fdt;
- indexStream = fdx;
- doClose = false;
- }
-
- void setFieldsStream(IndexOutput stream) {
- this.fieldsStream = stream;
- }
-
- // Writes the contents of buffer into the fields stream
- // and adds a new entry for this document into the index
- // stream. This assumes the buffer was already written
- // in the correct fields format.
- void flushDocument(int numStoredFields, RAMOutputStream buffer) throws IOException {
- indexStream.writeLong(fieldsStream.getFilePointer());
- fieldsStream.writeVInt(numStoredFields);
- buffer.writeTo(fieldsStream);
- }
-
- void skipDocument() throws IOException {
- indexStream.writeLong(fieldsStream.getFilePointer());
- fieldsStream.writeVInt(0);
- }
-
- void flush() throws IOException {
- indexStream.flush();
- fieldsStream.flush();
- }
-
- final void close() throws IOException {
- if (doClose) {
- try {
- if (fieldsStream != null) {
- try {
- fieldsStream.close();
- } finally {
- fieldsStream = null;
- }
- }
- } catch (IOException ioe) {
- try {
- if (indexStream != null) {
- try {
- indexStream.close();
- } finally {
- indexStream = null;
- }
- }
- } catch (IOException ioe2) {
- // Ignore so we throw only first IOException hit
- }
- throw ioe;
- } finally {
- if (indexStream != null) {
- try {
- indexStream.close();
- } finally {
- indexStream = null;
- }
- }
- }
+  // If null, we were supplied with streams; if non-null, we manage them ourselves
+ private Directory directory;
+ private String segment;
+ private IndexOutput fieldsStream;
+ private IndexOutput indexStream;
+
+ FieldsWriter(Directory directory, String segment, FieldInfos fn) throws IOException {
+ this.directory = directory;
+ this.segment = segment;
+ fieldInfos = fn;
+
+ boolean success = false;
+ try {
+ fieldsStream = directory.createOutput(IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_EXTENSION));
+ indexStream = directory.createOutput(IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_INDEX_EXTENSION));
+
+ fieldsStream.writeInt(FORMAT_CURRENT);
+ indexStream.writeInt(FORMAT_CURRENT);
+
+ success = true;
+ } finally {
+ if (!success) {
+ abort();
}
}
+ }
- final void writeField(FieldInfo fi, Fieldable field) throws IOException {
- fieldsStream.writeVInt(fi.number);
- byte bits = 0;
- if (field.isTokenized())
- bits |= FieldsWriter.FIELD_IS_TOKENIZED;
- if (field.isBinary())
- bits |= FieldsWriter.FIELD_IS_BINARY;
-
- fieldsStream.writeByte(bits);
-
- if (field.isBinary()) {
- final byte[] data;
- final int len;
- final int offset;
- data = field.getBinaryValue();
- len = field.getBinaryLength();
- offset = field.getBinaryOffset();
-
- fieldsStream.writeVInt(len);
- fieldsStream.writeBytes(data, offset, len);
- }
- else {
- fieldsStream.writeString(field.stringValue());
+ FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn) {
+ directory = null;
+ segment = null;
+ fieldInfos = fn;
+ fieldsStream = fdt;
+ indexStream = fdx;
+ }
+
+ void setFieldsStream(IndexOutput stream) {
+ this.fieldsStream = stream;
+ }
+
+ // Writes the contents of buffer into the fields stream
+ // and adds a new entry for this document into the index
+ // stream. This assumes the buffer was already written
+ // in the correct fields format.
+ void flushDocument(int numStoredFields, RAMOutputStream buffer) throws IOException {
+ indexStream.writeLong(fieldsStream.getFilePointer());
+ fieldsStream.writeVInt(numStoredFields);
+ buffer.writeTo(fieldsStream);
+ }
+
+ void skipDocument() throws IOException {
+ indexStream.writeLong(fieldsStream.getFilePointer());
+ fieldsStream.writeVInt(0);
+ }
+
+ void close() throws IOException {
+ if (directory != null) {
+ try {
+ IOUtils.closeSafely(fieldsStream, indexStream);
+ } finally {
+ fieldsStream = indexStream = null;
}
}
+ }
- /** Bulk write a contiguous series of documents. The
- * lengths array is the length (in bytes) of each raw
- * document. The stream IndexInput is the
- * fieldsStream from which we should bulk-copy all
- * bytes. */
- final void addRawDocuments(IndexInput stream, int[] lengths, int numDocs) throws IOException {
- long position = fieldsStream.getFilePointer();
- long start = position;
- for(int i=0;i<numDocs;i++) {
- indexStream.writeLong(position);
- position += lengths[i];
+ void abort() {
+ if (directory != null) {
+ try {
+ close();
+ } catch (IOException ignored) {
+ }
+ try {
+ directory.deleteFile(IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_EXTENSION));
+ } catch (IOException ignored) {
+ }
+ try {
+ directory.deleteFile(IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_INDEX_EXTENSION));
+ } catch (IOException ignored) {
}
- fieldsStream.copyBytes(stream, position-start);
- assert fieldsStream.getFilePointer() == position;
}
+ }
+
+ final void writeField(FieldInfo fi, Fieldable field) throws IOException {
+ fieldsStream.writeVInt(fi.number);
+ byte bits = 0;
+ if (field.isTokenized())
+ bits |= FieldsWriter.FIELD_IS_TOKENIZED;
+ if (field.isBinary())
+ bits |= FieldsWriter.FIELD_IS_BINARY;
+
+ fieldsStream.writeByte(bits);
+
+ if (field.isBinary()) {
+ final byte[] data;
+ final int len;
+ final int offset;
+ data = field.getBinaryValue();
+ len = field.getBinaryLength();
+ offset = field.getBinaryOffset();
+
+ fieldsStream.writeVInt(len);
+ fieldsStream.writeBytes(data, offset, len);
+ }
+ else {
+ fieldsStream.writeString(field.stringValue());
+ }
+ }
+
+ /** Bulk write a contiguous series of documents. The
+ * lengths array is the length (in bytes) of each raw
+ * document. The stream IndexInput is the
+ * fieldsStream from which we should bulk-copy all
+ * bytes. */
+ final void addRawDocuments(IndexInput stream, int[] lengths, int numDocs) throws IOException {
+ long position = fieldsStream.getFilePointer();
+ long start = position;
+ for(int i=0;i<numDocs;i++) {
+ indexStream.writeLong(position);
+ position += lengths[i];
+ }
+ fieldsStream.copyBytes(stream, position-start);
+ assert fieldsStream.getFilePointer() == position;
+ }
+
+ final void addDocument(Document doc) throws IOException {
+ indexStream.writeLong(fieldsStream.getFilePointer());
+
+ int storedCount = 0;
+ List<Fieldable> fields = doc.getFields();
+ for (Fieldable field : fields) {
+ if (field.isStored())
+ storedCount++;
+ }
+ fieldsStream.writeVInt(storedCount);
+
- final void addDocument(Document doc) throws IOException {
- indexStream.writeLong(fieldsStream.getFilePointer());
- int storedCount = 0;
- List<Fieldable> fields = doc.getFields();
- for (Fieldable field : fields) {
- if (field.isStored())
- storedCount++;
- }
- fieldsStream.writeVInt(storedCount);
-
-
-
- for (Fieldable field : fields) {
- if (field.isStored())
- writeField(fieldInfos.fieldInfo(field.name()), field);
- }
+ for (Fieldable field : fields) {
+ if (field.isStored())
+ writeField(fieldInfos.fieldInfo(field.name()), field);
}
+ }
}
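The rewritten FieldsWriter trades the old nested per-stream try/finally cleanup for one abort() path plus IOUtils.closeSafely on close. The constructor idiom, condensed from the hunk above:

    boolean success = false;
    try {
      fieldsStream = directory.createOutput(IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_EXTENSION));
      indexStream = directory.createOutput(IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_INDEX_EXTENSION));
      fieldsStream.writeInt(FORMAT_CURRENT);
      indexStream.writeInt(FORMAT_CURRENT);
      success = true;
    } finally {
      if (!success) {
        abort(); // best effort: close both streams, then delete the partial .fdt and .fdx
      }
    }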
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java Wed Jan 5 17:33:58 2011
@@ -53,8 +53,8 @@ final class FormatPostingsFieldsWriter e
null,
null);
- state.flushedFiles.add(state.segmentFileName(IndexFileNames.TERMS_EXTENSION));
- state.flushedFiles.add(state.segmentFileName(IndexFileNames.TERMS_INDEX_EXTENSION));
+ state.flushedFiles.add(IndexFileNames.segmentFileName(state.segmentName, IndexFileNames.TERMS_EXTENSION));
+ state.flushedFiles.add(IndexFileNames.segmentFileName(state.segmentName, IndexFileNames.TERMS_INDEX_EXTENSION));
termsWriter = new FormatPostingsTermsWriter(state, this);
}
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java Wed Jan 5 17:33:58 2011
@@ -50,11 +50,8 @@ final class FreqProxTermsWriter extends
}
@Override
- void closeDocStore(SegmentWriteState state) {}
- @Override
void abort() {}
-
// TODO: would be nice to factor out more of this, eg the
// FreqProxFieldMergeState, and code to visit all Fields
// under the same FieldInfo together, up into TermsHash*.
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java Wed Jan 5 17:33:58 2011
@@ -22,14 +22,7 @@ import org.apache.lucene.store.Directory
import java.io.IOException;
import java.io.FileNotFoundException;
import java.io.PrintStream;
-import java.util.Map;
-import java.util.Date;
-import java.util.HashMap;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Collection;
+import java.util.*;
import org.apache.lucene.store.NoSuchDirectoryException;
import org.apache.lucene.util.CollectionUtil;
@@ -100,7 +93,6 @@ final class IndexFileDeleter {
private PrintStream infoStream;
private Directory directory;
private IndexDeletionPolicy policy;
- private DocumentsWriter docWriter;
final boolean startingCommitDeleted;
private SegmentInfos lastSegmentInfos;
@@ -111,8 +103,9 @@ final class IndexFileDeleter {
void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
- if (infoStream != null)
+ if (infoStream != null) {
message("setInfoStream deletionPolicy=" + policy);
+ }
}
private void message(String message) {
@@ -127,16 +120,16 @@ final class IndexFileDeleter {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter)
+ public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream)
throws CorruptIndexException, IOException {
- this.docWriter = docWriter;
this.infoStream = infoStream;
final String currentSegmentsFile = segmentInfos.getCurrentSegmentFileName();
- if (infoStream != null)
+ if (infoStream != null) {
message("init: current segments file is \"" + currentSegmentsFile + "\"; deletionPolicy=" + policy);
+ }
this.policy = policy;
this.directory = directory;
@@ -225,8 +218,9 @@ final class IndexFileDeleter {
} catch (IOException e) {
throw new CorruptIndexException("failed to locate current segments_N file");
}
- if (infoStream != null)
+ if (infoStream != null) {
message("forced open of current segments file " + segmentInfos.getCurrentSegmentFileName());
+ }
currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
commits.add(currentCommitPoint);
incRef(sis, true);
@@ -357,8 +351,9 @@ final class IndexFileDeleter {
// DecRef old files from the last checkpoint, if any:
int size = lastFiles.size();
if (size > 0) {
- for(int i=0;i<size;i++)
+ for(int i=0;i<size;i++) {
decRef(lastFiles.get(i));
+ }
lastFiles.clear();
}
@@ -391,8 +386,9 @@ final class IndexFileDeleter {
deletable = null;
int size = oldDeletable.size();
for(int i=0;i<size;i++) {
- if (infoStream != null)
+ if (infoStream != null) {
message("delete pending file " + oldDeletable.get(i));
+ }
deleteFile(oldDeletable.get(i));
}
}
@@ -441,37 +437,20 @@ final class IndexFileDeleter {
// Decref files for commits that were deleted by the policy:
deleteCommits();
} else {
-
- final List<String> docWriterFiles;
- if (docWriter != null) {
- docWriterFiles = docWriter.openFiles();
- if (docWriterFiles != null)
- // We must incRef these files before decRef'ing
- // last files to make sure we don't accidentally
- // delete them:
- incRef(docWriterFiles);
- } else
- docWriterFiles = null;
-
// DecRef old files from the last checkpoint, if any:
- int size = lastFiles.size();
- if (size > 0) {
- for(int i=0;i<size;i++)
- decRef(lastFiles.get(i));
- lastFiles.clear();
+ for (Collection<String> lastFile : lastFiles) {
+ decRef(lastFile);
}
+ lastFiles.clear();
// Save files so we can decr on next checkpoint/commit:
lastFiles.add(segmentInfos.files(directory, false));
-
- if (docWriterFiles != null)
- lastFiles.add(docWriterFiles);
}
}
void incRef(SegmentInfos segmentInfos, boolean isCommit) throws IOException {
- // If this is a commit point, also incRef the
- // segments_N file:
+ // If this is a commit point, also incRef the
+ // segments_N file:
for( final String fileName: segmentInfos.files(directory, isCommit) ) {
incRef(fileName);
}
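
The checkpoint logic above is plain reference counting over index file names: each checkpoint incRefs the files of the new SegmentInfos before decRef'ing the previous checkpoint's files, so a file referenced by both never hits zero in between. A minimal, self-contained sketch of that pattern (class and method names are illustrative, not the real IndexFileDeleter):

    import java.util.*;

    // Sketch of IndexFileDeleter-style reference counting. Assumes
    // incRef/decRef calls are balanced; the real class also guards
    // against unknown file names.
    class FileRefCounter {
      private final Map<String,Integer> refCounts = new HashMap<String,Integer>();
      private final List<Collection<String>> lastFiles = new ArrayList<Collection<String>>();

      void incRef(Collection<String> files) {
        for (String file : files) {
          Integer rc = refCounts.get(file);
          refCounts.put(file, rc == null ? 1 : rc + 1);
        }
      }

      void decRef(Collection<String> files) {
        for (String file : files) {
          int rc = refCounts.get(file) - 1;
          if (rc == 0) {
            refCounts.remove(file);
            deleteFile(file);   // count hit zero: no commit references it
          } else {
            refCounts.put(file, rc);
          }
        }
      }

      // Called with the file set of the new SegmentInfos on each checkpoint.
      void checkpoint(Collection<String> newFiles) {
        incRef(newFiles);                       // protect the new files first...
        for (Collection<String> old : lastFiles) {
          decRef(old);                          // ...then release the old ones
        }
        lastFiles.clear();
        lastFiles.add(newFiles);
      }

      private void deleteFile(String file) {
        // Stand-in for directory.deleteFile(file) in the real class.
      }
    }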
@@ -536,8 +515,9 @@ final class IndexFileDeleter {
}
void deleteFiles(List<String> files) throws IOException {
- for(final String file: files)
+ for(final String file: files) {
deleteFile(file);
+ }
}
/** Deletes the specified files, but only if they are new
@@ -696,6 +676,5 @@ final class IndexFileDeleter {
public boolean isDeleted() {
return deleted;
}
-
}
}
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/IndexWriter.java Wed Jan 5 17:33:58 2011
@@ -424,7 +424,7 @@ public class IndexWriter implements Clos
// just like we do when loading segments_N
IndexReader r;
synchronized(this) {
- flush(false, true, true);
+ flush(false, true);
r = new ReadOnlyDirectoryReader(this, segmentInfos, termInfosIndexDivisor);
if (infoStream != null) {
message("return reader version=" + r.getVersion() + " reader=" + r);
@@ -783,7 +783,6 @@ public class IndexWriter implements Clos
@Deprecated
public void setUseCompoundFile(boolean value) {
getLogMergePolicy().setUseCompoundFile(value);
- getLogMergePolicy().setUseCompoundDocStore(value);
}
/** Expert: Set the Similarity implementation used by this IndexWriter.
@@ -1131,7 +1130,7 @@ public class IndexWriter implements Clos
// KeepOnlyLastCommitDeleter:
deleter = new IndexFileDeleter(directory,
conf.getIndexDeletionPolicy(),
- segmentInfos, infoStream, docWriter);
+ segmentInfos, infoStream);
if (deleter.startingCommitDeleted) {
// Deletion policy deleted the "head" commit point.
@@ -1831,7 +1830,7 @@ public class IndexWriter implements Clos
// Only allow a new merge to be triggered if we are
// going to wait for merges:
if (!hitOOM) {
- flush(waitForMerges, true, true);
+ flush(waitForMerges, true);
}
if (waitForMerges)
@@ -1905,7 +1904,7 @@ public class IndexWriter implements Clos
public synchronized int maxDoc() {
int count;
if (docWriter != null)
- count = docWriter.getNumDocsInRAM();
+ count = docWriter.getNumDocs();
else
count = 0;
@@ -1923,7 +1922,7 @@ public class IndexWriter implements Clos
public synchronized int numDocs() throws IOException {
int count;
if (docWriter != null)
- count = docWriter.getNumDocsInRAM();
+ count = docWriter.getNumDocs();
else
count = 0;
@@ -2038,27 +2037,11 @@ public class IndexWriter implements Clos
doFlush = docWriter.updateDocument(doc, analyzer, null);
success = true;
} finally {
- if (!success) {
-
- if (infoStream != null) {
- message("hit exception adding document");
- }
-
- synchronized (this) {
- // If docWriter has some aborted files that were
- // never incref'd, then we clean them up here
- deleter.checkpoint(segmentInfos, false);
- if (docWriter != null) {
- final Collection<String> files = docWriter.abortedFiles();
- if (files != null) {
- deleter.deleteNewFiles(files);
- }
- }
- }
- }
+ if (!success && infoStream != null)
+ message("hit exception adding document");
}
if (doFlush)
- flush(true, false, false);
+ flush(true, false);
} catch (OutOfMemoryError oom) {
handleOOM(oom, "addDocument");
}
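
The trimmed error handling above (and in updateDocument and doFlush below) leans on the success-flag idiom: the flag is set as the last statement of the try block, so in the finally block a false value means an exception is in flight, and cleanup or logging can run without catching and rethrowing. A reduced, self-contained sketch (riskyWork is a placeholder for e.g. docWriter.updateDocument):

    // Sketch of the success-flag idiom used throughout IndexWriter.
    public class SuccessFlagDemo {
      public static void main(String[] args) {
        boolean success = false;
        try {
          riskyWork();      // may throw
          success = true;   // last statement: only reached on success
        } finally {
          if (!success) {
            // Runs exactly when riskyWork() threw; the exception keeps
            // propagating on its own, so no catch/rethrow is needed.
            System.err.println("hit exception during risky work");
          }
        }
      }

      private static void riskyWork() {
        // Placeholder for the guarded operation.
      }
    }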
@@ -2079,7 +2062,7 @@ public class IndexWriter implements Clos
ensureOpen();
try {
if (docWriter.deleteTerm(term, false)) {
- flush(true, false, false);
+ flush(true, false);
}
} catch (OutOfMemoryError oom) {
handleOOM(oom, "deleteDocuments(Term)");
@@ -2103,7 +2086,7 @@ public class IndexWriter implements Clos
ensureOpen();
try {
if (docWriter.deleteTerms(terms)) {
- flush(true, false, false);
+ flush(true, false);
}
} catch (OutOfMemoryError oom) {
handleOOM(oom, "deleteDocuments(Term..)");
@@ -2125,7 +2108,7 @@ public class IndexWriter implements Clos
ensureOpen();
try {
if (docWriter.deleteQuery(query)) {
- flush(true, false, false);
+ flush(true, false);
}
} catch (OutOfMemoryError oom) {
handleOOM(oom, "deleteDocuments(Query)");
@@ -2149,7 +2132,7 @@ public class IndexWriter implements Clos
ensureOpen();
try {
if (docWriter.deleteQueries(queries)) {
- flush(true, false, false);
+ flush(true, false);
}
} catch (OutOfMemoryError oom) {
handleOOM(oom, "deleteDocuments(Query..)");
@@ -2206,26 +2189,11 @@ public class IndexWriter implements Clos
doFlush = docWriter.updateDocument(doc, analyzer, term);
success = true;
} finally {
- if (!success) {
-
- if (infoStream != null) {
- message("hit exception updating document");
- }
-
- synchronized (this) {
- // If docWriter has some aborted files that were
- // never incref'd, then we clean them up here
- if (docWriter != null) {
- final Collection<String> files = docWriter.abortedFiles();
- if (files != null) {
- deleter.deleteNewFiles(files);
- }
- }
- }
- }
+ if (!success && infoStream != null)
+ message("hit exception updating document");
}
if (doFlush) {
- flush(true, false, false);
+ flush(true, false);
}
} catch (OutOfMemoryError oom) {
handleOOM(oom, "updateDocument");
@@ -2239,7 +2207,7 @@ public class IndexWriter implements Clos
// for test purpose
final synchronized int getNumBufferedDocuments(){
- return docWriter.getNumDocsInRAM();
+ return docWriter.getNumDocs();
}
// for test purpose
@@ -2395,7 +2363,7 @@ public class IndexWriter implements Clos
message("now flush at optimize");
}
- flush(true, false, true);
+ flush(true, true);
synchronized(this) {
resetMergeExceptions();
@@ -2928,7 +2896,7 @@ public class IndexWriter implements Clos
int docCount = merger.merge(); // merge 'em
SegmentInfo info = new SegmentInfo(mergedName, docCount, directory,
- false, true, -1, null, false,
+ false, true,
merger.fieldInfos().hasProx(),
merger.fieldInfos().hasVectors());
setDiagnostics(info, "addIndexes(IndexReader...)");
@@ -3019,7 +2987,7 @@ public class IndexWriter implements Clos
try {
if (infoStream != null)
message("flush at addIndexes(Directory...)");
- flush(false, false, true);
+ flush(false, true);
int docCount = 0;
List<SegmentInfo> infos = new ArrayList<SegmentInfo>();
@@ -3164,7 +3132,7 @@ public class IndexWriter implements Clos
if (infoStream != null)
message("prepareCommit: flush");
- flush(true, true, true);
+ flush(true, true);
startCommit(commitUserData);
}
@@ -3276,17 +3244,21 @@ public class IndexWriter implements Clos
}
}
+ /** NOTE: flushDocStores is ignored now (hardwired to
+ * true); this method is only here for backwards
+ * compatibility */
+ protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+ flush(triggerMerge, flushDeletes);
+ }
+
/**
* Flush all in-memory buffered updates (adds and deletes)
* to the Directory.
* @param triggerMerge if true, we may merge segments (if
* deletes or docs were flushed) if necessary
- * @param flushDocStores if false we are allowed to keep
- * doc stores open to share with the next segment
* @param flushDeletes whether pending deletes should also
* be flushed
*/
- protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
+ protected final void flush(boolean triggerMerge, boolean flushDeletes) throws CorruptIndexException, IOException {
// NOTE: this method cannot be sync'd because
// maybeMerge() in turn calls mergeScheduler.merge which
@@ -3297,7 +3269,7 @@ public class IndexWriter implements Clos
// We can be called during close, when closing==true, so we must pass false to ensureOpen:
ensureOpen(false);
- if (doFlush(flushDocStores, flushDeletes) && triggerMerge) {
+ if (doFlush(flushDeletes) && triggerMerge) {
maybeMerge();
}
}
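
Because flush(...) is protected, the three-argument shim above keeps existing subclasses compiling while silently dropping the flushDocStores flag. A hypothetical subclass showing both call forms (FlushingWriter and flushNow are made up for illustration):

    import java.io.IOException;

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;

    // Hypothetical subclass: flush(...) is protected, so only a
    // subclass can call it directly.
    class FlushingWriter extends IndexWriter {
      FlushingWriter(Directory dir, IndexWriterConfig conf) throws IOException {
        super(dir, conf);
      }

      void flushNow() throws IOException {
        flush(true, true, false); // old form: middle flag ignored, delegates to flush(true, false)
        flush(true, false);       // preferred new form
      }
    }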
@@ -3305,7 +3277,7 @@ public class IndexWriter implements Clos
// TODO: this method should not have to be entirely
// synchronized, ie, merges should be allowed to commit
// even while a flush is happening
- private synchronized final boolean doFlush(boolean closeDocStores, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+ private synchronized boolean doFlush(boolean applyAllDeletes) throws CorruptIndexException, IOException {
if (hitOOM) {
throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
@@ -3328,11 +3300,11 @@ public class IndexWriter implements Clos
try {
if (infoStream != null) {
- message(" start flush: applyAllDeletes=" + applyAllDeletes + " closeDocStores=" + closeDocStores);
+ message(" start flush: applyAllDeletes=" + applyAllDeletes);
message(" index before flush " + segString());
}
- final SegmentInfo newSegment = docWriter.flush(this, closeDocStores, deleter, mergePolicy, segmentInfos);
+ final SegmentInfo newSegment = docWriter.flush(this, deleter, mergePolicy, segmentInfos);
if (newSegment != null) {
setDiagnostics(newSegment, "flush");
segmentInfos.add(newSegment);
@@ -3380,17 +3352,8 @@ public class IndexWriter implements Clos
return false;
} finally {
flushControl.clearFlushPending();
- if (!success) {
- if (infoStream != null) {
- message("hit exception during flush");
- }
- if (docWriter != null) {
- final Collection<String> files = docWriter.abortedFiles();
- if (files != null) {
- deleter.deleteNewFiles(files);
- }
- }
- }
+ if (!success && infoStream != null)
+ message("hit exception during flush");
}
}
@@ -3406,7 +3369,7 @@ public class IndexWriter implements Clos
* buffered in RAM. */
public final synchronized int numRamDocs() {
ensureOpen();
- return docWriter.getNumDocsInRAM();
+ return docWriter.getNumDocs();
}
private int ensureContiguousMerge(MergePolicy.OneMerge merge) {
@@ -3509,7 +3472,7 @@ public class IndexWriter implements Clos
}
/* FIXME if we want to support non-contiguous segment merges */
- synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, SegmentReader mergedReader) throws IOException {
+ synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentReader mergedReader) throws IOException {
assert testPoint("startCommitMerge");
@@ -3739,7 +3702,7 @@ public class IndexWriter implements Clos
}
}
- final synchronized private void _mergeInit(MergePolicy.OneMerge merge) throws IOException {
+ synchronized private void _mergeInit(MergePolicy.OneMerge merge) throws IOException {
assert testPoint("startMergeInit");
@@ -3756,133 +3719,26 @@ public class IndexWriter implements Clos
if (merge.isAborted())
return;
-
- final SegmentInfos sourceSegments = merge.segments;
- final int end = sourceSegments.size();
-
- // Check whether this merge will allow us to skip
- // merging the doc stores (stored field & vectors).
- // This is a very substantial optimization (saves tons
- // of IO).
-
- Directory lastDir = directory;
- String lastDocStoreSegment = null;
- int next = -1;
- boolean mergeDocStores = false;
- boolean doFlushDocStore = false;
boolean hasVectors = false;
- final String currentDocStoreSegment = docWriter.getDocStoreSegment();
-
- // Test each segment to be merged: check if we need to
- // flush/merge doc stores
- for (int i = 0; i < end; i++) {
- SegmentInfo si = sourceSegments.info(i);
-
- // If it has deletions we must merge the doc stores
- if (si.hasDeletions())
- mergeDocStores = true;
-
- if (si.getHasVectors()) {
+ for (SegmentInfo sourceSegment : merge.segments) {
+ if (sourceSegment.getHasVectors()) {
hasVectors = true;
}
-
- // If it has its own (private) doc stores we must
- // merge the doc stores
- if (-1 == si.getDocStoreOffset())
- mergeDocStores = true;
-
- // If it has a different doc store segment than
- // previous segments, we must merge the doc stores
- String docStoreSegment = si.getDocStoreSegment();
- if (docStoreSegment == null)
- mergeDocStores = true;
- else if (lastDocStoreSegment == null)
- lastDocStoreSegment = docStoreSegment;
- else if (!lastDocStoreSegment.equals(docStoreSegment))
- mergeDocStores = true;
-
- // Segments' docScoreOffsets must be in-order,
- // contiguous. For the default merge policy now
- // this will always be the case but for an arbitrary
- // merge policy this may not be the case
- if (-1 == next)
- next = si.getDocStoreOffset() + si.docCount;
- else if (next != si.getDocStoreOffset())
- mergeDocStores = true;
- else
- next = si.getDocStoreOffset() + si.docCount;
-
- // If the segment comes from a different directory
- // we must merge
- if (lastDir != si.dir)
- mergeDocStores = true;
-
- // If the segment is referencing the current "live"
- // doc store outputs then we must merge
- if (si.getDocStoreOffset() != -1 && currentDocStoreSegment != null && si.getDocStoreSegment().equals(currentDocStoreSegment)) {
- doFlushDocStore = true;
- }
- }
-
- // if a mergedSegmentWarmer is installed, we must merge
- // the doc stores because we will open a full
- // SegmentReader on the merged segment:
- if (!mergeDocStores && mergedSegmentWarmer != null && currentDocStoreSegment != null && lastDocStoreSegment != null && lastDocStoreSegment.equals(currentDocStoreSegment)) {
- mergeDocStores = true;
- }
-
- final int docStoreOffset;
- final String docStoreSegment;
- final boolean docStoreIsCompoundFile;
-
- if (mergeDocStores) {
- docStoreOffset = -1;
- docStoreSegment = null;
- docStoreIsCompoundFile = false;
- } else {
- SegmentInfo si = sourceSegments.info(0);
- docStoreOffset = si.getDocStoreOffset();
- docStoreSegment = si.getDocStoreSegment();
- docStoreIsCompoundFile = si.getDocStoreIsCompoundFile();
- }
-
- if (mergeDocStores && doFlushDocStore) {
- // SegmentMerger intends to merge the doc stores
- // (stored fields, vectors), and at least one of the
- // segments to be merged refers to the currently
- // live doc stores.
-
- // TODO: if we know we are about to merge away these
- // newly flushed doc store files then we should not
- // make compound file out of them...
- if (infoStream != null)
- message("now flush at merge");
- doFlush(true, false);
- updatePendingMerges(1, false);
}
- merge.mergeDocStores = mergeDocStores;
-
// Bind a new segment name here so even with
// ConcurrentMergePolicy we keep deterministic segment
// names.
- merge.info = new SegmentInfo(newSegmentName(), 0,
- directory, false, true,
- docStoreOffset,
- docStoreSegment,
- docStoreIsCompoundFile,
- false,
- hasVectors);
+ merge.info = new SegmentInfo(newSegmentName(), 0, directory, false, true, false, hasVectors);
Map<String,String> details = new HashMap<String,String>();
details.put("optimize", Boolean.toString(merge.optimize));
- details.put("mergeFactor", Integer.toString(end));
- details.put("mergeDocStores", Boolean.toString(mergeDocStores));
+ details.put("mergeFactor", Integer.toString(merge.segments.size()));
setDiagnostics(merge.info, "merge", details);
if (infoStream != null) {
- message("merge seg=" + merge.info.name + " mergeDocStores=" + mergeDocStores);
+ message("merge seg=" + merge.info.name);
}
// Also enroll the merged segment into mergingSegments;
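
Now that every segment writes its own private doc stores, all of the shared-doc-store bookkeeping removed in the hunk above becomes unnecessary; the only per-segment fact the merge still needs up front is hasVectors. The surviving comment about deterministic names is why merge.info is bound eagerly here: the name comes from a counter taken under the writer's lock, in merge-registration order, rather than whenever a concurrent merge scheduler thread happens to run. A simplified sketch of that naming scheme (not the actual IndexWriter code):

    // Segment names are a shared counter rendered in base 36
    // (Character.MAX_RADIX), bumped under lock so concurrent merge
    // scheduling still produces reproducible names.
    class SegmentNamer {
      private long counter;

      synchronized String newSegmentName() {
        return "_" + Long.toString(counter++, Character.MAX_RADIX);
      }
    }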
@@ -4015,10 +3871,6 @@ public class IndexWriter implements Clos
payloadProcessorProvider,
((FieldInfos) docWriter.getFieldInfos().clone()));
- if (merger.fieldInfos().hasVectors() && merge.mergeDocStores) {
- merge.info.setHasVectors(true);
- }
-
if (infoStream != null) {
message("merging " + merge.segString(directory) + " mergeVectors=" + merge.info.getHasVectors());
}
@@ -4026,15 +3878,6 @@ public class IndexWriter implements Clos
merge.readers = new SegmentReader[numSegments];
merge.readersClone = new SegmentReader[numSegments];
- boolean mergeDocStores = false;
-
- final String currentDocStoreSegment;
- synchronized(this) {
- currentDocStoreSegment = docWriter.getDocStoreSegment();
- }
-
- boolean currentDSSMerged = false;
-
// This is try/finally to make sure merger's readers are
// closed:
boolean success = false;
@@ -4046,7 +3889,7 @@ public class IndexWriter implements Clos
// Hold onto the "live" reader; we will use this to
// commit merged deletes
- SegmentReader reader = merge.readers[i] = readerPool.get(info, merge.mergeDocStores,
+ SegmentReader reader = merge.readers[i] = readerPool.get(info, true,
MERGE_READ_BUFFER_SIZE,
-1);
@@ -4056,14 +3899,6 @@ public class IndexWriter implements Clos
SegmentReader clone = merge.readersClone[i] = (SegmentReader) reader.clone(true);
merger.add(clone);
- if (clone.hasDeletions()) {
- mergeDocStores = true;
- }
-
- if (info.getDocStoreOffset() != -1 && currentDocStoreSegment != null) {
- currentDSSMerged |= currentDocStoreSegment.equals(info.getDocStoreSegment());
- }
-
totDocCount += clone.numDocs();
}
@@ -4073,40 +3908,8 @@ public class IndexWriter implements Clos
merge.checkAborted(directory);
- // If deletions have arrived and it has now become
- // necessary to merge doc stores, go and open them:
- if (mergeDocStores && !merge.mergeDocStores) {
- merge.mergeDocStores = true;
- synchronized(this) {
-
- // If 1) we must now merge doc stores, and 2) at
- // least one of the segments we are merging uses
- // the doc store we are now writing to, we must at
- // this point force this doc store closed (by
- // calling flush). If we didn't do this then the
- // readers will attempt to open an IndexInput
- // on files that have still-open IndexOutputs
- // against them:
- if (currentDSSMerged) {
- if (infoStream != null) {
- message("now flush at mergeMiddle");
- }
- doFlush(true, false);
- updatePendingMerges(1, false);
- }
- }
-
- for(int i=0;i<numSegments;i++) {
- merge.readersClone[i].openDocStores();
- }
-
- // Clear DSS
- merge.info.setDocStore(-1, null, false);
- message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + numSegments);
- }
-
// This is where all the work happens:
- mergedDocCount = merge.info.docCount = merger.merge(merge.mergeDocStores);
+ mergedDocCount = merge.info.docCount = merger.merge();
assert mergedDocCount == totDocCount;
@@ -4179,12 +3982,7 @@ public class IndexWriter implements Clos
final int termsIndexDivisor;
final boolean loadDocStores;
- // if the merged segment warmer was not installed when
- // this merge was started, causing us to not force
- // the docStores to close, we can't warm it now
- final boolean canWarm = merge.info.getDocStoreSegment() == null || currentDocStoreSegment == null || !merge.info.getDocStoreSegment().equals(currentDocStoreSegment);
-
- if (poolReaders && mergedSegmentWarmer != null && canWarm) {
+ if (poolReaders && mergedSegmentWarmer != null) {
// Load terms index & doc stores so the segment
// warmer can run searches, load documents/term
// vectors
@@ -4205,7 +4003,7 @@ public class IndexWriter implements Clos
mergedSegmentWarmer.warm(mergedReader);
}
- if (!commitMerge(merge, merger, mergedReader)) {
+ if (!commitMerge(merge, mergedReader)) {
// commitMerge will return false if this merge was aborted
return 0;
}
@@ -4336,7 +4134,7 @@ public class IndexWriter implements Clos
}
// First, we clone & incref the segmentInfos we intend
- // to sync, then, without locking, we sync() each file
+ // to sync, then, without locking, we sync() all files
// referenced by toSync, in the background.
if (infoStream != null)
@@ -4344,25 +4142,7 @@ public class IndexWriter implements Clos
readerPool.commit();
- // It's possible another flush (that did not close
- // the open do stores) snuck in after the flush we
- // just did, so we remove any tail segments
- // referencing the open doc store from the
- // SegmentInfos we are about to sync (the main
- // SegmentInfos will keep them):
toSync = (SegmentInfos) segmentInfos.clone();
- final String dss = docWriter.getDocStoreSegment();
- if (dss != null) {
- while(true) {
- final String dss2 = toSync.info(toSync.size()-1).getDocStoreSegment();
- if (dss2 == null || !dss2.equals(dss)) {
- break;
- }
- toSync.remove(toSync.size()-1);
- changeCount++;
- segmentInfos.changed();
- }
- }
assert filesExist(toSync);
if (commitUserData != null)
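
The commit path above is snapshot-then-publish: clone the SegmentInfos (and incRef the files it references) while holding the writer's lock, then fsync those files outside the lock so concurrent flushes can keep mutating the live SegmentInfos. A rough sketch of the shape, with hypothetical names (CommitSnapshotter, lock); the incRef step is only noted in a comment because IndexFileDeleter is package-private:

    import java.io.IOException;

    import org.apache.lucene.index.SegmentInfos;
    import org.apache.lucene.store.Directory;

    // Hypothetical sketch of the snapshot-then-sync pattern.
    class CommitSnapshotter {
      private final Object lock = new Object();
      private SegmentInfos segmentInfos; // the live, mutable view
      private Directory directory;

      SegmentInfos snapshotAndSync() throws IOException {
        final SegmentInfos toSync;
        synchronized (lock) {
          toSync = (SegmentInfos) segmentInfos.clone(); // point-in-time copy
          // IndexWriter also incRefs toSync's files here so the deleter
          // cannot remove them before the sync below finishes.
        }
        // Outside the lock: later flushes may change the live
        // segmentInfos, but the snapshot and its files stay stable.
        for (String fileName : toSync.files(directory, false)) {
          directory.sync(fileName);
        }
        return toSync;
      }
    }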
Modified: lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/InvertedDocConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/InvertedDocConsumer.java?rev=1055547&r1=1055546&r2=1055547&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/InvertedDocConsumer.java (original)
+++ lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/index/InvertedDocConsumer.java Wed Jan 5 17:33:58 2011
@@ -32,9 +32,6 @@ abstract class InvertedDocConsumer {
/** Flush a new segment */
abstract void flush(Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException;
- /** Close doc stores */
- abstract void closeDocStore(SegmentWriteState state) throws IOException;
-
/** Attempt to free RAM, returning true if any RAM was
* freed */
abstract boolean freeRAM();
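
With closeDocStore gone from the consumer interface, an InvertedDocConsumer implementation only deals with flushing, aborting, RAM accounting and per-thread construction. A skeletal no-op consumer, assuming the remaining abstract surface is addThread/flush/abort/freeRAM as in the 3.x sources (these types are package-private, hence the org.apache.lucene.index package):

    package org.apache.lucene.index;

    import java.io.IOException;
    import java.util.Collection;
    import java.util.Map;

    // Hypothetical skeleton: everything a consumer must implement
    // after this commit; note there is no closeDocStore anymore.
    class NoOpInvertedDocConsumer extends InvertedDocConsumer {

      @Override
      InvertedDocConsumerPerThread addThread(DocInverterPerThread perThread) {
        throw new UnsupportedOperationException("sketch only");
      }

      @Override
      void flush(Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> threadsAndFields,
                 SegmentWriteState state) throws IOException {
        // Nothing buffered, so flushing a new segment writes nothing.
      }

      @Override
      void abort() {
        // No state to discard.
      }

      @Override
      boolean freeRAM() {
        return false; // no RAM held, none freed
      }
    }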