You are viewing a plain text version of this content. The canonical link for it is here.
Posted to java-commits@lucene.apache.org by mi...@apache.org on 2010/11/20 15:17:28 UTC
svn commit: r1037221 - in /lucene/java/branches/lucene_3_0: ./
src/java/org/apache/lucene/index/ src/test/org/apache/lucene/index/
src/test/org/apache/lucene/store/
Author: mikemccand
Date: Sat Nov 20 14:17:28 2010
New Revision: 1037221
URL: http://svn.apache.org/viewvc?rev=1037221&view=rev
Log:
LUCENE-2762: fix IW to not hold open but deleted files when CFS is true
Modified:
lucene/java/branches/lucene_3_0/CHANGES.txt
lucene/java/branches/lucene_3_0/common-build.xml
lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/IndexWriter.java
lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/MergePolicy.java
lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/SegmentMerger.java
lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestDoc.java
lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriter.java
lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterReader.java
lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMDirectory.java
lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMInputStream.java
Modified: lucene/java/branches/lucene_3_0/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/CHANGES.txt?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/CHANGES.txt (original)
+++ lucene/java/branches/lucene_3_0/CHANGES.txt Sat Nov 20 14:17:28 2010
@@ -9,6 +9,13 @@ Changes in runtime behavior
test lock just before the real lock is acquired. (Surinder Pal
Singh Bindra via Mike McCandless)
+* LUCENE-2762: Fixed bug in IndexWriter causing it to hold open file
+ handles against deleted files when compound-file was enabled (the
+ default) and readers are pooled. As a result of this the peak
+ worst-case free disk space required during optimize is now 3X the
+ index size, when compound file is enabled (else 2X). (Mike
+ McCandless)
+
Bug fixes
* LUCENE-2142 (correct fix): FieldCacheImpl.getStringIndex no longer
@@ -91,6 +98,13 @@ Bug fixes
that could potentially result in index corruption. (Mike
McCandless)
+* LUCENE-2762: Fixed bug in IndexWriter causing it to hold open file
+ handles against deleted files when compound-file was enabled (the
+ default) and readers are pooled. As a result of this the peak
+ worst-case free disk space required during optimize is now 3X the
+ index size, when compound file is enabled (else 2X). (Mike
+ McCandless)
+
Optimizations
* LUCENE-2556: Improve memory usage after cloning TermAttribute.
Modified: lucene/java/branches/lucene_3_0/common-build.xml
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/common-build.xml?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/common-build.xml (original)
+++ lucene/java/branches/lucene_3_0/common-build.xml Sat Nov 20 14:17:28 2010
@@ -42,7 +42,7 @@
<property name="Name" value="Lucene"/>
<property name="dev.version" value="3.0.3-dev"/>
<property name="version" value="${dev.version}"/>
- <property name="compatibility.tag" value="lucene_2_9_back_compat_tests_20101031"/>
+ <property name="compatibility.tag" value="lucene_2_9_back_compat_tests_20101120"/>
<property name="spec.version" value="${version}"/>
<property name="year" value="2000-${current.year}"/>
<property name="final.name" value="lucene-${name}-${version}"/>
Modified: lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/IndexWriter.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/IndexWriter.java Sat Nov 20 14:17:28 2010
@@ -1779,6 +1779,9 @@ public class IndexWriter implements Clos
}
boolean useCompoundDocStore = false;
+ if (infoStream != null) {
+ message("closeDocStores segment=" + docWriter.getDocStoreSegment());
+ }
String docStoreSegment;
@@ -2235,9 +2238,10 @@ public class IndexWriter implements Clos
* <p>See http://www.gossamer-threads.com/lists/lucene/java-dev/47895 for more discussion. </p>
*
* <p>Note that optimize requires 2X the index size free
- * space in your Directory. For example, if your index
+ * space in your Directory (3X if you're using compound
+ * file format). For example, if your index
* size is 10 MB then you need 20 MB free for optimize to
- * complete.</p>
+ * complete (30 MB if you're using compound file format).</p>
*
* <p>If some but not all readers re-open while an
* optimize is underway, this will cause > 2X temporary
@@ -3905,8 +3909,6 @@ public class IndexWriter implements Clos
if (merge.isAborted()) {
if (infoStream != null)
message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
-
- deleter.refresh(merge.info.name);
return false;
}
@@ -3915,13 +3917,20 @@ public class IndexWriter implements Clos
commitMergedDeletes(merge, mergedReader);
docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);
+ // If the doc store we are using has been closed and
+ // is now in compound format (but wasn't when we
+ // started), then we will switch to the compound
+ // format as well:
setMergeDocStoreIsCompoundFile(merge);
+
merge.info.setHasProx(merger.hasProx());
segmentInfos.subList(start, start + merge.segments.size()).clear();
assert !segmentInfos.contains(merge.info);
segmentInfos.add(start, merge.info);
+ closeMergeReaders(merge, false);
+
// Must note the change to segmentInfos so any commits
// in-flight don't lose it:
checkpoint();
@@ -3938,11 +3947,6 @@ public class IndexWriter implements Clos
return true;
}
- private synchronized void decrefMergeSegments(MergePolicy.OneMerge merge) throws IOException {
- assert merge.increfDone;
- merge.increfDone = false;
- }
-
final private void handleMergeException(Throwable t, MergePolicy.OneMerge merge) throws IOException {
if (infoStream != null) {
@@ -4216,8 +4220,6 @@ public class IndexWriter implements Clos
doFlush(true, false);
}
- merge.increfDone = true;
-
merge.mergeDocStores = mergeDocStores;
// Bind a new segment name here so even with
@@ -4271,14 +4273,6 @@ public class IndexWriter implements Clos
// on merges to finish.
notifyAll();
- if (merge.increfDone)
- decrefMergeSegments(merge);
-
- if (merge.mergeFiles != null) {
- deleter.decRef(merge.mergeFiles);
- merge.mergeFiles = null;
- }
-
// It's possible we are called twice, eg if there was an
// exception inside mergeInit
if (merge.registerDone) {
@@ -4308,7 +4302,49 @@ public class IndexWriter implements Clos
}
}
}
- }
+ }
+
+ private final synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException {
+ final int numSegments = merge.segments.size();
+ if (suppressExceptions) {
+ // Suppress any new exceptions so we throw the
+ // original cause
+ for (int i=0;i<numSegments;i++) {
+ if (merge.readers[i] != null) {
+ try {
+ readerPool.release(merge.readers[i], false);
+ } catch (Throwable t) {
+ }
+ merge.readers[i] = null;
+ }
+
+ if (merge.readersClone[i] != null) {
+ try {
+ merge.readersClone[i].close();
+ } catch (Throwable t) {
+ }
+ // This was a private clone and we had the
+ // only reference
+ assert merge.readersClone[i].getRefCount() == 0: "refCount should be 0 but is " + merge.readersClone[i].getRefCount();
+ merge.readersClone[i] = null;
+ }
+ }
+ } else {
+ for (int i=0;i<numSegments;i++) {
+ if (merge.readers[i] != null) {
+ readerPool.release(merge.readers[i], true);
+ merge.readers[i] = null;
+ }
+
+ if (merge.readersClone[i] != null) {
+ merge.readersClone[i].close();
+ // This was a private clone and we had the only reference
+ assert merge.readersClone[i].getRefCount() == 0;
+ merge.readersClone[i] = null;
+ }
+ }
+ }
+ }
/** Does the actual (time-consuming) work of the merge,
* but without holding synchronized lock on IndexWriter
@@ -4337,8 +4373,13 @@ public class IndexWriter implements Clos
boolean mergeDocStores = false;
- final Set<String> dss = new HashSet<String>();
-
+ final String currentDocStoreSegment;
+ synchronized(this) {
+ currentDocStoreSegment = docWriter.getDocStoreSegment();
+ }
+
+ boolean currentDSSMerged = false;
+
// This is try/finally to make sure merger's readers are
// closed:
boolean success = false;
@@ -4346,7 +4387,6 @@ public class IndexWriter implements Clos
int totDocCount = 0;
for (int i = 0; i < numSegments; i++) {
-
final SegmentInfo info = sourceSegments.info(i);
// Hold onto the "live" reader; we will use this to
@@ -4365,8 +4405,8 @@ public class IndexWriter implements Clos
mergeDocStores = true;
}
- if (info.getDocStoreOffset() != -1) {
- dss.add(info.getDocStoreSegment());
+ if (info.getDocStoreOffset() != -1 && currentDocStoreSegment != null) {
+ currentDSSMerged |= currentDocStoreSegment.equals(info.getDocStoreSegment());
}
totDocCount += clone.numDocs();
@@ -4383,9 +4423,10 @@ public class IndexWriter implements Clos
if (mergeDocStores && !merge.mergeDocStores) {
merge.mergeDocStores = true;
synchronized(this) {
- if (dss.contains(docWriter.getDocStoreSegment())) {
- if (infoStream != null)
+ if (currentDSSMerged) {
+ if (infoStream != null) {
message("now flush at mergeMiddle");
+ }
doFlush(true, false);
}
}
@@ -4395,9 +4436,7 @@ public class IndexWriter implements Clos
}
// Clear DSS
- synchronized(this) {
- merge.info.setDocStore(-1, null, false);
- }
+ merge.info.setDocStore(-1, null, false);
}
// This is where all the work happens:
@@ -4405,26 +4444,65 @@ public class IndexWriter implements Clos
assert mergedDocCount == totDocCount;
- // TODO: in the non-realtime case, we may want to only
- // keep deletes (it's costly to open entire reader
- // when we just need deletes)
+ if (merge.useCompoundFile) {
- final int termsIndexDivisor;
- final boolean loadDocStores;
+ success = false;
+ final String compoundFileName = IndexFileNames.segmentFileName(mergedName, IndexFileNames.COMPOUND_FILE_EXTENSION);
- synchronized(this) {
- // If the doc store we are using has been closed and
- // is in now compound format (but wasn't when we
- // started), then we will switch to the compound
- // format as well:
- setMergeDocStoreIsCompoundFile(merge);
- assert merge.mergeFiles == null;
- merge.mergeFiles = merge.info.files();
- deleter.incRef(merge.mergeFiles);
+ try {
+ if (infoStream != null) {
+ message("create compound file " + compoundFileName);
+ }
+ merger.createCompoundFile(compoundFileName);
+ success = true;
+ } catch (IOException ioe) {
+ synchronized(this) {
+ if (merge.isAborted()) {
+ // This can happen if rollback or close(false)
+ // is called -- fall through to logic below to
+ // remove the partially created CFS:
+ } else {
+ handleMergeException(ioe, merge);
+ }
+ }
+ } catch (Throwable t) {
+ handleMergeException(t, merge);
+ } finally {
+ if (!success) {
+ if (infoStream != null) {
+ message("hit exception creating compound file during merge");
+ }
+
+ synchronized(this) {
+ deleter.deleteFile(compoundFileName);
+ deleter.deleteNewFiles(merger.getMergedFiles());
+ }
+ }
+ }
+
+ success = false;
+
+ synchronized(this) {
+
+ // delete new non cfs files directly: they were never
+ // registered with IFD
+ deleter.deleteNewFiles(merger.getMergedFiles());
+
+ if (merge.isAborted()) {
+ if (infoStream != null) {
+ message("abort merge after building CFS");
+ }
+ deleter.deleteFile(compoundFileName);
+ return 0;
+ }
+ }
+
+ merge.info.setUseCompoundFile(true);
}
- final String currentDocStoreSegment = docWriter.getDocStoreSegment();
-
+ final int termsIndexDivisor;
+ final boolean loadDocStores;
+
// if the merged segment warmer was not installed when
// this merge was started, causing us to not force
// the docStores to close, we can't warm it now
@@ -4441,14 +4519,19 @@ public class IndexWriter implements Clos
loadDocStores = false;
}
+ // TODO: in the non-realtime case, we may want to only
+ // keep deletes (it's costly to open entire reader
+ // when we just need deletes)
+
final SegmentReader mergedReader = readerPool.get(merge.info, loadDocStores, BufferedIndexInput.BUFFER_SIZE, termsIndexDivisor);
try {
if (poolReaders && mergedSegmentWarmer != null) {
mergedSegmentWarmer.warm(mergedReader);
}
- if (!commitMerge(merge, merger, mergedDocCount, mergedReader))
+ if (!commitMerge(merge, merger, mergedDocCount, mergedReader)) {
// commitMerge will return false if this merge was aborted
return 0;
+ }
} finally {
synchronized(this) {
readerPool.release(mergedReader);
@@ -4457,100 +4540,10 @@ public class IndexWriter implements Clos
success = true;
} finally {
- synchronized(this) {
- if (!success) {
- // Suppress any new exceptions so we throw the
- // original cause
- for (int i=0;i<numSegments;i++) {
- if (merge.readers[i] != null) {
- try {
- readerPool.release(merge.readers[i], false);
- } catch (Throwable t) {
- }
- }
-
- if (merge.readersClone[i] != null) {
- try {
- merge.readersClone[i].close();
- } catch (Throwable t) {
- }
- // This was a private clone and we had the only reference
- assert merge.readersClone[i].getRefCount() == 0;
- }
- }
- } else {
- for (int i=0;i<numSegments;i++) {
- if (merge.readers[i] != null) {
- readerPool.release(merge.readers[i], true);
- }
-
- if (merge.readersClone[i] != null) {
- merge.readersClone[i].close();
- // This was a private clone and we had the only reference
- assert merge.readersClone[i].getRefCount() == 0;
- }
- }
- }
- }
- }
-
- // Must checkpoint before decrefing so any newly
- // referenced files in the new merge.info are incref'd
- // first:
- synchronized(this) {
- deleter.checkpoint(segmentInfos, false);
- }
- decrefMergeSegments(merge);
-
- if (merge.useCompoundFile) {
-
- success = false;
- final String compoundFileName = mergedName + "." + IndexFileNames.COMPOUND_FILE_EXTENSION;
-
- try {
- merger.createCompoundFile(compoundFileName);
- success = true;
- } catch (IOException ioe) {
- synchronized(this) {
- if (merge.isAborted()) {
- // This can happen if rollback or close(false)
- // is called -- fall through to logic below to
- // remove the partially created CFS:
- success = true;
- } else
- handleMergeException(ioe, merge);
- }
- } catch (Throwable t) {
- handleMergeException(t, merge);
- } finally {
- if (!success) {
- if (infoStream != null)
- message("hit exception creating compound file during merge");
- synchronized(this) {
- deleter.deleteFile(compoundFileName);
- }
- }
- }
-
- if (merge.isAborted()) {
- if (infoStream != null)
- message("abort merge after building CFS");
- synchronized(this) {
- deleter.deleteFile(compoundFileName);
- }
- return 0;
- }
-
- synchronized(this) {
- if (segmentInfos.indexOf(merge.info) == -1 || merge.isAborted()) {
- // Our segment (committed in non-compound
- // format) got merged away while we were
- // building the compound format.
- deleter.deleteFile(compoundFileName);
- } else {
- merge.info.setUseCompoundFile(true);
- checkpoint();
- }
+ // Readers are already closed in commitMerge if we didn't hit
+ // an exc:
+ if (!success) {
+ closeMergeReaders(merge, true);
}
}
Modified: lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/MergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/MergePolicy.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/MergePolicy.java (original)
+++ lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/MergePolicy.java Sat Nov 20 14:17:28 2010
@@ -74,14 +74,12 @@ public abstract class MergePolicy implem
SegmentInfo info; // used by IndexWriter
boolean mergeDocStores; // used by IndexWriter
boolean optimize; // used by IndexWriter
- boolean increfDone; // used by IndexWriter
boolean registerDone; // used by IndexWriter
long mergeGen; // used by IndexWriter
boolean isExternal; // used by IndexWriter
int maxNumSegmentsOptimize; // used by IndexWriter
SegmentReader[] readers; // used by IndexWriter
SegmentReader[] readersClone; // used by IndexWriter
- List<String> mergeFiles; // used by IndexWriter
final SegmentInfos segments;
final boolean useCompoundFile;
boolean aborted;
Modified: lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/SegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/SegmentMerger.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/SegmentMerger.java (original)
+++ lucene/java/branches/lucene_3_0/src/java/org/apache/lucene/index/SegmentMerger.java Sat Nov 20 14:17:28 2010
@@ -20,6 +20,8 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
import java.util.List;
@@ -171,13 +173,8 @@ final class SegmentMerger {
}
}
- final List<String> createCompoundFile(String fileName)
- throws IOException {
- CompoundFileWriter cfsWriter =
- new CompoundFileWriter(directory, fileName, checkAbort);
-
- List<String> files =
- new ArrayList<String>(IndexFileNames.COMPOUND_EXTENSIONS.length + 1);
+ final Collection<String> getMergedFiles() throws IOException {
+ Set<String> fileSet = new HashSet<String>();
// Basic files
for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.length; i++) {
@@ -188,14 +185,14 @@ final class SegmentMerger {
if (mergeDocStores || (!ext.equals(IndexFileNames.FIELDS_EXTENSION) &&
!ext.equals(IndexFileNames.FIELDS_INDEX_EXTENSION)))
- files.add(segment + "." + ext);
+ fileSet.add(segment + "." + ext);
}
// Fieldable norm files
for (int i = 0; i < fieldInfos.size(); i++) {
FieldInfo fi = fieldInfos.fieldInfo(i);
if (fi.isIndexed && !fi.omitNorms) {
- files.add(segment + "." + IndexFileNames.NORMS_EXTENSION);
+ fileSet.add(segment + "." + IndexFileNames.NORMS_EXTENSION);
break;
}
}
@@ -203,10 +200,19 @@ final class SegmentMerger {
// Vector files
if (fieldInfos.hasVectors() && mergeDocStores) {
for (int i = 0; i < IndexFileNames.VECTOR_EXTENSIONS.length; i++) {
- files.add(segment + "." + IndexFileNames.VECTOR_EXTENSIONS[i]);
+ fileSet.add(segment + "." + IndexFileNames.VECTOR_EXTENSIONS[i]);
}
}
+ return fileSet;
+ }
+
+ final Collection<String> createCompoundFile(String fileName)
+ throws IOException {
+
+ Collection<String> files = getMergedFiles();
+ CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, fileName, checkAbort);
+
// Now merge all added files
for (String file : files) {
cfsWriter.addFile(file);
Modified: lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestDoc.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestDoc.java Sat Nov 20 14:17:28 2010
@@ -24,7 +24,7 @@ import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Iterator;
import java.util.LinkedList;
-import java.util.List;
+import java.util.Collection;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -188,7 +188,7 @@ public class TestDoc extends LuceneTestC
merger.closeReaders();
if (useCompoundFile) {
- List filesToDelete = merger.createCompoundFile(merged + ".cfs");
+ Collection filesToDelete = merger.createCompoundFile(merged + ".cfs");
for (Iterator iter = filesToDelete.iterator(); iter.hasNext();)
si1.dir.deleteFile((String) iter.next());
}
Modified: lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriter.java Sat Nov 20 14:17:28 2010
@@ -414,13 +414,13 @@ public class TestIndexWriter extends Luc
if (done) {
// Javadocs state that temp free Directory space
- // required is at most 2X total input size of
+ // required is at most 3X total input size of
// indices so let's make sure:
assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
"starting disk usage = " + startDiskUsage + " bytes; " +
"input index disk usage = " + inputDiskUsage + " bytes",
- (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
+ (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 3*(startDiskUsage + inputDiskUsage));
}
// Make sure we don't hit disk full during close below:
@@ -687,6 +687,12 @@ public class TestIndexWriter extends Luc
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
+
+ // force one extra segment w/ different doc store so
+ // we see the doc stores get merged
+ writer.commit();
+ addDocWithIndex(writer, 500);
+
writer.close();
long startDiskUsage = 0;
@@ -701,8 +707,8 @@ public class TestIndexWriter extends Luc
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
- assertTrue("optimized used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (2*startDiskUsage) + " (= 2X starting usage)",
- maxDiskUsage <= 2*startDiskUsage);
+ assertTrue("optimized used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
+ maxDiskUsage <= 4*startDiskUsage);
dir.close();
}
@@ -1068,15 +1074,15 @@ public class TestIndexWriter extends Luc
long endDiskUsage = dir.getMaxUsedSizeInBytes();
// Ending index is 50X as large as starting index; due
- // to 2X disk usage normally we allow 100X max
+ // to 3X disk usage normally we allow 150X max
// transient usage. If something is wrong w/ deleter
// and it doesn't delete intermediate segments then it
- // will exceed this 100X:
+ // will exceed this 150X:
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
- assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
- midDiskUsage < 100*startDiskUsage);
- assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
- endDiskUsage < 100*startDiskUsage);
+ assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
+ midDiskUsage < 150*startDiskUsage);
+ assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
+ endDiskUsage < 150*startDiskUsage);
}
Modified: lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (original)
+++ lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java Sat Nov 20 14:17:28 2010
@@ -210,7 +210,7 @@ public class TestIndexWriterMergePolicy
}
private void checkInvariants(IndexWriter writer) throws IOException {
- _TestUtil.syncConcurrentMerges(writer);
+ writer.waitForMerges();
int maxBufferedDocs = writer.getMaxBufferedDocs();
int mergeFactor = writer.getMergeFactor();
int maxMergeDocs = writer.getMaxMergeDocs();
@@ -252,7 +252,7 @@ public class TestIndexWriterMergePolicy
segmentCfsCount++;
}
}
- assertEquals(segmentCount, segmentCfsCount);
+ assertEquals("index=" + writer.segString(), segmentCount, segmentCfsCount);
}
/*
Modified: lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterReader.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterReader.java (original)
+++ lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/index/TestIndexWriterReader.java Sat Nov 20 14:17:28 2010
@@ -704,7 +704,7 @@ public class TestIndexWriterReader exten
// Stress test reopen during addIndexes
public void testDuringAddIndexes() throws Exception {
- Directory dir1 = new MockRAMDirectory();
+ MockRAMDirectory dir1 = new MockRAMDirectory();
final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
@@ -764,10 +764,12 @@ public class TestIndexWriterReader exten
}
assertEquals(0, excs.size());
+ r.close();
+ assertEquals(0, dir1.getOpenDeletedFiles().size());
+
writer.close();
_TestUtil.checkIndex(dir1);
- r.close();
dir1.close();
}
Modified: lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMDirectory.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMDirectory.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMDirectory.java (original)
+++ lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMDirectory.java Sat Nov 20 14:17:28 2010
@@ -52,9 +52,15 @@ public class MockRAMDirectory extends RA
// like super is called, then our members are initialized:
Map<String,Integer> openFiles;
+ // Only tracked if noDeleteOpenFile is true: if an attempt
+ // is made to delete an open file, we enroll it here.
+ Set<String> openFilesDeleted;
+
private synchronized void init() {
- if (openFiles == null)
+ if (openFiles == null) {
openFiles = new HashMap<String,Integer>();
+ openFilesDeleted = new HashSet<String>();
+ }
if (createdFiles == null)
createdFiles = new HashSet();
if (unSyncedFiles == null)
@@ -90,6 +96,7 @@ public class MockRAMDirectory extends RA
public synchronized void crash() throws IOException {
crashed = true;
openFiles = new HashMap();
+ openFilesDeleted = new HashSet<String>();
Iterator<String> it = unSyncedFiles.iterator();
unSyncedFiles = new HashSet();
int count = 0;
@@ -184,14 +191,21 @@ public class MockRAMDirectory extends RA
if (unSyncedFiles.contains(name))
unSyncedFiles.remove(name);
- if (!forced) {
- if (noDeleteOpenFile && openFiles.containsKey(name)) {
+ if (!forced && noDeleteOpenFile) {
+ if (openFiles.containsKey(name)) {
+ openFilesDeleted.add(name);
throw new IOException("MockRAMDirectory: file \"" + name + "\" is still open: cannot delete");
+ } else {
+ openFilesDeleted.remove(name);
}
}
super.deleteFile(name);
}
+ public synchronized Set<String> getOpenDeletedFiles() {
+ return new HashSet<String>(openFilesDeleted);
+ }
+
@Override
public synchronized IndexOutput createOutput(String name) throws IOException {
if (crashed)
@@ -265,6 +279,7 @@ public class MockRAMDirectory extends RA
public synchronized void close() {
if (openFiles == null) {
openFiles = new HashMap();
+ openFilesDeleted = new HashSet<String>();
}
if (noDeleteOpenFile && openFiles.size() > 0) {
// RuntimeException instead of IOException because
Modified: lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMInputStream.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMInputStream.java?rev=1037221&r1=1037220&r2=1037221&view=diff
==============================================================================
--- lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMInputStream.java (original)
+++ lucene/java/branches/lucene_3_0/src/test/org/apache/lucene/store/MockRAMInputStream.java Sat Nov 20 14:17:28 2010
@@ -50,6 +50,7 @@ public class MockRAMInputStream extends
if (v != null) {
if (v.intValue() == 1) {
dir.openFiles.remove(name);
+ dir.openFilesDeleted.remove(name);
} else {
v = Integer.valueOf(v.intValue()-1);
dir.openFiles.put(name, v);