Posted to commits@lucene.apache.org by si...@apache.org on 2010/11/12 10:15:38 UTC
svn commit: r1034304 [4/7] - in /lucene/dev/branches/docvalues: ./ lucene/
lucene/contrib/ lucene/contrib/benchmark/conf/
lucene/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/
lucene/contrib/benchmark/src/test/org/apache/lucene/be...
Modified: lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1034304&r1=1034303&r2=1034304&view=diff
==============================================================================
--- lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/docvalues/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Fri Nov 12 09:15:30 2010
@@ -159,377 +159,7 @@ public class TestIndexWriter extends Luc
writer.addDocument(doc);
}
- /*
- Test: make sure when we run out of disk space or hit
- random IOExceptions in any of the addIndexes(*) calls
- that 1) index is not corrupt (searcher can open/search
- it) and 2) transactional semantics are followed:
- either all or none of the incoming documents were in
- fact added.
- */
- public void testAddIndexOnDiskFull() throws IOException
- {
- int START_COUNT = 57;
- int NUM_DIR = 50;
- int END_COUNT = START_COUNT + NUM_DIR*25;
-
- // Build up a bunch of dirs that have indexes which we
- // will then merge together by calling addIndexes(*):
- Directory[] dirs = new Directory[NUM_DIR];
- long inputDiskUsage = 0;
- for(int i=0;i<NUM_DIR;i++) {
- dirs[i] = newDirectory();
- IndexWriter writer = new IndexWriter(dirs[i], newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- for(int j=0;j<25;j++) {
- addDocWithIndex(writer, 25*i+j);
- }
- writer.close();
- String[] files = dirs[i].listAll();
- for(int j=0;j<files.length;j++) {
- inputDiskUsage += dirs[i].fileLength(files[j]);
- }
- }
-
- // Now, build a starting index that has START_COUNT docs. We
- // will then try to addIndexes into a copy of this:
- MockDirectoryWrapper startDir = newDirectory();
- IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- for(int j=0;j<START_COUNT;j++) {
- addDocWithIndex(writer, j);
- }
- writer.close();
-
- // Make sure starting index seems to be working properly:
- Term searchTerm = new Term("content", "aaa");
- IndexReader reader = IndexReader.open(startDir, true);
- assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
-
- IndexSearcher searcher = new IndexSearcher(reader);
- ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
- assertEquals("first number of hits", 57, hits.length);
- searcher.close();
- reader.close();
-
- // Iterate with larger and larger amounts of free
- // disk space. With little free disk space,
- // addIndexes will certainly run out of space &
- // fail. Verify that when this happens, index is
- // not corrupt and index in fact has added no
- // documents. Then, we increase disk space by 2000
- // bytes each iteration. At some point there is
- // enough free disk space and addIndexes should
- // succeed and index should show all documents were
- // added.
-
- // String[] files = startDir.listAll();
- long diskUsage = startDir.sizeInBytes();
-
- long startDiskUsage = 0;
- String[] files = startDir.listAll();
- for(int i=0;i<files.length;i++) {
- startDiskUsage += startDir.fileLength(files[i]);
- }
-
- for(int iter=0;iter<3;iter++) {
-
- if (VERBOSE)
- System.out.println("TEST: iter=" + iter);
-
- // Start with 100 bytes more than we are currently using:
- long diskFree = diskUsage+100;
-
- int method = iter;
-
- boolean success = false;
- boolean done = false;
-
- String methodName;
- if (0 == method) {
- methodName = "addIndexes(Directory[]) + optimize()";
- } else if (1 == method) {
- methodName = "addIndexes(IndexReader[])";
- } else {
- methodName = "addIndexes(Directory[])";
- }
-
- while(!done) {
-
- // Make a new dir that will enforce disk usage:
- MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory(startDir));
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
- IOException err = null;
-
- MergeScheduler ms = writer.getConfig().getMergeScheduler();
- for(int x=0;x<2;x++) {
- if (ms instanceof ConcurrentMergeScheduler)
- // This test intentionally produces exceptions
- // in the threads that CMS launches; we don't
- // want to pollute test output with these.
- if (0 == x)
- ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
- else
- ((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
-
- // Two loops: first time, limit disk space &
- // throw random IOExceptions; second time, no
- // disk space limit:
-
- double rate = 0.05;
- double diskRatio = ((double) diskFree)/diskUsage;
- long thisDiskFree;
-
- String testName = null;
-
- if (0 == x) {
- thisDiskFree = diskFree;
- if (diskRatio >= 2.0) {
- rate /= 2;
- }
- if (diskRatio >= 4.0) {
- rate /= 2;
- }
- if (diskRatio >= 6.0) {
- rate = 0.0;
- }
- if (VERBOSE)
- testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
- } else {
- thisDiskFree = 0;
- rate = 0.0;
- if (VERBOSE)
- testName = "disk full test " + methodName + " with unlimited disk space";
- }
-
- if (VERBOSE)
- System.out.println("\ncycle: " + testName);
-
- dir.setTrackDiskUsage(true);
- dir.setMaxSizeInBytes(thisDiskFree);
- dir.setRandomIOExceptionRate(rate, diskFree);
-
- try {
-
- if (0 == method) {
- writer.addIndexes(dirs);
- writer.optimize();
- } else if (1 == method) {
- IndexReader readers[] = new IndexReader[dirs.length];
- for(int i=0;i<dirs.length;i++) {
- readers[i] = IndexReader.open(dirs[i], true);
- }
- try {
- writer.addIndexes(readers);
- } finally {
- for(int i=0;i<dirs.length;i++) {
- readers[i].close();
- }
- }
- } else {
- writer.addIndexes(dirs);
- }
-
- success = true;
- if (VERBOSE) {
- System.out.println(" success!");
- }
-
- if (0 == x) {
- done = true;
- }
-
- } catch (IOException e) {
- success = false;
- err = e;
- if (VERBOSE) {
- System.out.println(" hit IOException: " + e);
- e.printStackTrace(System.out);
- }
-
- if (1 == x) {
- e.printStackTrace(System.out);
- fail(methodName + " hit IOException after disk space was freed up");
- }
- }
-
- // Make sure all threads from
- // ConcurrentMergeScheduler are done
- _TestUtil.syncConcurrentMerges(writer);
-
- if (VERBOSE) {
- System.out.println(" now test readers");
- }
-
- // Finally, verify index is not corrupt, and, if
- // we succeeded, we see all docs added, and if we
- // failed, we see either all docs or no docs added
- // (transactional semantics):
- try {
- reader = IndexReader.open(dir, true);
- } catch (IOException e) {
- e.printStackTrace(System.out);
- fail(testName + ": exception when creating IndexReader: " + e);
- }
- int result = reader.docFreq(searchTerm);
- if (success) {
- if (result != START_COUNT) {
- fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
- }
- } else {
- // On hitting exception we still may have added
- // all docs:
- if (result != START_COUNT && result != END_COUNT) {
- err.printStackTrace(System.out);
- fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
- }
- }
-
- searcher = new IndexSearcher(reader);
- try {
- hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
- } catch (IOException e) {
- e.printStackTrace(System.out);
- fail(testName + ": exception when searching: " + e);
- }
- int result2 = hits.length;
- if (success) {
- if (result2 != result) {
- fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
- }
- } else {
- // On hitting exception we still may have added
- // all docs:
- if (result2 != result) {
- err.printStackTrace(System.out);
- fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
- }
- }
-
- searcher.close();
- reader.close();
- if (VERBOSE) {
- System.out.println(" count is " + result);
- }
-
- if (done || result == END_COUNT) {
- break;
- }
- }
-
- if (VERBOSE) {
- System.out.println(" start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
- }
-
- if (done) {
- // Javadocs state that temp free Directory space
- // required is at most 2X total input size of
- // indices so let's make sure:
- assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
- ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
- "starting disk usage = " + startDiskUsage + " bytes; " +
- "input index disk usage = " + inputDiskUsage + " bytes",
- (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
- }
-
- // Make sure we don't hit disk full during close below:
- dir.setMaxSizeInBytes(0);
- dir.setRandomIOExceptionRate(0.0, 0);
-
- writer.close();
-
- // Wait for all BG threads to finish else
- // dir.close() will throw IOException because
- // there are still open files
- _TestUtil.syncConcurrentMerges(ms);
-
- dir.close();
-
- // Try again with 5000 more bytes of free space:
- diskFree += 5000;
- }
- }
-
- startDir.close();
- for (Directory dir : dirs)
- dir.close();
- }
-
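For reference, the disk-full machinery that the removed test above (and testAddDocumentOnDiskFull below) relies on boils down to a few MockDirectoryWrapper calls. A minimal sketch, reusing only calls that appear in these tests ("doc" is assumed to be an already-built Document):

    MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory());
    dir.setMaxSizeInBytes(1024);               // simulate only 1 KB of free disk
    dir.setRandomIOExceptionRate(0.05, 1024);  // also throw random IOExceptions
    IndexWriter writer = new IndexWriter(dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    try {
      writer.addDocument(doc);                 // may hit a simulated disk-full IOException
    } catch (IOException ioe) {
      // the index must stay readable: either all or none of the docs were added
    }
    dir.setMaxSizeInBytes(0);                  // 0 = unlimited again, so close() can succeed
    writer.close();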
- /*
- * Make sure IndexWriter cleans up on hitting a disk
- * full exception in addDocument.
- * TODO: how to do this on windows with FSDirectory?
- */
- public void testAddDocumentOnDiskFull() throws IOException {
-
- for(int pass=0;pass<2;pass++) {
- if (VERBOSE)
- System.out.println("TEST: pass=" + pass);
- boolean doAbort = pass == 1;
- long diskFree = 200;
- while(true) {
- if (VERBOSE)
- System.out.println("TEST: cycle: diskFree=" + diskFree);
- MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory());
- dir.setMaxSizeInBytes(diskFree);
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- MergeScheduler ms = writer.getConfig().getMergeScheduler();
- if (ms instanceof ConcurrentMergeScheduler)
- // This test intentionally produces exceptions
- // in the threads that CMS launches; we don't
- // want to pollute test output with these.
- ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
-
- boolean hitError = false;
- try {
- for(int i=0;i<200;i++) {
- addDoc(writer);
- }
- writer.commit();
- } catch (IOException e) {
- if (VERBOSE) {
- System.out.println("TEST: exception on addDoc");
- e.printStackTrace(System.out);
- }
- hitError = true;
- }
-
- if (hitError) {
- if (doAbort) {
- writer.rollback();
- } else {
- try {
- writer.close();
- } catch (IOException e) {
- if (VERBOSE) {
- System.out.println("TEST: exception on close");
- e.printStackTrace(System.out);
- }
- dir.setMaxSizeInBytes(0);
- writer.close();
- }
- }
-
- //_TestUtil.syncConcurrentMerges(ms);
-
- if (_TestUtil.anyFilesExceptWriteLock(dir)) {
- assertNoUnreferencedFiles(dir, "after disk full during addDocument");
-
- // Make sure reader can open the index:
- IndexReader.open(dir, true).close();
- }
-
- dir.close();
- // Now try again w/ more space:
-
- diskFree += 500;
- } else {
- //_TestUtil.syncConcurrentMerges(writer);
- writer.close();
- dir.close();
- break;
- }
- }
- }
- }
+
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
String[] startFiles = dir.listAll();
@@ -605,7 +235,8 @@ public class TestIndexWriter extends Luc
for(int i=0;i<19;i++)
writer.addDocument(doc);
- ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
+ writer.commit();
+ writer.waitForMerges();
writer.commit();
SegmentInfos sis = new SegmentInfos();
@@ -615,9 +246,9 @@ public class TestIndexWriter extends Luc
writer.optimize(7);
writer.commit();
+ writer.waitForMerges();
sis = new SegmentInfos();
- ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
sis.read(dir);
final int optSegCount = sis.size();
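The replacement avoids casting the merge scheduler: commit() first publishes the flushed segments so merges can be scheduled, and waitForMerges() then blocks until pending background merges finish, whichever MergeScheduler is configured (that blocking behavior is the assumption here). The pattern in sketch form:

    writer.commit();         // publish new segments so merges can kick off
    writer.waitForMerges();  // block until background merges (e.g. CMS threads) finish
    writer.commit();         // commit the post-merge segments_N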
@@ -709,117 +340,6 @@ public class TestIndexWriter extends Luc
}
}
- // Simulate a writer that crashed while writing segments
- // file: make sure we can still open the index (ie,
- // gracefully fallback to the previous segments file),
- // and that we can add to the index:
- public void testSimulatedCrashedWriter() throws IOException {
- MockDirectoryWrapper dir = newDirectory();
- dir.setPreventDoubleWrite(false);
-
- IndexWriter writer = null;
-
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-
- // add 100 documents
- for (int i = 0; i < 100; i++) {
- addDoc(writer);
- }
-
- // close
- writer.close();
-
- long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
- assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
- // Make the next segments file, with last byte
- // missing, to simulate a writer that crashed while
- // writing segments file:
- String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
- String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
- "",
- 1+gen);
- IndexInput in = dir.openInput(fileNameIn);
- IndexOutput out = dir.createOutput(fileNameOut);
- long length = in.length();
- for(int i=0;i<length-1;i++) {
- out.writeByte(in.readByte());
- }
- in.close();
- out.close();
-
- IndexReader reader = null;
- try {
- reader = IndexReader.open(dir, true);
- } catch (Exception e) {
- fail("reader failed to open on a crashed index");
- }
- reader.close();
-
- try {
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
- } catch (Exception e) {
- e.printStackTrace(System.out);
- fail("writer failed to open on a crashed index");
- }
-
- // add 100 documents
- for (int i = 0; i < 100; i++) {
- addDoc(writer);
- }
-
- // close
- writer.close();
- dir.close();
- }
-
- // Simulate a corrupt index by removing last byte of
- // latest segments file and make sure we get an
- // IOException trying to open the index:
- public void testSimulatedCorruptIndex1() throws IOException {
- Directory dir = newDirectory();
-
- IndexWriter writer = null;
-
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-
- // add 100 documents
- for (int i = 0; i < 100; i++) {
- addDoc(writer);
- }
-
- // close
- writer.close();
-
- long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
- assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
- String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
- String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
- "",
- 1+gen);
- IndexInput in = dir.openInput(fileNameIn);
- IndexOutput out = dir.createOutput(fileNameOut);
- long length = in.length();
- for(int i=0;i<length-1;i++) {
- out.writeByte(in.readByte());
- }
- in.close();
- out.close();
- dir.deleteFile(fileNameIn);
-
- IndexReader reader = null;
- try {
- reader = IndexReader.open(dir, true);
- fail("reader did not hit IOException on opening a corrupt index");
- } catch (Exception e) {
- }
- if (reader != null) {
- reader.close();
- }
- dir.close();
- }
-
public void testChangesAfterClose() throws IOException {
Directory dir = newDirectory();
@@ -838,52 +358,6 @@ public class TestIndexWriter extends Luc
}
dir.close();
}
-
-
- // Simulate a corrupt index by removing one of the cfs
- // files and make sure we get an IOException trying to
- // open the index:
- public void testSimulatedCorruptIndex2() throws IOException {
- Directory dir = newDirectory();
-
- IndexWriter writer = null;
-
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true);
-
- // add 100 documents
- for (int i = 0; i < 100; i++) {
- addDoc(writer);
- }
-
- // close
- writer.close();
-
- long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
- assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
- String[] files = dir.listAll();
- boolean corrupted = false;
- for(int i=0;i<files.length;i++) {
- if (files[i].endsWith(".cfs")) {
- dir.deleteFile(files[i]);
- corrupted = true;
- break;
- }
- }
- assertTrue("failed to find cfs file to remove", corrupted);
-
- IndexReader reader = null;
- try {
- reader = IndexReader.open(dir, true);
- fail("reader did not hit IOException on opening a corrupt index");
- } catch (Exception e) {
- }
- if (reader != null) {
- reader.close();
- }
- dir.close();
- }
/*
* Simple test for "commit on close": open writer then
@@ -1577,64 +1051,6 @@ public class TestIndexWriter extends Luc
dir.close();
}
- // LUCENE-1008
- public void testNoTermVectorAfterTermVector() throws IOException {
- Directory dir = newDirectory();
- IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document document = new Document();
- document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
- Field.TermVector.YES));
- iw.addDocument(document);
- document = new Document();
- document.add(newField("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
- Field.TermVector.NO));
- iw.addDocument(document);
- // Make first segment
- iw.commit();
-
- document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
- Field.TermVector.YES));
- iw.addDocument(document);
- // Make 2nd segment
- iw.commit();
-
- iw.optimize();
- iw.close();
- dir.close();
- }
-
- // LUCENE-1010
- public void testNoTermVectorAfterTermVectorMerge() throws IOException {
- Directory dir = newDirectory();
- IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document document = new Document();
- document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
- Field.TermVector.YES));
- iw.addDocument(document);
- iw.commit();
-
- document = new Document();
- document.add(newField("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
- Field.TermVector.NO));
- iw.addDocument(document);
- // Make first segment
- iw.commit();
-
- iw.optimize();
-
- document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
- Field.TermVector.YES));
- iw.addDocument(document);
- // Make 2nd segment
- iw.commit();
- iw.optimize();
-
- iw.close();
- dir.close();
- }
-
// LUCENE-1036
public void testMaxThreadPriority() throws IOException {
int pri = Thread.currentThread().getPriority();
@@ -1698,366 +1114,19 @@ public class TestIndexWriter extends Luc
dir.close();
}
- // LUCENE-1072
- public void testExceptionFromTokenStream() throws IOException {
- Directory dir = newDirectory();
- IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new Analyzer() {
-
- @Override
- public TokenStream tokenStream(String fieldName, Reader reader) {
- return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
- private int count = 0;
-
- @Override
- public boolean incrementToken() throws IOException {
- if (count++ == 5) {
- throw new IOException();
- }
- return input.incrementToken();
- }
- };
- }
-
- });
- IndexWriter writer = new IndexWriter(dir, conf);
-
- Document doc = new Document();
- String contents = "aa bb cc dd ee ff gg hh ii jj kk";
- doc.add(newField("content", contents, Field.Store.NO,
- Field.Index.ANALYZED));
- try {
- writer.addDocument(doc);
- fail("did not hit expected exception");
- } catch (Exception e) {
- }
- // Make sure we can add another normal document
- doc = new Document();
- doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
- Field.Index.ANALYZED));
- writer.addDocument(doc);
- // Make sure we can add another normal document
- doc = new Document();
- doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
- Field.Index.ANALYZED));
- writer.addDocument(doc);
-
- writer.close();
- IndexReader reader = IndexReader.open(dir, true);
- final Term t = new Term("content", "aa");
- assertEquals(reader.docFreq(t), 3);
-
- // Make sure the doc that hit the exception was marked
- // as deleted:
- DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
- MultiFields.getDeletedDocs(reader),
- t.field(),
- new BytesRef(t.text()));
-
- int count = 0;
- while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
- count++;
- }
- assertEquals(2, count);
-
- assertEquals(reader.docFreq(new Term("content", "gg")), 0);
- reader.close();
- dir.close();
- }
-
- private static class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
- boolean doFail = false;
- int count;
-
- @Override
- public void setDoFail() {
- this.doFail = true;
- }
- @Override
- public void clearDoFail() {
- this.doFail = false;
- }
-
- @Override
- public void eval(MockDirectoryWrapper dir) throws IOException {
- if (doFail) {
- StackTraceElement[] trace = new Exception().getStackTrace();
- boolean sawAppend = false;
- boolean sawFlush = false;
- for (int i = 0; i < trace.length; i++) {
- if ("org.apache.lucene.index.FreqProxTermsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
- sawAppend = true;
- if ("doFlush".equals(trace[i].getMethodName()))
- sawFlush = true;
- }
-
- if (sawAppend && sawFlush && count++ >= 30) {
- doFail = false;
- throw new IOException("now failing during flush");
- }
- }
- }
- }
-
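MockDirectoryWrapper.Failure is the test framework's fault-injection hook: eval() is invoked from the wrapped directory's operations, and checking the current stack trace confines the injected IOException to one code path (here, appendPostings during a flush). Arming it takes two calls, as the removed test below does:

    MockDirectoryWrapper dir = newDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    failure.setDoFail();   // arm the failure condition
    dir.failOn(failure);   // dir now runs failure.eval() during its operations
    // ... add documents: exactly one flush should throw "now failing during flush"
    failure.clearDoFail(); // disarm so the writer can close cleanly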
- // LUCENE-1072: make sure an errant exception on flushing
- // one segment only takes out those docs in that one flush
- public void testDocumentsWriterAbort() throws IOException {
- MockDirectoryWrapper dir = newDirectory();
- FailOnlyOnFlush failure = new FailOnlyOnFlush();
- failure.setDoFail();
- dir.failOn(failure);
-
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
- Document doc = new Document();
- String contents = "aa bb cc dd ee ff gg hh ii jj kk";
- doc.add(newField("content", contents, Field.Store.NO,
- Field.Index.ANALYZED));
- boolean hitError = false;
- for(int i=0;i<200;i++) {
- try {
- writer.addDocument(doc);
- } catch (IOException ioe) {
- // only one flush should fail:
- assertFalse(hitError);
- hitError = true;
- }
- }
- assertTrue(hitError);
- writer.close();
- IndexReader reader = IndexReader.open(dir, true);
- assertEquals(198, reader.docFreq(new Term("content", "aa")));
- reader.close();
- dir.close();
- }
-
- private class CrashingFilter extends TokenFilter {
- String fieldName;
- int count;
-
- public CrashingFilter(String fieldName, TokenStream input) {
- super(input);
- this.fieldName = fieldName;
- }
-
- @Override
- public boolean incrementToken() throws IOException {
- if (this.fieldName.equals("crash") && count++ >= 4)
- throw new IOException("I'm experiencing problems");
- return input.incrementToken();
- }
-
- @Override
- public void reset() throws IOException {
- super.reset();
- count = 0;
- }
- }
-
- public void testDocumentsWriterExceptions() throws IOException {
- Analyzer analyzer = new Analyzer() {
- @Override
- public TokenStream tokenStream(String fieldName, Reader reader) {
- return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
- }
- };
-
- for(int i=0;i<2;i++) {
- MockDirectoryWrapper dir = newDirectory();
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
- //writer.setInfoStream(System.out);
- Document doc = new Document();
- doc.add(newField("contents", "here are some contents", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- writer.addDocument(doc);
- writer.addDocument(doc);
- doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- doc.add(newField("other", "this will not get indexed", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- try {
- writer.addDocument(doc);
- fail("did not hit expected exception");
- } catch (IOException ioe) {
- }
-
- if (0 == i) {
- doc = new Document();
- doc.add(newField("contents", "here are some contents", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- writer.addDocument(doc);
- writer.addDocument(doc);
- }
- writer.close();
-
- IndexReader reader = IndexReader.open(dir, true);
- int expected = 3+(1-i)*2;
- assertEquals(expected, reader.docFreq(new Term("contents", "here")));
- assertEquals(expected, reader.maxDoc());
- int numDel = 0;
- final Bits delDocs = MultiFields.getDeletedDocs(reader);
- assertNotNull(delDocs);
- for(int j=0;j<reader.maxDoc();j++) {
- if (delDocs.get(j))
- numDel++;
- else {
- reader.document(j);
- reader.getTermFreqVectors(j);
- }
- }
- reader.close();
-
- assertEquals(1, numDel);
-
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
- analyzer).setMaxBufferedDocs(10));
- doc = new Document();
- doc.add(newField("contents", "here are some contents", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- for(int j=0;j<17;j++)
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
-
- reader = IndexReader.open(dir, true);
- expected = 19+(1-i)*2;
- assertEquals(expected, reader.docFreq(new Term("contents", "here")));
- assertEquals(expected, reader.maxDoc());
- numDel = 0;
- assertNull(MultiFields.getDeletedDocs(reader));
- for(int j=0;j<reader.maxDoc();j++) {
- reader.document(j);
- reader.getTermFreqVectors(j);
- }
- reader.close();
- assertEquals(0, numDel);
-
- dir.close();
- }
- }
-
- public void testDocumentsWriterExceptionThreads() throws Exception {
- Analyzer analyzer = new Analyzer() {
- @Override
- public TokenStream tokenStream(String fieldName, Reader reader) {
- return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
- }
- };
-
- final int NUM_THREAD = 3;
- final int NUM_ITER = 100;
-
- for(int i=0;i<2;i++) {
- MockDirectoryWrapper dir = newDirectory();
-
- {
- final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(-1));
- ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
- final int finalI = i;
-
- Thread[] threads = new Thread[NUM_THREAD];
- for(int t=0;t<NUM_THREAD;t++) {
- threads[t] = new Thread() {
- @Override
- public void run() {
- try {
- for(int iter=0;iter<NUM_ITER;iter++) {
- Document doc = new Document();
- doc.add(newField("contents", "here are some contents", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- writer.addDocument(doc);
- writer.addDocument(doc);
- doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- doc.add(newField("other", "this will not get indexed", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- try {
- writer.addDocument(doc);
- fail("did not hit expected exception");
- } catch (IOException ioe) {
- }
-
- if (0 == finalI) {
- doc = new Document();
- doc.add(newField("contents", "here are some contents", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- writer.addDocument(doc);
- writer.addDocument(doc);
- }
- }
- } catch (Throwable t) {
- synchronized(this) {
- System.out.println(Thread.currentThread().getName() + ": ERROR: hit unexpected exception");
- t.printStackTrace(System.out);
- }
- fail();
- }
- }
- };
- threads[t].start();
- }
-
- for(int t=0;t<NUM_THREAD;t++)
- threads[t].join();
-
- writer.close();
- }
-
- IndexReader reader = IndexReader.open(dir, true);
- int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
- assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
- assertEquals(expected, reader.maxDoc());
- int numDel = 0;
- final Bits delDocs = MultiFields.getDeletedDocs(reader);
- assertNotNull(delDocs);
- for(int j=0;j<reader.maxDoc();j++) {
- if (delDocs.get(j))
- numDel++;
- else {
- reader.document(j);
- reader.getTermFreqVectors(j);
- }
- }
- reader.close();
-
- assertEquals(NUM_THREAD*NUM_ITER, numDel);
-
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
- Document doc = new Document();
- doc.add(newField("contents", "here are some contents", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- for(int j=0;j<17;j++)
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
-
- reader = IndexReader.open(dir, true);
- expected += 17-NUM_THREAD*NUM_ITER;
- assertEquals(expected, reader.docFreq(new Term("contents", "here")));
- assertEquals(expected, reader.maxDoc());
- assertNull(MultiFields.getDeletedDocs(reader));
- for(int j=0;j<reader.maxDoc();j++) {
- reader.document(j);
- reader.getTermFreqVectors(j);
- }
- reader.close();
-
- dir.close();
- }
- }
-
- public void testVariableSchema() throws Exception {
- Directory dir = newDirectory();
- int delID = 0;
- for(int i=0;i<20;i++) {
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
- LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
- lmp.setMergeFactor(2);
- lmp.setUseCompoundFile(false);
- lmp.setUseCompoundDocStore(false);
- Document doc = new Document();
- String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+ public void testVariableSchema() throws Exception {
+ Directory dir = newDirectory();
+ int delID = 0;
+ for(int i=0;i<20;i++) {
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+ LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+ lmp.setMergeFactor(2);
+ lmp.setUseCompoundFile(false);
+ lmp.setUseCompoundDocStore(false);
+ Document doc = new Document();
+ String contents = "aa bb cc dd ee ff gg hh ii jj kk";
if (i == 7) {
// Add empty docs here
@@ -2190,421 +1259,6 @@ public class TestIndexWriter extends Luc
directory.close();
}
- // Used by test cases below
- private class IndexerThread extends Thread {
-
- boolean diskFull;
- Throwable error;
- AlreadyClosedException ace;
- IndexWriter writer;
- boolean noErrors;
- volatile int addCount;
-
- public IndexerThread(IndexWriter writer, boolean noErrors) {
- this.writer = writer;
- this.noErrors = noErrors;
- }
-
- @Override
- public void run() {
-
- final Document doc = new Document();
- doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-
- int idUpto = 0;
- int fullCount = 0;
- final long stopTime = System.currentTimeMillis() + 200;
-
- do {
- try {
- writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
- addCount++;
- } catch (IOException ioe) {
- //System.out.println(Thread.currentThread().getName() + ": hit exc");
- //ioe.printStackTrace(System.out);
- if (ioe.getMessage().startsWith("fake disk full at") ||
- ioe.getMessage().equals("now failing on purpose")) {
- diskFull = true;
- try {
- Thread.sleep(1);
- } catch (InterruptedException ie) {
- throw new ThreadInterruptedException(ie);
- }
- if (fullCount++ >= 5)
- break;
- } else {
- if (noErrors) {
- System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
- ioe.printStackTrace(System.out);
- error = ioe;
- }
- break;
- }
- } catch (Throwable t) {
- //t.printStackTrace(System.out);
- if (noErrors) {
- System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
- t.printStackTrace(System.out);
- error = t;
- }
- break;
- }
- } while(System.currentTimeMillis() < stopTime);
- }
- }
-
- // LUCENE-1130: make sure we can close() even while
- // threads are trying to add documents. Strictly
- // speaking, this isn't valid use of Lucene's APIs, but we
- // still want to be robust to this case:
- public void testCloseWithThreads() throws Exception {
- int NUM_THREADS = 3;
-
- for(int iter=0;iter<7;iter++) {
- Directory dir = newDirectory();
- IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler());
- // We expect AlreadyClosedException
- ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
- IndexWriter writer = new IndexWriter(dir, conf);
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
-
- IndexerThread[] threads = new IndexerThread[NUM_THREADS];
-
- for(int i=0;i<NUM_THREADS;i++)
- threads[i] = new IndexerThread(writer, false);
-
- for(int i=0;i<NUM_THREADS;i++)
- threads[i].start();
-
- boolean done = false;
- while(!done) {
- Thread.sleep(100);
- for(int i=0;i<NUM_THREADS;i++)
- // only stop when at least one thread has added a doc
- if (threads[i].addCount > 0) {
- done = true;
- break;
- }
- }
-
- writer.close(false);
-
- // Make sure threads that are adding docs are not hung:
- for(int i=0;i<NUM_THREADS;i++) {
- // Without fix for LUCENE-1130: one of the
- // threads will hang
- threads[i].join();
- if (threads[i].isAlive())
- fail("thread seems to be hung");
- }
-
- // Quick test to make sure index is not corrupt:
- IndexReader reader = IndexReader.open(dir, true);
- DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
- MultiFields.getDeletedDocs(reader),
- "field",
- new BytesRef("aaa"));
- int count = 0;
- while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
- count++;
- }
- assertTrue(count > 0);
- reader.close();
-
- dir.close();
- }
- }
-
- // LUCENE-1130: make sure immediate disk full on creating
- // an IndexWriter (hit during DW.ThreadState.init()) is
- // OK:
- public void testImmediateDiskFull() throws IOException {
- MockDirectoryWrapper dir = newDirectory();
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
- dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
- final Document doc = new Document();
- doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- try {
- writer.addDocument(doc);
- fail("did not hit disk full");
- } catch (IOException ioe) {
- }
- // Without fix for LUCENE-1130: this call will hang:
- try {
- writer.addDocument(doc);
- fail("did not hit disk full");
- } catch (IOException ioe) {
- }
- try {
- writer.close(false);
- fail("did not hit disk full");
- } catch (IOException ioe) {
- }
-
- // Make sure once disk space is avail again, we can
- // cleanly close:
- dir.setMaxSizeInBytes(0);
- writer.close(false);
- dir.close();
- }
-
- // LUCENE-1130: make sure immediate disk full on creating
- // an IndexWriter (hit during DW.ThreadState.init()), with
- // multiple threads, is OK:
- public void testImmediateDiskFullWithThreads() throws Exception {
-
- int NUM_THREADS = 3;
-
- for(int iter=0;iter<10;iter++) {
- MockDirectoryWrapper dir = newDirectory();
- IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
- // We expect disk full exceptions in the merge threads
- ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
- IndexWriter writer = new IndexWriter(dir, conf);
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
- dir.setMaxSizeInBytes(4*1024+20*iter);
-
- IndexerThread[] threads = new IndexerThread[NUM_THREADS];
-
- for(int i=0;i<NUM_THREADS;i++)
- threads[i] = new IndexerThread(writer, true);
-
- for(int i=0;i<NUM_THREADS;i++)
- threads[i].start();
-
- for(int i=0;i<NUM_THREADS;i++) {
- // Without fix for LUCENE-1130: one of the
- // threads will hang
- threads[i].join();
- assertTrue("hit unexpected Throwable", threads[i].error == null);
- }
-
- // Make sure once disk space is avail again, we can
- // cleanly close:
- dir.setMaxSizeInBytes(0);
- writer.close(false);
- dir.close();
- }
- }
-
- // Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
- private static class FailOnlyOnAbortOrFlush extends MockDirectoryWrapper.Failure {
- private boolean onlyOnce;
- public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
- this.onlyOnce = onlyOnce;
- }
- @Override
- public void eval(MockDirectoryWrapper dir) throws IOException {
- if (doFail) {
- StackTraceElement[] trace = new Exception().getStackTrace();
- for (int i = 0; i < trace.length; i++) {
- if ("abort".equals(trace[i].getMethodName()) ||
- "flushDocument".equals(trace[i].getMethodName())) {
- if (onlyOnce)
- doFail = false;
- //System.out.println(Thread.currentThread().getName() + ": now fail");
- //new Throwable().printStackTrace(System.out);
- throw new IOException("now failing on purpose");
- }
- }
- }
- }
- }
-
- // Runs test, with one thread, using the specific failure
- // to trigger an IOException
- public void _testSingleThreadFailure(MockDirectoryWrapper.Failure failure) throws IOException {
- MockDirectoryWrapper dir = newDirectory();
-
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
- final Document doc = new Document();
- doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-
- for(int i=0;i<6;i++)
- writer.addDocument(doc);
-
- dir.failOn(failure);
- failure.setDoFail();
- try {
- writer.addDocument(doc);
- writer.addDocument(doc);
- writer.commit();
- fail("did not hit exception");
- } catch (IOException ioe) {
- }
- failure.clearDoFail();
- writer.addDocument(doc);
- writer.close(false);
- dir.close();
- }
-
- // Runs test, with multiple threads, using the specific
- // failure to trigger an IOException
- public void _testMultipleThreadsFailure(MockDirectoryWrapper.Failure failure) throws Exception {
-
- int NUM_THREADS = 3;
-
- for(int iter=0;iter<2;iter++) {
- MockDirectoryWrapper dir = newDirectory();
- IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT,
- new MockAnalyzer()).setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
- // We expect disk full exceptions in the merge threads
- ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
- IndexWriter writer = new IndexWriter(dir, conf);
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
-
- IndexerThread[] threads = new IndexerThread[NUM_THREADS];
-
- for(int i=0;i<NUM_THREADS;i++)
- threads[i] = new IndexerThread(writer, true);
-
- for(int i=0;i<NUM_THREADS;i++)
- threads[i].start();
-
- Thread.sleep(10);
-
- dir.failOn(failure);
- failure.setDoFail();
-
- for(int i=0;i<NUM_THREADS;i++) {
- threads[i].join();
- assertTrue("hit unexpected Throwable", threads[i].error == null);
- }
-
- boolean success = false;
- try {
- writer.close(false);
- success = true;
- } catch (IOException ioe) {
- failure.clearDoFail();
- writer.close(false);
- }
-
- if (success) {
- IndexReader reader = IndexReader.open(dir, true);
- final Bits delDocs = MultiFields.getDeletedDocs(reader);
- for(int j=0;j<reader.maxDoc();j++) {
- if (delDocs == null || !delDocs.get(j)) {
- reader.document(j);
- reader.getTermFreqVectors(j);
- }
- }
- reader.close();
- }
-
- dir.close();
- }
- }
-
- // LUCENE-1130: make sure initial IOException, and then 2nd
- // IOException during rollback(), is OK:
- public void testIOExceptionDuringAbort() throws IOException {
- _testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
- }
-
- // LUCENE-1130: make sure initial IOException, and then 2nd
- // IOException during rollback(), is OK:
- public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
- _testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
- }
-
- // LUCENE-1130: make sure initial IOException, and then 2nd
- // IOException during rollback(), with multiple threads, is OK:
- public void testIOExceptionDuringAbortWithThreads() throws Exception {
- _testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
- }
-
- // LUCENE-1130: make sure initial IOException, and then 2nd
- // IOException during rollback(), with multiple threads, is OK:
- public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
- _testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
- }
-
- // Throws IOException during DocumentsWriter.closeDocStore
- private static class FailOnlyInCloseDocStore extends MockDirectoryWrapper.Failure {
- private boolean onlyOnce;
- public FailOnlyInCloseDocStore(boolean onlyOnce) {
- this.onlyOnce = onlyOnce;
- }
- @Override
- public void eval(MockDirectoryWrapper dir) throws IOException {
- if (doFail) {
- StackTraceElement[] trace = new Exception().getStackTrace();
- for (int i = 0; i < trace.length; i++) {
- if ("closeDocStore".equals(trace[i].getMethodName())) {
- if (onlyOnce)
- doFail = false;
- throw new IOException("now failing on purpose");
- }
- }
- }
- }
- }
-
- // LUCENE-1130: test IOException in closeDocStore
- public void testIOExceptionDuringCloseDocStore() throws IOException {
- _testSingleThreadFailure(new FailOnlyInCloseDocStore(false));
- }
-
- // LUCENE-1130: test IOException in closeDocStore
- public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
- _testSingleThreadFailure(new FailOnlyInCloseDocStore(true));
- }
-
- // LUCENE-1130: test IOException in closeDocStore, with threads
- public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
- _testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false));
- }
-
- // LUCENE-1130: test IOException in closeDocStore, with threads
- public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
- _testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true));
- }
-
- // Throws IOException during DocumentsWriter.writeSegment
- private static class FailOnlyInWriteSegment extends MockDirectoryWrapper.Failure {
- private boolean onlyOnce;
- public FailOnlyInWriteSegment(boolean onlyOnce) {
- this.onlyOnce = onlyOnce;
- }
- @Override
- public void eval(MockDirectoryWrapper dir) throws IOException {
- if (doFail) {
- StackTraceElement[] trace = new Exception().getStackTrace();
- for (int i = 0; i < trace.length; i++) {
- if ("flush".equals(trace[i].getMethodName()) && "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
- if (onlyOnce)
- doFail = false;
- throw new IOException("now failing on purpose");
- }
- }
- }
- }
- }
-
- // LUCENE-1130: test IOException in writeSegment
- public void testIOExceptionDuringWriteSegment() throws IOException {
- _testSingleThreadFailure(new FailOnlyInWriteSegment(false));
- }
-
- // LUCENE-1130: test IOException in writeSegment
- public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
- _testSingleThreadFailure(new FailOnlyInWriteSegment(true));
- }
-
- // LUCENE-1130: test IOException in writeSegment, with threads
- public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
- _testMultipleThreadsFailure(new FailOnlyInWriteSegment(false));
- }
-
- // LUCENE-1130: test IOException in writeSegment, with threads
- public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
- _testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
- }
// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
@@ -2628,45 +1282,6 @@ public class TestIndexWriter extends Luc
dir.close();
}
- // LUCENE-1044: Simulate checksum error in segments_N
- public void testSegmentsChecksumError() throws IOException {
- Directory dir = newDirectory();
-
- IndexWriter writer = null;
-
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-
- // add 100 documents
- for (int i = 0; i < 100; i++) {
- addDoc(writer);
- }
-
- // close
- writer.close();
-
- long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
- assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
- final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
- IndexInput in = dir.openInput(segmentsFileName);
- IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
- out.copyBytes(in, in.length()-1);
- byte b = in.readByte();
- out.writeByte((byte) (1+b));
- out.close();
- in.close();
-
- IndexReader reader = null;
- try {
- reader = IndexReader.open(dir, true);
- } catch (IOException e) {
- e.printStackTrace(System.out);
- fail("segmentInfos failed to retry fallback to correct segments_N file");
- }
- reader.close();
- dir.close();
- }
-
// LUCENE-1044: test writer.commit() when ac=false
public void testForceCommit() throws IOException {
Directory dir = newDirectory();
@@ -2704,189 +1319,6 @@ public class TestIndexWriter extends Luc
dir.close();
}
- // Throws IOException during MockDirectoryWrapper.sync
- private static class FailOnlyInSync extends MockDirectoryWrapper.Failure {
- boolean didFail;
- @Override
- public void eval(MockDirectoryWrapper dir) throws IOException {
- if (doFail) {
- StackTraceElement[] trace = new Exception().getStackTrace();
- for (int i = 0; i < trace.length; i++) {
- if (doFail && "org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "sync".equals(trace[i].getMethodName())) {
- didFail = true;
- throw new IOException("now failing on purpose during sync");
- }
- }
- }
- }
- }
-
- // LUCENE-1044: test exception during sync
- public void testExceptionDuringSync() throws IOException {
- MockDirectoryWrapper dir = newDirectory();
- FailOnlyInSync failure = new FailOnlyInSync();
- dir.failOn(failure);
-
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
- failure.setDoFail();
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
-
- for (int i = 0; i < 23; i++) {
- addDoc(writer);
- if ((i-1)%2 == 0) {
- try {
- writer.commit();
- } catch (IOException ioe) {
- // expected
- }
- }
- }
-
- ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
- assertTrue(failure.didFail);
- failure.clearDoFail();
- writer.close();
-
- IndexReader reader = IndexReader.open(dir, true);
- assertEquals(23, reader.numDocs());
- reader.close();
- dir.close();
- }
-
- // LUCENE-1168
- public void testTermVectorCorruption() throws IOException {
-
- Directory dir = newDirectory();
- for(int iter=0;iter<2;iter++) {
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setRAMBufferSizeMB(
- IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
- new SerialMergeScheduler()).setMergePolicy(
- new LogDocMergePolicy()));
-
- Document document = new Document();
-
- Field storedField = newField("stored", "stored", Field.Store.YES,
- Field.Index.NO);
- document.add(storedField);
- writer.addDocument(document);
- writer.addDocument(document);
-
- document = new Document();
- document.add(storedField);
- Field termVectorField = newField("termVector", "termVector",
- Field.Store.NO, Field.Index.NOT_ANALYZED,
- Field.TermVector.WITH_POSITIONS_OFFSETS);
-
- document.add(termVectorField);
- writer.addDocument(document);
- writer.optimize();
- writer.close();
-
- IndexReader reader = IndexReader.open(dir, true);
- for(int i=0;i<reader.numDocs();i++) {
- reader.document(i);
- reader.getTermFreqVectors(i);
- }
- reader.close();
-
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
- new MockAnalyzer()).setMaxBufferedDocs(2)
- .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
- .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
- new LogDocMergePolicy()));
-
- Directory[] indexDirs = {new MockDirectoryWrapper(new RAMDirectory(dir))};
- writer.addIndexes(indexDirs);
- writer.optimize();
- writer.close();
- }
- dir.close();
- }
-
- // LUCENE-1168
- public void testTermVectorCorruption2() throws IOException {
- Directory dir = newDirectory();
- for(int iter=0;iter<2;iter++) {
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setRAMBufferSizeMB(
- IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
- new SerialMergeScheduler()).setMergePolicy(
- new LogDocMergePolicy()));
-
- Document document = new Document();
-
- Field storedField = newField("stored", "stored", Field.Store.YES,
- Field.Index.NO);
- document.add(storedField);
- writer.addDocument(document);
- writer.addDocument(document);
-
- document = new Document();
- document.add(storedField);
- Field termVectorField = newField("termVector", "termVector",
- Field.Store.NO, Field.Index.NOT_ANALYZED,
- Field.TermVector.WITH_POSITIONS_OFFSETS);
- document.add(termVectorField);
- writer.addDocument(document);
- writer.optimize();
- writer.close();
-
- IndexReader reader = IndexReader.open(dir, true);
- assertTrue(reader.getTermFreqVectors(0)==null);
- assertTrue(reader.getTermFreqVectors(1)==null);
- assertTrue(reader.getTermFreqVectors(2)!=null);
- reader.close();
- }
- dir.close();
- }
-
- // LUCENE-1168
- public void testTermVectorCorruption3() throws IOException {
- Directory dir = newDirectory();
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setRAMBufferSizeMB(
- IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler(
- new SerialMergeScheduler()).setMergePolicy(new LogDocMergePolicy()));
-
- Document document = new Document();
-
- document = new Document();
- Field storedField = newField("stored", "stored", Field.Store.YES,
- Field.Index.NO);
- document.add(storedField);
- Field termVectorField = newField("termVector", "termVector",
- Field.Store.NO, Field.Index.NOT_ANALYZED,
- Field.TermVector.WITH_POSITIONS_OFFSETS);
- document.add(termVectorField);
- for(int i=0;i<10;i++)
- writer.addDocument(document);
- writer.close();
-
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
- new MockAnalyzer()).setMaxBufferedDocs(2)
- .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
- .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(
- new LogDocMergePolicy()));
- for(int i=0;i<6;i++)
- writer.addDocument(document);
-
- writer.optimize();
- writer.close();
-
- IndexReader reader = IndexReader.open(dir, true);
- for(int i=0;i<10;i++) {
- reader.getTermFreqVectors(i);
- reader.document(i);
- }
- reader.close();
- dir.close();
- }
-
// LUCENE-1084: test user-specified field length
public void testUserSpecifiedMaxFieldLength() throws IOException {
Directory dir = newDirectory();
@@ -3042,131 +1474,22 @@ public class TestIndexWriter extends Luc
dir.close();
}
- // LUCENE-1179
- public void testEmptyFieldName() throws IOException {
- Directory dir = newDirectory();
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
- writer.addDocument(doc);
- writer.close();
- dir.close();
- }
-
- // LUCENE-1198
- private static final class MockIndexWriter extends IndexWriter {
-
- public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
- super(dir, conf);
- }
-
- boolean doFail;
-
- @Override
- boolean testPoint(String name) {
- if (doFail && name.equals("DocumentsWriter.ThreadState.init start"))
- throw new RuntimeException("intentionally failing");
- return true;
- }
- }
-
-
- public void testExceptionDocumentsWriterInit() throws IOException {
- Directory dir = newDirectory();
- MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- doc.add(newField("field", "a field", Field.Store.YES,
- Field.Index.ANALYZED));
- w.addDocument(doc);
- w.doFail = true;
- try {
- w.addDocument(doc);
- fail("did not hit exception");
- } catch (RuntimeException re) {
- // expected
- }
- w.close();
- _TestUtil.checkIndex(dir);
- dir.close();
- }
-
- // LUCENE-1208
- public void testExceptionJustBeforeFlush() throws IOException {
+ // LUCENE-1179
+ public void testEmptyFieldName() throws IOException {
Directory dir = newDirectory();
- MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
- doc.add(newField("field", "a field", Field.Store.YES,
- Field.Index.ANALYZED));
- w.addDocument(doc);
-
- Analyzer analyzer = new Analyzer() {
- @Override
- public TokenStream tokenStream(String fieldName, Reader reader) {
- return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
- }
- };
-
- Document crashDoc = new Document();
- crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
- Field.Index.ANALYZED));
- try {
- w.addDocument(crashDoc, analyzer);
- fail("did not hit expected exception");
- } catch (IOException ioe) {
- // expected
- }
- w.addDocument(doc);
- w.close();
+ doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+ writer.addDocument(doc);
+ writer.close();
dir.close();
- }
-
- private static final class MockIndexWriter2 extends IndexWriter {
-
- public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
- super(dir, conf);
- }
-
- boolean doFail;
- boolean failed;
-
- @Override
- boolean testPoint(String name) {
- if (doFail && name.equals("startMergeInit")) {
- failed = true;
- throw new RuntimeException("intentionally failing");
- }
- return true;
- }
}
-
- // LUCENE-1210
- public void testExceptionOnMergeInit() throws IOException {
- Directory dir = newDirectory();
- IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
- ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
- MockIndexWriter2 w = new MockIndexWriter2(dir, conf);
- w.doFail = true;
- Document doc = new Document();
- doc.add(newField("field", "a field", Field.Store.YES,
- Field.Index.ANALYZED));
- for(int i=0;i<10;i++)
- try {
- w.addDocument(doc);
- } catch (RuntimeException re) {
- break;
- }
- ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
- assertTrue(w.failed);
- w.close();
- dir.close();
- }
- private static final class MockIndexWriter3 extends IndexWriter {
+ private static final class MockIndexWriter extends IndexWriter {
- public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
+ public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
super(dir, conf);
}
@@ -3188,7 +1511,7 @@ public class TestIndexWriter extends Luc
// LUCENE-1222
public void testDoBeforeAfterFlush() throws IOException {
Directory dir = newDirectory();
- MockIndexWriter3 w = new MockIndexWriter3(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
@@ -3212,57 +1535,7 @@ public class TestIndexWriter extends Luc
dir.close();
}
- private static class FailOnlyInCommit extends MockDirectoryWrapper.Failure {
-
- boolean fail1, fail2;
-
- @Override
- public void eval(MockDirectoryWrapper dir) throws IOException {
- StackTraceElement[] trace = new Exception().getStackTrace();
- boolean isCommit = false;
- boolean isDelete = false;
- for (int i = 0; i < trace.length; i++) {
- if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && "prepareCommit".equals(trace[i].getMethodName()))
- isCommit = true;
- if ("org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName()))
- isDelete = true;
- }
-
- if (isCommit) {
- if (!isDelete) {
- fail1 = true;
- throw new RuntimeException("now fail first");
- } else {
- fail2 = true;
- throw new IOException("now fail during delete");
- }
- }
- }
- }
-
- // LUCENE-1214
- public void testExceptionsDuringCommit() throws Throwable {
- MockDirectoryWrapper dir = newDirectory();
- FailOnlyInCommit failure = new FailOnlyInCommit();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- doc.add(newField("field", "a field", Field.Store.YES,
- Field.Index.ANALYZED));
- w.addDocument(doc);
- dir.failOn(failure);
- try {
- w.close();
- fail();
- } catch (IOException ioe) {
- fail("expected only RuntimeException");
- } catch (RuntimeException re) {
- // Expected
- }
- assertTrue(failure.fail1 && failure.fail2);
- w.rollback();
- dir.close();
- }
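The final rollback() above is the part that matters to callers: once close() fails partway through its commit, the writer still holds the write lock until it is rolled back. A minimal sketch of that recovery pattern, under the same assumptions as the test:

    try {
      writer.close();     // flushes and commits; may fail partway through
    } catch (RuntimeException e) {
      writer.rollback();  // discards the failed commit and releases the write lock
    }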
final String[] utf8Data = new String[] {
// unpaired low surrogate
@@ -3555,418 +1828,106 @@ public class TestIndexWriter extends Luc
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
- writer.commit();
-
- IndexReader reader3 = reader.reopen();
- assertEquals(0, reader.numDocs());
- assertEquals(0, reader2.numDocs());
- assertEquals(23, reader3.numDocs());
- reader.close();
- reader2.close();
-
- for (int i = 0; i < 17; i++)
- addDoc(writer);
-
- assertEquals(23, reader3.numDocs());
- reader3.close();
- reader = IndexReader.open(dir, true);
- assertEquals(23, reader.numDocs());
- reader.close();
-
- writer.prepareCommit();
-
- reader = IndexReader.open(dir, true);
- assertEquals(23, reader.numDocs());
- reader.close();
-
- writer.commit();
- reader = IndexReader.open(dir, true);
- assertEquals(40, reader.numDocs());
- reader.close();
- writer.close();
- dir.close();
- }
-
- // LUCENE-1274: test writer.prepareCommit()
- public void testPrepareCommitRollback() throws IOException {
- MockDirectoryWrapper dir = newDirectory();
- dir.setPreventDoubleWrite(false);
-
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
- writer.commit();
-
- for (int i = 0; i < 23; i++)
- addDoc(writer);
-
- IndexReader reader = IndexReader.open(dir, true);
- assertEquals(0, reader.numDocs());
-
- writer.prepareCommit();
-
- IndexReader reader2 = IndexReader.open(dir, true);
- assertEquals(0, reader2.numDocs());
-
- writer.rollback();
-
- IndexReader reader3 = reader.reopen();
- assertEquals(0, reader.numDocs());
- assertEquals(0, reader2.numDocs());
- assertEquals(0, reader3.numDocs());
- reader.close();
- reader2.close();
-
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- for (int i = 0; i < 17; i++)
- addDoc(writer);
-
- assertEquals(0, reader3.numDocs());
- reader3.close();
- reader = IndexReader.open(dir, true);
- assertEquals(0, reader.numDocs());
- reader.close();
-
- writer.prepareCommit();
-
- reader = IndexReader.open(dir, true);
- assertEquals(0, reader.numDocs());
- reader.close();
-
- writer.commit();
- reader = IndexReader.open(dir, true);
- assertEquals(17, reader.numDocs());
- reader.close();
- writer.close();
- dir.close();
- }
-
- // LUCENE-1274
- public void testPrepareCommitNoChanges() throws IOException {
- Directory dir = newDirectory();
-
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- writer.prepareCommit();
- writer.commit();
- writer.close();
-
- IndexReader reader = IndexReader.open(dir, true);
- assertEquals(0, reader.numDocs());
- reader.close();
- dir.close();
- }
-
- private abstract class RunAddIndexesThreads {
-
- Directory dir, dir2;
- final static int NUM_INIT_DOCS = 17;
- IndexWriter writer2;
- final List<Throwable> failures = new ArrayList<Throwable>();
- volatile boolean didClose;
- final IndexReader[] readers;
- final int NUM_COPY;
- final static int NUM_THREADS = 5;
- final Thread[] threads = new Thread[NUM_THREADS];
-
- public RunAddIndexesThreads(int numCopy) throws Throwable {
- NUM_COPY = numCopy;
- dir = new MockDirectoryWrapper(new RAMDirectory());
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2));
- for (int i = 0; i < NUM_INIT_DOCS; i++)
- addDoc(writer);
- writer.close();
-
- dir2 = new MockDirectoryWrapper(new RAMDirectory());
- writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- writer2.commit();
-
- readers = new IndexReader[NUM_COPY];
- for(int i=0;i<NUM_COPY;i++)
- readers[i] = IndexReader.open(dir, true);
- }
-
- void launchThreads(final int numIter) {
-
- for(int i=0;i<NUM_THREADS;i++) {
- threads[i] = new Thread() {
- @Override
- public void run() {
- try {
-
- final Directory[] dirs = new Directory[NUM_COPY];
- for(int k=0;k<NUM_COPY;k++)
- dirs[k] = new MockDirectoryWrapper(new RAMDirectory(dir));
-
- int j=0;
-
- while(true) {
- // System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
- if (numIter > 0 && j == numIter)
- break;
- doBody(j++, dirs);
- }
- } catch (Throwable t) {
- handle(t);
- }
- }
- };
- }
-
- for(int i=0;i<NUM_THREADS;i++)
- threads[i].start();
- }
-
- void joinThreads() throws Exception {
- for(int i=0;i<NUM_THREADS;i++)
- threads[i].join();
- }
-
- void close(boolean doWait) throws Throwable {
- didClose = true;
- writer2.close(doWait);
- }
-
- void closeDir() throws Throwable {
- for(int i=0;i<NUM_COPY;i++)
- readers[i].close();
- dir2.close();
- }
-
- abstract void doBody(int j, Directory[] dirs) throws Throwable;
- abstract void handle(Throwable t);
- }
-
- private class CommitAndAddIndexes extends RunAddIndexesThreads {
- public CommitAndAddIndexes(int numCopy) throws Throwable {
- super(numCopy);
- }
-
- @Override
- void handle(Throwable t) {
- t.printStackTrace(System.out);
- synchronized(failures) {
- failures.add(t);
- }
- }
-
- @Override
- void doBody(int j, Directory[] dirs) throws Throwable {
- switch(j%5) {
- case 0:
- writer2.addIndexes(dirs);
- writer2.optimize();
- break;
- case 1:
- writer2.addIndexes(dirs);
- break;
- case 2:
- writer2.addIndexes(readers);
- break;
- case 3:
- writer2.addIndexes(dirs);
- writer2.maybeMerge();
- break;
- case 4:
- writer2.commit();
- }
- }
- }
-
- // LUCENE-1335: test simultaneous addIndexes & commits
- // from multiple threads
- public void testAddIndexesWithThreads() throws Throwable {
-
- final int NUM_ITER = 15;
- final int NUM_COPY = 3;
- CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
- c.launchThreads(NUM_ITER);
-
- for(int i=0;i<100;i++)
- addDoc(c.writer2);
-
- c.joinThreads();
-
- int expectedNumDocs = 100+NUM_COPY*(4*NUM_ITER/5)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS;
- assertEquals(expectedNumDocs, c.writer2.numDocs());
-
- c.close(true);
-
- assertTrue(c.failures.size() == 0);
-
- _TestUtil.checkIndex(c.dir2);
-
- IndexReader reader = IndexReader.open(c.dir2, true);
- assertEquals(expectedNumDocs, reader.numDocs());
- reader.close();
-
- c.closeDir();
- }
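The expectedNumDocs arithmetic above works out as follows: each thread performs NUM_ITER = 15 iterations, of which 4 out of every 5 (cases 0 through 3) add indexes, so 4*15/5 = 12 adding iterations per thread; each such iteration adds NUM_COPY = 3 copies of the 17-doc source index, i.e. 51 docs. With NUM_THREADS = 5 that is 100 + 3*12*5*17 = 100 + 3060 = 3160 documents.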
-
- private class CommitAndAddIndexes2 extends CommitAndAddIndexes {
- public CommitAndAddIndexes2(int numCopy) throws Throwable {
- super(numCopy);
- }
-
- @Override
- void handle(Throwable t) {
- if (!(t instanceof AlreadyClosedException) && !(t instanceof NullPointerException)) {
- t.printStackTrace(System.out);
- synchronized(failures) {
- failures.add(t);
- }
- }
- }
- }
-
- // LUCENE-1335: test simultaneous addIndexes & close
- public void testAddIndexesWithClose() throws Throwable {
- final int NUM_COPY = 3;
- CommitAndAddIndexes2 c = new CommitAndAddIndexes2(NUM_COPY);
- //c.writer2.setInfoStream(System.out);
- c.launchThreads(-1);
-
- // Close w/o first stopping/joining the threads
- c.close(true);
- //c.writer2.close();
-
- c.joinThreads();
-
- _TestUtil.checkIndex(c.dir2);
-
- c.closeDir();
-
- assertTrue(c.failures.size() == 0);
- }
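handle() above deliberately swallows AlreadyClosedException (and the NullPointerException this era's writer can surface in the same race): once close() wins, concurrent addIndexes calls are expected to fail that way. A minimal sketch of the tolerated pattern:

    try {
      writer2.addIndexes(dirs);
    } catch (AlreadyClosedException ace) {
      // expected when another thread closed the writer first; not a failure
    }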
-
- private class CommitAndAddIndexes3 extends RunAddIndexesThreads {
- public CommitAndAddIndexes3(int numCopy) throws Throwable {
- super(numCopy);
- }
-
- @Override
- void doBody(int j, Directory[] dirs) throws Throwable {
- switch(j%5) {
- case 0:
- writer2.addIndexes(dirs);
- writer2.optimize();
- break;
- case 1:
- writer2.addIndexes(dirs);
- break;
- case 2:
- writer2.addIndexes(readers);
- break;
- case 3:
- writer2.optimize();
- break;
- case 4:
- writer2.commit();
- }
- }
-
- @Override
- void handle(Throwable t) {
- boolean report = true;
-
- if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
- report = !didClose;
- } else if (t instanceof IOException) {
- Throwable t2 = t.getCause();
- if (t2 instanceof MergePolicy.MergeAbortedException) {
- report = !didClose;
- }
- }
- if (report) {
- t.printStackTrace(System.out);
- synchronized(failures) {
- failures.add(t);
- }
- }
- }
- }
-
- // LUCENE-1335: test simultaneous addIndexes & close
- public void testAddIndexesWithCloseNoWait() throws Throwable {
-
- final int NUM_COPY = 50;
- CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
- c.launchThreads(-1);
+ writer.commit();
- Thread.sleep(500);
+ IndexReader reader3 = reader.reopen();
+ assertEquals(0, reader.numDocs());
+ assertEquals(0, reader2.numDocs());
+ assertEquals(23, reader3.numDocs());
+ reader.close();
+ reader2.close();
- // Close w/o first stopping/joining the threads
- c.close(false);
+ for (int i = 0; i < 17; i++)
+ addDoc(writer);
- c.joinThreads();
+ assertEquals(23, reader3.numDocs());
+ reader3.close();
+ reader = IndexReader.open(dir, true);
+ assertEquals(23, reader.numDocs());
+ reader.close();
- _TestUtil.checkIndex(c.dir2);
+ writer.prepareCommit();
- c.closeDir();
+ reader = IndexReader.open(dir, true);
+ assertEquals(23, reader.numDocs());
+ reader.close();
- assertTrue(c.failures.size() == 0);
+ writer.commit();
+ reader = IndexReader.open(dir, true);
+ assertEquals(40, reader.numDocs());
+ reader.close();
+ writer.close();
+ dir.close();
}
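The test above pins down the two-phase contract: prepareCommit() writes all files but publishes nothing, so both pre-existing and freshly opened readers keep seeing the previous commit until commit() finishes the job. A minimal sketch of that contract, assuming the same 3.x API (reader handling abbreviated):

    writer.prepareCommit();                  // phase 1: files written, nothing visible
    IndexReader r = IndexReader.open(dir, true);
    // r.numDocs() still reflects the previous commit here
    writer.commit();                         // phase 2: publish the prepared commit
    IndexReader r2 = r.reopen();             // now sees the new documents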
- // LUCENE-1335: test simultaneous addIndexes & rollback
- public void testAddIndexesWithRollback() throws Throwable {
+ // LUCENE-1274: test writer.prepareCommit()
+ public void testPrepareCommitRollback() throws IOException {
+ MockDirectoryWrapper dir = newDirectory();
+ dir.setPreventDoubleWrite(false);
+
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+ ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
+ writer.commit();
- final int NUM_COPY = 50;
- CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
- c.launchThreads(-1);
+ for (int i = 0; i < 23; i++)
+ addDoc(writer);
- Thread.sleep(500);
+ IndexReader reader = IndexReader.open(dir, true);
+ assertEquals(0, reader.numDocs());
- // Close w/o first stopping/joining the threads
- c.didClose = true;
- c.writer2.rollback();
+ writer.prepareCommit();
- c.joinThreads();
+ IndexReader reader2 = IndexReader.open(dir, true);
+ assertEquals(0, reader2.numDocs());
- _TestUtil.checkIndex(c.dir2);
+ writer.rollback();
- c.closeDir();
+ IndexReader reader3 = reader.reopen();
+ assertEquals(0, reader.numDocs());
+ assertEquals(0, reader2.numDocs());
+ assertEquals(0, reader3.numDocs());
+ reader.close();
+ reader2.close();
- assertTrue(c.failures.size() == 0);
- }
+ writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ for (int i = 0; i < 17; i++)
+ addDoc(writer);
- // LUCENE-1347
- private static final class MockIndexWriter4 extends IndexWriter {
+ assertEquals(0, reader3.numDocs());
+ reader3.close();
+ reader = IndexReader.open(dir, true);
+ assertEquals(0, reader.numDocs());
+ reader.close();
- public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
- super(dir, conf);
- }
+ writer.prepareCommit();
- boolean doFail;
+ reader = IndexReader.open(dir, true);
+ assertEquals(0, reader.numDocs());
+ reader.close();
- @Override
- boolean testPoint(String name) {
- if (doFail && name.equals("rollback before checkpoint"))
- throw new RuntimeException("intentionally failing");
- return true;
- }
+ writer.commit();
+ reader = IndexReader.open(dir, true);
+ assertEquals(17, reader.numDocs());
+ reader.close();
+ writer.close();
+ dir.close();
}
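testPrepareCommitRollback is the mirror image: rollback() after prepareCommit() must discard the prepared files, so no reader, reopened or freshly opened, ever observes the 23 documents. A minimal sketch:

    writer.prepareCommit();  // prepared but unpublished
    writer.rollback();       // discards the prepared commit and closes the writer
    // IndexReader.open(dir, true).numDocs() is unchanged; a new IndexWriter
    // must be created before indexing can continue.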
-
- // LUCENE-1347
- public void testRollbackExceptionHang() throws Throwable {
+ // LUCENE-1274
+ public void testPrepareCommitNoChanges() throws IOException {
Directory dir = newDirectory();
- MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- addDoc(w);
- w.doFail = true;
- try {
- w.rollback();
- fail("did not hit intentional RuntimeException");
- } catch (RuntimeException re) {
- // expected
- }
-
- w.doFail = false;
- w.rollback();
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.prepareCommit();
+ writer.commit();
+ writer.close();
+
+ IndexReader reader = IndexReader.open(dir, true);
+ assertEquals(0, reader.numDocs());
+ reader.close();
dir.close();
}
-
// LUCENE-1219
public void testBinaryFieldOffsetLength() throws IOException {
Directory dir = newDirectory();
@@ -4035,280 +1996,6 @@ public class TestIndexWriter extends Luc
dir.close();
}
- public void testOptimizeExceptions() throws IOException {
- Directory startDir = newDirectory();
- IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
- ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
- IndexWriter w = new IndexWriter(startDir, conf);
- for(int i=0;i<27;i++)
- addDoc(w);
- w.close();
-
- for(int i=0;i<200;i++) {
- MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory(startDir));
- conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergeScheduler(new ConcurrentMergeScheduler());
- ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
- w = new IndexWriter(dir, conf);
- dir.setRandomIOExceptionRate(0.5, 100);
- try {
- w.optimize();
- } catch (IOException ioe) {
- if (ioe.getCause() == null)
- fail("optimize threw IOException without root cause");
- }
- dir.setRandomIOExceptionRate(0, 0);
- w.close();
- dir.close();
- }
- startDir.close();
- }
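The loop above is a fault-injection harness: MockDirectoryWrapper fails roughly half of all I/O calls, and the assertion checks that optimize() never surfaces such a failure as an IOException without preserving the underlying error as its cause. A minimal sketch of the knobs used (the meaning of the second setRandomIOExceptionRate argument is taken from the call in the test, not documented here):

    dir.setRandomIOExceptionRate(0.5, 100);  // inject failures on ~50% of I/O ops
    try {
      w.optimize();
    } catch (IOException ioe) {
      assertNotNull("root cause must be preserved", ioe.getCause());
    }
    dir.setRandomIOExceptionRate(0, 0);      // switch injection back off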
-
- // LUCENE-1429
- public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
-
- final List<Throwable> thrown = new ArrayList<Throwable>();
- final Directory dir = newDirectory();
- final IndexWriter writer = new IndexWriter(dir,
- newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())) {
- @Override
- public void message(final String message) {
- if (message.startsWith("now flush at close") && 0 == thrown.size()) {
- thrown.add(null);
- throw new OutOfMemoryError("fake OOME at " + message);
- }
- }
- };
-
- // need to set an info stream so message is called
- writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
- try {
- writer.close();
- fail("OutOfMemoryError expected");
- }
- catch (final OutOfMemoryError expected) {}
-
- // throws IllegalStateEx w/o bug fix
- writer.close();
- dir.close();
- }
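The anonymous message() override above only fires because an infoStream is set; without one the fake OOME is never thrown. A minimal sketch of enabling the 3.x infoStream hook:

    // infoStream must be non-null for IndexWriter.message() to be invoked;
    // the test uses a throwaway stream, System.out is the usual debugging choice.
    writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));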
-
- // LUCENE-1442
- public void testDoubleOffsetCounting() throws Exception {
- Directory dir = newDirectory();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- Field f = newField("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
- doc.add(f);
- doc.add(f);
- Field f2 = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
- doc.add(f2);
- doc.add(f);
- w.addDocument(doc);
- w.close();
-
- IndexReader r = IndexReader.open(dir, true);
- TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
-
- // Token "" occurred once
- assertEquals(1, termOffsets.length);
- assertEquals(8, termOffsets[0].getStartOffset());
- assertEquals(8, termOffsets[0].getEndOffset());
-
- // Token "abcd" occurred three times
- termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(1);
- assertEquals(3, termOffsets.length);
- assertEquals(0, termOffsets[0].getStartOffset());
- assertEquals(4, termOffsets[0].getEndOffset());
- assertEquals(4, termOffsets[1].getStartOffset());
- assertEquals(8, termOffsets[1].getEndOffset());
- assertEquals(8, termOffsets[2].getStartOffset());
- assertEquals(12, termOffsets[2].getEndOffset());
- r.close();
- dir.close();
- }
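The asserted offsets follow directly from how un-tokenized field instances stack: each NOT_ANALYZED value advances the running offset by exactly its own length, with no gap in between. Value 1 "abcd" spans 0-4, value 2 "abcd" spans 4-8, the empty value sits at 8-8, and value 3 "abcd" spans 8-12, which is precisely what the assertions check.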
-
- // LUCENE-1442
- public void testDoubleOffsetCounting2() throws Exception {
- Directory dir = newDirectory();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- Field f = newField("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
- doc.add(f);
- doc.add(f);
- w.addDocument(doc);
- w.close();
-
- IndexReader r = IndexReader.open(dir, true);
- TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
- assertEquals(2, termOffsets.length);
- assertEquals(0, termOffsets[0].getStartOffset());
- assertEquals(4, termOffsets[0].getEndOffset());
- assertEquals(5, termOffsets[1].getStartOffset());
- assertEquals(9, termOffsets[1].getEndOffset());
- r.close();
- dir.close();
- }
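Contrast this with the ANALYZED case just above: the second occurrence starts at 5 rather than 4, because, per the assertions themselves, consecutive tokenized field instances are separated by an offset gap of one, giving spans 0-4 and 5-9 instead of the back-to-back 0-4 and 4-8 of the NOT_ANALYZED test.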
-
- // LUCENE-1448
- public void testEndOffsetPositionCharAnalyzer() throws Exception {
- Directory dir = newDirectory();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- Field f = newField("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
- doc.add(f);
- doc.add(f);
- w.addDocument(doc);
- w.close();
-
- IndexReader r = IndexReader.open(dir, true);
- TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
- assertEquals(2, termOffsets.length);
- assertEquals(0, termOffsets[0].getStartOffset());
- assertEquals(4, termOffsets[0].getEndOffset());
- assertEquals(8, termOffsets[1].getStartOffset());
- assertEquals(12, termOffsets[1].getEndOffset());
- r.close();
- dir.close();
- }
-
- // LUCENE-1448
- public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
- Directory dir = newDirectory();
- Analyzer analyzer = new MockAnalyzer();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
- Document doc = new Document();
- TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd ")));
- Field f = new Field("field", stream, Field.TermVector.WITH_POSITIONS_OFFSETS);
- doc.add(f);
- doc.add(f);
- w.addDocument(doc);
- w.close();
-
- IndexReader r = IndexReader.open(dir, true);
- TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
- assertEquals(2, termOffsets.length);
- assertEquals(0, termOffsets[0].getStartOffset());
- assertEquals(4, termOffsets[0].getEndOffset());
- assertEquals(8, termOffsets[1].getStartOffset());
- assertEquals(12, termOffsets[1].getEndOffset());
- r.close();
- dir.close();
- }
-
- // LUCENE-1448
- public void testEndOffsetPositionStopFilter() throws Exception {
- Directory dir = newDirectory();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
- Document doc = new Document();
- Field f = newField("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
- doc.add(f);
- doc.add(f);
- w.addDocument(doc);
- w.close();
-
- IndexReader r = IndexReader.open(dir, true);
[... 170 lines stripped ...]